Dataset schema (one column per line: name, type, observed min .. max; ⌀ marks nullable columns):

hexsha: stringlengths 40 .. 40
size: int64 4 .. 1.02M
ext: stringclasses, 8 values
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths 4 .. 209
max_stars_repo_name: stringlengths 5 .. 121
max_stars_repo_head_hexsha: stringlengths 40 .. 40
max_stars_repo_licenses: listlengths 1 .. 10
max_stars_count: int64 1 .. 191k ⌀
max_stars_repo_stars_event_min_datetime: stringlengths 24 .. 24 ⌀
max_stars_repo_stars_event_max_datetime: stringlengths 24 .. 24 ⌀
max_issues_repo_path: stringlengths 4 .. 209
max_issues_repo_name: stringlengths 5 .. 121
max_issues_repo_head_hexsha: stringlengths 40 .. 40
max_issues_repo_licenses: listlengths 1 .. 10
max_issues_count: int64 1 .. 67k ⌀
max_issues_repo_issues_event_min_datetime: stringlengths 24 .. 24 ⌀
max_issues_repo_issues_event_max_datetime: stringlengths 24 .. 24 ⌀
max_forks_repo_path: stringlengths 4 .. 209
max_forks_repo_name: stringlengths 5 .. 121
max_forks_repo_head_hexsha: stringlengths 40 .. 40
max_forks_repo_licenses: listlengths 1 .. 10
max_forks_count: int64 1 .. 105k ⌀
max_forks_repo_forks_event_min_datetime: stringlengths 24 .. 24 ⌀
max_forks_repo_forks_event_max_datetime: stringlengths 24 .. 24 ⌀
content: stringlengths 4 .. 1.02M
avg_line_length: float64 1.07 .. 66.1k
max_line_length: int64 4 .. 266k
alphanum_fraction: float64 0.01 .. 1
hexsha: a0b9a966bb3d4dc9cb061e53454eaa10aa3dca96
size: 1,311 | ext: py | lang: Python
max_stars:  path q51-75/q63.py | repo ljn1999/LeetCode-problems @ a1ec54c45ef66530bdb58efae6ef2bb059626484 | licenses ["MIT"] | count 4 | events 2020-07-10T06:56:36.000Z .. 2020-07-21T02:39:05.000Z
max_issues: path q51-75/q63.py | repo ljn1999/LeetCode-problems @ a1ec54c45ef66530bdb58efae6ef2bb059626484 | licenses ["MIT"] | count null | events null
max_forks:  path q51-75/q63.py | repo ljn1999/LeetCode-problems @ a1ec54c45ef66530bdb58efae6ef2bb059626484 | licenses ["MIT"] | count null | events null
content:
# 2020.08.30
# maybe won't do leetcode tomorrow
# Problem Statement:
# https://leetcode.com/problems/unique-paths-ii/
from typing import List


class Solution:
def uniquePathsWithObstacles(self, obstacleGrid: List[List[int]]) -> int:
m = len(obstacleGrid[0])
n = len(obstacleGrid)
        # corner case: a single row or column has exactly one path,
        # and any obstacle on it blocks that path entirely
        if m == 1 or n == 1:
            for i in range(0, n):
                for j in range(0, m):
                    if obstacleGrid[i][j] == 1:
                        return 0
            return 1
# initialize the answer
answer = [[0 for i in range(m)] for j in range(n)]
# do it line by line
for i in range(0, n):
for j in range(0, m):
                # an obstacle cell is unreachable, so it contributes 0 paths
if obstacleGrid[i][j] == 1:
answer[i][j] = 0
                # edge cells (top row / left column) depend on a single neighbour
elif (i == 0 and j == 0) or \
(i == 0 and j>=1 and answer[0][j-1] != 0) or \
(j == 0 and i>=1 and answer[i-1][j] != 0):
answer[i][j] = 1
else:
answer[i][j] = answer[i][j-1] + answer[i-1][j]
return answer[n-1][m-1]
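
# A minimal usage sketch (not part of the original file): LeetCode's first
# example grid, a 3x3 with one central obstacle, has exactly 2 unique paths.
if __name__ == "__main__":
    grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(Solution().uniquePathsWithObstacles(grid))  # expected: 2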
avg_line_length: 35.432432 | max_line_length: 77 | alphanum_fraction: 0.450038
hexsha: 217234f670f5aa8c3734ea8867c9fb2cac426d3a
size: 2,768 | ext: py | lang: Python
max_stars:  path Authors' code/Few_shot_learning/utils/helpers.py | repo onicolini/zero-shot_knowledge_transfer @ 9dd6d08eadb8243881f0fb8e9ac2d5653dd25229 | licenses ["MIT"] | count null | events null
max_issues: path Authors' code/Few_shot_learning/utils/helpers.py | repo onicolini/zero-shot_knowledge_transfer @ 9dd6d08eadb8243881f0fb8e9ac2d5653dd25229 | licenses ["MIT"] | count null | events null
max_forks:  path Authors' code/Few_shot_learning/utils/helpers.py | repo onicolini/zero-shot_knowledge_transfer @ 9dd6d08eadb8243881f0fb8e9ac2d5653dd25229 | licenses ["MIT"] | count 1 | events 2019-10-27T15:44:17.000Z .. 2019-10-27T15:44:17.000Z
content:
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import argparse
import os
class AggregateScalar(object):
"""
    Computes and stores a weighted running average of a stream of values.
Mostly used to average losses and accuracies.
"""
def __init__(self):
self.reset()
def reset(self):
        self.count = 0.0001  # small epsilon so avg() never divides by zero
self.sum = 0
def update(self, val, w=1):
"""
:param val: new running value
        :param w: weight, e.g. batch size
"""
        self.sum += w * val
self.count += w
def avg(self):
return self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
    if len(target.shape) > 1:  # one-hot targets: convert to class indices
        target = torch.argmax(target, dim=1)
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
        # reshape (not view): slices of the transposed tensor may be non-contiguous
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(1 / batch_size))
return res
def plot_image(input):
    # single-channel images are tiled to three channels for display
    if input.shape[0] == 1:
        input = torch.cat([input, input, input])
npimg = np.transpose(input.numpy(), (1, 2, 0))
fig = plt.figure(figsize=(20, 20))
ax = fig.add_subplot(111)
ax.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.imshow(npimg, cmap='gray')
plt.show()
return fig
def plot_images(batch, ncol=None):
n_images = batch.shape[0]
ncol = ncol if ncol is not None else int(np.ceil(np.sqrt(n_images)))
grid_img = torchvision.utils.make_grid(batch,
nrow=ncol,
padding=2,
pad_value=0)
# black and white images converted to color
plot_image(grid_img)
def str2bool(v):
    # code from: https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
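
# Typical use (illustrative, not from the original repo): wire str2bool into
# argparse so "--flag yes/no/true/false" parses to a real bool:
#     parser = argparse.ArgumentParser()
#     parser.add_argument("--flag", type=str2bool, default=False)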
def set_torch_seeds(seed):
import random
import numpy as np
import torch
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def delete_files_from_name(folder_path, file_name, type='contains'):
assert type in ['is', 'contains']
for f in os.listdir(folder_path):
if (type=='is' and file_name==f) or (type=='contains' and file_name in f):
os.remove(os.path.join(folder_path, f))
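
# A small usage sketch (illustrative, not from the original repo): average a
# stream of weighted batch losses and compute top-1 accuracy on toy logits.
if __name__ == "__main__":
    meter = AggregateScalar()
    meter.update(0.9, w=32)
    meter.update(0.7, w=64)
    print(meter.avg())  # weighted mean of the two batch losses, ~0.767

    logits = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
    labels = torch.tensor([1, 0])
    print(accuracy(logits, labels, topk=(1,)))  # [tensor(1.)] -- both correct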
avg_line_length: 28.536082 | max_line_length: 100 | alphanum_fraction: 0.597182
hexsha: 358e92d6333eb6ce95756612c6bae9f525cb55c2
size: 503 | ext: py | lang: Python
max_stars:  path blog/migrations/0014_alter_post_type_food.py | repo GhasemMatoo/Mysite_Restaurants @ f44e0b0374016850cc47f212db0d5693d6de2ee6 | licenses ["MIT"] | count null | events null
max_issues: path blog/migrations/0014_alter_post_type_food.py | repo GhasemMatoo/Mysite_Restaurants @ f44e0b0374016850cc47f212db0d5693d6de2ee6 | licenses ["MIT"] | count null | events null
max_forks:  path blog/migrations/0014_alter_post_type_food.py | repo GhasemMatoo/Mysite_Restaurants @ f44e0b0374016850cc47f212db0d5693d6de2ee6 | licenses ["MIT"] | count null | events null
content:
# Generated by Django 3.2.10 on 2021-12-11 15:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0013_alter_post_type_food'),
]
operations = [
migrations.AlterField(
model_name='post',
name='Type_Food',
            field=models.CharField(choices=[('drinks', 'drinks'), ('lunch', 'lunch'), ('dinner', 'dinner')], default='drinks', max_length=50, verbose_name='نوع غذا'),  # verbose_name is Persian for "food type"
),
]
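
# A hedged sketch (assumed, not taken from the repo) of the model field this
# migration leaves on blog.Post; all arguments are copied from the AlterField
# operation above:
#
#     class Post(models.Model):
#         Type_Food = models.CharField(
#             max_length=50,
#             choices=[('drinks', 'drinks'), ('lunch', 'lunch'), ('dinner', 'dinner')],
#             default='drinks',
#             verbose_name='نوع غذا',  # "food type"
#         )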
avg_line_length: 26.473684 | max_line_length: 166 | alphanum_fraction: 0.604374
hexsha: 6eae337e9a8d9ec657bbe082ad616278c5707f89
size: 1,370 | ext: py | lang: Python
max_stars:  path custom_components/marshall/__init__.py | repo patrickbusch/ha-marshall @ 5d3e7de779650a0b0f3c8ff6ad3041299da53b93 | licenses ["MIT"] | count null | events null
max_issues: path custom_components/marshall/__init__.py | repo patrickbusch/ha-marshall @ 5d3e7de779650a0b0f3c8ff6ad3041299da53b93 | licenses ["MIT"] | count null | events null
max_forks:  path custom_components/marshall/__init__.py | repo patrickbusch/ha-marshall @ 5d3e7de779650a0b0f3c8ff6ad3041299da53b93 | licenses ["MIT"] | count null | events null
content:
"""The Marshall integration."""
import logging
# import voluptuous as vol
# import homeassistant.helpers.config_validation as cv
# from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .const import DOMAIN
from .marshallDevice import get_device
_LOGGER = logging.getLogger(__name__)
CONF_ADDRESSES = 'addresses'
PLATFORMS = ["binary_sensor", "sensor", "media_player"]
def setup(hass: HomeAssistant, config: dict):
"""Set up the Marshall component."""
hass.data.setdefault(DOMAIN, {})
_LOGGER.debug("Setting up Marshall integration")
addresses_cfg = config[DOMAIN][CONF_ADDRESSES]
_LOGGER.debug(f"addresses: {addresses_cfg}")
    for address in addresses_cfg:
        device = get_device(hass, address['address'])
        # NOTE: this overwrites hass.data[DOMAIN] on every iteration, so only
        # the device for the last configured address is retained
        hass.data[DOMAIN] = {
            'device': device
        }
for component in PLATFORMS:
hass.helpers.discovery.load_platform(component, DOMAIN, {}, config)
_LOGGER.debug("Finished setting up Marshall integration")
return True
# async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
# """Set up Marshall from a config entry."""
# # TODO: implement
# return False
# async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
# """Unload a config entry."""
# # TODO: implement
# return False
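
# A hedged example of the configuration.yaml this setup() expects (structure
# inferred from the code above; the top-level key is whatever DOMAIN resolves
# to, assumed here to be "marshall", and the address values are placeholders):
#
#     marshall:
#       addresses:
#         - address: 192.168.1.50
#         - address: 192.168.1.51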
avg_line_length: 26.346154 | max_line_length: 75 | alphanum_fraction: 0.708029
hexsha: 1f234db2966c5ca2e402ee0f965bd2fe8f811f22
size: 68,911 | ext: py | lang: Python
max_stars:  path versioneer.py | repo loopinf/alphalens-reloaded @ a43eef10dc1812c42ba9d2c7ec9b7fe33c6e5dd6 | licenses ["Apache-2.0"] | count 1 | events 2022-02-16T06:53:08.000Z .. 2022-02-16T06:53:08.000Z
max_issues: path versioneer.py | repo kalam360/alphalens-reloaded @ 7a667af09ba44b808075310924f32c98e019668b | licenses ["Apache-2.0"] | count 1 | events 2022-03-03T10:05:52.000Z .. 2022-03-03T10:05:52.000Z
max_forks:  path versioneer.py | repo kalam360/alphalens-reloaded @ 7a667af09ba44b808075310924f32c98e019668b | licenses ["Apache-2.0"] | count null | events null
content:
# Version: 0.18
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere in your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
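
A minimal `[versioneer]` section might look like this (the keys match what
`get_config_from_root()` below reads; the values are illustrative):

    [versioneer]
    VCS = git
    style = pep440
    versionfile_source = src/myproject/_version.py
    versionfile_build = myproject/_version.py
    tag_prefix =
    parentdir_prefix = myproject-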
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
commit date in ISO 8601 format. This will be None if the date is not
available.
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
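
For example, a checkout that is two commits past the 0.11 tag and has local
modifications might yield (illustrative values):

    {"version": "0.11+2.g1076c97.dirty",
     "full-revisionid": "1076c978a8d3cfc70f408fe5974aa6c092c949ac",
     "dirty": True, "error": None, "date": "2022-02-16T06:53:08+0000"}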
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See [details.md](details.md) in the Versioneer
source tree for descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
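
The output format follows the `cmd_version` command defined later in this
file; for the dirty checkout described above it would look roughly like:

    Version: 0.11+2.g1076c97.dirty
     full-revisionid: 1076c978a8d3cfc70f408fe5974aa6c092c949ac
     dirty: True
     date: 2022-02-16T06:53:08+0000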
## Known Limitations
Some situations are known to cause problems for Versioneer. This details the
most significant ones. More can be found on Github
[issues page](https://github.com/warner/python-versioneer/issues).
### Subprojects
Versioneer has limited support for source trees in which `setup.py` is not in
the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
two common reasons why `setup.py` might not be in the root:
* Source trees which contain multiple subprojects, such as
[Buildbot](https://github.com/buildbot/buildbot), which contains both
"master" and "slave" subprojects, each with their own `setup.py`,
`setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
distributions (and upload multiple independently-installable tarballs).
* Source trees whose main purpose is to contain a C library, but which also
provide bindings to Python (and perhaps other languages) in subdirectories.
Versioneer will look for `.git` in parent directories, and most operations
should get the right version string. However `pip` and `setuptools` have bugs
and implementation details which frequently cause `pip install .` from a
subproject directory to fail to find a correct version string (so it usually
defaults to `0+unknown`).
`pip install --editable .` should work correctly. `setup.py install` might
work too.
Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
some later version.
[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
this issue. The discussion in
[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
issue from the Versioneer side in more detail.
[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
pip to let Versioneer work correctly.
Versioneer-0.16 and earlier only looked for a `.git` directory next to the
`setup.cfg`, so subprojects were completely unsupported with those releases.
### Editable installs with setuptools <= 18.5
`setup.py develop` and `pip install --editable .` allow you to install a
project into a virtualenv once, then continue editing the source code (and
test) without re-installing after every change.
"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
convenient way to specify executable scripts that should be installed along
with the python package.
These both work as expected when using modern setuptools. When using
setuptools-18.5 or earlier, however, certain operations will cause
`pkg_resources.DistributionNotFound` errors when running the entrypoint
script, which must be resolved by re-installing the package. This happens
when the install happens with one version, then the egg_info data is
regenerated while a different version is checked out. Many setup.py commands
cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
a different virtualenv), so this can be surprising.
[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
this one, but upgrading to a newer version of setuptools should probably
resolve it.
### Unicode version strings
While Versioneer works (and is continually tested) with both Python 2 and
Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
Newer releases probably generate unicode version strings on py2. It's not
clear that this is wrong, but it may be surprising for applications when they
write these strings to a network connection or include them in bytes-oriented
APIs like cryptographic checksums.
[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
this question.
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
## Future Directions
This tool is designed to be easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_root():
"""Get the project root directory.
We require that all commands are run from the project root, i.e. the
directory that contains setup.py, setup.cfg, and versioneer.py .
"""
root = os.path.realpath(os.path.abspath(os.getcwd()))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
# allow 'python path/to/setup.py COMMAND'
root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
setup_py = os.path.join(root, "setup.py")
versioneer_py = os.path.join(root, "versioneer.py")
if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
err = (
"Versioneer was unable to run the project root directory. "
"Versioneer requires setup.py to be executed from "
"its immediate directory (like 'python setup.py COMMAND'), "
"or in a way that lets it use sys.argv[0] to find the root "
"(like 'python path/to/setup.py COMMAND')."
)
raise VersioneerBadRootError(err)
try:
# Certain runtime workflows (setup.py install/develop in a setuptools
# tree) execute all dependencies in a single python process, so
# "versioneer" may be imported multiple times, and python's shared
# module-import table will cache the first one. So we can't use
# os.path.dirname(__file__), as that will find whichever
# versioneer.py was first imported, even in later projects.
me = os.path.realpath(os.path.abspath(__file__))
me_dir = os.path.normcase(os.path.splitext(me)[0])
vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
if me_dir != vsr_dir:
print(
"Warning: build in %s is using versioneer.py from %s"
% (os.path.dirname(me), versioneer_py)
)
except NameError:
pass
return root
def get_config_from_root(root):
"""Read the project setup.cfg file to determine Versioneer config."""
# This might raise EnvironmentError (if setup.cfg is missing), or
# configparser.NoSectionError (if it lacks a [versioneer] section), or
# configparser.NoOptionError (if it lacks "VCS="). See the docstring at
# the top of versioneer.py for instructions on writing your setup.cfg .
setup_cfg = os.path.join(root, "setup.cfg")
parser = configparser.SafeConfigParser()
with open(setup_cfg, "r") as f:
parser.readfp(f)
VCS = parser.get("versioneer", "VCS") # mandatory
def get(parser, name):
if parser.has_option("versioneer", name):
return parser.get("versioneer", name)
return None
cfg = VersioneerConfig()
cfg.VCS = VCS
cfg.style = get(parser, "style") or ""
cfg.versionfile_source = get(parser, "versionfile_source")
cfg.versionfile_build = get(parser, "versionfile_build")
cfg.tag_prefix = get(parser, "tag_prefix")
if cfg.tag_prefix in ("''", '""'):
cfg.tag_prefix = ""
cfg.parentdir_prefix = get(parser, "parentdir_prefix")
cfg.verbose = get(parser, "verbose")
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(
commands, args, cwd=None, verbose=False, hide_stderr=False, env=None
):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
LONG_VERSION_PY[
"git"
] = '''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
print("stdout was %%s" %% stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %%s but none started with prefix %%s" %%
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(refs - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %%s not under git control" %% root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyway.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%%s'" %% style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r"\d", r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(
GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True
)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = (
"unable to parse git-describe output: '%s'" % describe_out
)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(
GITS, ["rev-list", "HEAD", "--count"], cwd=root
)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
"""Git-specific installation logic for Versioneer.
For Git, this means creating/changing .gitattributes to mark _version.py
for export-subst keyword substitution.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
files = [manifest_in, versionfile_source]
if ipy:
files.append(ipy)
try:
me = __file__
if me.endswith(".pyc") or me.endswith(".pyo"):
me = os.path.splitext(me)[0] + ".py"
versioneer_file = os.path.relpath(me)
except NameError:
versioneer_file = "versioneer.py"
files.append(versioneer_file)
present = False
try:
f = open(".gitattributes", "r")
for line in f.readlines():
if line.strip().startswith(versionfile_source):
if "export-subst" in line.strip().split()[1:]:
present = True
f.close()
except EnvironmentError:
pass
if not present:
f = open(".gitattributes", "a+")
f.write("%s export-subst\n" % versionfile_source)
f.close()
files.append(".gitattributes")
run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.18) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
"""Try to determine the version from _version.py if present."""
try:
with open(filename) as f:
contents = f.read()
except EnvironmentError:
raise NotThisMethod("unable to read _version.py")
mo = re.search(
r"version_json = '''\n(.*)''' # END VERSION_JSON",
contents,
re.M | re.S,
)
if not mo:
mo = re.search(
r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
contents,
re.M | re.S,
)
if not mo:
raise NotThisMethod("no version_json in _version.py")
return json.loads(mo.group(1))
def write_to_version_file(filename, versions):
"""Write the given version number to the given _version.py file."""
os.unlink(filename)
contents = json.dumps(
versions, sort_keys=True, indent=1, separators=(",", ": ")
)
with open(filename, "w") as f:
f.write(SHORT_VERSION_PY % contents)
print("set %s to '%s'" % (filename, versions["version"]))
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
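
# Hedged illustration (comment only, not from upstream): given pieces such as
#   {"closest-tag": "0.11", "distance": 2, "short": "1076c97", "dirty": True}
# render_pep440() returns "0.11+2.g1076c97.dirty", matching the module
# docstring's example.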
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyway.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
class VersioneerBadRootError(Exception):
"""The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
"""Get the project version from whatever source is available.
Returns dict with two keys: 'version' and 'full'.
"""
if "versioneer" in sys.modules:
# see the discussion in cmdclass.py:get_cmdclass()
del sys.modules["versioneer"]
root = get_root()
cfg = get_config_from_root(root)
assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
handlers = HANDLERS.get(cfg.VCS)
assert handlers, "unrecognized VCS '%s'" % cfg.VCS
verbose = verbose or cfg.verbose
assert (
cfg.versionfile_source is not None
), "please set versioneer.versionfile_source"
assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
versionfile_abs = os.path.join(root, cfg.versionfile_source)
# extract version from first of: _version.py, VCS command (e.g. 'git
# describe'), parentdir. This is meant to work for developers using a
# source checkout, for users of a tarball created by 'setup.py sdist',
# and for users of a tarball/zipball created by 'git archive' or github's
# download-from-tag feature or the equivalent in other VCSes.
get_keywords_f = handlers.get("get_keywords")
from_keywords_f = handlers.get("keywords")
if get_keywords_f and from_keywords_f:
try:
keywords = get_keywords_f(versionfile_abs)
ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
if verbose:
print("got version from expanded keyword %s" % ver)
return ver
except NotThisMethod:
pass
try:
ver = versions_from_file(versionfile_abs)
if verbose:
print("got version from file %s %s" % (versionfile_abs, ver))
return ver
except NotThisMethod:
pass
from_vcs_f = handlers.get("pieces_from_vcs")
if from_vcs_f:
try:
pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
ver = render(pieces, cfg.style)
if verbose:
print("got version from VCS %s" % ver)
return ver
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
if verbose:
print("got version from parentdir %s" % ver)
return ver
except NotThisMethod:
pass
if verbose:
print("unable to compute version")
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
def get_version():
"""Get the short version string for this project."""
return get_versions()["version"]
def get_cmdclass():
"""Get the custom setuptools/distutils subclasses used by Versioneer."""
if "versioneer" in sys.modules:
del sys.modules["versioneer"]
# this fixes the "python setup.py develop" case (also 'install' and
# 'easy_install .'), in which subdependencies of the main project are
# built (using setup.py bdist_egg) in the same python process. Assume
# a main project A and a dependency B, which use different versions
# of Versioneer. A's setup.py imports A's Versioneer, leaving it in
# sys.modules by the time B's setup.py is executed, causing B to run
# with the wrong versioneer. Setuptools wraps the sub-dep builds in a
    # sandbox that restores sys.modules to its pre-build state, so the
# parent is protected against the child's "import versioneer". By
# removing ourselves from sys.modules here, before the child build
# happens, we protect the child from the parent's versioneer too.
# Also see https://github.com/warner/python-versioneer/issues/52
cmds = {}
# we add "version" to both distutils and setuptools
from distutils.core import Command
class cmd_version(Command):
description = "report generated version string"
user_options = []
boolean_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
vers = get_versions(verbose=True)
print("Version: %s" % vers["version"])
print(" full-revisionid: %s" % vers.get("full-revisionid"))
print(" dirty: %s" % vers.get("dirty"))
print(" date: %s" % vers.get("date"))
if vers["error"]:
print(" error: %s" % vers["error"])
cmds["version"] = cmd_version
# we override "build_py" in both distutils and setuptools
#
# most invocation pathways end up running build_py:
# distutils/build -> build_py
# distutils/install -> distutils/build ->..
# setuptools/bdist_wheel -> distutils/install ->..
# setuptools/bdist_egg -> distutils/install_lib -> build_py
# setuptools/install -> bdist_egg ->..
# setuptools/develop -> ?
# pip install:
# copies source tree to a tempdir before running egg_info/etc
# if .git isn't copied too, 'git describe' will fail
# then does setup.py bdist_wheel, or sometimes setup.py install
# setup.py egg_info -> ?
# we override different "build_py" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.build_py import build_py as _build_py
else:
from distutils.command.build_py import build_py as _build_py
class cmd_build_py(_build_py):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
_build_py.run(self)
# now locate _version.py in the new build/ directory and replace
# it with an updated value
if cfg.versionfile_build:
target_versionfile = os.path.join(
self.build_lib, cfg.versionfile_build
)
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
cmds["build_py"] = cmd_build_py
if "cx_Freeze" in sys.modules: # cx_freeze enabled?
from cx_Freeze.dist import build_exe as _build_exe
# nczeczulin reports that py2exe won't like the pep440-style string
# as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
# setup(console=[{
# "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
# "product_version": versioneer.get_version(),
# ...
class cmd_build_exe(_build_exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_build_exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["build_exe"] = cmd_build_exe
del cmds["build_py"]
if "py2exe" in sys.modules: # py2exe enabled?
try:
from py2exe.distutils_buildexe import py2exe as _py2exe # py3
except ImportError:
from py2exe.build_exe import py2exe as _py2exe # py2
class cmd_py2exe(_py2exe):
def run(self):
root = get_root()
cfg = get_config_from_root(root)
versions = get_versions()
target_versionfile = cfg.versionfile_source
print("UPDATING %s" % target_versionfile)
write_to_version_file(target_versionfile, versions)
_py2exe.run(self)
os.unlink(target_versionfile)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
cmds["py2exe"] = cmd_py2exe
# we override different "sdist" commands for both environments
if "setuptools" in sys.modules:
from setuptools.command.sdist import sdist as _sdist
else:
from distutils.command.sdist import sdist as _sdist
class cmd_sdist(_sdist):
def run(self):
versions = get_versions()
self._versioneer_generated_versions = versions
# unless we update this, the command will keep using the old
# version
self.distribution.metadata.version = versions["version"]
return _sdist.run(self)
def make_release_tree(self, base_dir, files):
root = get_root()
cfg = get_config_from_root(root)
_sdist.make_release_tree(self, base_dir, files)
# now locate _version.py in the new base_dir directory
# (remembering that it may be a hardlink) and replace it with an
# updated value
target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
print("UPDATING %s" % target_versionfile)
write_to_version_file(
target_versionfile, self._versioneer_generated_versions
)
cmds["sdist"] = cmd_sdist
return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
"""Main VCS-independent setup function for installing Versioneer."""
root = get_root()
try:
cfg = get_config_from_root(root)
except (
EnvironmentError,
configparser.NoSectionError,
configparser.NoOptionError,
) as e:
if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
print(
"Adding sample versioneer config to setup.cfg", file=sys.stderr
)
with open(os.path.join(root, "setup.cfg"), "a") as f:
f.write(SAMPLE_CONFIG)
print(CONFIG_ERROR, file=sys.stderr)
return 1
print(" creating %s" % cfg.versionfile_source)
with open(cfg.versionfile_source, "w") as f:
LONG = LONG_VERSION_PY[cfg.VCS]
f.write(
LONG
% {
"DOLLAR": "$",
"STYLE": cfg.style,
"TAG_PREFIX": cfg.tag_prefix,
"PARENTDIR_PREFIX": cfg.parentdir_prefix,
"VERSIONFILE_SOURCE": cfg.versionfile_source,
}
)
ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py")
if os.path.exists(ipy):
try:
with open(ipy, "r") as f:
old = f.read()
except EnvironmentError:
old = ""
if INIT_PY_SNIPPET not in old:
print(" appending to %s" % ipy)
with open(ipy, "a") as f:
f.write(INIT_PY_SNIPPET)
else:
print(" %s unmodified" % ipy)
else:
print(" %s doesn't exist, ok" % ipy)
ipy = None
# Make sure both the top-level "versioneer.py" and versionfile_source
# (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
# they'll be copied into source distributions. Pip won't be able to
# install the package without this.
manifest_in = os.path.join(root, "MANIFEST.in")
simple_includes = set()
try:
with open(manifest_in, "r") as f:
for line in f:
if line.startswith("include "):
for include in line.split()[1:]:
simple_includes.add(include)
except EnvironmentError:
pass
# That doesn't cover everything MANIFEST.in can do
# (http://docs.python.org/2/distutils/sourcedist.html#commands), so
# it might give some false negatives. Appending redundant 'include'
# lines is safe, though.
if "versioneer.py" not in simple_includes:
print(" appending 'versioneer.py' to MANIFEST.in")
with open(manifest_in, "a") as f:
f.write("include versioneer.py\n")
else:
print(" 'versioneer.py' already in MANIFEST.in")
if cfg.versionfile_source not in simple_includes:
print(
" appending versionfile_source ('%s') to MANIFEST.in"
% cfg.versionfile_source
)
with open(manifest_in, "a") as f:
f.write("include %s\n" % cfg.versionfile_source)
else:
print(" versionfile_source already in MANIFEST.in")
# Make VCS-specific changes. For git, this means creating/changing
# .gitattributes to mark _version.py for export-subst keyword
# substitution.
do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
return 0
def scan_setup_py():
"""Validate the contents of setup.py against Versioneer's expectations."""
found = set()
setters = False
errors = 0
with open("setup.py", "r") as f:
for line in f.readlines():
if "import versioneer" in line:
found.add("import")
if "versioneer.get_cmdclass()" in line:
found.add("cmdclass")
if "versioneer.get_version()" in line:
found.add("get_version")
if "versioneer.VCS" in line:
setters = True
if "versioneer.versionfile_source" in line:
setters = True
if len(found) != 3:
print("")
print("Your setup.py appears to be missing some important items")
print("(but I might be wrong). Please make sure it has something")
print("roughly like the following:")
print("")
print(" import versioneer")
print(" setup( version=versioneer.get_version(),")
print(" cmdclass=versioneer.get_cmdclass(), ...)")
print("")
errors += 1
if setters:
print("You should remove lines like 'versioneer.VCS = ' and")
print("'versioneer.versionfile_source = ' . This configuration")
print("now lives in setup.cfg, and should be removed from setup.py")
print("")
errors += 1
return errors
if __name__ == "__main__":
cmd = sys.argv[1]
if cmd == "setup":
errors = do_setup()
errors += scan_setup_py()
if errors:
sys.exit(1)
| 36.211771
| 79
| 0.62096
|
cab0b2f4c7befcc68761924440da1a02b48be84f
| 29,279
|
py
|
Python
|
openstackclient/volume/v2/volume.py
|
mydevice/python-openstackclient
|
4891bb38208fdcd1a2ae60e47b056841e14fbdf7
|
[
"Apache-2.0"
] | 262
|
2015-01-29T20:10:49.000Z
|
2022-03-23T01:59:23.000Z
|
openstackclient/volume/v2/volume.py
|
mydevice/python-openstackclient
|
4891bb38208fdcd1a2ae60e47b056841e14fbdf7
|
[
"Apache-2.0"
] | 5
|
2015-01-21T02:37:35.000Z
|
2021-11-23T02:26:00.000Z
|
openstackclient/volume/v2/volume.py
|
mydevice/python-openstackclient
|
4891bb38208fdcd1a2ae60e47b056841e14fbdf7
|
[
"Apache-2.0"
] | 194
|
2015-01-08T07:39:27.000Z
|
2022-03-30T13:51:23.000Z
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Volume V2 Volume action implementations"""
import argparse
import copy
import functools
import logging
from cliff import columns as cliff_columns
from osc_lib.cli import format_columns
from osc_lib.cli import parseractions
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from openstackclient.i18n import _
from openstackclient.identity import common as identity_common
LOG = logging.getLogger(__name__)
class AttachmentsColumn(cliff_columns.FormattableColumn):
"""Formattable column for attachments column.
Unlike the parent FormattableColumn class, the initializer of the
class takes server_cache as the second argument.
    osc_lib.utils.get_item_properties instantiates cliff FormattableColumn
    objects with a single parameter "column value", so you need to pass
    a partially initialized class like
    ``functools.partial(AttachmentsColumn, server_cache=server_cache)``.
"""
def __init__(self, value, server_cache=None):
super(AttachmentsColumn, self).__init__(value)
self._server_cache = server_cache or {}
def human_readable(self):
"""Return a formatted string of a volume's attached instances
:rtype: a string of formatted instances
"""
msg = ''
for attachment in self._value:
server = attachment['server_id']
if server in self._server_cache.keys():
server = self._server_cache[server].name
device = attachment['device']
msg += 'Attached to %s on %s ' % (server, device)
return msg
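# A minimal wiring sketch for the docstring above (this exact pattern is
# used later in ListVolume.take_action; "volume" and "server_cache" here
# are hypothetical values):
#
#   column = functools.partial(AttachmentsColumn, server_cache=server_cache)
#   utils.get_item_properties(volume, ['Attachments'],
#                             formatters={'Attachments': column})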
def _check_size_arg(args):
"""Check whether --size option is required or not.
    Require the size parameter only when neither a snapshot nor a
    source volume is specified.
"""
    if args.snapshot is None and args.source is None and args.size is None:
msg = _("--size is a required option if snapshot "
"or source volume is not specified.")
raise exceptions.CommandError(msg)
class CreateVolume(command.ShowOne):
_description = _("Create new volume")
def get_parser(self, prog_name):
parser = super(CreateVolume, self).get_parser(prog_name)
parser.add_argument(
"name",
metavar="<name>",
help=_("Volume name"),
)
parser.add_argument(
"--size",
metavar="<size>",
type=int,
help=_("Volume size in GB (Required unless --snapshot or "
"--source is specified)"),
)
parser.add_argument(
"--type",
metavar="<volume-type>",
help=_("Set the type of volume"),
)
source_group = parser.add_mutually_exclusive_group()
source_group.add_argument(
"--image",
metavar="<image>",
help=_("Use <image> as source of volume (name or ID)"),
)
source_group.add_argument(
"--snapshot",
metavar="<snapshot>",
help=_("Use <snapshot> as source of volume (name or ID)"),
)
source_group.add_argument(
"--source",
metavar="<volume>",
help=_("Volume to clone (name or ID)"),
)
source_group.add_argument(
"--source-replicated",
metavar="<replicated-volume>",
help=argparse.SUPPRESS,
)
parser.add_argument(
"--description",
metavar="<description>",
help=_("Volume description"),
)
parser.add_argument(
"--availability-zone",
metavar="<availability-zone>",
help=_("Create volume in <availability-zone>"),
)
parser.add_argument(
"--consistency-group",
metavar="consistency-group>",
help=_("Consistency group where the new volume belongs to"),
)
parser.add_argument(
"--property",
metavar="<key=value>",
action=parseractions.KeyValueAction,
help=_("Set a property to this volume "
"(repeat option to set multiple properties)"),
)
parser.add_argument(
"--hint",
metavar="<key=value>",
action=parseractions.KeyValueAction,
help=_("Arbitrary scheduler hint key-value pairs to help boot "
"an instance (repeat option to set multiple hints)"),
)
bootable_group = parser.add_mutually_exclusive_group()
bootable_group.add_argument(
"--bootable",
action="store_true",
help=_("Mark volume as bootable")
)
bootable_group.add_argument(
"--non-bootable",
action="store_true",
help=_("Mark volume as non-bootable (default)")
)
readonly_group = parser.add_mutually_exclusive_group()
readonly_group.add_argument(
"--read-only",
action="store_true",
help=_("Set volume to read-only access mode")
)
readonly_group.add_argument(
"--read-write",
action="store_true",
help=_("Set volume to read-write access mode (default)")
)
return parser
def take_action(self, parsed_args):
_check_size_arg(parsed_args)
volume_client = self.app.client_manager.volume
image_client = self.app.client_manager.image
source_volume = None
if parsed_args.source:
source_volume = utils.find_resource(
volume_client.volumes,
parsed_args.source).id
consistency_group = None
if parsed_args.consistency_group:
consistency_group = utils.find_resource(
volume_client.consistencygroups,
parsed_args.consistency_group).id
image = None
if parsed_args.image:
image = image_client.find_image(parsed_args.image,
ignore_missing=False).id
size = parsed_args.size
snapshot = None
if parsed_args.snapshot:
snapshot_obj = utils.find_resource(
volume_client.volume_snapshots,
parsed_args.snapshot)
snapshot = snapshot_obj.id
# Cinder requires a value for size when creating a volume
# even if creating from a snapshot. Cinder will create the
# volume with at least the same size as the snapshot anyway,
# so since we have the object here, just override the size
# value if it's either not given or is smaller than the
# snapshot size.
size = max(size or 0, snapshot_obj.size)
volume = volume_client.volumes.create(
size=size,
snapshot_id=snapshot,
name=parsed_args.name,
description=parsed_args.description,
volume_type=parsed_args.type,
availability_zone=parsed_args.availability_zone,
metadata=parsed_args.property,
imageRef=image,
source_volid=source_volume,
consistencygroup_id=consistency_group,
scheduler_hints=parsed_args.hint,
)
if parsed_args.bootable or parsed_args.non_bootable:
try:
volume_client.volumes.set_bootable(
volume.id, parsed_args.bootable)
except Exception as e:
LOG.error(_("Failed to set volume bootable property: %s"), e)
if parsed_args.read_only or parsed_args.read_write:
try:
volume_client.volumes.update_readonly_flag(
volume.id,
parsed_args.read_only)
except Exception as e:
LOG.error(_("Failed to set volume read-only access "
"mode flag: %s"), e)
# Remove key links from being displayed
volume._info.update(
{
'properties':
format_columns.DictColumn(volume._info.pop('metadata')),
'type': volume._info.pop('volume_type')
}
)
volume._info.pop("links", None)
return zip(*sorted(volume._info.items()))
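# Example CLI invocations handled by CreateVolume (a sketch; option names
# are taken from the parser above, volume/snapshot names are placeholders):
#
#   openstack volume create --size 10 my-volume
#   openstack volume create --snapshot snap1 --description "restored" vol2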
class DeleteVolume(command.Command):
_description = _("Delete volume(s)")
def get_parser(self, prog_name):
parser = super(DeleteVolume, self).get_parser(prog_name)
parser.add_argument(
"volumes",
metavar="<volume>",
nargs="+",
help=_("Volume(s) to delete (name or ID)")
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--force",
action="store_true",
help=_("Attempt forced removal of volume(s), regardless of state "
"(defaults to False)")
)
group.add_argument(
"--purge",
action="store_true",
help=_("Remove any snapshots along with volume(s) "
"(defaults to False)")
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
result = 0
for i in parsed_args.volumes:
try:
volume_obj = utils.find_resource(
volume_client.volumes, i)
if parsed_args.force:
volume_client.volumes.force_delete(volume_obj.id)
else:
volume_client.volumes.delete(volume_obj.id,
cascade=parsed_args.purge)
except Exception as e:
result += 1
LOG.error(_("Failed to delete volume with "
"name or ID '%(volume)s': %(e)s"),
{'volume': i, 'e': e})
if result > 0:
total = len(parsed_args.volumes)
msg = (_("%(result)s of %(total)s volumes failed "
"to delete.") % {'result': result, 'total': total})
raise exceptions.CommandError(msg)
class ListVolume(command.Lister):
_description = _("List volumes")
def get_parser(self, prog_name):
parser = super(ListVolume, self).get_parser(prog_name)
parser.add_argument(
'--project',
metavar='<project>',
help=_('Filter results by project (name or ID) (admin only)')
)
identity_common.add_project_domain_option_to_parser(parser)
parser.add_argument(
'--user',
metavar='<user>',
help=_('Filter results by user (name or ID) (admin only)')
)
identity_common.add_user_domain_option_to_parser(parser)
parser.add_argument(
'--name',
metavar='<name>',
help=_('Filter results by volume name'),
)
parser.add_argument(
'--status',
metavar='<status>',
help=_('Filter results by status'),
)
parser.add_argument(
'--all-projects',
action='store_true',
default=False,
help=_('Include all projects (admin only)'),
)
parser.add_argument(
'--long',
action='store_true',
default=False,
help=_('List additional fields in output'),
)
parser.add_argument(
'--marker',
metavar='<volume>',
help=_('The last volume ID of the previous page'),
)
parser.add_argument(
'--limit',
type=int,
action=parseractions.NonNegativeAction,
metavar='<num-volumes>',
help=_('Maximum number of volumes to display'),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
compute_client = self.app.client_manager.compute
identity_client = self.app.client_manager.identity
if parsed_args.long:
columns = [
'ID',
'Name',
'Status',
'Size',
'Volume Type',
'Bootable',
'Attachments',
'Metadata',
]
column_headers = copy.deepcopy(columns)
column_headers[4] = 'Type'
column_headers[6] = 'Attached to'
column_headers[7] = 'Properties'
else:
columns = [
'ID',
'Name',
'Status',
'Size',
'Attachments',
]
column_headers = copy.deepcopy(columns)
column_headers[4] = 'Attached to'
# Cache the server list
server_cache = {}
try:
for s in compute_client.servers.list():
server_cache[s.id] = s
except Exception:
# Just forget it if there's any trouble
pass
AttachmentsColumnWithCache = functools.partial(
AttachmentsColumn, server_cache=server_cache)
project_id = None
if parsed_args.project:
project_id = identity_common.find_project(
identity_client,
parsed_args.project,
parsed_args.project_domain).id
user_id = None
if parsed_args.user:
user_id = identity_common.find_user(identity_client,
parsed_args.user,
parsed_args.user_domain).id
# set value of 'all_tenants' when using project option
all_projects = bool(parsed_args.project) or parsed_args.all_projects
search_opts = {
'all_tenants': all_projects,
'project_id': project_id,
'user_id': user_id,
'name': parsed_args.name,
'status': parsed_args.status,
}
data = volume_client.volumes.list(
search_opts=search_opts,
marker=parsed_args.marker,
limit=parsed_args.limit,
)
column_headers = utils.backward_compat_col_lister(
column_headers, parsed_args.columns, {'Display Name': 'Name'})
return (column_headers,
(utils.get_item_properties(
s, columns,
formatters={'Metadata': format_columns.DictColumn,
'Attachments': AttachmentsColumnWithCache},
) for s in data))
class MigrateVolume(command.Command):
_description = _("Migrate volume to a new host")
def get_parser(self, prog_name):
parser = super(MigrateVolume, self).get_parser(prog_name)
parser.add_argument(
'volume',
metavar="<volume>",
help=_("Volume to migrate (name or ID)")
)
parser.add_argument(
'--host',
metavar="<host>",
required=True,
help=_("Destination host (takes the form: host@backend-name#pool)")
)
parser.add_argument(
'--force-host-copy',
action="store_true",
help=_("Enable generic host-based force-migration, "
"which bypasses driver optimizations")
)
parser.add_argument(
'--lock-volume',
action="store_true",
help=_("If specified, the volume state will be locked "
"and will not allow a migration to be aborted "
"(possibly by another operation)")
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
volume = utils.find_resource(volume_client.volumes, parsed_args.volume)
volume_client.volumes.migrate_volume(volume.id, parsed_args.host,
parsed_args.force_host_copy,
parsed_args.lock_volume,)
class SetVolume(command.Command):
_description = _("Set volume properties")
def get_parser(self, prog_name):
parser = super(SetVolume, self).get_parser(prog_name)
parser.add_argument(
'volume',
metavar='<volume>',
help=_('Volume to modify (name or ID)'),
)
parser.add_argument(
'--name',
metavar='<name>',
help=_('New volume name'),
)
parser.add_argument(
'--size',
metavar='<size>',
type=int,
help=_('Extend volume size in GB'),
)
parser.add_argument(
'--description',
metavar='<description>',
help=_('New volume description'),
)
parser.add_argument(
"--no-property",
dest="no_property",
action="store_true",
help=_("Remove all properties from <volume> "
"(specify both --no-property and --property to "
"remove the current properties before setting "
"new properties.)"),
)
parser.add_argument(
'--property',
metavar='<key=value>',
action=parseractions.KeyValueAction,
help=_('Set a property on this volume '
'(repeat option to set multiple properties)'),
)
parser.add_argument(
'--image-property',
metavar='<key=value>',
action=parseractions.KeyValueAction,
help=_('Set an image property on this volume '
'(repeat option to set multiple image properties)'),
)
parser.add_argument(
"--state",
metavar="<state>",
choices=['available', 'error', 'creating', 'deleting',
'in-use', 'attaching', 'detaching', 'error_deleting',
'maintenance'],
help=_('New volume state ("available", "error", "creating", '
'"deleting", "in-use", "attaching", "detaching", '
'"error_deleting" or "maintenance") (admin only) '
'(This option simply changes the state of the volume '
'in the database with no regard to actual status, '
'exercise caution when using)'),
)
attached_group = parser.add_mutually_exclusive_group()
attached_group.add_argument(
"--attached",
action="store_true",
help=_('Set volume attachment status to "attached" '
'(admin only) '
'(This option simply changes the state of the volume '
'in the database with no regard to actual status, '
'exercise caution when using)'),
)
attached_group.add_argument(
"--detached",
action="store_true",
help=_('Set volume attachment status to "detached" '
'(admin only) '
'(This option simply changes the state of the volume '
'in the database with no regard to actual status, '
'exercise caution when using)'),
)
parser.add_argument(
'--type',
metavar='<volume-type>',
help=_('New volume type (name or ID)'),
)
parser.add_argument(
'--retype-policy',
metavar='<retype-policy>',
choices=['never', 'on-demand'],
help=_('Migration policy while re-typing volume '
'("never" or "on-demand", default is "never" ) '
'(available only when --type option is specified)'),
)
bootable_group = parser.add_mutually_exclusive_group()
bootable_group.add_argument(
"--bootable",
action="store_true",
help=_("Mark volume as bootable")
)
bootable_group.add_argument(
"--non-bootable",
action="store_true",
help=_("Mark volume as non-bootable")
)
readonly_group = parser.add_mutually_exclusive_group()
readonly_group.add_argument(
"--read-only",
action="store_true",
help=_("Set volume to read-only access mode")
)
readonly_group.add_argument(
"--read-write",
action="store_true",
help=_("Set volume to read-write access mode")
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
volume = utils.find_resource(volume_client.volumes, parsed_args.volume)
result = 0
if parsed_args.size:
try:
if parsed_args.size <= volume.size:
msg = (_("New size must be greater than %s GB")
% volume.size)
raise exceptions.CommandError(msg)
if volume.status != 'available' and \
not volume_client.api_version.matches('3.42'):
msg = (_("Volume is in %s state, it must be available "
"before size can be extended") % volume.status)
raise exceptions.CommandError(msg)
volume_client.volumes.extend(volume.id, parsed_args.size)
except Exception as e:
LOG.error(_("Failed to set volume size: %s"), e)
result += 1
if parsed_args.no_property:
try:
volume_client.volumes.delete_metadata(
volume.id, volume.metadata.keys())
except Exception as e:
LOG.error(_("Failed to clean volume properties: %s"), e)
result += 1
if parsed_args.property:
try:
volume_client.volumes.set_metadata(
volume.id, parsed_args.property)
except Exception as e:
LOG.error(_("Failed to set volume property: %s"), e)
result += 1
if parsed_args.image_property:
try:
volume_client.volumes.set_image_metadata(
volume.id, parsed_args.image_property)
except Exception as e:
LOG.error(_("Failed to set image property: %s"), e)
result += 1
if parsed_args.state:
try:
volume_client.volumes.reset_state(
volume.id, parsed_args.state)
except Exception as e:
LOG.error(_("Failed to set volume state: %s"), e)
result += 1
if parsed_args.attached:
try:
volume_client.volumes.reset_state(
volume.id, state=None,
attach_status="attached")
except Exception as e:
LOG.error(_("Failed to set volume attach-status: %s"), e)
result += 1
if parsed_args.detached:
try:
volume_client.volumes.reset_state(
volume.id, state=None,
attach_status="detached")
except Exception as e:
LOG.error(_("Failed to set volume attach-status: %s"), e)
result += 1
if parsed_args.bootable or parsed_args.non_bootable:
try:
volume_client.volumes.set_bootable(
volume.id, parsed_args.bootable)
except Exception as e:
LOG.error(_("Failed to set volume bootable property: %s"), e)
result += 1
if parsed_args.read_only or parsed_args.read_write:
try:
volume_client.volumes.update_readonly_flag(
volume.id,
parsed_args.read_only)
except Exception as e:
LOG.error(_("Failed to set volume read-only access "
"mode flag: %s"), e)
result += 1
if parsed_args.type:
# get the migration policy
migration_policy = 'never'
if parsed_args.retype_policy:
migration_policy = parsed_args.retype_policy
try:
# find the volume type
volume_type = utils.find_resource(
volume_client.volume_types,
parsed_args.type)
# reset to the new volume type
volume_client.volumes.retype(
volume.id,
volume_type.id,
migration_policy)
except Exception as e:
LOG.error(_("Failed to set volume type: %s"), e)
result += 1
elif parsed_args.retype_policy:
# If the "--retype-policy" is specified without "--type"
LOG.warning(_("'--retype-policy' option will not work "
"without '--type' option"))
kwargs = {}
if parsed_args.name:
kwargs['display_name'] = parsed_args.name
if parsed_args.description:
kwargs['display_description'] = parsed_args.description
if kwargs:
try:
volume_client.volumes.update(volume.id, **kwargs)
except Exception as e:
LOG.error(_("Failed to update volume display name "
"or display description: %s"), e)
result += 1
if result > 0:
raise exceptions.CommandError(_("One or more of the "
"set operations failed"))
class ShowVolume(command.ShowOne):
_description = _("Display volume details")
def get_parser(self, prog_name):
parser = super(ShowVolume, self).get_parser(prog_name)
parser.add_argument(
'volume',
metavar="<volume>",
help=_("Volume to display (name or ID)")
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
volume = utils.find_resource(volume_client.volumes, parsed_args.volume)
# Special mapping for columns to make the output easier to read:
# 'metadata' --> 'properties'
# 'volume_type' --> 'type'
volume._info.update(
{
'properties':
format_columns.DictColumn(volume._info.pop('metadata')),
'type': volume._info.pop('volume_type'),
},
)
# Remove key links from being displayed
volume._info.pop("links", None)
return zip(*sorted(volume._info.items()))
class UnsetVolume(command.Command):
_description = _("Unset volume properties")
def get_parser(self, prog_name):
parser = super(UnsetVolume, self).get_parser(prog_name)
parser.add_argument(
'volume',
metavar='<volume>',
help=_('Volume to modify (name or ID)'),
)
parser.add_argument(
'--property',
metavar='<key>',
action='append',
help=_('Remove a property from volume '
'(repeat option to remove multiple properties)'),
)
parser.add_argument(
'--image-property',
metavar='<key>',
action='append',
help=_('Remove an image property from volume '
'(repeat option to remove multiple image properties)'),
)
return parser
def take_action(self, parsed_args):
volume_client = self.app.client_manager.volume
volume = utils.find_resource(
volume_client.volumes, parsed_args.volume)
result = 0
if parsed_args.property:
try:
volume_client.volumes.delete_metadata(
volume.id, parsed_args.property)
except Exception as e:
LOG.error(_("Failed to unset volume property: %s"), e)
result += 1
if parsed_args.image_property:
try:
volume_client.volumes.delete_image_metadata(
volume.id, parsed_args.image_property)
except Exception as e:
LOG.error(_("Failed to unset image property: %s"), e)
result += 1
if result > 0:
raise exceptions.CommandError(_("One or more of the "
"unset operations failed"))
| 36.236386
| 79
| 0.542915
|
3bbc7beff352602e5e17375eff054d731c93c4c2
| 8,576
|
py
|
Python
|
pyHarvest_build_151223/packages/pyHarvest_SMB.py
|
bl305/pyHarvest
|
d4c62d443ca657f9d31245c3c3f24c741cf2ae0b
|
[
"CC0-1.0"
] | null | null | null |
pyHarvest_build_151223/packages/pyHarvest_SMB.py
|
bl305/pyHarvest
|
d4c62d443ca657f9d31245c3c3f24c741cf2ae0b
|
[
"CC0-1.0"
] | null | null | null |
pyHarvest_build_151223/packages/pyHarvest_SMB.py
|
bl305/pyHarvest
|
d4c62d443ca657f9d31245c3c3f24c741cf2ae0b
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
####
#TODO
#skipdirs
#sizelimit
#permissions
####
from smb.SMBConnection import SMBConnection
import time
import os
import errno
import socket
#from datetime import datetime
#import re
#from StringIO import StringIO
def convert_to_unicode(string):
if not isinstance(string, unicode):
string = unicode(string, "utf-8")
return string
#######PARAMETERS
# SMBusername='user1'
# SMBusername=convert_to_unicode(SMBusername)
# #in main
# #SMBpassword='Asdf1234'
# #SMBpassword = raw_input('password:')
# #SMBpassword=convert_to_unicode(SMBpassword)
# #SMBremotesystem = 'FileServer'
# SMBremotesystem = 'REMOTESERVER'
# SMBremotesystem=convert_to_unicode(SMBremotesystem)
# SMBremoteip = '127.0.0.1'
# SMBremoteip=convert_to_unicode(SMBremoteip)
# #SMBlocalsystem='LOCALHOSTNAME'
# SMBlocalsystem = socket.gethostname()
# SMBlocalsystem=convert_to_unicode(SMBlocalsystem)
# #SMBdomain='WORKGROUP'
# SMBmydomain=convert_to_unicode(SMBdomain)
# SMBfileshare='TestShare'
# SMBfileshare=convert_to_unicode(SMBfileshare)
# SMBoutdir="./DataGathered/SMB/"
# SMBoutdir=convert_to_unicode(SMBoutdir)
# SMBtop="/"
# SMBtop=convert_to_unicode(SMBtop)
# SMBtop=os.path.normpath(SMBtop).replace('//','/')
# #if you want to find all shares and use them set this to 1, else the myfileshare will be used
# SMBgetshares=0
#
# #create empty directories?
# SMBcreateemptydirs=0
#
# #download files?
# SMBdownloadfiles=1
#
# #set verbosity
# #-1 - no messages
# #0 - tuple of results
# #1 - summary information
# #2 - basic information, positive info
# #3 - detailed information, positive, negative
# #4 - go crazy about it...
# SMBverbosity=1
def smb_connect(username,password,localsystem,remotesystem,remoteIP,domain,averbosity=0):
try:
if averbosity>=1:
print '[+] Analyzing system: ', remotesystem
# parameterize an smb connection with a system
conn = SMBConnection(username,
password,
localsystem,
remotesystem,
domain,
use_ntlm_v2=True,
#this kills it sign_options=SMBConnection.SIGN_WHEN_SUPPORTED,
is_direct_tcp=True)
# establish the actual connection
connected = conn.connect(remoteIP,445)
if connected:
if averbosity>=2:
print "[+] Connected"
else:
if averbosity>=3:
print "[-] Connection failed"
exit(1)
return conn
except Exception, e:
if averbosity>=3:
print('[-] Can not access the system')
if averbosity>=4:
print('[-] Can not access the system'),e
#exit(2)
#this doesn't work for some reason...
def smb_list_shares(conn,aremotesystem,averbosity=0):
if averbosity>=2:
print '[+] Shares on: ', aremotesystem
try:
Response = conn.listShares(timeout=30) # obtain a list of shares
#for i in range(len(Response)): # iterate through the list of shares
# print Response[i].name
return tuple(Response)
except Exception, e:
if averbosity>=3:
print('[-] Can not list shares')
if averbosity>=4:
print('[-] Can not list shares'),e
def smb_list_subfolders(conn,aremotesystem,ashare,apath,averbosity=0):
if averbosity>=2:
print "[+] Listing subfolders in share:\"%s\" folder: \"%s\""%(ashare,apath)
try:
Response = conn.listPath(ashare,apath,timeout=30)
for i in range(len(Response)):
filename=Response[i].filename
fileisdir=Response[i].isDirectory
filetype=""
filecreatetime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(Response[i].create_time))
fileattributes=Response[i].file_attributes
fileallocsize=Response[i].alloc_size
filesize=Response[i].file_size
isreadonly=Response[i].isReadOnly
lastaccesstime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(Response[i].last_access_time))
filelastattrchangetime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(Response[i].last_attr_change_time))
filelastwritetime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(Response[i].last_write_time))
if filename == ".":
pass
elif filename == "..":
pass
else:
newpath=os.path.join(apath,filename)
newpath=os.path.normpath(newpath).replace('//','/')
if averbosity>=4:
print newpath
if fileisdir:
#print "DIR: ",filename
filetype="d"
for i in smb_list_subfolders(conn,aremotesystem,ashare,newpath):
yield i
else:
filetype="f"
#print "FIL: ",filename
pass
newfile=(ashare,apath,filename,filetype,filesize,filecreatetime,fileattributes,fileallocsize,isreadonly,lastaccesstime,filelastattrchangetime,filelastwritetime)
yield newfile
except Exception,e:
if averbosity>=3:
print '[-] Can not access the resource %s%s'% (ashare,apath)
if averbosity>=4:
print '[-] Can not access the resource %s%s%s'% (ashare,apath,e)
pass
def smb_get_file(conn, remotesystem,filename,aoutdir,averbosity=0):
apath = os.path.join(aoutdir,remotesystem)
abspath=apath+filename
abspath = os.path.normpath(abspath).replace('//','/')
xpath, xfilename = os.path.split(filename)
localdir=apath+xpath
mycounter1=0
mycounter2=0
try:
if not os.path.exists(localdir):
os.makedirs(localdir)
mycounter1+=1
#print "[+] Creating directory %s" % apath
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
else:
#print "[-] Directory already exist %s" % apath
pass
try:
try:
if averbosity>=2:
print "[+] Creating local file:",abspath
temp_fh = open(abspath, 'wb')
except:
if averbosity>=3:
"[-] Could not create local file",abspath
try:
if averbosity>=3:
print "[+] Pulling %s"%(abspath)
file_attributes, filesize = conn.retrieveFile(remotesystem, filename, temp_fh, timeout=30)
mycounter2+=1
except:
if averbosity>=3:
"[-] Could not retrieve file %s %s",(remotesystem, filename)
temp_fh.close()
if averbosity>=1:
print "[+] Got file %s"%abspath
except Exception,e:
if averbosity>=3:
print "[-] Could not create file: %s"%abspath
if averbosity>=4:
print "[-] Could not create file: %s %s"%(abspath,e)
return mycounter1,mycounter2
def smb_prepare_dir(remotesystem,abspath):
mycounter1=0
try:
if not os.path.exists(abspath):
os.makedirs(abspath)
mycounter1+=1
#print "[+] Creating directory %s" % path
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
else:
#print "[-] Directory already exist %s" % path
pass
return mycounter1
def smb_main(ausername,apassword,alocalsystem,aremotesystem,aremoteip,adomain,afileshare,atop,aoutdir,adownloadfiles=1,acreateemptydirs=0,agetshares=0,averbosity=0):
if averbosity>=1:
#print summary:
print "Server :",aremotesystem
print "File Share :",afileshare
print "Skipped directories: TBD"
print "Skipped files : TBD"
print "Maximum depth : TBD"
print "Max filesize : TBD"
myconn=smb_connect(ausername,apassword,alocalsystem,aremotesystem,aremoteip,adomain)
#this will be populated based on the agetshares parameter
shares=()
if agetshares == 1:
allshares = smb_list_shares(myconn,aremotesystem)
for i1 in range(len(allshares)):
shares+=(allshares[i1].name),
if averbosity>=4:
print "[+] Shares in server:",allshares[i1].name
else:
if averbosity>=4:
print "[+] Shares in server:",fileshare
shares=(afileshare),
allitems=()
fcount=0
dcount=0
dnum1=0
fnum2=0
dnum2=0
for a1 in range(len(shares)):
if averbosity>=4:
print shares[a1]
for i in smb_list_subfolders(myconn,aremotesystem,shares[a1],atop):
item=os.path.join(aoutdir+"/"+i[0]+"/"+i[1],i[2])
item=os.path.normpath(item).replace('//','/')
fileitem=os.path.join(i[1],i[2]).replace('//','/')
if i[3] == "d":
dcount+=1
if acreateemptydirs==1:
dnum1+=smb_prepare_dir(aremotesystem,item)
if averbosity>=4:
print "DIR: %s"%item
elif i[3] == "f":
fcount+=1
if averbosity>=4:
print "FIL: %s %s"%(i[0],fileitem)
if adownloadfiles==1:
dnumTMP,fnumTMP=smb_get_file(myconn, i[0],fileitem,aoutdir)
dnum2+=dnumTMP
fnum2+=fnumTMP
if averbosity>=4:
print "(%s) %s%s%s (%s) (%s) (%s) (%s) (%s) (%s) (%s) (%s)"%(i[3],i[0],i[1],i[2],i[4],i[5],i[6],i[7],i[8],i[9],i[10],i[11])
allitems+=(i[0],i[1],i[2],i[3],i[4],i[5],i[6],i[7],i[8],i[9],i[10],i[11],'','','','',aoutdir),
pass
if averbosity==0:
print allitems
if averbosity>=1:
print "Found %d directories and %d files"%(dcount,fcount)
print "Created %d directories using directory downloader (absolute path)"%(dnum1)
print "Created %d directories (absolute path) and %d files (if existed, overwritten)"%(dnum2,fnum2)
myconn.close()
return allitems
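# Illustrative driver (not part of the original file): wiring smb_main up
# with the sample values from the commented PARAMETERS block above.
# Hostnames, credentials and the share name are placeholders.
#
# if __name__ == '__main__':
#     smb_main(convert_to_unicode('user1'),
#              convert_to_unicode('Asdf1234'),
#              convert_to_unicode(socket.gethostname()),
#              convert_to_unicode('REMOTESERVER'),
#              convert_to_unicode('127.0.0.1'),
#              convert_to_unicode('WORKGROUP'),
#              convert_to_unicode('TestShare'),
#              convert_to_unicode('/'),
#              convert_to_unicode('./DataGathered/SMB/'),
#              adownloadfiles=1, acreateemptydirs=0,
#              agetshares=0, averbosity=1)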
| 29.369863
| 165
| 0.692048
|
852f7c01fb7434bba23cec1b442351afb0937449
| 213
|
py
|
Python
|
rdkit/sping/PIL/pilfonts/removemedium.py
|
kazuyaujihara/rdkit
|
06027dcd05674787b61f27ba46ec0d42a6037540
|
[
"BSD-3-Clause"
] | 1,609
|
2015-01-05T02:41:13.000Z
|
2022-03-30T21:57:24.000Z
|
rdkit/sping/PIL/pilfonts/removemedium.py
|
kazuyaujihara/rdkit
|
06027dcd05674787b61f27ba46ec0d42a6037540
|
[
"BSD-3-Clause"
] | 3,412
|
2015-01-06T12:13:33.000Z
|
2022-03-31T17:25:41.000Z
|
rdkit/sping/PIL/pilfonts/removemedium.py
|
bp-kelley/rdkit
|
e0de7c9622ce73894b1e7d9568532f6d5638058a
|
[
"BSD-3-Clause"
] | 811
|
2015-01-11T03:33:48.000Z
|
2022-03-28T11:57:49.000Z
|
import os
for fname in filter(lambda x: '-' in x, os.listdir(os.curdir)):
  newname = fname.replace('-medium-', '-')
if newname != fname:
print(newname)
os.rename(fname, newname)
| 21.3
| 63
| 0.657277
|
b58b304564861bc816989ec1d92c3fd2c9226740
| 3,599
|
py
|
Python
|
packages/models-library/src/models_library/projects_nodes.py
|
GitHK/osparc-simcore-forked
|
5b01a28d1b8028afcf9a735e1d46a73daa13686e
|
[
"MIT"
] | null | null | null |
packages/models-library/src/models_library/projects_nodes.py
|
GitHK/osparc-simcore-forked
|
5b01a28d1b8028afcf9a735e1d46a73daa13686e
|
[
"MIT"
] | 17
|
2020-10-15T16:06:05.000Z
|
2022-03-21T18:48:21.000Z
|
packages/models-library/src/models_library/projects_nodes.py
|
GitHK/osparc-simcore-forked
|
5b01a28d1b8028afcf9a735e1d46a73daa13686e
|
[
"MIT"
] | null | null | null |
"""
Models Node as a central element in a project's pipeline
"""
from typing import Dict, List, Optional, Union
from pydantic import BaseModel, Extra, Field, HttpUrl, constr, validator
from .basic_regex import VERSION_RE
from .projects_access import AccessEnum
from .projects_nodes_io import (
DatCoreFileLink,
DownloadLink,
NodeID,
PortLink,
SimCoreFileLink,
)
from .projects_nodes_ui import Position
from .projects_state import RunningState
from .services import SERVICE_KEY_RE, PROPERTY_KEY_RE
InputTypes = Union[
int, bool, str, float, PortLink, SimCoreFileLink, DatCoreFileLink, DownloadLink
]
OutputTypes = Union[
int, bool, str, float, SimCoreFileLink, DatCoreFileLink, DownloadLink
]
InputID = OutputID = constr(regex=PROPERTY_KEY_RE)
Inputs = Dict[InputID, InputTypes]
Outputs = Dict[OutputID, OutputTypes]
class Node(BaseModel):
key: str = Field(
...,
description="distinctive name for the node based on the docker registry path",
regex=SERVICE_KEY_RE,
example=[
"simcore/services/comp/sleeper",
"simcore/services/dynamic/3dviewer",
"simcore/services/frontend/file-picker",
],
)
version: str = Field(
...,
description="semantic version number of the node",
regex=VERSION_RE,
example=["1.0.0", "0.0.1"],
)
label: str = Field(
..., description="The short name of the node", example=["JupyterLab"]
)
progress: Optional[float] = Field(
None, ge=0, le=100, description="the node progress value"
)
thumbnail: Optional[HttpUrl] = Field(
None,
description="url of the latest screenshot of the node",
example=["https://placeimg.com/171/96/tech/grayscale/?0.jpg"],
)
# INPUT PORTS ---
inputs: Optional[Inputs] = Field(
default_factory=dict, description="values of input properties"
)
input_access: Optional[Dict[InputID, AccessEnum]] = Field(
None, description="map with key - access level pairs", alias="inputAccess"
)
input_nodes: Optional[List[NodeID]] = Field(
default_factory=list,
description="node IDs of where the node is connected to",
example=["nodeUuid1", "nodeUuid2"],
alias="inputNodes",
)
# OUTPUT PORTS ---
outputs: Optional[Outputs] = Field(
default_factory=dict, description="values of output properties"
)
output_node: Optional[bool] = Field(None, deprecated=True, alias="outputNode")
output_nodes: Optional[List[NodeID]] = Field(
None,
description="Used in group-nodes. Node IDs of those connected to the output",
example=["nodeUuid1", "nodeUuid2"],
alias="outputNodes",
)
parent: Optional[NodeID] = Field(
None,
description="Parent's (group-nodes') node ID s. Used to group",
example=["nodeUUid1", "nodeUuid2"],
)
state: Optional[RunningState] = Field(
RunningState.NOT_STARTED,
description="the node's running state",
example=["RUNNING", "FAILED"],
)
# NOTE: use projects_ui.py
position: Optional[Position] = Field(None, deprecated=True)
@validator("thumbnail", pre=True)
@classmethod
    def convert_empty_str_to_none(cls, v):
if isinstance(v, str) and v == "":
return None
return v
@validator("state", pre=True)
@classmethod
    def convert_old_enum_name(cls, v):
if v == "FAILURE":
return RunningState.FAILED
return v
class Config:
extra = Extra.forbid
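# Illustrative sketch (not part of the original file): constructing a Node
# with only the required fields, using values from the Field examples above.
# Optional port fields fall back to empty containers via default_factory.
#
#   node = Node(key="simcore/services/comp/sleeper",
#               version="1.0.0",
#               label="sleeper")
#   assert node.inputs == {} and node.input_nodes == []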
| 29.743802
| 86
| 0.646291
|
e4e07c24a0267b5feb3b5ea03a7b13abda8277b7
| 38
|
py
|
Python
|
pycoinbase/wallet/__init__.py
|
anton-stefanovich/pycoinbase
|
74a47fdabfeac69f50cc91dac8ac6bc1190756d7
|
[
"Apache-2.0"
] | 3
|
2021-03-16T14:07:06.000Z
|
2021-06-20T04:11:58.000Z
|
pycoinbase/wallet/__init__.py
|
anton-stefanovich/pycoinbase
|
74a47fdabfeac69f50cc91dac8ac6bc1190756d7
|
[
"Apache-2.0"
] | null | null | null |
pycoinbase/wallet/__init__.py
|
anton-stefanovich/pycoinbase
|
74a47fdabfeac69f50cc91dac8ac6bc1190756d7
|
[
"Apache-2.0"
] | 1
|
2021-04-12T12:22:35.000Z
|
2021-04-12T12:22:35.000Z
|
# coding: utf-8
__version__ = '2.1.2'
| 12.666667
| 21
| 0.631579
|
e7549ff73ed12bda06d3315929155a3c65d8a845
| 6,123
|
py
|
Python
|
build/android/tombstones.py
|
aranajhonny/chromium
|
caf5bcb822f79b8997720e589334266551a50a13
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2019-01-16T03:57:39.000Z
|
2019-01-16T03:57:39.000Z
|
build/android/tombstones.py
|
aranajhonny/chromium
|
caf5bcb822f79b8997720e589334266551a50a13
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2018-02-10T21:00:08.000Z
|
2018-03-20T05:09:50.000Z
|
build/android/tombstones.py
|
aranajhonny/chromium
|
caf5bcb822f79b8997720e589334266551a50a13
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Find the most recent tombstone file(s) on all connected devices
# and prints their stacks.
#
# Assumes tombstone file was created with current symbols.
import datetime
import multiprocessing
import os
import subprocess
import sys
import optparse
from pylib import android_commands
from pylib.device import device_utils
def _ListTombstones(device):
"""List the tombstone files on the device.
Args:
device: An instance of DeviceUtils.
Yields:
Tuples of (tombstone filename, date time of file on device).
"""
lines = device.RunShellCommand('TZ=UTC su -c ls -a -l /data/tombstones')
for line in lines:
if 'tombstone' in line and not 'No such file or directory' in line:
details = line.split()
t = datetime.datetime.strptime(details[-3] + ' ' + details[-2],
'%Y-%m-%d %H:%M')
yield details[-1], t
def _GetDeviceDateTime(device):
"""Determine the date time on the device.
Args:
device: An instance of DeviceUtils.
Returns:
A datetime instance.
"""
device_now_string = device.RunShellCommand('TZ=UTC date')
return datetime.datetime.strptime(
device_now_string[0], '%a %b %d %H:%M:%S %Z %Y')
def _GetTombstoneData(device, tombstone_file):
"""Retrieve the tombstone data from the device
Args:
device: An instance of DeviceUtils.
tombstone_file: the tombstone to retrieve
Returns:
A list of lines
"""
return device.ReadFile('/data/tombstones/' + tombstone_file, as_root=True)
def _EraseTombstone(device, tombstone_file):
"""Deletes a tombstone from the device.
Args:
device: An instance of DeviceUtils.
tombstone_file: the tombstone to delete.
"""
return device.RunShellCommand(
'rm /data/tombstones/' + tombstone_file, as_root=True)
def _ResolveSymbols(tombstone_data, include_stack):
"""Run the stack tool for given tombstone input.
Args:
tombstone_data: a list of strings of tombstone data.
include_stack: boolean whether to include stack data in output.
Yields:
A string for each line of resolved stack output.
"""
stack_tool = os.path.join(os.path.dirname(__file__), '..', '..',
'third_party', 'android_platform', 'development',
'scripts', 'stack')
proc = subprocess.Popen(stack_tool, stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
output = proc.communicate(input='\n'.join(tombstone_data))[0]
for line in output.split('\n'):
if not include_stack and 'Stack Data:' in line:
break
yield line
def _ResolveTombstone(tombstone):
lines = []
lines += [tombstone['file'] + ' created on ' + str(tombstone['time']) +
', about this long ago: ' +
(str(tombstone['device_now'] - tombstone['time']) +
' Device: ' + tombstone['serial'])]
print '\n'.join(lines)
print 'Resolving...'
lines += _ResolveSymbols(tombstone['data'], tombstone['stack'])
return lines
def _ResolveTombstones(jobs, tombstones):
"""Resolve a list of tombstones.
Args:
jobs: the number of jobs to use with multiprocess.
tombstones: a list of tombstones.
"""
if not tombstones:
print 'No device attached? Or no tombstones?'
return
if len(tombstones) == 1:
data = _ResolveTombstone(tombstones[0])
else:
pool = multiprocessing.Pool(processes=jobs)
data = pool.map(_ResolveTombstone, tombstones)
data = ['\n'.join(d) for d in data]
print '\n'.join(data)
def _GetTombstonesForDevice(device, options):
"""Returns a list of tombstones on a given device.
Args:
device: An instance of DeviceUtils.
options: command line arguments from OptParse
"""
ret = []
all_tombstones = list(_ListTombstones(device))
if not all_tombstones:
print 'No device attached? Or no tombstones?'
return ret
# Sort the tombstones in date order, descending
all_tombstones.sort(cmp=lambda a, b: cmp(b[1], a[1]))
# Only resolve the most recent unless --all-tombstones given.
tombstones = all_tombstones if options.all_tombstones else [all_tombstones[0]]
device_now = _GetDeviceDateTime(device)
for tombstone_file, tombstone_time in tombstones:
ret += [{'serial': device.old_interface.Adb().GetSerialNumber(),
'device_now': device_now,
'time': tombstone_time,
'file': tombstone_file,
'stack': options.stack,
'data': _GetTombstoneData(device, tombstone_file)}]
# Erase all the tombstones if desired.
if options.wipe_tombstones:
for tombstone_file, _ in all_tombstones:
_EraseTombstone(device, tombstone_file)
return ret
def main():
parser = optparse.OptionParser()
parser.add_option('--device',
help='The serial number of the device. If not specified '
'will use all devices.')
parser.add_option('-a', '--all-tombstones', action='store_true',
help="""Resolve symbols for all tombstones, rather than just
the most recent""")
parser.add_option('-s', '--stack', action='store_true',
help='Also include symbols for stack data')
parser.add_option('-w', '--wipe-tombstones', action='store_true',
help='Erase all tombstones from device after processing')
parser.add_option('-j', '--jobs', type='int',
default=4,
help='Number of jobs to use when processing multiple '
'crash stacks.')
options, _ = parser.parse_args()
if options.device:
devices = [options.device]
else:
devices = android_commands.GetAttachedDevices()
tombstones = []
for device_serial in devices:
device = device_utils.DeviceUtils(device_serial)
tombstones += _GetTombstonesForDevice(device, options)
_ResolveTombstones(options.jobs, tombstones)
if __name__ == '__main__':
sys.exit(main())
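# Example invocations (a sketch; option names are taken from the parser
# above, <serial> is a placeholder):
#
#   tombstones.py                            # newest tombstone, all devices
#   tombstones.py --device <serial> -a -s -w # all tombstones, then wipe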
| 30.768844
| 80
| 0.662257
|
4b50abebe243a3b2d101a59438982d6c79f6539c
| 59
|
py
|
Python
|
basic/niki.py
|
fuyongde/python
|
f73407a1cc981d081760500d8e74d9e4d8873ca3
|
[
"Apache-2.0"
] | null | null | null |
basic/niki.py
|
fuyongde/python
|
f73407a1cc981d081760500d8e74d9e4d8873ca3
|
[
"Apache-2.0"
] | null | null | null |
basic/niki.py
|
fuyongde/python
|
f73407a1cc981d081760500d8e74d9e4d8873ca3
|
[
"Apache-2.0"
] | null | null | null |
def niki_abs(x, y):
return x + y
print(niki_abs(1, 3))
| 14.75
| 21
| 0.610169
|
761ee14213563c123de2681c5067c82478a8bfd4
| 280
|
py
|
Python
|
test/test_one_var_equations.py
|
Zibusiso-Mangoye/nma-python
|
a38e66997291aec07dd7bc34962c80912bc5b020
|
[
"MIT"
] | 1
|
2021-07-26T14:21:57.000Z
|
2021-07-26T14:21:57.000Z
|
test/test_one_var_equations.py
|
Zibusiso-Mangoye/nma-python
|
a38e66997291aec07dd7bc34962c80912bc5b020
|
[
"MIT"
] | null | null | null |
test/test_one_var_equations.py
|
Zibusiso-Mangoye/nma-python
|
a38e66997291aec07dd7bc34962c80912bc5b020
|
[
"MIT"
] | null | null | null |
"""
Tests for one_var_equations.py
"""
from nma.one_var_equations import bisection
def test_bisection():
"""
This test is Example 1 in chapter 2 of the textbook.
"""
f = lambda x : x**3 + 4*x**2 - 10
assert bisection(f, 1, 2, 0.00001, 13) == 1.3651123046875
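# For reference, a bisection sketch consistent with this test (an
# assumption -- the real implementation lives in nma.one_var_equations):
# halve [a, b] up to n0 times, returning the midpoint early only if
# f(p) == 0 or the half-interval drops below tol. With tol=0.00001 the
# early exit never fires in 13 iterations, and the 13th midpoint on
# [1, 2] is 1.3651123046875, matching the assertion above.
#
# def bisection(f, a, b, tol, n0):
#     fa = f(a)
#     for _ in range(n0):
#         p = a + (b - a) / 2
#         fp = f(p)
#         if fp == 0 or (b - a) / 2 < tol:
#             return p
#         if fa * fp > 0:
#             a, fa = p, fp
#         else:
#             b = p
#     return p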
| 23.333333
| 61
| 0.639286
|
afc7364ab5f5267c6c9d88c9106afaf0f0b5675b
| 5,478
|
py
|
Python
|
src/sender/wecom_sender.py
|
baboon-king/2c
|
2b1093c9f15c713dac51aec5c1244ec285e49782
|
[
"Apache-2.0"
] | null | null | null |
src/sender/wecom_sender.py
|
baboon-king/2c
|
2b1093c9f15c713dac51aec5c1244ec285e49782
|
[
"Apache-2.0"
] | null | null | null |
src/sender/wecom_sender.py
|
baboon-king/2c
|
2b1093c9f15c713dac51aec5c1244ec285e49782
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
Created by howie.hu at 2021/4/12.
Description: dispatch messages to WeCom (Enterprise WeChat) endpoints
Changelog: all notable changes to this file will be documented
"""
import json
import time
import requests
from src.config import Config
from src.sender.base import SenderBase
from src.sender.utils import send_post_request
from src.utils import LOGGER
class WeComSender(SenderBase):
"""
    Dispatcher class for WeCom (Enterprise WeChat)
"""
def __init__(self, send_config: dict):
"""
        Initialize sender configuration
:param send_config:
"""
super().__init__(send_type="wecom", send_config=send_config)
self.wecom_id = send_config.get("wecom_id", Config.WECOM_ID)
self.wecom_agent_id = send_config.get("wecom_agent_id", Config.WECOM_AGENT_ID)
self.wecom_secret = send_config.get("wecom_secret", Config.WECOM_SECRET)
self.url = f"https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={self.get_token()}"
def get_token(self):
"""
        Fetch the WeCom access token
:return:
"""
data = {
"corpid": self.wecom_id,
"corpsecret": self.wecom_secret,
}
token_url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
json_data = requests.get(token_url, params=data).json()
return json_data.get("access_token", "")
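    # A successful gettoken response (an assumption, based on the public
    # WeCom API documentation) looks roughly like:
    #   {"errcode": 0, "errmsg": "ok",
    #    "access_token": "...", "expires_in": 7200}
    # so a failed request degrades to "" via the .get() fallback above.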
def send_text_card(self, send_data):
"""
        Send a text card message
:param send_data:
:return:
"""
doc_name = send_data["doc_name"]
doc_source = send_data["doc_source"]
doc_link = send_data["doc_link"]
doc_content = send_data["doc_content"]
doc_cus_des = send_data["doc_cus_des"]
doc_source_name = send_data["doc_source_name"]
doc_keywords = send_data["doc_keywords"]
doc_ts = send_data["doc_ts"]
doc_date = time.strftime("%Y-%m-%d", time.localtime(doc_ts))
doc_des_info = f"亲,来自 {doc_source} 源的 {doc_source_name} 有更新啦! \n\n{doc_content}\n\n文章关键字:{doc_keywords}"
doc_des = f'<div class="black">{doc_date} | {doc_cus_des}</div>\n<div class="normal">{doc_des_info}</div>\n来自[2c]👉技术支持❤️'
data = {
"toparty": 1,
"msgtype": "textcard",
"agentid": self.wecom_agent_id,
"textcard": {
"title": f"[{doc_source_name}]{doc_name}",
"description": doc_des,
"url": doc_link,
"btntxt": "更多",
},
"safe": 0,
}
data = json.dumps(data, ensure_ascii=False)
try:
resp_dict = requests.post(
url=self.url,
data=data.encode("utf-8").decode("latin1"),
headers={"Content-Type": "application/json"},
).json()
return resp_dict
except Exception as e:
resp_dict = {}
LOGGER.error(f"请求出错:{e}")
return resp_dict
def send(self, send_data) -> bool:
"""
        Dispatch to the WeCom endpoint
        :param send_data: payload dict; fields are developer-defined
:return:
"""
doc_name = send_data["doc_name"]
doc_cus_des = send_data["doc_cus_des"]
doc_id = send_data["doc_id"]
doc_link = send_data["doc_link"]
doc_source_name = send_data["doc_source_name"]
is_send = self.is_send(doc_id=doc_id)
send_status = True
if not is_send:
            # start dispatching
resp_dict = self.send_text_card(send_data=send_data)
notice_msg = f"{doc_cus_des}👉{doc_source_name}_{doc_name}:{doc_link} 分发到 {self.send_type}"
if resp_dict:
if resp_dict.get("errcode") == 0:
                    # persist the send status to the database
self.sl_coll.insert_one(
{
"send_type": self.send_type,
"doc_id": doc_id,
"ts": time.time(),
}
)
                    # dispatch succeeded
LOGGER.info(f"{notice_msg} 成功!")
send_status = True
else:
LOGGER.error(f"{notice_msg} 失败:{resp_dict.get('errmsg')}")
else:
LOGGER.error(f"{notice_msg} 失败!")
return send_status
def send(send_config: dict, send_data: dict) -> bool:
"""
    Dispatch to the WeCom endpoint
    :param send_config: sender endpoint configuration
    :param send_data: payload dict; fields are developer-defined
:return:
"""
return WeComSender(send_config=send_config).send(send_data)
if __name__ == "__main__":
send(
send_config={"wecom_id": "", "wecom_agent_id": 0, "wecom_secret": "",},
send_data={
"doc_id": "f42460107f69c9e929f8d591243efeb2",
"doc_content": "普通人搞一百万有多难?",
"doc_date": "2021-04-11",
"doc_des": "",
"doc_ext": {},
"doc_link": "https://mp.weixin.qq.com/s/J9Ejaw9x9fXDZ4-hsrhhtw",
"doc_name": "普通人搞一百万有多难?",
"doc_source": "wechat",
"doc_source_des": "前码农&产品人,现自由职业者,创业者。",
"doc_source_name": "stormzhang",
"doc_cus_des": "广告",
"doc_keywords": [],
"doc_ts": 1618136819.0,
"cos_model": {
"model_name": "cos",
"result": 0,
"probability": 0.0,
"feature_dict": {
"is_black": False,
"is_white": False,
"text": "普通人搞一百万有多难?",
},
},
},
)
| 32.223529
| 129
| 0.528112
|
7efe049c2e528aad7f11357945450cb5ee918e90
| 20,921
|
py
|
Python
|
octoprint_discordremote/command.py
|
shaynemk/OctoPrint-DiscordRemote
|
bbf1c1b7fec49b2dbda6d79411c2a8254a6a2c5f
|
[
"MIT"
] | null | null | null |
octoprint_discordremote/command.py
|
shaynemk/OctoPrint-DiscordRemote
|
bbf1c1b7fec49b2dbda6d79411c2a8254a6a2c5f
|
[
"MIT"
] | null | null | null |
octoprint_discordremote/command.py
|
shaynemk/OctoPrint-DiscordRemote
|
bbf1c1b7fec49b2dbda6d79411c2a8254a6a2c5f
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import collections
import os
import urllib
import humanfriendly
import re
import time
import requests
from octoprint.printer import InvalidFileLocation, InvalidFileType
from octoprint_discordremote.command_plugins import plugin_list
from octoprint_discordremote.embedbuilder import EmbedBuilder, success_embed, error_embed, info_embed, upload_file
class Command:
def __init__(self, plugin):
assert plugin
self.plugin = plugin
self.command_dict = collections.OrderedDict()
self.command_dict['connect'] = {'cmd': self.connect, 'params': "[port] [baudrate]",
'description': "Connect to a printer."}
self.command_dict['disconnect'] = {'cmd': self.disconnect, 'description': "Disconnect from a printer."}
self.command_dict['print'] = {'cmd': self.start_print, 'params': "{filename}", 'description': "Print a file."}
self.command_dict['files'] = {'cmd': self.list_files,
'description': "List all files and respective download links."}
self.command_dict['abort'] = {'cmd': self.cancel_print, 'description': "Abort a print."}
self.command_dict['snapshot'] = {'cmd': self.snapshot, 'description': "Take a snapshot with the camera."}
self.command_dict['status'] = {'cmd': self.status, 'description': "Get the current printer status."}
self.command_dict['help'] = {'cmd': self.help, 'description': "Print this help."}
self.command_dict['pause'] = {'cmd': self.pause, 'description': "Pause current print."}
self.command_dict['resume'] = {'cmd': self.resume, 'description': "Resume current print."}
self.command_dict['timelapse'] = {'cmd': self.timelapse,
'description': "List all timelapses and respective download links."}
self.command_dict['mute'] = {'cmd': self.mute,
'description': "Mute notifications"}
self.command_dict['unmute'] = {'cmd': self.unmute,
'description': "Unmute notifications"}
        self.command_dict['gcode'] = {'cmd': self.gcode, 'params': 'GCODE lines, separated by \';\'',
'description': "Send a set of GCODE commands directly to the printer"}
self.command_dict['getfile'] = {'cmd': self.getfile, 'params': "filename",
'description': "Get a gcode file and upload to discord."}
self.command_dict['gettimelapse'] = {'cmd': self.gettimelapse, 'params': "filename",
'description': "Get a timelapse file and upload to discord."}
# Load plugins
for command_plugin in plugin_list:
command_plugin.setup(self, plugin)
def parse_command(self, string, user=None):
prefix_str = self.plugin.get_settings().get(["prefix"])
prefix_len = len(prefix_str)
parts = re.split(r'\s+', string)
if len(parts[0]) < prefix_len or prefix_str != parts[0][:prefix_len]:
return None, None
command_string = parts[0][prefix_len:]
command = self.command_dict.get(command_string, {'cmd': self.help})
if user and not self.check_perms(command_string, user):
return None, error_embed(author=self.plugin.get_printer_name(),
title="Permission Denied")
if command.get('params'):
return command['cmd'](parts)
else:
return command['cmd']()
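    # Illustrative parse (a sketch inferred from the logic above, not from the
    # plugin docs): with prefix '/', the message '/print foo.gcode' splits into
    # ['/print', 'foo.gcode']; 'print' selects the handler, and because that
    # entry declares 'params', the whole parts list is forwarded to it.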
def timelapse(self):
path = os.path.join(os.getcwd(), self.plugin._data_folder, '..', '..', 'timelapse')
path = os.path.abspath(path)
builder = EmbedBuilder()
builder.set_title('Files and Details')
builder.set_description('Download with /gettimelapse {filename}')
builder.set_author(name=self.plugin.get_printer_name())
baseurl = self.plugin.get_settings().get(["baseurl"])
port = self.plugin.get_port()
if baseurl is None or baseurl == "":
baseurl = "%s:%s" % (self.plugin.get_ip_address(), port)
for root, dirs, files in os.walk(path):
for name in files:
try:
file_path = os.path.join(root, name)
title = os.path.basename(file_path)
description = ''
description += 'Size: %s\n' % os.path.getsize(file_path)
description += 'Date of Creation: %s\n' % time.ctime(os.path.getctime(file_path))
description += 'Download Path: %s\n' %\
("http://" + baseurl + "/downloads/timelapse/" + urllib.quote(title))
builder.add_field(title=title, text=description)
except Exception as e:
pass
return None, builder.get_embeds()
def help(self):
builder = EmbedBuilder()
builder.set_title('Commands, Parameters and Description')
builder.set_author(self.plugin.get_printer_name())
for command, details in self.command_dict.items():
builder.add_field(
title='%s %s' % (self.plugin.get_settings().get(["prefix"]) + command, details.get('params') or ''),
text=details.get('description'))
return None, builder.get_embeds()
def cancel_print(self):
self.plugin.get_printer().cancel_print()
return None, error_embed(author=self.plugin.get_printer_name(),
title='Print aborted')
def start_print(self, params):
if len(params) != 2:
return None, error_embed(author=self.plugin.get_printer_name(),
title='Wrong number of arguments',
description='try "%sprint [filename]"' % self.plugin.get_settings().get(
["prefix"]))
if not self.plugin.get_printer().is_ready():
return None, error_embed(author=self.plugin.get_printer_name(),
title='Printer is not ready')
file = self.find_file(params[1])
if file is None:
return None, error_embed(author=self.plugin.get_printer_name(),
title='Failed to find the file')
is_sdcard = (file['location'] == 'sdcard')
try:
file_path = self.plugin.get_file_manager().path_on_disk(file['location'], file['path'])
self.plugin.get_printer().select_file(file_path, is_sdcard, printAfterSelect=True)
except InvalidFileType:
return None, error_embed(author=self.plugin.get_printer_name(),
title='Invalid file type selected')
except InvalidFileLocation:
return None, error_embed(author=self.plugin.get_printer_name(),
title='Invalid file location?')
return None, success_embed(author=self.plugin.get_printer_name(),
title='Successfully started print',
description=file['path'])
def list_files(self):
port = self.plugin.get_port()
baseurl = self.plugin.get_settings().get(["baseurl"])
if baseurl is None or baseurl == "":
baseurl = "%s:%s" % (self.plugin.get_ip_address(), port)
builder = EmbedBuilder()
builder.set_title('Files and Details')
builder.set_author(name=self.plugin.get_printer_name())
file_list = self.get_flat_file_list()
for details in file_list:
description = ''
title = ''
try:
title = details['path'].lstrip('/')
except:
pass
try:
description += 'Location: %s\n' % details['location']
except:
pass
try:
estimated_print_time = humanfriendly.format_timespan(details['analysis']['estimatedPrintTime'],
max_units=2)
description += 'Estimated Print Time: %s\n' % estimated_print_time
except:
pass
try:
average_print_time = humanfriendly.format_timespan(
details['statistics']['averagePrintTime']['_default'], max_units=2)
description += 'Average Print Time: %s\n' % average_print_time
except:
pass
try:
filament_required = humanfriendly.format_length(
details['analysis']['filament']['tool0']['length'] / 1000)
description += 'Filament Required: %s\n' % filament_required
except:
pass
try:
url = "http://" + baseurl + "/downloads/files/" + details['location'] + "/" + details['path'].lstrip('/')
description += 'Download Path: %s\n' % url
except:
pass
builder.add_field(title=title, text=description)
return None, builder.get_embeds()
def snapshot(self):
snapshots = self.plugin.get_snapshot()
if snapshots and len(snapshots) == 1:
return None, info_embed(author=self.plugin.get_printer_name(),
snapshot=snapshots[0])
return None, None
def find_file(self, file_name):
flat_filelist = self.get_flat_file_list()
for file in flat_filelist:
if file_name.upper() in file.get('path').upper():
return file
return None
def get_flat_file_list(self):
file_list = self.plugin.get_file_manager().list_files(recursive=True)
flat_filelist = []
for (location, files) in file_list.items():
self.flatten_file_list_recursive(flat_filelist, location, files, '')
return flat_filelist
def flatten_file_list_recursive(self, file_array, location, files, path):
for filename, details in files.items():
if details.get('children') is not None:
# This is a folder, recurse into it
self.flatten_file_list_recursive(file_array, location, details['children'], filename)
else:
if path == '' or not path.endswith('/'):
path += '/'
details['path'] = path + filename
details['location'] = location
file_array.append(details)
def connect(self, params):
if len(params) > 3:
return None, error_embed(author=self.plugin.get_printer_name(),
title='Too many parameters',
description='Should be: %sconnect [port] [baudrate]' % self.plugin.get_settings().get(
["prefix"]))
if self.plugin.get_printer().is_operational():
return None, error_embed(author=self.plugin.get_printer_name(),
title='Printer already connected',
description='Disconnect first')
port = None
baudrate = None
if len(params) >= 2:
port = params[1]
if len(params) == 3:
try:
baudrate = int(params[2])
except ValueError:
return None, error_embed(author=self.plugin.get_printer_name(),
title='Wrong format for baudrate',
description='should be a number')
self.plugin.get_printer().connect(port=port, baudrate=baudrate, profile=None)
# Check every second for 30 seconds, to see if it has connected.
for i in range(30):
time.sleep(1)
if self.plugin.get_printer().is_operational():
return None, success_embed('Connected to printer')
return None, error_embed(author=self.plugin.get_printer_name(),
title='Failed to connect',
description='try: "%sconnect [port] [baudrate]"' % self.plugin.get_settings().get(
["prefix"]))
def disconnect(self):
if not self.plugin.get_printer().is_operational():
return None, error_embed(author=self.plugin.get_printer_name(),
title='Printer is not connected')
self.plugin.get_printer().disconnect()
# Sleep a while before checking if disconnected
time.sleep(10)
if self.plugin.get_printer().is_operational():
return None, error_embed(author=self.plugin.get_printer_name(),
title='Failed to disconnect')
return None, success_embed(author=self.plugin.get_printer_name(),
                                   title='Disconnected from printer')
def status(self):
builder = EmbedBuilder()
builder.set_title('Status')
builder.set_author(name=self.plugin.get_printer_name())
if self.plugin.get_settings().get(['show_local_ip'], merged=True):
ip_addr = self.plugin.get_ip_address()
if ip_addr != '127.0.0.1':
builder.add_field(title='Local IP', text=ip_addr, inline=True)
if self.plugin.get_settings().get(['show_external_ip'], merged=True):
builder.add_field(title='External IP', text=self.plugin.get_external_ip_address(), inline=True)
operational = self.plugin.get_printer().is_operational()
builder.add_field(title='Operational', text='Yes' if operational else 'No', inline=True)
current_data = self.plugin.get_printer().get_current_data()
if current_data.get('currentZ'):
builder.add_field(title='Current Z', text=str(current_data['currentZ']), inline=True)
if operational:
temperatures = self.plugin.get_printer().get_current_temperatures()
for heater in temperatures.keys():
if heater == 'bed':
continue
if temperatures[heater]['actual'] is None or len(str(temperatures[heater]['actual'])) == 0:
continue
builder.add_field(title='Extruder Temp (%s)' % heater,
text=str(temperatures[heater]['actual']),
inline=True)
if temperatures['bed']['actual']:
builder.add_field(title='Bed Temp', text=str(temperatures['bed']['actual']), inline=True)
printing = self.plugin.get_printer().is_printing()
builder.add_field(title='Printing', text='Yes' if printing else 'No', inline=True)
if printing:
builder.add_field(title='File', text=current_data['job']['file']['name'], inline=True)
completion = current_data['progress']['completion']
if completion:
builder.add_field(title='Progress', text='%d%%' % completion, inline=True)
builder.add_field(title='Time Spent', text=self.plugin.get_print_time_spent(), inline=True)
builder.add_field(title='Time Remaining', text=self.plugin.get_print_time_remaining(), inline=True)
snapshots = self.plugin.get_snapshot()
if snapshots and len(snapshots) == 1:
builder.set_image(snapshots[0])
return None, builder.get_embeds()
def pause(self):
self.plugin.get_printer().pause_print()
snapshot = None
snapshots = self.plugin.get_snapshot()
if snapshots and len(snapshots) == 1:
snapshot = snapshots[0]
return None, success_embed(author=self.plugin.get_printer_name(),
title='Print paused', snapshot=snapshot)
def resume(self):
self.plugin.get_printer().resume_print()
snapshot = None
snapshots = self.plugin.get_snapshot()
if snapshots and len(snapshots) == 1:
snapshot = snapshots[0]
return None, success_embed(author=self.plugin.get_printer_name(),
title='Print resumed', snapshot=snapshot)
def download_file(self, filename, url, user):
if user and not self.check_perms('upload', user):
return None, error_embed(author=self.plugin.get_printer_name(),
title="Permission Denied")
upload_file_path = self.plugin.get_file_manager().path_on_disk('local', filename)
r = requests.get(url, stream=True)
with open(upload_file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
return None, success_embed(author=self.plugin.get_printer_name(),
title='File Received',
description=filename)
def mute(self):
self.plugin.mute()
return None, success_embed(author=self.plugin.get_printer_name(),
title='Notifications Muted')
def unmute(self):
self.plugin.unmute()
return None, success_embed(author=self.plugin.get_printer_name(),
title='Notifications Unmuted')
@staticmethod
def _parse_array(string):
# noinspection PyBroadException
try:
return re.split("[^a-zA-Z0-9*]+", string)
except:
return None
def check_perms(self, command, user):
permissions = self.plugin.get_settings().get(['permissions'], merged=True)
for rulename in permissions:
rule = permissions.get(rulename)
users = self._parse_array(rule['users'])
commands = self._parse_array(rule['commands'])
if users is None or commands is None:
continue
if ('*' in users or user in users) and \
('*' in commands or command in commands):
return True
return False
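    # Illustrative shape of the 'permissions' setting this method expects
    # (a sketch inferred from the parsing above, not taken from the docs):
    #   {'rule1': {'users': 'alice,bob', 'commands': 'print,abort'},
    #    'admins': {'users': '*', 'commands': '*'}}
    # '*' acts as a wildcard for either field.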
def gcode(self, params):
if not self.plugin.get_printer().is_operational():
return None, error_embed(author=self.plugin.get_printer_name(),
title="Printer not connected",
description="Connect to printer first.")
allowed_gcodes = self.plugin.get_settings().get(["allowed_gcode"])
allowed_gcodes = re.split('[^0-9a-zA-Z]+', allowed_gcodes.upper())
script = "".join(params[1:]).upper()
lines = script.split(';')
for line in lines:
first = line.strip().replace(' ', '').replace('\t', '')
first = re.findall('^[a-zA-Z]+[0-9]+', first)
if first is None or \
len(first) == 0 or \
first[0] not in allowed_gcodes:
return None, error_embed(author=self.plugin.get_printer_name(),
title="Invalid GCODE",
description="If you want to use \"%s\", add it to the allowed GCODEs" % line)
try:
self.plugin.get_printer().commands(lines)
except Exception as e:
return None, error_embed(author=self.plugin.get_printer_name(),
title="Failed to execute gcode",
description="Error: %s" % e)
return None, success_embed(author=self.plugin.get_printer_name(),
title="Sent script")
def getfile(self, params):
filename = " ".join(params[1:])
foundfile = self.find_file(filename)
if foundfile is None:
return None, error_embed(author=self.plugin.get_printer_name(),
title="Failed to find file matching the name given")
file_path = self.plugin.get_file_manager().path_on_disk(foundfile['location'], foundfile['path'])
return upload_file(file_path)
def gettimelapse(self, params):
filename = " ".join(params[1:]).upper()
path = os.path.join(os.getcwd(), self.plugin._data_folder, '..', '..', 'timelapse')
path = os.path.abspath(path)
for root, dirs, files in os.walk(path):
for name in files:
file_path = os.path.join(root, name)
if filename in file_path.upper():
return upload_file(file_path)
return None, error_embed(author=self.plugin.get_printer_name(),
title="Failed to find file matching the name given")
| 45.879386
| 123
| 0.561876
|
e51d6b52fb134027eed01ee0d8cb5de4356dc3c5
| 801
|
py
|
Python
|
Curso de Python USP Part1/Exercicios/maior_primo.py
|
JorgeTranin/Cursos_Coursera
|
37d26b5f92d9324225f6701d0eb0fd466cff9d86
|
[
"MIT"
] | null | null | null |
Curso de Python USP Part1/Exercicios/maior_primo.py
|
JorgeTranin/Cursos_Coursera
|
37d26b5f92d9324225f6701d0eb0fd466cff9d86
|
[
"MIT"
] | null | null | null |
Curso de Python USP Part1/Exercicios/maior_primo.py
|
JorgeTranin/Cursos_Coursera
|
37d26b5f92d9324225f6701d0eb0fd466cff9d86
|
[
"MIT"
] | null | null | null |
'''
Write a function maior_primo that takes an integer greater than or equal to 2 and returns the largest prime less than or equal to the number passed in.
Hint: write a helper eprimo(k) and loop over the numbers up to the given one, checking whether each is prime; whenever one is, store it in a variable. When the loop ends, that variable holds the largest prime found.
'''
def eprimo(n):
    if n < 2:  # 0 and 1 are not prime
        return False
    cont = 0
    for i in range(1, n):
        if n % i == 0:
            cont += 1
        if cont > 1:
            break
    # a prime has exactly one divisor in [1, n): the number 1 itself
    return cont <= 1
def maior_primo(n):
primo = n
j = 0
while j <= n:
if eprimo(j):
primo = j
j += 1
return primo
print(maior_primo(100))
print(maior_primo(7))
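# Expected output: 97 and 7 (the largest primes <= 100 and <= 7, respectively).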
| 25.83871
| 232
| 0.615481
|
9c48e5c2b43536dc03b0f7006a4bf59419c054c2
| 14,718
|
py
|
Python
|
celeba19/train.py
|
liuyangdh/multimodal-vae-public
|
ba5941d010b0164094f5818b93baad9df546494e
|
[
"MIT"
] | 98
|
2018-05-28T17:07:36.000Z
|
2022-03-16T03:54:11.000Z
|
celeba19/train.py
|
jannik-w/multimodal-vae-public
|
2a358eb3593e9942e0846eb0095519acef462fa6
|
[
"MIT"
] | 4
|
2019-04-15T00:40:21.000Z
|
2020-03-04T06:24:56.000Z
|
celeba19/train.py
|
jannik-w/multimodal-vae-public
|
2a358eb3593e9942e0846eb0095519acef462fa6
|
[
"MIT"
] | 36
|
2018-08-07T05:02:03.000Z
|
2022-03-28T05:21:42.000Z
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
import numpy as np
from tqdm import tqdm
from itertools import combinations
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from model import MVAE
sys.path.append('../celeba')
from datasets import N_ATTRS
from datasets import CelebAttributes
def elbo_loss(recon, data, mu, logvar, lambda_image=1.0,
lambda_attrs=1.0, annealing_factor=1.):
"""Compute the ELBO for an arbitrary number of data modalities.
@param recon: list of torch.Tensors/Variables
Contains one for each modality.
@param data: list of torch.Tensors/Variables
        Size must agree with recon.
@param mu: Torch.Tensor
Mean of the variational distribution.
@param logvar: Torch.Tensor
Log variance for variational distribution.
@param lambda_image: float [default: 1.0]
weight for image BCE
    @param lambda_attrs: float [default: 1.0]
weight for attribute BCE
@param annealing_factor: float [default: 1]
Beta - how much to weight the KL regularizer.
"""
assert len(recon) == len(data), "must supply ground truth for every modality."
n_modalities = len(recon)
batch_size = mu.size(0)
BCE = 0 # reconstruction cost
for ix in xrange(n_modalities):
# dimensionality > 1 implies an image
if len(recon[ix].size()) > 1:
recon_ix = recon[ix].view(batch_size, -1)
data_ix = data[ix].view(batch_size, -1)
BCE += lambda_image * torch.sum(binary_cross_entropy_with_logits(recon_ix, data_ix), dim=1)
else: # this is for an attribute
BCE += lambda_attrs * binary_cross_entropy_with_logits(recon[ix], data[ix])
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp(), dim=1)
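    # Note (added for clarity): the quantity below is the *negative* ELBO
    # (reconstruction cost plus the annealed KL term), i.e. the loss to minimize.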
ELBO = torch.mean(BCE + annealing_factor * KLD)
return ELBO
def binary_cross_entropy_with_logits(input, target):
"""Sigmoid Activation + Binary Cross Entropy
@param input: torch.Tensor (size N)
@param target: torch.Tensor (size N)
@return loss: torch.Tensor (size N)
"""
if not (target.size() == input.size()):
raise ValueError("Target size ({}) must be the same as input size ({})".format(
target.size(), input.size()))
return (torch.clamp(input, 0) - input * target
+ torch.log(1 + torch.exp(-torch.abs(input))))
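# Note (added for clarity): the expression above is the numerically stable form
# of sigmoid + BCE, max(x, 0) - x*z + log(1 + exp(-|x|)), which avoids
# overflowing exp() for large-magnitude logits.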
def tensor_2d_to_list(x):
# convert a 2D tensor to a list of 1D tensors.
n_dims = x.size(1)
list_of_tensors = []
for i in xrange(n_dims):
list_of_tensors.append(x[:, i])
return list_of_tensors
def enumerate_combinations(n):
"""Enumerate entire pool of combinations.
We use this to define the domain of ELBO terms,
(the pool of 2^19 ELBO terms).
@param n: integer
number of features (19 for Celeb19)
@return: a list of ALL permutations
"""
combos = []
    for i in xrange(2, n):  # sizes 2 to n - 1
_combos = list(combinations(range(n), i))
combos += _combos
combos_np = np.zeros((len(combos), n))
for i in xrange(len(combos)):
for idx in combos[i]:
combos_np[i][idx] = 1
combos_np = combos_np.astype(np.bool)
return combos_np
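# Worked example (added for clarity): for n = 3 the loop covers size 2 only,
# giving combos (0,1), (0,2), (1,2), i.e. the boolean rows
# [True, True, False], [True, False, True], [False, True, True].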
def sample_combinations(pool, size=1):
"""Return boolean list of which data points to use to compute a modality.
Ignore combinations that are all True or only contain a single True.
@param pool: np.array
enumerating all possible combinations.
@param size: integer (default: 1)
number of combinations to sample.
"""
n_modalities = pool.shape[1]
pool_size = len(pool)
pool_sums = np.sum(pool, axis=1)
pool_dist = np.bincount(pool_sums)
pool_space = np.where(pool_dist > 0)[0]
sample_pool = np.random.choice(pool_space, size, replace=True)
sample_dist = np.bincount(sample_pool)
if sample_dist.size < n_modalities:
zeros_pad = np.zeros(n_modalities - sample_dist.size).astype(np.int)
sample_dist = np.concatenate((sample_dist, zeros_pad))
sample_combo = []
for ix in xrange(n_modalities):
if sample_dist[ix] > 0:
pool_i = pool[pool_sums == ix]
combo_i = np.random.choice(range(pool_i.shape[0]),
size=sample_dist[ix],
replace=False)
sample_combo.append(pool_i[combo_i])
sample_combo = np.concatenate(sample_combo)
return sample_combo
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
if not os.path.isdir(folder):
os.mkdir(folder)
torch.save(state, os.path.join(folder, filename))
if is_best:
shutil.copyfile(os.path.join(folder, filename),
os.path.join(folder, 'model_best.pth.tar'))
def load_checkpoint(file_path, use_cuda=False):
checkpoint = torch.load(file_path) if use_cuda else \
torch.load(file_path, map_location=lambda storage, location: storage)
model = MVAE(checkpoint['n_latents'])
model.load_state_dict(checkpoint['state_dict'])
return model
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--n-latents', type=int, default=100,
help='size of the latent embedding [default: 100]')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
help='input batch size for training [default: 100]')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train [default: 100]')
parser.add_argument('--annealing-epochs', type=int, default=20, metavar='N',
help='number of epochs to anneal KL for [default: 20]')
parser.add_argument('--lr', type=float, default=1e-4, metavar='LR',
help='learning rate [default: 1e-4]')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status [default: 10]')
parser.add_argument('--approx-m', type=int, default=1,
help='number of ELBO terms to approx. the full MVAE objective [default: 1]')
parser.add_argument('--lambda-image', type=float, default=1.,
help='multipler for image reconstruction [default: 1]')
parser.add_argument('--lambda-attrs', type=float, default=10.,
help='multipler for attributes reconstruction [default: 10]')
parser.add_argument('--cuda', action='store_true', default=False,
help='enables CUDA training [default: False]')
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
if not os.path.isdir('./trained_models'):
os.makedirs('./trained_models')
# crop the input image to 64 x 64
preprocess_data = transforms.Compose([transforms.Resize(64),
transforms.CenterCrop(64),
transforms.ToTensor()])
train_loader = torch.utils.data.DataLoader(
CelebAttributes(partition='train', data_dir='./data',
image_transform=preprocess_data),
batch_size=args.batch_size, shuffle=True)
N_mini_batches = len(train_loader)
test_loader = torch.utils.data.DataLoader(
CelebAttributes(partition='val', data_dir='./data',
image_transform=preprocess_data),
batch_size=args.batch_size, shuffle=False)
model = MVAE(args.n_latents)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
if args.cuda:
model.cuda()
# enumerate all combinations so we can sample from this
# every gradient step. NOTE: probably not the most efficient
# way to do this but oh well.
combination_pool = enumerate_combinations(19)
def train(epoch):
model.train()
train_loss_meter = AverageMeter()
for batch_idx, (image, attrs) in enumerate(train_loader):
if epoch < args.annealing_epochs:
# compute the KL annealing factor for the current mini-batch in the current epoch
annealing_factor = (float(batch_idx + (epoch - 1) * N_mini_batches + 1) /
float(args.annealing_epochs * N_mini_batches))
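                # e.g. with annealing_epochs=20 and N_mini_batches=1000 this
                # ramps linearly from ~0 on the first batch of epoch 1 to
                # exactly 1.0 on the last batch of epoch 20.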
else:
# by default the KL annealing factor is unity
annealing_factor = 1.0
if args.cuda:
image = image.cuda()
attrs = attrs.cuda()
image = Variable(image)
attrs = Variable(attrs)
attrs = tensor_2d_to_list(attrs) # convert tensor to list
batch_size = len(image)
# refresh the optimizer
optimizer.zero_grad()
train_loss = 0 # accumulate train loss here so we don't store a lot of things.
n_elbo_terms = 0 # track number of ELBO terms
# compute ELBO using all data (``complete")
recon_image, recon_attrs, mu, logvar = model(image, attrs)
train_loss += elbo_loss([recon_image] + recon_attrs, [image] + attrs, mu, logvar,
lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,
annealing_factor=annealing_factor)
n_elbo_terms += 1 # keep track of how many terms there are
# compute ELBO using only image data
recon_image, _, mu, logvar = model(image=image)
train_loss += elbo_loss([recon_image], [image], mu, logvar,
lambda_image=args.lambda_image, lambda_attrs=args.lambda_attrs,
annealing_factor=annealing_factor)
n_elbo_terms += 1 # keep track of how many terms there are
            # compute ELBO using only attribute data (one attribute at a time)
for ix in xrange(len(attrs)):
_, recon_attrs, mu, logvar = model(attrs=[attrs[k] if k == ix else None
for k in xrange(len(attrs))])
train_loss += elbo_loss([recon_attrs[ix]], [attrs[ix]], mu, logvar,
annealing_factor=annealing_factor)
n_elbo_terms += 1
# sample some number of terms
if args.approx_m > 0:
sample_combos = sample_combinations(combination_pool, size=args.approx_m)
for sample_combo in sample_combos:
attrs_combo = sample_combo[1:]
recon_image, recon_attrs, mu, logvar = model(image=image if sample_combo[0] else None,
attrs=[attrs[ix] if attrs_combo[ix] else None
for ix in xrange(attrs_combo.size)])
if sample_combo[0]: # check if image is present
elbo = elbo_loss([recon_image] + [recon_attrs[ix] for ix in xrange(attrs_combo.size) if attrs_combo[ix]],
[image] + [attrs[ix] for ix in xrange(attrs_combo.size) if attrs_combo[ix]],
mu, logvar, annealing_factor=annealing_factor)
else:
elbo = elbo_loss([recon_attrs[ix] for ix in xrange(attrs_combo.size) if attrs_combo[ix]],
[attrs[ix] for ix in xrange(attrs_combo.size) if attrs_combo[ix]],
mu, logvar, annealing_factor=annealing_factor)
train_loss += elbo
n_elbo_terms += 1
assert n_elbo_terms == (len(attrs) + 1) + 1 + args.approx_m # N + 1 + M
train_loss_meter.update(train_loss.data[0], len(image))
# compute and take gradient step
train_loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAnnealing-Factor: {:.3f}'.format(
epoch, batch_idx * batch_size, len(train_loader.dataset),
100. * batch_idx / len(train_loader), train_loss_meter.avg, annealing_factor))
print('====> Epoch: {}\tLoss: {:.4f}'.format(epoch, train_loss_meter.avg))
def test(epoch):
model.eval()
test_loss = 0
        # For simplicity, only the joint loss is tracked here.
pbar = tqdm(total=len(test_loader))
for batch_idx, (image, attrs) in enumerate(test_loader):
if args.cuda:
image, attrs = image.cuda(), attrs.cuda()
image = Variable(image, volatile=True)
attrs = Variable(attrs, volatile=True)
batch_size = image.size(0)
attrs = tensor_2d_to_list(attrs)
# compute the elbo using all data.
recon_image, recon_attrs, mu, logvar = model(image, attrs)
test_loss += elbo_loss([recon_image] + recon_attrs, [image] + attrs, mu, logvar).data[0]
pbar.update()
pbar.close()
test_loss /= len(test_loader)
print('====> Test Loss: {:.4f}'.format(test_loss))
return test_loss
best_loss = sys.maxint
for epoch in range(1, args.epochs + 1):
train(epoch)
loss = test(epoch)
is_best = loss < best_loss
best_loss = min(loss, best_loss)
# save the best model and current model
save_checkpoint({
'state_dict': model.state_dict(),
'best_loss': best_loss,
'n_latents': args.n_latents,
'optimizer' : optimizer.state_dict(),
        }, is_best, folder='./trained_models')  # args defines no out_dir; reuse the folder created above
| 41.342697
| 129
| 0.586425
|
aade95021b5b45cf12ba9987d9cf63abd061473d
| 1,369
|
py
|
Python
|
osrsapi/pricetrend.py
|
XaKingas/osrsapi
|
14b93e0f6902724e57ebb1f50d817bd557e41c3d
|
[
"MIT"
] | null | null | null |
osrsapi/pricetrend.py
|
XaKingas/osrsapi
|
14b93e0f6902724e57ebb1f50d817bd557e41c3d
|
[
"MIT"
] | null | null | null |
osrsapi/pricetrend.py
|
XaKingas/osrsapi
|
14b93e0f6902724e57ebb1f50d817bd557e41c3d
|
[
"MIT"
] | 1
|
2020-07-03T11:24:55.000Z
|
2020-07-03T11:24:55.000Z
|
import logging
logger = logging.getLogger(__name__)
class PriceTrend:
_money_shorthands = {"k": 1000, "m": 1000000, "b": 1000000000}
def __init__(self, price, trend, change):
self.price = self._extract_price(price)
self.trend = trend
self.change = self._extract_change(change)
def _extract_price(self, price):
if price is None:
return None
price = str(price).replace(" ", "").replace(",", "")
last = price[-1] # Get the last character
# check if this price is in shorthand notation. EX. '1.6m'
if last in PriceTrend._money_shorthands.keys():
# if it is, convert it to be a floating point num.
# EX. '1.6m' -> 1000000 * 1.6 -> 1600000.0
return PriceTrend._money_shorthands[last] * float(price[:-1])
return float(price)
def _extract_change(self, change):
if change is None:
return None
try:
return float(change[:-1].replace(",",""))
except ValueError as e:
logger.error(f'PriceTrend._extract_change error: {str(e)}')
return None
def __str__(self):
        attrs = vars(self)
        details = ", ".join(f"{n}={v}" for n, v in attrs.items() if v is not None)
return f"PriceTrend({details})"
def __repr__(self):
return self.__str__()
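# Illustrative usage (hypothetical values, not from the OSRS API):
#   PriceTrend('1.6m', 'positive', '+1.2%').price   -> 1600000.0
#   PriceTrend('3,500', 'negative', '-0.4%').change -> -0.4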
| 29.76087
| 80
| 0.578524
|
555f6cbdb382c7259dc8d2b6a431783b5aacdcc1
| 5,181
|
py
|
Python
|
okta/api_response.py
|
kclha/okta-sdk-python
|
8397d09d006f98a253f5adcb4f8f5b63501f8a52
|
[
"Apache-2.0"
] | 1
|
2021-07-07T15:04:14.000Z
|
2021-07-07T15:04:14.000Z
|
okta/api_response.py
|
kclha/okta-sdk-python
|
8397d09d006f98a253f5adcb4f8f5b63501f8a52
|
[
"Apache-2.0"
] | null | null | null |
okta/api_response.py
|
kclha/okta-sdk-python
|
8397d09d006f98a253f5adcb4f8f5b63501f8a52
|
[
"Apache-2.0"
] | null | null | null |
import json
import xmltodict
from okta.api_client import APIClient
class OktaAPIResponse():
"""
Class for defining the wrapper of an Okta API response.
Allows for paginated results to be retrieved easily.
"""
def __init__(self, request_executor, req, res_details, response_body="",
data_type=None):
self._url = res_details.url
self._headers = req["headers"]
self._self = None # Link to first page of results
self._body = None # First page of results
self._type = data_type
self._status = res_details.status
# Status on if there's a next page of results (based on generator)
self._next = None
# Request Executor for future calls
self._request_executor = request_executor
# Build response body based on content type
if "application/xml" in res_details.content_type:
self.build_xml_response(response_body)
elif "application/json" in res_details.content_type or \
"" == res_details.content_type:
self.build_json_response(response_body)
else:
# Save response as text
self._body = response_body
# Get links for next if more results exist
if self._body is not None:
self.extract_pagination(res_details.links)
self._current = self._body
def get_body(self):
"""
Returns the response body of the Okta API Response.
Returns:
dict: Dictionary format of response
"""
return self._body
def get_status(self):
"""
Returns HTTP Status Code of response
Returns:
int: HTTP Code
"""
return self._status
def get_type(self):
"""
Returns datatype of the API response
Returns:
class: type of object
"""
return self._type
def build_json_response(self, response_body):
"""
Converts JSON response text into Python dictionary.
Args:
response_body (str): Response text
"""
self._body = json.loads(response_body)
def build_xml_response(self, response_body):
"""
Converts XML response text into Python dictionary.
Args:
            response_body (str): XML response text
"""
self._body = xmltodict.parse(response_body, xml_attribs=False)
def extract_pagination(self, links):
"""
Parses the Link Headers in the Okta API Response to see if there
are more results.
Args:
links (dict): Dictionary object of values in the 'Link' header
"""
API = "/api/"
# Check for 'self' link
if "self" in links:
self._self = API + \
links["self"]["url"].human_repr().partition(API)[2]
# Check for 'next' link
if "next" in links:
self._next = API + \
links["next"]["url"].human_repr().partition(API)[2]
def has_next(self):
"""
Returns if there is another page after the last response retrieved
(Determined by generator).
Returns:
bool: Existence of next page of results
"""
return self._next is not None
async def next(self):
"""
Generator iterating function. Retrieves the next page of results
from the API.
Returns:
json: Next page of results
"""
next_page, error = await self.get_next().__anext__()
if error:
return (None, error)
if self._type is not None:
result = []
for item in next_page:
result.append(
self._type(
APIClient.form_response_body(item)
)
)
return (result, None)
return (next_page, error)
async def get_next(self):
"""
Async Generator function for results pagination.
Yields:
(json, Exception): Next page of results, Error raised
"""
while self._next:
# Retrieve next when next page exists
# Create and fire request
next_request, error = await self._request_executor.create_request(
"GET", self._next, {}, self._headers)
if error:
# Return None if error and set next to none
self._next = None
yield (None, error)
req, res_details, resp_body, error = await \
self._request_executor.fire_request(next_request)
if error:
# Return None if error and set next to none
self._next = None
yield (None, error)
if next_request:
# create new response and update generator values
next_response = OktaAPIResponse(
self._request_executor, req, res_details, resp_body)
self._next = next_response._next
# yield next page
yield (next_response.get_body(), None)
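# Illustrative consumption pattern (a sketch; `client.list_users()` is a
# hypothetical SDK call assumed to return (items, response, error)):
#
#   users, resp, err = await client.list_users()
#   while resp.has_next():
#       more, err = await resp.next()
#       if err is None:
#           users += more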
| 30.122093
| 78
| 0.562054
|
da15ab4a8b3081f1bde5f8149975a7020edfc145
| 2,435
|
py
|
Python
|
alipay/aop/api/domain/AccessReturnQrcode.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 213
|
2018-08-27T16:49:32.000Z
|
2021-12-29T04:34:12.000Z
|
alipay/aop/api/domain/AccessReturnQrcode.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 29
|
2018-09-29T06:43:00.000Z
|
2021-09-02T03:27:32.000Z
|
alipay/aop/api/domain/AccessReturnQrcode.py
|
snowxmas/alipay-sdk-python-all
|
96870ced60facd96c5bce18d19371720cbda3317
|
[
"Apache-2.0"
] | 59
|
2018-08-27T16:59:26.000Z
|
2022-03-25T10:08:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AccessReturnQrcode(object):
def __init__(self):
self._asset_purchase_id = None
self._express_no = None
self._out_biz_no = None
self._qrcode = None
@property
def asset_purchase_id(self):
return self._asset_purchase_id
@asset_purchase_id.setter
def asset_purchase_id(self, value):
self._asset_purchase_id = value
@property
def express_no(self):
return self._express_no
@express_no.setter
def express_no(self, value):
self._express_no = value
@property
def out_biz_no(self):
return self._out_biz_no
@out_biz_no.setter
def out_biz_no(self, value):
self._out_biz_no = value
@property
def qrcode(self):
return self._qrcode
@qrcode.setter
def qrcode(self, value):
self._qrcode = value
def to_alipay_dict(self):
params = dict()
if self.asset_purchase_id:
if hasattr(self.asset_purchase_id, 'to_alipay_dict'):
params['asset_purchase_id'] = self.asset_purchase_id.to_alipay_dict()
else:
params['asset_purchase_id'] = self.asset_purchase_id
if self.express_no:
if hasattr(self.express_no, 'to_alipay_dict'):
params['express_no'] = self.express_no.to_alipay_dict()
else:
params['express_no'] = self.express_no
if self.out_biz_no:
if hasattr(self.out_biz_no, 'to_alipay_dict'):
params['out_biz_no'] = self.out_biz_no.to_alipay_dict()
else:
params['out_biz_no'] = self.out_biz_no
if self.qrcode:
if hasattr(self.qrcode, 'to_alipay_dict'):
params['qrcode'] = self.qrcode.to_alipay_dict()
else:
params['qrcode'] = self.qrcode
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AccessReturnQrcode()
if 'asset_purchase_id' in d:
o.asset_purchase_id = d['asset_purchase_id']
if 'express_no' in d:
o.express_no = d['express_no']
if 'out_biz_no' in d:
o.out_biz_no = d['out_biz_no']
if 'qrcode' in d:
o.qrcode = d['qrcode']
return o
| 28.313953
| 85
| 0.595072
|
b4500d97deb396458a6739ff5ee0f18abe02db07
| 2,292
|
py
|
Python
|
hf/protocol/op_power.py
|
HashFast/hashfast-tools
|
9617691ac997f12085b688c3ecc6746e8510976d
|
[
"BSD-3-Clause"
] | 1
|
2020-12-15T02:49:36.000Z
|
2020-12-15T02:49:36.000Z
|
hf/protocol/op_power.py
|
HashFast/hashfast-tools
|
9617691ac997f12085b688c3ecc6746e8510976d
|
[
"BSD-3-Clause"
] | null | null | null |
hf/protocol/op_power.py
|
HashFast/hashfast-tools
|
9617691ac997f12085b688c3ecc6746e8510976d
|
[
"BSD-3-Clause"
] | 3
|
2015-09-02T00:31:06.000Z
|
2020-12-15T02:52:06.000Z
|
# Copyright (c) 2014, HashFast Technologies LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of HashFast Technologies LLC nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL HASHFAST TECHNOLOGIES LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .frame import HF_Frame, hf_frame_data, opcodes, opnames
from .frame import lebytes_to_int, int_to_lebytes
class HF_OP_POWER(HF_Frame):
def __init__(self, bytes=None, power=0x1):
if bytes is None:
# REQUEST
HF_Frame.__init__(self,{'operation_code': opcodes['OP_POWER'],
'chip_address': 0xFF,
'core_address': 0x00,
'hdata': power }) # 0x1 diagnostic_power_on, 0x2 diagnostic_power_off
self.construct_framebytes()
else:
# READ
HF_Frame.__init__(self, bytes)
def __str__(self):
string = "HF_OP_POWER\n"
string += HF_Frame.__str__(self)
return string
| 50.933333
| 110
| 0.720332
|
466bb43d18e073f15c8dfb5e12833740cddcfe2f
| 2,206
|
py
|
Python
|
stinkies/game1/migrations/0001_initial.py
|
jordan-dimov/stinky-games
|
175c3fa21276456cb1b58703b835f8393f5b2b4a
|
[
"CC0-1.0"
] | null | null | null |
stinkies/game1/migrations/0001_initial.py
|
jordan-dimov/stinky-games
|
175c3fa21276456cb1b58703b835f8393f5b2b4a
|
[
"CC0-1.0"
] | null | null | null |
stinkies/game1/migrations/0001_initial.py
|
jordan-dimov/stinky-games
|
175c3fa21276456cb1b58703b835f8393f5b2b4a
|
[
"CC0-1.0"
] | null | null | null |
# Generated by Django 3.1.3 on 2020-11-03 16:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Stinky',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('price', models.PositiveIntegerField(default=2)),
],
options={
'verbose_name': 'stinky',
'verbose_name_plural': 'stinkies',
},
),
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('coins', models.PositiveIntegerField(default=12)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='InventoryItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('bought_for', models.PositiveIntegerField(blank=True, null=True)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='game1.stinky')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='inventory', to=settings.AUTH_USER_MODEL)),
],
options={
'get_latest_by': 'modified',
'abstract': False,
},
),
]
| 40.851852
| 144
| 0.601088
|
33087f1a3cedd77a3ec42aec43b7b227382620b7
| 5,225
|
py
|
Python
|
src/main/python/apache/aurora/client/cli/cron.py
|
brinick/aurora
|
963700727fe31c75ccf3506bb150e085df80cf3c
|
[
"Apache-2.0"
] | 11
|
2016-05-25T15:44:34.000Z
|
2021-07-24T19:37:30.000Z
|
src/main/python/apache/aurora/client/cli/cron.py
|
brinick/aurora
|
963700727fe31c75ccf3506bb150e085df80cf3c
|
[
"Apache-2.0"
] | 1
|
2022-01-21T23:08:28.000Z
|
2022-01-21T23:08:28.000Z
|
src/main/python/apache/aurora/client/cli/cron.py
|
brinick/aurora
|
963700727fe31c75ccf3506bb150e085df80cf3c
|
[
"Apache-2.0"
] | 6
|
2016-05-30T06:41:24.000Z
|
2022-02-27T10:57:58.000Z
|
#
# Copyright 2014 Apache Software Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import textwrap
import webbrowser
from apache.aurora.client.base import get_job_page
from apache.aurora.client.cli import (
EXIT_COMMAND_FAILURE,
EXIT_INVALID_PARAMETER,
EXIT_OK,
Noun,
Verb
)
from apache.aurora.client.cli.context import AuroraCommandContext
from apache.aurora.client.cli.options import (
BIND_OPTION,
BROWSER_OPTION,
CONFIG_ARGUMENT,
CONFIG_OPTION,
JOBSPEC_ARGUMENT,
JSON_READ_OPTION
)
class Schedule(Verb):
@property
def name(self):
return "schedule"
@property
def help(self):
return textwrap.dedent("""\
Create a cron schedule for a job or replace the existing cron template with a new one.
Only future runs will be affected, any existing active tasks are left intact.""")
def get_options(self):
return [BIND_OPTION, JSON_READ_OPTION, JOBSPEC_ARGUMENT, CONFIG_ARGUMENT]
def execute(self, context):
api = context.get_api(context.options.jobspec.cluster)
config = context.get_job_config(context.options.jobspec, context.options.config_file)
if not config.raw().has_cron_schedule():
raise context.CommandError(
EXIT_COMMAND_FAILURE,
"Non-cron jobs may only be created with \"aurora job create\" command")
resp = api.schedule_cron(config)
context.log_response_and_raise(resp,
err_msg=("Error scheduling cron job %s:" % context.options.jobspec))
context.print_out("Cron job scheduled, status can be viewed at %s"
% get_job_page(api, context.options.jobspec))
return EXIT_OK
class Deschedule(Verb):
@property
def name(self):
return "deschedule"
@property
def help(self):
return textwrap.dedent("""\
Remove the cron schedule for a job. Any active tasks are not affected.
Use \"aurora job kill\" command to terminate active tasks.""")
def get_options(self):
return [JOBSPEC_ARGUMENT]
def execute(self, context):
api = context.get_api(context.options.jobspec.cluster)
resp = api.deschedule_cron(context.options.jobspec)
context.log_response_and_raise(resp,
err_msg=("Error descheduling cron job %s:" % context.options.jobspec))
context.print_out("Cron descheduling succeeded.")
return EXIT_OK
class Start(Verb):
@property
def name(self):
return "start"
@property
def help(self):
return """Start a cron job immediately, outside of its normal cron schedule."""
def get_options(self):
return [BIND_OPTION, BROWSER_OPTION, CONFIG_OPTION, JSON_READ_OPTION, JOBSPEC_ARGUMENT]
def execute(self, context):
api = context.get_api(context.options.jobspec.cluster)
config = (context.get_job_config(context.options.jobspec, context.options.config)
if context.options.config else None)
resp = api.start_cronjob(context.options.jobspec, config=config)
context.log_response_and_raise(resp,
err_msg=("Error starting cron job %s:" % context.options.jobspec))
if context.options.open_browser:
webbrowser.open_new_tab(get_job_page(api, context.options.jobspec))
return EXIT_OK
class Show(Verb):
@property
def name(self):
return "show"
@property
def help(self):
return """Get the scheduling status of a cron job"""
def get_options(self):
return [JOBSPEC_ARGUMENT]
def execute(self, context):
# TODO(mchucarroll): do we want to support wildcards here?
jobkey = context.options.jobspec
api = context.get_api(jobkey.cluster)
resp = api.get_jobs(jobkey.role)
context.log_response_and_raise(resp, err_code=EXIT_INVALID_PARAMETER,
                                   err_msg=("Error getting cron status for %s from server" % jobkey))
for job in resp.result.getJobsResult.configs:
if job.key.environment == jobkey.env and job.key.name == jobkey.name:
if job.cronSchedule is None or job.cronSchedule == "":
context.print_err("No cron entry found for job %s" % jobkey)
return EXIT_INVALID_PARAMETER
else:
context.print_out("%s\t %s" % (jobkey, job.cronSchedule))
return EXIT_OK
context.print_err("No cron entry found for job %s" % jobkey)
return EXIT_INVALID_PARAMETER
class CronNoun(Noun):
@property
def name(self):
return "cron"
@property
def help(self):
return "Work with entries in the aurora cron scheduler"
@classmethod
def create_context(cls):
return AuroraCommandContext()
def __init__(self):
super(CronNoun, self).__init__()
self.register_verb(Schedule())
self.register_verb(Deschedule())
self.register_verb(Start())
self.register_verb(Show())
| 30.735294
| 93
| 0.716746
|
3569cc911d5295ff855a4209450ea1ceed459178
| 477
|
py
|
Python
|
SS/offer6_reversePrint.py
|
MTandHJ/leetcode
|
f3832ed255d259cb881666ec8bd3de090d34e883
|
[
"MIT"
] | null | null | null |
SS/offer6_reversePrint.py
|
MTandHJ/leetcode
|
f3832ed255d259cb881666ec8bd3de090d34e883
|
[
"MIT"
] | null | null | null |
SS/offer6_reversePrint.py
|
MTandHJ/leetcode
|
f3832ed255d259cb881666ec8bd3de090d34e883
|
[
"MIT"
] | null | null | null |
from typing import List
class ListNode:
def __init__(self, val, next=None) -> None:
self.val = val
self.next = next
class Solution:
def reversePrint(self, head: ListNode) -> List[int]:
stack = list()
temp = head
        while temp:  # original had `while not temp`, which never entered the loop
            stack.append(temp)
            temp = temp.next
        pri = []
        for _ in range(len(stack)):
            pri.append(stack.pop().val)  # append; indexing pri[i] on an empty list raises IndexError
        return pri
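# Minimal check (sketch): for the list 1 -> 2 -> 3 the method returns [3, 2, 1].
#   head = ListNode(1, ListNode(2, ListNode(3)))
#   assert Solution().reversePrint(head) == [3, 2, 1]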
| 22.714286
| 56
| 0.515723
|
22f6daaf46499ff64b6bd96f8c7258c89d79ef6e
| 890
|
py
|
Python
|
setup.py
|
JerryChenn07/CrawlUtils
|
727e4668ae35975135a0f29bbaa64ab738b6bd29
|
[
"MIT"
] | 1
|
2021-03-11T03:00:10.000Z
|
2021-03-11T03:00:10.000Z
|
setup.py
|
JerryChenn07/CrawlUtils
|
727e4668ae35975135a0f29bbaa64ab738b6bd29
|
[
"MIT"
] | null | null | null |
setup.py
|
JerryChenn07/CrawlUtils
|
727e4668ae35975135a0f29bbaa64ab738b6bd29
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
# References:
# https://mp.weixin.qq.com/s/SyMRQ6KUDTGLB9Px9oBPIg
# https://github.com/Gerapy/GerapyAutoExtractor/blob/master/setup.py
# https://github.com/kingname/GeneralNewsExtractor/blob/master/setup.py
def read_file(filename):
with open(filename) as fp:
return fp.read().strip()
def read_requirements(filename):
return [line.strip() for line in read_file(filename).splitlines()
if not line.startswith('#')]
REQUIRED = read_requirements('requirements.txt')
setup(
name='crawl_utils',
version='0.4.1',
description='Commonly Used Crawl Utils',
author='cjr',
author_email='cjr0707@qq.com',
python_requires='>=3.8.0',
url='https://github.com/JerryChenn07/CrawlUtils',
packages=find_packages(exclude=[]),
install_requires=REQUIRED,
include_package_data=True,
license='MIT',
)
| 26.176471
| 71
| 0.707865
|
01f3245128a2e3f7c7c7b0e8f77a336a9f070fe4
| 1,074
|
py
|
Python
|
backend/users/models.py
|
crowdbotics-apps/soccermatcher-34472
|
110df5ebc69cfb7c29bc7f219172b9fd0c688c63
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/users/models.py
|
crowdbotics-apps/soccermatcher-34472
|
110df5ebc69cfb7c29bc7f219172b9fd0c688c63
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/users/models.py
|
crowdbotics-apps/soccermatcher-34472
|
110df5ebc69cfb7c29bc7f219172b9fd0c688c63
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
# WARNING!
"""
Some officially supported features of Crowdbotics Dashboard depend on the initial
state of this User model (Such as the creation of superusers using the CLI
or password reset in the dashboard). Changing, extending, or modifying this model
may lead to unexpected bugs and or behaviors in the automated flows provided
by Crowdbotics. Change it at your own risk.
This model represents the User instance of the system, login system and
everything that relates with an `User` is represented by this model.
"""
name = models.CharField(
null=True,
blank=True,
max_length=255,
)
age = models.IntegerField(
null=True,
blank=True,
)
def get_absolute_url(self):
return reverse("users:detail", kwargs={"username": self.username})
| 32.545455
| 85
| 0.716015
|
04df8ddbc918271d3d86a31900397bd7875f4d1d
| 14,547
|
py
|
Python
|
jax/experimental/optix.py
|
nirum/jax
|
3f8c73593d41d4d6711f71851890e2e1ada6063f
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-03-27T03:01:52.000Z
|
2021-04-24T20:24:12.000Z
|
jax/experimental/optix.py
|
nirum/jax
|
3f8c73593d41d4d6711f71851890e2e1ada6063f
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2019-08-15T18:22:52.000Z
|
2019-08-20T18:19:42.000Z
|
jax/experimental/optix.py
|
nirum/jax
|
3f8c73593d41d4d6711f71851890e2e1ada6063f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-11-22T17:50:23.000Z
|
2020-11-22T17:50:23.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A composable gradient processing and optimization library for JAX.
The ``optix`` module implements a number of composable gradient transformations,
typically used in the context of optimizing neural nets.
Each transformation defines:
* an ``init_fn``, to initialize a (possibly empty) set of statistics, or ``state``.
* an ``update_fn`` to transform an input gradient and update the state.
An (optional) ``chain`` utility can be used to build custom optimizers by
chaining arbitrary sequences of transformations. For any sequence of
transformations ``chain`` returns a single ``init_fn`` and ``update_fn``.
An (optional) ``apply_updates`` function can be used to eventually apply the
transformed gradients to the set of parameters of interest.
Separating gradient transformations from the parameter update makes it possible to
chain a sequence of transformations of the same gradients, as well as combine
multiple updates to the same parameters (e.g. in multi-task settings where the
different tasks may benefit from different sets of gradient transformations).
Many popular optimizers can be implemented using ``optix`` as one-liners, and,
for convenience, we provide aliases for some of the most popular ones.
"""
from typing import Any, Callable, NamedTuple, Sequence, Tuple, Union
from jax import numpy as jnp
from jax import random as jrandom
from jax.tree_util import tree_leaves
from jax.tree_util import tree_multimap
from jax.tree_util import tree_structure
from jax.tree_util import tree_unflatten
###
# Composable gradient transformations.
# TODO(jaslanides): Make these more specific.
OptState = NamedTuple # Optimizer state is a (possibly empty) namedtuple.
Params = Any # Parameters are nests of `jnp.ndarrays`.
Updates = Params # Gradient updates are of the same type as parameters.
InitFn = Callable[[Params], Union[OptState, Sequence[OptState]]]
UpdateFn = Callable[[Updates, OptState], Tuple[Updates, OptState]]
class InitUpdate(NamedTuple):
"""Optix optimizers consists of a pair of functions: (initialiser, update)."""
init: InitFn
update: UpdateFn
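# Illustrative composition (a sketch following the module docstring above;
# `chain` and `apply_updates` are the utilities that docstring describes):
#
#   tx = chain(clip_by_global_norm(1.0), scale_by_adam(), scale(-1e-3))
#   opt_state = tx.init(params)
#   updates, opt_state = tx.update(grads, opt_state)
#   params = apply_updates(params, updates)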
class ClipState(OptState):
"""The `clip` transformation is stateless."""
def clip(max_delta) -> InitUpdate:
"""Clip updates element-wise, to be between -max_delta and +max_delta.
Args:
max_delta: the maximum absolute value for each element in the update.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(_):
return ClipState()
def update_fn(updates, state):
updates = tree_multimap(
lambda g: jnp.clip(g, -max_delta, max_delta), updates)
return updates, state
return InitUpdate(init_fn, update_fn)
def global_norm(updates: Updates) -> Updates:
return jnp.sqrt(jnp.sum([jnp.sum(x**2) for x in tree_leaves(updates)]))
class ClipByGlobalNormState(OptState):
"""The `clip_by_global_norm` transformation is stateless."""
def clip_by_global_norm(max_norm) -> InitUpdate:
"""Clip updates using their global norm.
References:
[Pascanu et al, 2012](https://arxiv.org/abs/1211.5063)
Args:
max_norm: the maximum global norm for an update.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(_):
return ClipByGlobalNormState()
def update_fn(updates, state):
g_norm = global_norm(updates)
trigger = g_norm < max_norm
updates = tree_multimap(
lambda t: jnp.where(trigger, t, (t / g_norm) * max_norm), updates)
return updates, state
return InitUpdate(init_fn, update_fn)
class TraceState(OptState):
"""Holds an aggregation of past updates."""
trace: Params
def trace(decay: float, nesterov: bool) -> InitUpdate:
"""Compute a trace of past updates.
Args:
decay: the decay rate for the tracing of past updates.
nesterov: whether to use Nesterov momentum.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(params):
return TraceState(trace=tree_multimap(jnp.zeros_like, params))
def update_fn(updates, state):
f = lambda g, t: g + decay * t
update_trace = tree_multimap(f, updates, state.trace)
updates = (
tree_multimap(f, updates, update_trace) if nesterov else update_trace)
return updates, TraceState(trace=update_trace)
return InitUpdate(init_fn, update_fn)
class ScaleByRmsState(OptState):
"""State for exponential root mean-squared (RMS)-normalized updates."""
nu: Updates
def _update_moment(updates, moments, decay, order):
return tree_multimap(
lambda g, t: (1 - decay) * (g ** order) + decay * t, updates, moments)
def scale_by_rms(decay: float = 0.9, eps: float = 1e-8):
"""Rescale updates by the root of the exp. moving avg of the square.
References:
[Hinton](www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
Args:
decay: decay rate for the exponentially weighted average of squared grads.
eps: term added to the denominator to improve numerical stability.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(params):
nu = tree_multimap(jnp.zeros_like, params) # second moment
return ScaleByRmsState(nu=nu)
def update_fn(updates, state):
nu = _update_moment(updates, state.nu, decay, 2)
updates = tree_multimap(lambda g, n: g / (jnp.sqrt(n + eps)), updates, nu)
return updates, ScaleByRmsState(nu=nu)
return InitUpdate(init_fn, update_fn)
class ScaleByRStdDevState(OptState):
"""State for centered exponential moving average of squares of updates."""
mu: Updates
nu: Updates
def scale_by_stddev(decay: float = 0.9, eps: float = 1e-8) -> InitUpdate:
"""Rescale updates by the root of the centered exp. moving average of squares.
References:
[Hinton](www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
Args:
decay: decay rate for the exponentially weighted average of squared grads.
eps: term added to the denominator to improve numerical stability.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(params):
mu = tree_multimap(jnp.zeros_like, params) # First moment
nu = tree_multimap(jnp.zeros_like, params) # Second moment
return ScaleByRStdDevState(mu=mu, nu=nu)
def update_fn(updates, state):
mu = _update_moment(updates, state.mu, decay, 1)
nu = _update_moment(updates, state.nu, decay, 2)
updates = tree_multimap(
lambda g, m, n: g / jnp.sqrt(n - m**2 + eps), updates, mu, nu)
return updates, ScaleByRStdDevState(mu=mu, nu=nu)
return InitUpdate(init_fn, update_fn)
class ScaleByAdamState(OptState):
"""State for the Adam algorithm."""
count: jnp.ndarray # shape=(), dtype=jnp.int32.
mu: Updates
nu: Updates
def scale_by_adam(b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8) -> InitUpdate:
"""Rescale updates according to the Adam algorithm.
References:
[Kingma et al, 2014](https://arxiv.org/abs/1412.6980)
Args:
b1: decay rate for the exponentially weighted average of grads.
b2: decay rate for the exponentially weighted average of squared grads.
eps: term added to the denominator to improve numerical stability.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(params):
mu = tree_multimap(jnp.zeros_like, params) # First moment
nu = tree_multimap(jnp.zeros_like, params) # Second moment
return ScaleByAdamState(count=jnp.zeros([], jnp.int32), mu=mu, nu=nu)
def update_fn(updates, state):
mu = _update_moment(updates, state.mu, b1, 1)
nu = _update_moment(updates, state.nu, b2, 2)
mu_hat = tree_multimap(lambda t: t / (1 - b1 ** (state.count + 1)), mu)
nu_hat = tree_multimap(lambda t: t / (1 - b2 ** (state.count + 1)), nu)
updates = tree_multimap(
lambda m, v: m / (jnp.sqrt(v) + eps), mu_hat, nu_hat)
return updates, ScaleByAdamState(count=state.count + 1, mu=mu, nu=nu)
return InitUpdate(init_fn, update_fn)
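# Illustrative sketch (not part of the library): on the very first step the
# bias-corrected Adam update reduces to g / (|g| + eps), i.e. roughly sign(g).
def _example_scale_by_adam():
  grads = {'w': jnp.array(0.5)}
  init_fn, update_fn = scale_by_adam()
  updates, _ = update_fn(grads, init_fn(grads))
  return updates                                  # ~{'w': 1.0}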
class ScaleState(OptState):
"""The scale transformation is stateless."""
def scale(step_size: float) -> InitUpdate:
"""Scale updates by some fixed scalar `step_size`.
Args:
step_size: a scalar corresponding to a fixed scaling factor for updates.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(_):
return ScaleState()
def update_fn(updates, state):
updates = tree_multimap(lambda g: step_size * g, updates)
return updates, state
return InitUpdate(init_fn, update_fn)
class ScaleByScheduleState(OptState):
"""Maintains count for scale scheduling."""
count: jnp.ndarray # shape=(), dtype=jnp.int32
def scale_by_schedule(step_size_fn: Callable[[jnp.ndarray], jnp.ndarray]):
"""Scale updates using a custom schedule for the `step_size`.
Args:
step_size_fn: a function that takes an update count as input and proposes
the step_size to multiply the updates by.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(_):
return ScaleByScheduleState(count=jnp.zeros([], jnp.int32))
def update_fn(updates, state):
updates = tree_multimap(lambda g: step_size_fn(state.count) * g, updates)
return updates, ScaleByScheduleState(count=state.count + 1)
return InitUpdate(init_fn, update_fn)
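# Illustrative sketch (not part of the library): an exponential-decay schedule
# for use with scale_by_schedule. The step size is negated so the result is a
# descent step, matching the aliases below; the constants are arbitrary.
def _example_decay_schedule(count):
  return -0.1 * (0.99 ** count)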
class AddNoiseState(OptState):
"""State for adding gradient noise. Contains a count for annealing."""
count: jnp.ndarray
rng_key: jnp.ndarray
def add_noise(eta: float, gamma: float, seed: int) -> InitUpdate:
"""Add gradient noise.
References:
    [Neelakantan et al, 2015](https://arxiv.org/abs/1511.06807)
Args:
eta: base variance of the gaussian noise added to the gradient.
gamma: decay exponent for annealing of the variance.
seed: seed for random number generation.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(_):
return AddNoiseState(count=jnp.zeros([], jnp.int32),
rng_key=jrandom.PRNGKey(seed))
def update_fn(updates, state): # pylint: disable=missing-docstring
num_vars = len(tree_leaves(updates))
treedef = tree_structure(updates)
variance = eta / (1 + state.count) ** gamma
all_keys = jrandom.split(state.rng_key, num=num_vars + 1)
noise = tree_multimap(
lambda g, k: jrandom.normal(k, shape=g.shape),
updates, tree_unflatten(treedef, all_keys[1:]))
updates = tree_multimap(
lambda g, n: g + variance * n, updates, noise)
return updates, AddNoiseState(count=state.count + 1, rng_key=all_keys[0])
return InitUpdate(init_fn, update_fn)
class ApplyEvery(OptState):
"""Contains a counter and a gradient accumulator."""
count: jnp.ndarray
grad_acc: Updates
def apply_every(k: int = 1) -> InitUpdate:
"""accumulate gradients and apply them every k steps.
Args:
k: apply the update every k steps otherwise accumulate the gradients.
Returns:
An (init_fn, update_fn) tuple.
"""
def init_fn(params):
grad_acc = tree_multimap(jnp.zeros_like, params)
return ApplyEvery(count=jnp.zeros([], jnp.int32), grad_acc=grad_acc)
def update_fn(updates, state):
c = state.count % k
acc = c != 0
grad_acc = tree_multimap(
lambda g, ga: acc * ga + g, updates, state.grad_acc)
emit = c == (k - 1)
updates = tree_multimap(lambda ga: emit * ga, grad_acc)
return updates, ApplyEvery(count=state.count + 1, grad_acc=grad_acc)
return InitUpdate(init_fn, update_fn)
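# Illustrative sketch (not part of the library): with k=2 the first call emits
# zero updates while accumulating, and the second emits the accumulated sum.
def _example_apply_every():
  init_fn, update_fn = apply_every(k=2)
  state = init_fn({'w': jnp.array(0.0)})
  u1, state = update_fn({'w': jnp.array(1.0)}, state)   # u1['w'] == 0.0
  u2, state = update_fn({'w': jnp.array(2.0)}, state)   # u2['w'] == 3.0
  return u1, u2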
###
# Utilities for building and using custom optimizers.
def chain(*args: InitUpdate) -> InitUpdate:
"""Applies a list of chainable update transformations.
Given a sequence of chainable transforms, `chain` returns an `init_fn`
that constructs a `state` by concatenating the states of the individual
transforms, and returns an `update_fn` which chains the update transformations
feeding the appropriate state to each.
Args:
*args: a sequence of chainable (init_fn, update_fn) tuples.
Returns:
A single (init_fn, update_fn) tuple.
"""
init_fns, update_fns = zip(*args)
def init_fn(params):
return [fn(params) for fn in init_fns]
def update_fn(updates, state):
new_state = []
for s, fn in zip(state, update_fns):
updates, new_s = fn(updates, s)
new_state.append(new_s)
return updates, new_state
return InitUpdate(init_fn, update_fn)
def apply_updates(params: Params, updates: Updates) -> Params:
"""Applies an update to the corresponding parameters.
This is an (optional) utility functions that applies an update, and returns
the updated parameters to the caller. The update itself is typically the
result of applying any number of `chainable` transformations.
Args:
params: a tree of parameters.
updates: a tree of updates, the tree structure and the shape of the leaf
nodes must match that of `params`.
Returns:
Updated parameters, with same structure and shape as `params`.
"""
return tree_multimap(lambda p, u: p + u, params, updates)
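# Illustrative sketch (not part of the library): compose a custom optimizer
# with `chain` and take one step with `apply_updates`. The gradients are
# hand-made here; in practice they would come from jax.grad.
def _example_custom_optimizer():
  params = {'w': jnp.array([1.0, 2.0])}
  grads = {'w': jnp.array([0.1, -0.2])}
  optimizer = chain(
      clip_by_global_norm(max_norm=1.0),
      scale_by_adam(),
      scale(-0.01),   # negative step size => gradient descent
  )
  state = optimizer.init(params)
  updates, state = optimizer.update(grads, state)
  return apply_updates(params, updates)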
###
# Aliases for popular optimizers.
def sgd(learning_rate: float,
momentum: float = 0.,
nesterov: bool = False) -> InitUpdate:
return chain(
trace(decay=momentum, nesterov=nesterov),
scale(-learning_rate),
)
def noisy_sgd(learning_rate: float,
eta: float = 0.01,
gamma: float = 0.55,
seed: int = 0) -> InitUpdate:
return chain(
trace(decay=0., nesterov=False),
scale(-learning_rate),
add_noise(eta, gamma, seed),
)
def adam(learning_rate: float,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8) -> InitUpdate:
return chain(
scale_by_adam(b1=b1, b2=b2, eps=eps),
scale(-learning_rate),
)
def rmsprop(learning_rate: float,
decay: float = 0.9,
eps: float = 1e-8,
centered: bool = False) -> InitUpdate:
if centered:
return chain(
scale_by_stddev(decay=decay, eps=eps),
scale(-learning_rate),
)
return chain(
scale_by_rms(decay=decay, eps=eps),
scale(-learning_rate),
)
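# Illustrative sketch (not part of the library): a minimal training loop using
# the `adam` alias. `loss_fn` and `data_stream` are hypothetical placeholders.
def _example_training_loop(params, data_stream, loss_fn, num_steps=100):
  import jax
  optimizer = adam(learning_rate=1e-3)
  state = optimizer.init(params)
  for _ in range(num_steps):
    grads = jax.grad(loss_fn)(params, next(data_stream))
    updates, state = optimizer.update(grads, state)
    params = apply_updates(params, updates)
  return params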
| 29.447368
| 83
| 0.701313
|
e7d74e4ea995418806c56d05be4fe9151155c358
| 844
|
py
|
Python
|
lib/fast_rcnn/nms_wrapper.py
|
stanley-king/cuda10-py3-faster-rcnn
|
013f99c428874bfd3ddaeed264031143d10a8123
|
[
"BSD-2-Clause"
] | null | null | null |
lib/fast_rcnn/nms_wrapper.py
|
stanley-king/cuda10-py3-faster-rcnn
|
013f99c428874bfd3ddaeed264031143d10a8123
|
[
"BSD-2-Clause"
] | 1
|
2020-12-28T03:20:43.000Z
|
2020-12-28T03:20:43.000Z
|
lib/fast_rcnn/nms_wrapper.py
|
stanley-king/cuda10-py3-faster-rcnn
|
013f99c428874bfd3ddaeed264031143d10a8123
|
[
"BSD-2-Clause"
] | null | null | null |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
from fast_rcnn.config import cfg
from nms.gpu_nms import gpu_nms
from nms.cpu_nms import cpu_nms
from nms.py_cpu_nms import py_cpu_nms
def nms(dets, thresh, force_cpu=False):
"""Dispatch to either CPU or GPU NMS implementations."""
if dets.shape[0] == 0:
return []
# if cfg.USE_GPU_NMS and not force_cpu:
# return gpu_nms(dets, thresh, device_id=cfg.GPU_ID)
# else:
return cpu_nms(dets, thresh)
# ret1 = gpu_nms(dets, thresh, device_id=1)
# ret2 = cpu_nms(dets, thresh)
# ret3 = py_cpu_nms(dets, thresh)
# return py_cpu_nms(dets, thresh)
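# Illustrative usage sketch (not part of the original module): `dets` rows are
# [x1, y1, x2, y2, score] per the Fast R-CNN convention; values are made up.
def _example_nms():
    import numpy as np
    dets = np.array([[0., 0., 10., 10., 0.9],
                     [1., 1., 10., 10., 0.8],
                     [50., 50., 60., 60., 0.7]], dtype=np.float32)
    keep = nms(dets, thresh=0.5)   # indices of the boxes to keep
    return dets[keep]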
| 29.103448
| 60
| 0.598341
|
53bfa2b2d825acf30feccd0422eab904cebc7f76
| 27
|
py
|
Python
|
topfarm/constraint_components/__init__.py
|
DTUWindEnergy/TopFarm2
|
cba70b20431f7a828370447117fe2e7533edf7c2
|
[
"MIT"
] | 4
|
2019-02-18T08:46:00.000Z
|
2021-01-28T06:35:52.000Z
|
topfarm/constraint_components/__init__.py
|
DTUWindEnergy/TopFarm2
|
cba70b20431f7a828370447117fe2e7533edf7c2
|
[
"MIT"
] | 1
|
2019-11-26T12:12:12.000Z
|
2019-11-26T12:12:12.000Z
|
topfarm/constraint_components/__init__.py
|
DTUWindEnergy/TopFarm2
|
cba70b20431f7a828370447117fe2e7533edf7c2
|
[
"MIT"
] | 8
|
2019-01-14T09:33:26.000Z
|
2021-06-30T11:56:03.000Z
|
from ._constraint import *
| 13.5
| 26
| 0.777778
|
85099abe643db0de18e33e08c53cefcb6295e3fd
| 5,215
|
py
|
Python
|
contrib/linearize/linearize-hashes.py
|
bbergaoui/bastoji-coin
|
1752e38216f32c6b1559bc246ec885097dae88c4
|
[
"MIT"
] | null | null | null |
contrib/linearize/linearize-hashes.py
|
bbergaoui/bastoji-coin
|
1752e38216f32c6b1559bc246ec885097dae88c4
|
[
"MIT"
] | null | null | null |
contrib/linearize/linearize-hashes.py
|
bbergaoui/bastoji-coin
|
1752e38216f32c6b1559bc246ec885097dae88c4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
try: # Python 3
import http.client as httplib
except ImportError: # Python 2
import httplib
import json
import re
import base64
import sys
import os
import os.path
settings = {}
def hex_switchEndian(s):
""" Switches the endianness of a hex string (in pairs of hex chars) """
pairList = [s[i:i+2].encode() for i in range(0, len(s), 2)]
return b''.join(pairList[::-1]).decode()
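# Worked example (illustrative): hex_switchEndian('1a2b3c') returns '3c2b1a'.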
class BastojiRPC:
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
authpair = authpair.encode('utf-8')
self.authhdr = b"Basic " + base64.b64encode(authpair)
self.conn = httplib.HTTPConnection(host, port=port, timeout=30)
def execute(self, obj):
try:
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
except ConnectionRefusedError:
print('RPC connection refused. Check RPC settings and the server status.',
file=sys.stderr)
return None
resp = self.conn.getresponse()
if resp is None:
print("JSON-RPC: no response", file=sys.stderr)
return None
body = resp.read().decode('utf-8')
resp_obj = json.loads(body)
return resp_obj
@staticmethod
def build_request(idx, method, params):
obj = { 'version' : '1.1',
'method' : method,
'id' : idx }
if params is None:
obj['params'] = []
else:
obj['params'] = params
return obj
@staticmethod
def response_is_error(resp_obj):
return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
rpc = BastojiRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
if reply is None:
print('Cannot continue. Program will halt.')
return None
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
sys.exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
if settings['rev_hash_bytes'] == 'true':
resp_obj['result'] = hex_switchEndian(resp_obj['result'])
print(resp_obj['result'])
height += num_blocks
def get_rpc_cookie():
# Open the cookie file
with open(os.path.join(os.path.expanduser(settings['datadir']), '.cookie'), 'r', encoding="ascii") as f:
combined = f.readline()
combined_split = combined.split(":")
settings['rpcuser'] = combined_split[0]
settings['rpcpassword'] = combined_split[1]
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Usage: linearize-hashes.py CONFIG-FILE")
sys.exit(1)
f = open(sys.argv[1], encoding="utf8")
for line in f:
# skip comment lines
        m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
        m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 313000
if 'rev_hash_bytes' not in settings:
settings['rev_hash_bytes'] = 'false'
use_userpass = True
use_datadir = False
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
use_userpass = False
if 'datadir' in settings and not use_userpass:
use_datadir = True
if not use_userpass and not use_datadir:
print("Missing datadir or username and/or password in cfg file", file=sys.stderr)
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
# Force hash byte format setting to be lowercase to make comparisons easier.
settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()
# Get the rpc user and pass from the cookie if the datadir is set
if use_datadir:
get_rpc_cookie()
get_block_hashes(settings)
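# Illustrative example of a CONFIG-FILE accepted by this script (key=value
# lines; the values below are placeholders, not real credentials):
#
#   host=127.0.0.1
#   port=8332
#   rpcuser=alice
#   rpcpassword=secret
#   min_height=0
#   max_height=313000
#   rev_hash_bytes=false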
| 33.216561
| 108
| 0.609588
|
e154a5becd7554e2c0fc8c545038ad2ebe2a2931
| 3,030
|
bzl
|
Python
|
antlir/bzl/image/package/btrfs.bzl
|
facebookincubator/fs_image
|
3515a24bb0e93176a5584bdc8839464fa28390d7
|
[
"MIT"
] | 9
|
2019-12-02T20:17:35.000Z
|
2020-06-13T16:34:25.000Z
|
antlir/bzl/image/package/btrfs.bzl
|
facebookincubator/fs_image
|
3515a24bb0e93176a5584bdc8839464fa28390d7
|
[
"MIT"
] | 19
|
2019-11-22T23:30:04.000Z
|
2020-07-16T18:05:48.000Z
|
antlir/bzl/image/package/btrfs.bzl
|
facebookincubator/fs_image
|
3515a24bb0e93176a5584bdc8839464fa28390d7
|
[
"MIT"
] | 4
|
2019-12-04T19:03:28.000Z
|
2020-06-13T16:34:29.000Z
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
load("@bazel_skylib//lib:shell.bzl", "shell")
load("//antlir/bzl:image_utils.bzl", "image_utils")
load("//antlir/bzl:loopback_opts.bzl", "normalize_loopback_opts")
load("//antlir/bzl:oss_shim.bzl", "buck_genrule")
load("//antlir/bzl:shape.bzl", "shape")
load("//antlir/bzl:target_helpers.bzl", "antlir_dep")
load(":btrfs.shape.bzl", "btrfs_opts_t", "btrfs_subvol_t")
def _new_btrfs_subvol(**kwargs):
return shape.new(
btrfs_subvol_t,
**kwargs
)
_btrfs_subvol_api = struct(
new = _new_btrfs_subvol,
t = btrfs_subvol_t,
)
def _new_btrfs_opts(subvols, default_subvol = None, loopback_opts = None, **kwargs):
if default_subvol and not default_subvol.startswith("/"):
fail("Default subvol must be an absolute path: " + default_subvol)
return shape.new(
btrfs_opts_t,
subvols = subvols,
default_subvol = default_subvol,
loopback_opts = normalize_loopback_opts(loopback_opts),
**kwargs
)
_btrfs_opts_api = struct(
new = _new_btrfs_opts,
subvol = _btrfs_subvol_api,
t = btrfs_opts_t,
)
def _new_btrfs(
name,
# Opts are required
opts,
# Buck `labels` to add to the resulting target; aka `tags` in fbcode.
labels = None,
visibility = None,
antlir_rule = "user-facing"):
visibility = visibility or []
if not opts:
fail("`opts` is required for btrfs.new")
# For queries
_rule_type = "image-package-btrfs"
# All the layers being built
layers = []
for subvol_name, subvol in opts.subvols.items():
if not subvol_name.startswith("/"):
fail("Requested subvol names must be absolute paths: " + subvol_name)
layers.append(subvol.layer)
opts_name = name + "__opts"
buck_genrule(
name = opts_name,
out = "opts.json",
cmd = "echo {} > $OUT".format(shell.quote(shape.do_not_cache_me_json(opts))),
cacheable = False,
antlir_rule = antlir_rule,
)
buck_genrule(
name = name,
out = "image.btrfs",
type = _rule_type,
bash = image_utils.wrap_bash_build_in_common_boilerplate(
self_dependency = antlir_dep("bzl/image/package:btrfs"),
bash = '''
$(exe {package_btrfs}) \
--subvolumes-dir "$subvolumes_dir" \
--output-path "$OUT" \
--opts $(location :{opts_name})
'''.format(
package_btrfs = antlir_dep("package:btrfs"),
opts_name = opts_name,
),
rule_type = _rule_type,
target_name = name,
),
visibility = visibility,
labels = ["uses_sudo"] + (labels or []),
antlir_rule = antlir_rule,
)
btrfs = struct(
new = _new_btrfs,
opts = _btrfs_opts_api,
)
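# Illustrative usage sketch (hypothetical target and layer names; the exact
# subvolume layout depends on the image being packaged):
#
#   btrfs.new(
#       name = "my_image",
#       opts = btrfs.opts.new(
#           subvols = {
#               "/volume": btrfs.opts.subvol.new(layer = ":my_layer"),
#           },
#           default_subvol = "/volume",
#       ),
#   )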
| 29.417476
| 85
| 0.610891
|
7c65d6bbab26ae7a08c1f7c7a15c03c97541492a
| 4,087
|
py
|
Python
|
2b.py
|
dzimiks/raf-ml-homework-1
|
34b3f1a56da402533ead80283c9d2c9c5dc1f593
|
[
"MIT"
] | null | null | null |
2b.py
|
dzimiks/raf-ml-homework-1
|
34b3f1a56da402533ead80283c9d2c9c5dc1f593
|
[
"MIT"
] | null | null | null |
2b.py
|
dzimiks/raf-ml-homework-1
|
34b3f1a56da402533ead80283c9d2c9c5dc1f593
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
# Helper function that builds an (m x n) feature matrix from an array of training examples.
def create_feature_matrix(x, nb_features):
tmp_features = []
for deg in range(1, nb_features + 1):
tmp_features.append(np.power(x, deg))
return np.column_stack(tmp_features)
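# Worked example (illustrative): for x = [1, 2] and nb_features = 3 the result
# is [[1, 1, 1], [2, 4, 8]] -- the columns are x, x**2 and x**3.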
def polynomial_regression(input_data, nb_samples, nb_features, lambda_param, color):
    # Reset the graph
tf.reset_default_graph()
    # Create the feature matrix.
data = input_data.copy()
data['x'] = create_feature_matrix(data['x'], nb_features)
    # Step 2: Model.
X = tf.placeholder(shape=(None, nb_features), dtype=tf.float32, name='X')
Y = tf.placeholder(shape=None, dtype=tf.float32, name='Y')
w = tf.Variable(tf.zeros(nb_features), name='w')
bias = tf.Variable(0.0, name='bias')
w_col = tf.reshape(w, (nb_features, 1), name='w_col')
hyp = tf.add(tf.matmul(X, w_col), bias, name='hyp')
    # Step 3: Loss function and optimization.
    # L2 regularization.
Y_col = tf.reshape(Y, (-1, 1), name='Y_col')
Lambda = tf.constant(lambda_param, dtype=tf.float32, name='lambda')
loss = tf.add(tf.reduce_mean(tf.square(hyp - Y_col)), tf.multiply(Lambda, tf.nn.l2_loss(w)), name='loss')
    # We switch to AdamOptimizer because plain GradientDescent copes poorly
    # with more complex functions.
opt_op = tf.train.AdamOptimizer(name='opt_op').minimize(loss)
    # Step 4: Training.
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
        # Run 200 training epochs.
nb_epochs = 200
for epoch in range(nb_epochs):
# Stochastic Gradient Descent.
epoch_loss = 0
for sample in range(nb_samples):
feed = {X: data['x'][sample].reshape((1, nb_features)),
Y: data['y'][sample]}
_, curr_loss = sess.run([opt_op, loss], feed_dict=feed)
epoch_loss += curr_loss
            # Print the average loss every tenth epoch.
epoch_loss /= nb_samples
if (epoch + 1) % 10 == 0:
print('Epoch: {}/{} | Avg loss: {:.5f}'.format(epoch + 1, nb_epochs, epoch_loss))
        # Print and plot the final parameter values.
w_val = sess.run(w)
bias_val = sess.run(bias)
print('w =', w_val)
print('bias =', bias_val)
xs = create_feature_matrix(np.linspace(-2, 4, 100), nb_features)
        hyp_val = sess.run(hyp, feed_dict={X: xs})  # No Y here since it is not needed.
plt.plot(xs[:, 0].tolist(), hyp_val.tolist(), color=color)
plt.xlim([-2, 2])
plt.ylim([-3, 4])
return sess.run(loss, feed_dict={X: data['x'], Y: data['y']})
def main():
    # Avoid scientific notation and round to 5 decimal places.
np.set_printoptions(suppress=True, precision=5)
    # Step 1: Loading and preprocessing the data.
filename = 'data/funky.csv'
all_data = np.loadtxt(filename, delimiter=',', skiprows=0, usecols=(0, 1))
data = dict()
data['x'] = all_data[:, 0]
data['y'] = all_data[:, 1]
    # Random shuffling.
nb_samples = data['x'].shape[0]
indices = np.random.permutation(nb_samples)
data['x'] = data['x'][indices]
data['y'] = data['y'][indices]
    # Normalization (note the axis=0).
data['x'] = (data['x'] - np.mean(data['x'], axis=0)) / np.std(data['x'], axis=0)
data['y'] = (data['y'] - np.mean(data['y'])) / np.std(data['y'])
    # Plotting the data
plt.scatter(data['x'], data['y'])
plt.xlabel('X value')
plt.ylabel('Y value')
colors = ['g', 'm', 'b', 'r', 'k', 'y', 'c']
losses = []
i = 0
for lambda_param in [0, 0.001, 0.01, 0.1, 1, 10, 100]:
print('Polynomial regression for lambda = ', lambda_param)
loss = polynomial_regression(data, nb_samples, 3, lambda_param, colors[i])
losses.append(loss)
i += 1
print('loss = {:.5f}'.format(loss))
print('----------------------------')
plt.show()
plt.plot([0, 0.001, 0.01, 0.1, 1, 10, 100], losses, color='g')
plt.xlabel('Lambda')
plt.ylabel('Loss')
plt.show()
    # Save the graph so we can load it into TensorBoard
writer = tf.summary.FileWriter('logs/')
writer.add_graph(tf.get_default_graph())
writer.flush()
if __name__ == '__main__':
main()
# The smallest loss is obtained when lambda = 0
# As lambda grows, the loss grows as well
| 30.051471
| 106
| 0.666993
|
5443390da3009eb5d171b8f7ac521e3fe85ae651
| 5,325
|
py
|
Python
|
imcsdk/mometa/adaptor/AdaptorConnectorInfo.py
|
ragupta-git/ImcSdk
|
2e41f2ffe5282d38de85bc4739fa53dd2f0c9bb4
|
[
"Apache-2.0"
] | null | null | null |
imcsdk/mometa/adaptor/AdaptorConnectorInfo.py
|
ragupta-git/ImcSdk
|
2e41f2ffe5282d38de85bc4739fa53dd2f0c9bb4
|
[
"Apache-2.0"
] | null | null | null |
imcsdk/mometa/adaptor/AdaptorConnectorInfo.py
|
ragupta-git/ImcSdk
|
2e41f2ffe5282d38de85bc4739fa53dd2f0c9bb4
|
[
"Apache-2.0"
] | 3
|
2018-11-14T13:02:40.000Z
|
2018-11-14T13:49:38.000Z
|
"""This module contains the general information for AdaptorConnectorInfo ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class AdaptorConnectorInfoConsts:
pass
class AdaptorConnectorInfo(ManagedObject):
"""This is AdaptorConnectorInfo class."""
consts = AdaptorConnectorInfoConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("AdaptorConnectorInfo", "adaptorConnectorInfo", "connector-info", VersionMeta.Version204c, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'adaptorExtEthIf'], [], ["Get"]),
"modular": MoMeta("AdaptorConnectorInfo", "adaptorConnectorInfo", "connector-info", VersionMeta.Version303a, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'adaptorExtEthIf'], [], ["Get"])
}
prop_meta = {
"classic": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version204c, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version204c, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"part_number": MoPropertyMeta("part_number", "partNumber", "string", VersionMeta.Version204c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"part_revision": MoPropertyMeta("part_revision", "partRevision", "string", VersionMeta.Version204c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"present": MoPropertyMeta("present", "present", "string", VersionMeta.Version204c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version204c, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version204c, MoPropertyMeta.READ_ONLY, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"supported": MoPropertyMeta("supported", "supported", "string", VersionMeta.Version204c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version204c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version204c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
},
"modular": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version303a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"part_number": MoPropertyMeta("part_number", "partNumber", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"part_revision": MoPropertyMeta("part_revision", "partRevision", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"present": MoPropertyMeta("present", "present", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"supported": MoPropertyMeta("supported", "supported", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"vendor": MoPropertyMeta("vendor", "vendor", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
},
}
prop_map = {
"classic": {
"childAction": "child_action",
"dn": "dn",
"partNumber": "part_number",
"partRevision": "part_revision",
"present": "present",
"rn": "rn",
"status": "status",
"supported": "supported",
"type": "type",
"vendor": "vendor",
},
"modular": {
"childAction": "child_action",
"dn": "dn",
"partNumber": "part_number",
"partRevision": "part_revision",
"present": "present",
"rn": "rn",
"status": "status",
"supported": "supported",
"type": "type",
"vendor": "vendor",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.part_number = None
self.part_revision = None
self.present = None
self.status = None
self.supported = None
self.type = None
self.vendor = None
ManagedObject.__init__(self, "AdaptorConnectorInfo", parent_mo_or_dn, **kwargs)
| 54.896907
| 207
| 0.612394
|
a36583e675c39cb825c3629d31bf9694901dda5d
| 3,472
|
py
|
Python
|
haproxyspoa/spoa_frame.py
|
krrg/haproxy-python-spoa
|
f8a3c4dcea1c0451683dbc89c035009911b234e2
|
[
"Apache-2.0"
] | 4
|
2021-04-06T01:46:58.000Z
|
2022-01-10T12:38:29.000Z
|
haproxyspoa/spoa_frame.py
|
krrg/haproxy-python-spoa
|
f8a3c4dcea1c0451683dbc89c035009911b234e2
|
[
"Apache-2.0"
] | null | null | null |
haproxyspoa/spoa_frame.py
|
krrg/haproxy-python-spoa
|
f8a3c4dcea1c0451683dbc89c035009911b234e2
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import io
from enum import IntEnum
from haproxyspoa.payloads.agent_hello import AgentHelloPayload
from haproxyspoa.spoa_data_types import parse_varint, write_varint
class FrameType(IntEnum):
FRAGMENT = 0
HAPROXY_HELLO = 1
HAPROXY_DISCONNECT = 2
HAPROXY_NOTIFY = 3
AGENT_HELLO = 101
AGENT_DISCONNECT = 102
ACK = 103
class FrameHeaders:
def __init__(
self,
frame_type: int,
flags: int,
stream_id: int,
frame_id: int,
):
self.type = frame_type
self.flags = flags
self.stream_id = stream_id
self.frame_id = frame_id
def is_fragmented_or_unset(self):
# Note: This implementation doesn't support fragmented frames, so
# if the frame is fragmented, we're toast.
return self.type == FrameType.FRAGMENT
def is_haproxy_hello(self):
return self.type == FrameType.HAPROXY_HELLO
def is_haproxy_disconnect(self):
return self.type == FrameType.HAPROXY_DISCONNECT
def is_haproxy_notify(self):
return self.type == FrameType.HAPROXY_NOTIFY
def is_agent_hello(self):
return self.type == FrameType.AGENT_HELLO
def is_agent_disconnect(self):
return self.type == FrameType.AGENT_DISCONNECT
def is_ack(self):
return self.type == FrameType.ACK
class Frame:
def __init__(
self,
frame_type: int,
flags: int,
stream_id: int,
frame_id: int,
payload: io.BytesIO,
):
self.headers = FrameHeaders(
frame_type,
flags,
stream_id,
frame_id
)
self.payload = payload
@staticmethod
async def read_frame(reader: asyncio.StreamReader):
frame_length = int.from_bytes(await reader.readexactly(4), byteorder='big', signed=False)
frame_bytes: bytes = await reader.readexactly(frame_length)
frame_bytesio = io.BytesIO(frame_bytes)
frame_type = int.from_bytes(frame_bytesio.read(1), byteorder='big', signed=False)
flags = int.from_bytes(frame_bytesio.read(4), byteorder='big', signed=False)
stream_id = parse_varint(frame_bytesio)
frame_id = parse_varint(frame_bytesio)
return Frame(
frame_type,
flags,
stream_id,
frame_id,
frame_bytesio
)
async def write_frame(self, writer: asyncio.StreamWriter):
header_buffer = io.BytesIO()
header_buffer.write(self.headers.type.to_bytes(1, byteorder='big'))
header_buffer.write(self.headers.flags.to_bytes(4, byteorder='big'))
header_buffer.write(write_varint(self.headers.stream_id))
header_buffer.write(write_varint(self.headers.frame_id))
frame_header_bytes = header_buffer.getvalue()
frame_payload_bytes = self.payload.getvalue()
frame_length = len(frame_header_bytes) + len(frame_payload_bytes)
writer.write(frame_length.to_bytes(4, byteorder='big'))
writer.write(frame_header_bytes)
writer.write(frame_payload_bytes)
await writer.drain()
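    # Wire layout handled by read_frame/write_frame above (as implemented here):
    #   4 bytes   big-endian unsigned frame length (excluding these 4 bytes)
    #   1 byte    frame type
    #   4 bytes   flags
    #   varint    stream id
    #   varint    frame id
    #   payload   all remaining bytes of the frame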
class AgentHelloFrame(Frame):
def __init__(self, payload: AgentHelloPayload, flags: int = 1, stream_id: int = 0, frame_id: int = 0):
super().__init__(
FrameType.AGENT_HELLO,
flags,
stream_id,
frame_id,
io.BytesIO(payload.to_bytes())
)
| 28
| 107
| 0.644297
|
3e89689b6001e44408afa7f87a6be59929962a91
| 8,311
|
py
|
Python
|
barcode/codex.py
|
goflynn-123/python-barcode
|
83b7e212dd164ae1520ddeda9479f1f1ae9c2b80
|
[
"MIT"
] | null | null | null |
barcode/codex.py
|
goflynn-123/python-barcode
|
83b7e212dd164ae1520ddeda9479f1f1ae9c2b80
|
[
"MIT"
] | null | null | null |
barcode/codex.py
|
goflynn-123/python-barcode
|
83b7e212dd164ae1520ddeda9479f1f1ae9c2b80
|
[
"MIT"
] | null | null | null |
"""Module: barcode.codex
:Provided barcodes: Code 39, Code 128, PZN
"""
from barcode.base import Barcode
from barcode.charsets import code128, code39
from barcode.errors import (
BarcodeError,
IllegalCharacterError,
NumberOfDigitsError,
)
__docformat__ = 'restructuredtext en'
# Sizes
MIN_SIZE = 0.2
MIN_QUIET_ZONE = 2.54
def check_code(code, name, allowed):
wrong = []
for char in code:
if char not in allowed:
wrong.append(char)
if wrong:
raise IllegalCharacterError(
'The following characters are not valid for '
'{name}: {wrong}'.format(name=name, wrong=', '.join(wrong))
)
class Code39(Barcode):
r"""Initializes a new Code39 instance.
:parameters:
code : String
Code 39 string without \* and checksum (added automatically if
`add_checksum` is True).
writer : barcode.writer Instance
The writer to render the barcode (default: SVGWriter).
add_checksum : Boolean
Add the checksum to code or not (default: True).
"""
name = 'Code 39'
def __init__(self, code, writer=None, add_checksum=True):
self.code = code.upper()
if add_checksum:
self.code += self.calculate_checksum()
self.writer = writer or Barcode.default_writer()
check_code(self.code, self.name, code39.REF)
def __unicode__(self):
return self.code
__str__ = __unicode__
def get_fullcode(self):
return self.code
def calculate_checksum(self):
check = sum(code39.MAP[x][0] for x in self.code) % 43
for k, v in code39.MAP.items():
if check == v[0]:
return k
def build(self):
chars = [code39.EDGE]
for char in self.code:
chars.append(code39.MAP[char][1])
chars.append(code39.EDGE)
return [code39.MIDDLE.join(chars)]
def render(self, writer_options=None, text=None):
options = {'module_width': MIN_SIZE, 'quiet_zone': MIN_QUIET_ZONE}
options.update(writer_options or {})
return Barcode.render(self, options, text)
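# Illustrative usage sketch (not part of the module; the writer defaults to
# SVGWriter when omitted):
def _example_code39():
    bc = Code39('ABC-123')   # checksum character is appended automatically
    return bc.build()        # a list holding the bar/space pattern string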
class PZN7(Code39):
"""Initializes new German number for pharmaceutical products.
:parameters:
pzn : String
Code to render.
writer : barcode.writer Instance
The writer to render the barcode (default: SVGWriter).
"""
name = 'Pharmazentralnummer'
digits = 6
def __init__(self, pzn, writer=None):
pzn = pzn[:self.digits]
if not pzn.isdigit():
raise IllegalCharacterError('PZN can only contain numbers.')
if len(pzn) != self.digits:
raise NumberOfDigitsError(
'PZN must have {0} digits, not '
'{1}.'.format(self.digits, len(pzn))
)
        self.pzn = pzn  # set first so calculate_checksum() can read it
        self.pzn = '{0}{1}'.format(pzn, self.calculate_checksum())
Code39.__init__(
self, 'PZN-{0}'.format(self.pzn), writer, add_checksum=False
)
def get_fullcode(self):
return 'PZN-{0}'.format(self.pzn)
def calculate_checksum(self):
sum_ = sum(int(x) * int(y) for x, y in enumerate(self.pzn, start=2))
checksum = sum_ % 11
if checksum == 10:
raise BarcodeError('Checksum can not be 10 for PZN.')
else:
return checksum
class PZN8(PZN7):
"""Will be fully added in v0.9."""
digits = 7
class Code128(Barcode):
"""Initializes a new Code128 instance. The checksum is added automatically
when building the bars.
:parameters:
code : String
Code 128 string without checksum (added automatically).
writer : barcode.writer Instance
The writer to render the barcode (default: SVGWriter).
"""
name = 'Code 128'
def __init__(self, code, writer=None):
self.code = code
self.writer = writer or Barcode.default_writer()
self._charset = 'B'
self._buffer = ''
check_code(self.code, self.name, code128.ALL)
def __unicode__(self):
return self.code
__str__ = __unicode__
@property
def encoded(self):
return self._build()
def get_fullcode(self):
return self.code
def _new_charset(self, which):
if which == 'A':
code = self._convert('TO_A')
elif which == 'B':
code = self._convert('TO_B')
elif which == 'C':
code = self._convert('TO_C')
self._charset = which
return [code]
def _maybe_switch_charset(self, pos):
char = self.code[pos]
next_ = self.code[pos:pos + 10]
        def look_next():
            # Peek ahead: switching to charset C pays off only for an upcoming
            # run of more than 3 digits with even length (digits pack in pairs).
            digits = 0
for c in next_:
if c.isdigit():
digits += 1
else:
break
return digits > 3 and (digits % 2) == 0
codes = []
if self._charset == 'C' and not char.isdigit():
if char in code128.B:
codes = self._new_charset('B')
elif char in code128.A:
codes = self._new_charset('A')
if len(self._buffer) == 1:
codes.append(self._convert(self._buffer[0]))
self._buffer = ''
elif self._charset == 'B':
if look_next():
codes = self._new_charset('C')
elif char not in code128.B:
if char in code128.A:
codes = self._new_charset('A')
elif self._charset == 'A':
if look_next():
codes = self._new_charset('C')
elif char not in code128.A:
if char in code128.B:
codes = self._new_charset('B')
return codes
def _convert(self, char):
if self._charset == 'A':
return code128.A[char]
elif self._charset == 'B':
return code128.B[char]
elif self._charset == 'C':
if char in code128.C:
return code128.C[char]
elif char.isdigit():
self._buffer += char
if len(self._buffer) == 2:
value = int(self._buffer)
self._buffer = ''
return value
def _try_to_optimize(self, encoded):
if encoded[1] in code128.TO:
encoded[:2] = [code128.TO[encoded[1]]]
return encoded
def _calculate_checksum(self, encoded):
cs = [encoded[0]]
for i, code_num in enumerate(encoded[1:], start=1):
cs.append(i * code_num)
return sum(cs) % 103
def _build(self):
encoded = [code128.START_CODES[self._charset]]
for i, char in enumerate(self.code):
encoded.extend(self._maybe_switch_charset(i))
code_num = self._convert(char)
if code_num is not None:
encoded.append(code_num)
# Finally look in the buffer
if len(self._buffer) == 1:
encoded.extend(self._new_charset('B'))
encoded.append(self._convert(self._buffer[0]))
self._buffer = ''
encoded = self._try_to_optimize(encoded)
return encoded
def build(self):
encoded = self._build()
encoded.append(self._calculate_checksum(encoded))
code = ''
for code_num in encoded:
code += code128.CODES[code_num]
code += code128.STOP
code += '11'
return [code]
def render(self, writer_options=None, text=None):
options = {'module_width': MIN_SIZE, 'quiet_zone': MIN_QUIET_ZONE}
options.update(writer_options or {})
return Barcode.render(self, options, text)
class Gs1_128(Code128):
"""
following the norm, a gs1-128 barcode is a subset of code 128 barcode,
it can be generated by prepending the code with the FNC1 character
https://en.wikipedia.org/wiki/GS1-128
https://www.gs1-128.info/
"""
name = 'GS1-128'
FNC1_CHAR = '\xf1'
def __init__(self, code, writer=None):
code = self.FNC1_CHAR + code
super(Gs1_128, self).__init__(code, writer)
def get_fullcode(self):
return super(Gs1_128, self).get_fullcode()[1:]
# For pre 0.8 compatibility
PZN = PZN7
| 29.059441
| 78
| 0.56732
|
dd11211e3c4712c7f6079abb1970c652430a34c6
| 1,141
|
py
|
Python
|
core/controllers/pages_test.py
|
bching/oppia
|
9e9b6d756859b8bc1e46f88a1be8736f8398a8d8
|
[
"Apache-2.0"
] | 1
|
2017-11-30T02:16:01.000Z
|
2017-11-30T02:16:01.000Z
|
core/controllers/pages_test.py
|
bching/oppia
|
9e9b6d756859b8bc1e46f88a1be8736f8398a8d8
|
[
"Apache-2.0"
] | 1
|
2020-01-26T14:02:43.000Z
|
2020-01-26T14:02:43.000Z
|
core/controllers/pages_test.py
|
bching/oppia
|
9e9b6d756859b8bc1e46f88a1be8736f8398a8d8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for various static pages (like the About page)."""
from core.tests import test_utils
class NoninteractivePagesTest(test_utils.GenericTestBase):
def test_about_page(self):
"""Test the About page."""
response = self.testapp.get('/about')
self.assertEqual(response.status_int, 200)
self.assertEqual(response.content_type, 'text/html')
response.mustcontain(
'I18N_ABOUT_PAGE_CREDITS_TAB_HEADING',
'I18N_ABOUT_PAGE_FOUNDATION_TAB_PARAGRAPH_5_LICENSE_HEADING')
| 38.033333
| 74
| 0.737073
|
88ae1ef1c8f2e39620c1c6207d3b1565f5a399a3
| 19,319
|
py
|
Python
|
dxm/lib/DxJobs/DxJob.py
|
arunbsar/dxm-toolkit
|
4633d322bdebe2d75e10d1e7cd28b5a7caed2ce2
|
[
"Apache-2.0"
] | null | null | null |
dxm/lib/DxJobs/DxJob.py
|
arunbsar/dxm-toolkit
|
4633d322bdebe2d75e10d1e7cd28b5a7caed2ce2
|
[
"Apache-2.0"
] | null | null | null |
dxm/lib/DxJobs/DxJob.py
|
arunbsar/dxm-toolkit
|
4633d322bdebe2d75e10d1e7cd28b5a7caed2ce2
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
# Author : Marcin Przepiorowski
# Date : April 2018
import logging
import time
import pytz
from tqdm import tqdm
from dxm.lib.DxLogging import print_error
from dxm.lib.DxLogging import print_message
import dxm.lib.DxJobs.DxJobCounter
from dxm.lib.DxTools.DxTools import paginator
class DxJob(object):
def __init__(self, engine, execList):
"""
Constructor
:param1 engine: DxMaskingEngine object
:param2 execList: list of job executions
"""
#MaskingJob.__init__(self)
self.__engine = engine
self.__execList = execList
self.__logger = logging.getLogger()
self.__logger.debug("creating DxJob object")
self.__monitor = False
if (self.__engine.version_ge('6.0.0')):
from masking_api_60.models.masking_job import MaskingJob
from masking_api_60.api.masking_job_api import MaskingJobApi
from masking_api_60.api.execution_api import ExecutionApi
from masking_api_60.models.execution import Execution
from masking_api_60.api.execution_component_api import ExecutionComponentApi
from masking_api_60.rest import ApiException
else:
from masking_api_53.models.masking_job import MaskingJob
from masking_api_53.api.masking_job_api import MaskingJobApi
from masking_api_53.api.execution_api import ExecutionApi
from masking_api_53.models.execution import Execution
from masking_api_53.api.execution_component_api import ExecutionComponentApi
from masking_api_53.rest import ApiException
self.__api = MaskingJobApi
self.__model = MaskingJob
self.__apiexec = ExecutionApi
self.__apicomponent = ExecutionComponentApi
self.__modelexec = Execution
self.__apiexc = ApiException
self.__obj = None
@property
def monitor(self):
return self.__monitor
@monitor.setter
def monitor(self, value):
self.__monitor = value
@property
def lastExec(self):
if self.__execList:
return self.__execList[-1]
else:
return None
@property
def execList(self):
return self.__execList
@property
def obj(self):
if self.__obj is not None:
return self.__obj
else:
return None
@property
def email(self):
if self.obj is not None:
return self.obj.email
else:
return None
@email.setter
def email(self, email):
if self.__obj is not None:
self.__obj.email = email
else:
raise ValueError("Object needs to be initialized first")
@property
def max_memory(self):
if self.obj is not None:
return self.obj.max_memory
else:
return None
@max_memory.setter
def max_memory(self, max_memory):
if self.__obj is not None:
self.__obj.max_memory = max_memory
else:
raise ValueError("Object needs to be initialized first")
@property
def min_memory(self):
if self.obj is not None:
return self.obj.min_memory
else:
return None
@min_memory.setter
def min_memory(self, min_memory):
if self.__obj is not None:
self.__obj.min_memory = min_memory
else:
raise ValueError("Object needs to be initialized first")
@property
def num_input_streams(self):
if self.obj is not None:
return self.obj.num_input_streams
else:
return None
@num_input_streams.setter
def num_input_streams(self, num_input_streams):
if self.__obj is not None:
self.__obj.num_input_streams = num_input_streams
else:
raise ValueError("Object needs to be initialized first")
@property
def multi_tenant(self):
if self.obj is not None:
return self.obj.multi_tenant
else:
return None
@multi_tenant.setter
def multi_tenant(self, multi_tenant):
if self.__obj is not None:
self.__obj.multi_tenant = multi_tenant
else:
raise ValueError("Object needs to be initialized first")
@property
def feedback_size(self):
if self.obj is not None:
return self.obj.feedback_size
else:
return None
@feedback_size.setter
def feedback_size(self, feedback_size):
if self.__obj is not None:
self.__obj.feedback_size = feedback_size
else:
raise ValueError("Object needs to be initialized first")
@property
def on_the_fly_masking(self):
if self.obj is not None:
return self.obj.on_the_fly_masking
else:
return None
@on_the_fly_masking.setter
def on_the_fly_masking(self, on_the_fly_masking):
if self.__obj is not None:
self.__obj.on_the_fly_masking = on_the_fly_masking
else:
raise ValueError("Object needs to be initialized first")
@property
def on_the_fly_masking_source(self):
if self.obj is not None:
return self.obj.on_the_fly_masking_source
else:
return None
@on_the_fly_masking_source.setter
def on_the_fly_masking_source(self, on_the_fly_masking_source):
if self.__obj is not None:
self.__obj.on_the_fly_masking_source = on_the_fly_masking_source
else:
raise ValueError("Object needs to be initialized first")
@property
def database_masking_options(self):
if self.obj is not None:
return self.obj.database_masking_options
else:
return None
@database_masking_options.setter
def database_masking_options(self, database_masking_options):
if self.__obj is not None:
self.__obj.database_masking_options = database_masking_options
else:
raise ValueError("Object needs to be initialized first")
@property
def job_description(self):
if self.obj is not None:
return self.obj.job_description
else:
return None
@job_description.setter
def job_description(self, job_description):
if self.__obj is not None:
self.__obj.job_description = job_description
else:
raise ValueError("Object needs to be initialized first")
@property
def job_name(self):
if self.obj is not None:
return self.obj.job_name
else:
return None
@job_name.setter
def job_name(self, job_name):
if self.__obj is not None:
self.__obj.job_name = job_name
else:
raise ValueError("Object needs to be initialized first")
@property
def ruleset_id(self):
if self.obj is not None:
return self.obj.ruleset_id
else:
return None
@ruleset_id.setter
def ruleset_id(self, ruleset_id):
if self.__obj is not None:
self.__obj.ruleset_id = ruleset_id
else:
raise ValueError("Object needs to be initialized first")
@property
def ruleset_type(self):
if self.obj is not None:
if hasattr(self.obj, "ruleset_type"):
return self.obj.ruleset_type
else:
return "N/A"
else:
return None
@ruleset_type.setter
def ruleset_type(self, ruleset_type):
if self.__obj is not None:
self.__obj.ruleset_type = ruleset_type
else:
raise ValueError("Object needs to be initialized first")
@property
def masking_job_id(self):
if self.obj is not None:
return self.obj.masking_job_id
else:
return None
@masking_job_id.setter
def masking_job_id(self, masking_job_id):
if self.__obj is not None:
self.__obj.masking_job_id = masking_job_id
else:
raise ValueError("Object needs to be initialized first")
def from_job(self, job):
self.__obj = job
def create_job(self, job_name, ruleset_id):
"""
Create an connector object
:param connector_name
:param database_type
:param environment_id
"""
self.__obj = self.__model(job_name=job_name, ruleset_id=ruleset_id)
def add(self):
"""
Add job to Masking engine and print status message
        return 0 if no error
return 1 in case of error
"""
if (self.obj.job_name is None):
print_error("Job name is required")
self.__logger.error("Job name is required")
return 1
if (self.ruleset_id is None):
print_error("ruleset_id is required")
self.__logger.error("ruleset_id is required")
return 1
try:
self.__logger.debug("create job input %s" % str(self.obj))
api_instance = self.__api(self.__engine.api_client)
self.__logger.debug("API instance created")
response = api_instance.create_masking_job(self.obj)
self.from_job(response)
self.__logger.debug("job response %s"
% str(response))
print_message("Job %s added" % self.job_name)
return 0
except self.__apiexc as e:
print_error(e.body)
self.__logger.error(e)
return 1
def delete(self):
"""
Delete job from Engine
        return 0 if no error
return 1 in case of error
"""
try:
api_instance = self.__api(self.__engine.api_client)
response = api_instance.delete_masking_job(
self.obj.masking_job_id,
_request_timeout=self.__engine.get_timeout())
self.__logger.debug("job response %s"
% str(response))
print_message("Job %s deleted" % self.job_name)
return 0
except self.__apiexc as e:
print_error(e.body)
self.__logger.error(e)
return 1
def update(self):
"""
Update job in Engine
        return 0 if no error
return 1 in case of error
"""
try:
api_instance = self.__api(self.__engine.api_client)
self.__logger.debug("update job request %s"
% str(self.obj))
response = api_instance.update_masking_job(
self.obj.masking_job_id,
self.obj,
_request_timeout=self.__engine.get_timeout())
self.__logger.debug("job response %s"
% str(response))
print_message("Job %s updated" % self.job_name)
return 0
except self.__apiexc as e:
print_error(e.body)
self.__logger.error(e)
return 1
def cancel(self):
"""
Cancel running job in Engine
        return 0 if no error
return 1 in case of error
"""
try:
            execid = self.lastExec.execution_id
exec_api = self.__apiexec(self.__engine.api_client)
self.__logger.debug("Stopping execution %s" % str(execid))
execjob = exec_api.cancel_execution(execid)
self.__logger.debug("Stopping execution response %s" % str(execjob))
while execjob.status == 'RUNNING':
time.sleep(1)
execjob = exec_api.get_execution_by_id(execid)
print_message(execjob)
return 0
except self.__apiexc as e:
print_error(e.body)
self.__logger.error(e)
return 1
def start(self, target_connector_id, source_connector_id, nowait, posno,
lock):
"""
Start masking job
        :param1 target_connector_id: target connector id for multitenant jobs
        :param2 source_connector_id: source connector id for on-the-fly jobs
        :param3 nowait: do not wait for the job to finish
        :param4 posno: progress bar position used when monitoring
        :param5 lock: lock guarding the shared job error counter
        Return 0 if job started and finished OK, or was started with nowait
Return 1 if errors
"""
exec_api = self.__apiexec(self.__engine.api_client)
execjob = self.__modelexec(job_id = self.masking_job_id)
if (self.multi_tenant):
# target is mandatory
if target_connector_id:
execjob.target_connector_id = target_connector_id
else:
print_error("Target connector is required for multitenant job")
return 1
if (self.on_the_fly_masking):
if not self.on_the_fly_masking_source:
if source_connector_id:
execjob.source_connector_id = source_connector_id
else:
print_error(
"Source connector is required for on the fly job")
return 1
try:
self.__logger.debug("start job input %s" % str(execjob))
response = exec_api.create_execution(
execjob,
_request_timeout=self.__engine.get_timeout())
self.__logger.debug("start job response %s"
% str(response))
if nowait:
return 0
else:
return self.wait_for_job(response, posno, lock)
except self.__apiexc as e:
print_error(e.body)
self.__logger.error(e)
lock.acquire()
dxm.lib.DxJobs.DxJobCounter.ret = \
dxm.lib.DxJobs.DxJobCounter.ret + 1
lock.release()
self.__logger.error('return value %s'
% dxm.lib.DxJobs.DxJobCounter.ret)
return 1
def wait_for_job(self, execjob, posno, lock):
"""
Wait for job to finish execution
:param1 execjob: Execution job response
Return 0 finished OK
Return 1 if errors
"""
execid = execjob.execution_id
first = True
bar = None
exec_api = self.__apiexec(self.__engine.api_client)
last = 0
self.__logger.debug('Waiting for job %s to start processing rows'
% self.job_name)
if not self.monitor:
print_message('Waiting for job %s to start processing rows'
% self.job_name)
while execjob.status == 'RUNNING':
time.sleep(10)
execjob = exec_api.get_execution_by_id(execid)
if first and (execjob.rows_total is not None):
first = False
if self.monitor and (bar is None):
bar = tqdm(
total=execjob.rows_total,
desc=self.job_name,
position=posno,
bar_format="{desc}: {percentage:3.0f}%|{bar}|"
" {n_fmt}/{total_fmt}")
else:
print_message('Job %s is processing rows'
% self.job_name)
if execjob.rows_masked is not None:
if self.monitor and (bar is not None):
self.__logger.debug(execjob.rows_masked)
self.__logger.debug(last)
step = execjob.rows_masked-last
# if step == 0:
# step = 1
self.__logger.debug(step)
bar.update(step)
last = execjob.rows_masked
if execjob.status == 'SUCCEEDED':
if not self.monitor:
print_message('Masking job %s finished.' % self.job_name)
print_message('%s rows masked' % (execjob.rows_masked or 0))
else:
if bar:
bar.close()
self.__logger.debug('Masking job %s finished' % self.job_name)
self.__logger.debug('%s rows masked' % execjob.rows_masked)
return 0
else:
if not self.monitor:
print_error('Problem with masking job %s' % self.job_name)
print_error('%s rows masked' % (execjob.rows_masked or 0))
else:
if bar:
bar.close()
self.__logger.error('Problem with masking job %s'
% self.job_name)
self.__logger.error('%s rows masked' % execjob.rows_masked)
lock.acquire()
dxm.lib.DxJobs.DxJobCounter.ret = \
dxm.lib.DxJobs.DxJobCounter.ret + 1
lock.release()
self.__logger.error('return value %s'
% dxm.lib.DxJobs.DxJobCounter.ret)
return 1
def filter_executions(self, startdate, enddate):
"""
Filter job executions using start and end date parameters
:param1 startdate: start date
:param2 enddate: end date
Return a list of filtered executions
"""
if startdate:
startdate_tz = startdate.replace(tzinfo=pytz.UTC)
if enddate:
enddate_tz = enddate.replace(tzinfo=pytz.UTC)
if self.execList:
execlist = [ x for x in self.execList if ((startdate is None) or (x.start_time >= startdate_tz)) and ((enddate is None) or (x.end_time <= enddate_tz)) ]
else:
execlist = self.execList
return execlist
def list_execution_component(self, execid):
"""
        List execution details (tables, rows, etc.)
        :param1 execid: execution id to display
        return a list of execution components if no error
        return 1 or None in case of error
"""
if (execid is None):
print_error("Execution id is required")
self.__logger.error("Execution id is required")
return 1
try:
self.__logger.debug("execute component")
api_instance = self.__apicomponent(self.__engine.api_client)
execomponents = paginator(
api_instance,
"get_all_execution_components",
execution_id=execid,
_request_timeout=self.__engine.get_timeout())
return execomponents.response_list
except self.__apiexc as e:
print_error(e.body)
self.__logger.error(e)
return None
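# Illustrative usage sketch (hypothetical engine object and ids; constructing
# the DxMaskingEngine is outside this module):
#
#   job = DxJob(engine, execList=[])
#   job.create_job("mask_customers", ruleset_id=42)
#   job.add()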
| 31.985099
| 164
| 0.579274
|
148b6e19be4eaeb1b14b5931695f878ade352799
| 3,387
|
py
|
Python
|
discordSuperUtils/economy.py
|
MG-LSJ/discord-super-utils
|
6434b77f20b1d69a06ac4e9e450ce8d21369fd9f
|
[
"MIT"
] | 1
|
2022-01-10T08:56:18.000Z
|
2022-01-10T08:56:18.000Z
|
discordSuperUtils/economy.py
|
MG-LSJ/discord-super-utils
|
6434b77f20b1d69a06ac4e9e450ce8d21369fd9f
|
[
"MIT"
] | null | null | null |
discordSuperUtils/economy.py
|
MG-LSJ/discord-super-utils
|
6434b77f20b1d69a06ac4e9e450ce8d21369fd9f
|
[
"MIT"
] | 1
|
2021-12-23T17:18:10.000Z
|
2021-12-23T17:18:10.000Z
|
import discord
from typing import List, Optional
from .base import DatabaseChecker
class EconomyAccount:
def __init__(self, guild: int, member: int, database, table):
self.guild = guild
self.member = member
self.database = database
self.table = table
def __str__(self):
return f"<Account MEMBER={self.member}, GUILD={self.guild}>"
@property
def __checks(self):
return EconomyManager.generate_checks(self.guild, self.member)
async def currency(self):
currency_data = await self.database.select(
self.table, ["currency"], self.__checks
)
return currency_data["currency"]
async def bank(self):
bank_data = await self.database.select(self.table, ["bank"], self.__checks)
return bank_data["bank"]
async def net(self):
return await self.bank() + await self.currency()
async def change_currency(self, amount: int):
currency = await self.currency()
await self.database.update(
self.table, {"currency": currency + amount}, self.__checks
)
async def change_bank(self, amount: int):
bank_amount = await self.bank()
await self.database.update(
self.table, {"bank": bank_amount + amount}, self.__checks
)
class EconomyManager(DatabaseChecker):
def __init__(self, bot):
super().__init__(
[
{
"guild": "snowflake",
"member": "snowflake",
"currency": "snowflake",
"bank": "snowflake",
}
],
["economy"],
)
self.bot = bot
@staticmethod
def generate_checks(guild: int, member: int):
return {"guild": guild, "member": member}
async def create_account(self, member: discord.Member) -> None:
self._check_database()
await self.database.insertifnotexists(
self.tables["economy"],
{"guild": member.guild.id, "member": member.id, "currency": 0, "bank": 0},
self.generate_checks(member.guild.id, member.id),
)
async def get_account(self, member: discord.Member) -> Optional[EconomyAccount]:
self._check_database()
member_data = await self.database.select(
self.tables["economy"],
[],
self.generate_checks(member.guild.id, member.id),
True,
)
if member_data:
return EconomyAccount(
member.guild.id, member.id, self.database, self.tables["economy"]
)
return None
async def get_leaderboard(self, guild) -> List[EconomyAccount]:
self._check_database()
guild_info = await self.database.select(
self.tables["economy"], [], {"guild": guild.id}, True
)
members = [
EconomyAccount(
member_info["guild"],
member_info["member"],
database=self.database,
table=self.tables["economy"],
)
for member_info in sorted(
guild_info, key=lambda x: x["bank"] + x["currency"], reverse=True
)
]
return members
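# --- Illustrative sketch (not part of the original source): the net-worth
# ordering used by get_leaderboard() above, demonstrated with plain dicts in
# place of database rows.
def _demo_leaderboard_order():
    rows = [
        {"member": 1, "bank": 100, "currency": 5},
        {"member": 2, "bank": 10, "currency": 500},
        {"member": 3, "bank": 0, "currency": 0},
    ]
    ordered = sorted(rows, key=lambda x: x["bank"] + x["currency"], reverse=True)
    return [row["member"] for row in ordered]  # -> [2, 1, 3]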
| 30.513514
| 87
| 0.542958
|
6b5ed27232975cb2be59d057d3e3924c918439df
| 260
|
py
|
Python
|
recipe/import_test.py
|
regro-cf-autotick-bot/yeadon-feedstock
|
d7afede10dae5a3a7394797710ade91df029d3c1
|
[
"BSD-3-Clause"
] | 22
|
2015-01-24T20:22:42.000Z
|
2022-02-06T18:07:57.000Z
|
recipe/import_test.py
|
regro-cf-autotick-bot/yeadon-feedstock
|
d7afede10dae5a3a7394797710ade91df029d3c1
|
[
"BSD-3-Clause"
] | 19
|
2015-02-09T22:33:12.000Z
|
2021-02-09T17:41:01.000Z
|
recipe/import_test.py
|
regro-cf-autotick-bot/yeadon-feedstock
|
d7afede10dae5a3a7394797710ade91df029d3c1
|
[
"BSD-3-Clause"
] | 5
|
2015-11-10T13:45:48.000Z
|
2018-07-28T15:36:38.000Z
|
import yeadon
import yeadon.exceptions
import yeadon.human
import yeadon.inertia
import yeadon.segment
import yeadon.solid
import yeadon.ui
import yeadon.utils
import yeadon.tests
try:
import yeadon.gui
except ImportError: # mayavi not installed
pass
| 18.571429
| 43
| 0.811538
|
fa8292c59c2f8ef981ac6b326fcd406cef6d7061
| 2,727
|
py
|
Python
|
fastestimator/backend/_permute.py
|
DwijayDS/fastestimator
|
9b288cb2bd870f971ec4cee09d0b3205e1316a94
|
[
"Apache-2.0"
] | 57
|
2019-05-21T21:29:26.000Z
|
2022-02-23T05:55:21.000Z
|
fastestimator/backend/permute.py
|
vbvg2008/fastestimator
|
6061a4fbbeb62a2194ef82ba8017f651710d0c65
|
[
"Apache-2.0"
] | 93
|
2019-05-23T18:36:07.000Z
|
2022-03-23T17:15:55.000Z
|
fastestimator/backend/permute.py
|
vbvg2008/fastestimator
|
6061a4fbbeb62a2194ef82ba8017f651710d0c65
|
[
"Apache-2.0"
] | 47
|
2019-05-09T15:41:37.000Z
|
2022-03-26T17:00:08.000Z
|
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import List, TypeVar
import numpy as np
import tensorflow as tf
import torch
Tensor = TypeVar('Tensor', tf.Tensor, torch.Tensor, np.ndarray)
def permute(tensor: Tensor, permutation: List[int]) -> Tensor:
"""Perform the specified `permutation` on the axes of a given `tensor`.
This method can be used with Numpy data:
```python
n = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]]])
b = fe.backend.permute(n, [2, 0, 1]) # [[[0, 2], [4, 6], [8, 10]], [[1, 3], [5, 7], [9, 11]]]
b = fe.backend.permute(n, [0, 2, 1]) # [[[0, 2], [1, 3]], [[4, 6], [5, 7]], [[8, 10], [9, 11]]]
```
This method can be used with TensorFlow tensors:
```python
t = tf.constant([[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]]])
b = fe.backend.permute(t, [2, 0, 1]) # [[[0, 2], [4, 6], [8, 10]], [[1, 3], [5, 7], [9, 11]]]
b = fe.backend.permute(t, [0, 2, 1]) # [[[0, 2], [1, 3]], [[4, 6], [5, 7]], [[8, 10], [9, 11]]]
```
This method can be used with PyTorch tensors:
```python
p = torch.tensor([[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8, 9], [10, 11]]])
b = fe.backend.permute(p, [2, 0, 1]) # [[[0, 2], [4, 6], [8, 10]], [[1, 3], [5, 7], [9, 11]]]
    b = fe.backend.permute(p, [0, 2, 1])  # [[[0, 2], [1, 3]], [[4, 6], [5, 7]], [[8, 10], [9, 11]]]
```
Args:
tensor: The tensor to permute.
permutation: The new axis order to be used. Should be a list containing all integers in range [0, tensor.ndim).
Returns:
The `tensor` with axes swapped according to the `permutation`.
Raises:
ValueError: If `tensor` is an unacceptable data type.
"""
if tf.is_tensor(tensor):
return tf.transpose(tensor, perm=permutation)
elif isinstance(tensor, torch.Tensor):
return tensor.permute(*permutation)
elif isinstance(tensor, np.ndarray):
return np.transpose(tensor, axes=permutation)
else:
raise ValueError("Unrecognized tensor type {}".format(type(tensor)))
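# --- Illustrative sketch (not part of the original source): a permutation is
# undone by permuting with the argsort of the original permutation, which is a
# convenient sanity check when juggling axis orders.
def _demo_permute_roundtrip():
    n = np.arange(24).reshape((2, 3, 4))
    perm = [2, 0, 1]
    inverse = list(np.argsort(perm))  # [1, 2, 0]
    assert np.array_equal(permute(permute(n, perm), inverse), n)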
| 41.318182
| 119
| 0.571324
|
f55f5886288d2e6b9f727aba41c205793f086b90
| 1,052
|
py
|
Python
|
bc/search/tests/utils.py
|
Buckinghamshire-Digital-Service/buckinghamshire-council
|
bbbdb52b515bcdfc79a2bd9198dfa4828405370e
|
[
"BSD-3-Clause"
] | 1
|
2021-02-27T07:27:17.000Z
|
2021-02-27T07:27:17.000Z
|
bc/search/tests/utils.py
|
Buckinghamshire-Digital-Service/buckinghamshire-council
|
bbbdb52b515bcdfc79a2bd9198dfa4828405370e
|
[
"BSD-3-Clause"
] | null | null | null |
bc/search/tests/utils.py
|
Buckinghamshire-Digital-Service/buckinghamshire-council
|
bbbdb52b515bcdfc79a2bd9198dfa4828405370e
|
[
"BSD-3-Clause"
] | 1
|
2021-06-09T15:56:54.000Z
|
2021-06-09T15:56:54.000Z
|
from django.conf import settings
from django.core.management import call_command
from wagtail.search.backends import get_search_backend
ORIGINAL_INDEX_NAME = settings.WAGTAILSEARCH_BACKENDS["default"].get("INDEX")
def update_search_index():
call_command("update_index")
def is_elasticsearch_backend(backend):
return hasattr(backend, "es")
def get_index_name_for_test():
if ORIGINAL_INDEX_NAME:
return "test_" + ORIGINAL_INDEX_NAME
def delete_test_indices_from_elasticsearch():
backend = get_search_backend()
if is_elasticsearch_backend(backend):
test_indices = backend.es.indices.get(get_index_name_for_test() + "*")
for test_index in test_indices.keys():
backend.es.indices.delete(test_index)
def get_search_settings_for_test():
search_backend_settings = settings.WAGTAILSEARCH_BACKENDS
backend = get_search_backend()
if is_elasticsearch_backend(backend):
search_backend_settings["default"]["INDEX"] = get_index_name_for_test()
return search_backend_settings
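# --- Illustrative sketch (not part of the original source): one way these
# helpers can be combined in a test, assuming Django's standard
# override_settings context manager; `run_assertions` is a hypothetical
# caller-supplied callable.
def _demo_isolated_index_test(run_assertions):
    from django.test import override_settings

    with override_settings(WAGTAILSEARCH_BACKENDS=get_search_settings_for_test()):
        update_search_index()
        try:
            run_assertions()
        finally:
            delete_test_indices_from_elasticsearch()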
| 29.222222
| 79
| 0.76711
|
b27da22598bc5f6b159452a1c5ab4a5f3fd53a6e
| 2,735
|
py
|
Python
|
lib/progress.py
|
re-knownout/video-compressor
|
708bbf813e73b8c72f849ccda567c754fdcbfc2b
|
[
"MIT"
] | 1
|
2022-02-28T07:07:58.000Z
|
2022-02-28T07:07:58.000Z
|
lib/progress.py
|
re-knownout/video-compressor
|
708bbf813e73b8c72f849ccda567c754fdcbfc2b
|
[
"MIT"
] | null | null | null |
lib/progress.py
|
re-knownout/video-compressor
|
708bbf813e73b8c72f849ccda567c754fdcbfc2b
|
[
"MIT"
] | null | null | null |
import math
from typing import Callable
class Progress:
"""
Class for creating simple progress bars
"""
_disable_unicode: bool
_tiles: int
_target: str
_percents: bool
_callback: Callable
    def __init__(self, tiles=20, target="", percents=False, callback: Callable = None):
        self._tiles = tiles
        self._target = target
        self._percents = percents
        self._callback = callback
        self._disable_unicode = False  # ASCII fallback is off by default; see set_disable_unicode()
@staticmethod
def _print(*string: str):
"""
        Internal method for printing joined and stripped values without a trailing newline
:param string: values to be printed
:return: void
"""
print(" ".join(string).strip(), end="")
def _render(self, progress: float, info: str = ""):
"""
Internal method for rendering progress bar
:param progress: current progress percent
:param info: progress info
:return: self
"""
tiles = (progress * self._tiles) / 100
self._print("[")
for i in range(self._tiles):
print(("-" if self._disable_unicode else "▪") if i == math.ceil(tiles) else (
"=" if self._disable_unicode else "■") if i < tiles else " ", end="")
self._print("]", "".join(["{:.2f}".format(round(progress, 2)) + "%" if self._percents else "", info]))
# Check if completed and has callback
if tiles == self._tiles and self._callback:
# Clean console before next output
print("\r", end="")
for i in range(240):
self._print(" ")
print("\r", end="")
self._callback()
return self
def show(self, info: str = ""):
"""
Method for first progress bar rendering
:param info: progress info
:return: self
"""
print(self._target, end="\n")
return self._render(0, info)
def update(self, progress: float, info: str = ""):
"""
Method for updating progress bar without re-render
:param progress: current progress percent
:param info: progress info
:return: self
"""
# Clean console before next output
print("\r", end="")
for i in range(240):
self._print(" ")
print("\r", end="")
self._render(progress, info)
print("\r", end="")
return self
def set_callback(self, callback: Callable):
"""
        Method for updating callback (fires when the progress bar reaches 100 percent)
:param callback: Callable
:return:
"""
self._callback = callback
def set_disable_unicode(self, disable_unicode: bool):
self._disable_unicode = disable_unicode
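# --- Illustrative sketch (not part of the original source): driving the bar
# above through a complete run; the callback fires once progress reaches 100%.
def _demo_progress():
    import time

    bar = Progress(tiles=20, target="Compressing video", percents=True,
                   callback=lambda: print("done"))
    bar.set_disable_unicode(False)  # pass True to fall back to ASCII tiles
    bar.show()
    for step in range(1, 101):
        time.sleep(0.01)
        bar.update(float(step), info=" frame %d/100" % step)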
| 28.195876
| 110
| 0.559415
|
2b4c17991b6d82d43a33f32339ab4b951e793f12
| 908
|
py
|
Python
|
kubernetes_typed/client/models/v1_node_status.py
|
nikhiljha/kubernetes-typed
|
4f4b969aa400c88306f92560e56bda6d19b2a895
|
[
"Apache-2.0"
] | 22
|
2020-12-10T13:06:02.000Z
|
2022-02-13T21:58:15.000Z
|
kubernetes_typed/client/models/v1_node_status.py
|
nikhiljha/kubernetes-typed
|
4f4b969aa400c88306f92560e56bda6d19b2a895
|
[
"Apache-2.0"
] | 4
|
2021-03-08T07:06:12.000Z
|
2022-03-29T23:41:45.000Z
|
kubernetes_typed/client/models/v1_node_status.py
|
nikhiljha/kubernetes-typed
|
4f4b969aa400c88306f92560e56bda6d19b2a895
|
[
"Apache-2.0"
] | 2
|
2021-09-05T19:18:28.000Z
|
2022-03-14T02:56:17.000Z
|
# Code generated by `typeddictgen`. DO NOT EDIT.
"""V1NodeStatusDict generated type."""
from typing import TypedDict, Dict, List
from kubernetes_typed.client import (
V1AttachedVolumeDict,
V1ContainerImageDict,
V1NodeAddressDict,
V1NodeConditionDict,
V1NodeConfigStatusDict,
V1NodeDaemonEndpointsDict,
V1NodeSystemInfoDict,
)
V1NodeStatusDict = TypedDict(
"V1NodeStatusDict",
{
"addresses": List[V1NodeAddressDict],
"allocatable": Dict[str, str],
"capacity": Dict[str, str],
"conditions": List[V1NodeConditionDict],
"config": V1NodeConfigStatusDict,
"daemonEndpoints": V1NodeDaemonEndpointsDict,
"images": List[V1ContainerImageDict],
"nodeInfo": V1NodeSystemInfoDict,
"phase": str,
"volumesAttached": List[V1AttachedVolumeDict],
"volumesInUse": List[str],
},
total=False,
)
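# --- Illustrative sketch (not part of the original source): because the
# TypedDict above is declared with total=False, any subset of keys
# type-checks, mirroring how the API omits unset status fields. The values
# below are purely illustrative.
_example_status: V1NodeStatusDict = {
    "phase": "Running",
    "capacity": {"cpu": "4", "memory": "16Gi"},
    "volumesInUse": ["example-volume"],
}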
| 28.375
| 54
| 0.680617
|
04a29ef4bdb3574dd1a751a2cdae2d7ef626cadf
| 5,354
|
py
|
Python
|
isi_sdk_8_0/isi_sdk_8_0/models/snapshot_lock_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_0/isi_sdk_8_0/models/snapshot_lock_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_0/isi_sdk_8_0/models/snapshot_lock_extended.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SnapshotLockExtended(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'comment': 'str',
'count': 'int',
'expires': 'int',
'id': 'int'
}
attribute_map = {
'comment': 'comment',
'count': 'count',
'expires': 'expires',
'id': 'id'
}
def __init__(self, comment=None, count=None, expires=None, id=None): # noqa: E501
"""SnapshotLockExtended - a model defined in Swagger""" # noqa: E501
self._comment = None
self._count = None
self._expires = None
self._id = None
self.discriminator = None
if comment is not None:
self.comment = comment
if count is not None:
self.count = count
if expires is not None:
self.expires = expires
if id is not None:
self.id = id
@property
def comment(self):
"""Gets the comment of this SnapshotLockExtended. # noqa: E501
User supplied lock comment. # noqa: E501
:return: The comment of this SnapshotLockExtended. # noqa: E501
:rtype: str
"""
return self._comment
@comment.setter
def comment(self, comment):
"""Sets the comment of this SnapshotLockExtended.
User supplied lock comment. # noqa: E501
:param comment: The comment of this SnapshotLockExtended. # noqa: E501
:type: str
"""
self._comment = comment
@property
def count(self):
"""Gets the count of this SnapshotLockExtended. # noqa: E501
Recursive lock count. # noqa: E501
:return: The count of this SnapshotLockExtended. # noqa: E501
:rtype: int
"""
return self._count
@count.setter
def count(self, count):
"""Sets the count of this SnapshotLockExtended.
Recursive lock count. # noqa: E501
:param count: The count of this SnapshotLockExtended. # noqa: E501
:type: int
"""
self._count = count
@property
def expires(self):
"""Gets the expires of this SnapshotLockExtended. # noqa: E501
The Unix Epoch time the snapshot lock will expire and be eligible for automatic deletion. # noqa: E501
:return: The expires of this SnapshotLockExtended. # noqa: E501
:rtype: int
"""
return self._expires
@expires.setter
def expires(self, expires):
"""Sets the expires of this SnapshotLockExtended.
The Unix Epoch time the snapshot lock will expire and be eligible for automatic deletion. # noqa: E501
:param expires: The expires of this SnapshotLockExtended. # noqa: E501
:type: int
"""
self._expires = expires
@property
def id(self):
"""Gets the id of this SnapshotLockExtended. # noqa: E501
System generated lock ID. # noqa: E501
:return: The id of this SnapshotLockExtended. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this SnapshotLockExtended.
System generated lock ID. # noqa: E501
:param id: The id of this SnapshotLockExtended. # noqa: E501
:type: int
"""
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SnapshotLockExtended):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
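# --- Illustrative sketch (not part of the original source): round-tripping
# the generated model through its dict form and the generated equality.
def _demo_snapshot_lock():
    lock = SnapshotLockExtended(comment='hold for audit', count=1,
                                expires=1700000000, id=42)
    assert lock.to_dict() == {'comment': 'hold for audit', 'count': 1,
                              'expires': 1700000000, 'id': 42}
    assert lock == SnapshotLockExtended(comment='hold for audit', count=1,
                                        expires=1700000000, id=42)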
| 26.904523
| 111
| 0.566306
|
65ace28d8d6a9f54f04b0135d3182eb98a3e0628
| 1,974
|
py
|
Python
|
recipes/qr-code-generator/all/conanfile.py
|
nadzkie0/conan-center-index
|
fde12bf20f2c4cb6a7554d09a5c9433a0f5cb72c
|
[
"MIT"
] | 2
|
2022-01-04T11:30:41.000Z
|
2022-01-04T11:31:32.000Z
|
recipes/qr-code-generator/all/conanfile.py
|
nadzkie0/conan-center-index
|
fde12bf20f2c4cb6a7554d09a5c9433a0f5cb72c
|
[
"MIT"
] | 5
|
2021-03-25T01:49:56.000Z
|
2021-03-28T16:42:12.000Z
|
recipes/qr-code-generator/all/conanfile.py
|
nadzkie0/conan-center-index
|
fde12bf20f2c4cb6a7554d09a5c9433a0f5cb72c
|
[
"MIT"
] | 4
|
2021-06-03T23:24:03.000Z
|
2022-03-03T17:16:09.000Z
|
from conans import ConanFile, CMake, tools
import os
class QrCodeGeneratorConan(ConanFile):
name = "qr-code-generator"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/nayuki/QR-Code-generator"
description = "High-quality QR Code generator library in Java, JavaScript, Python, C++, C, Rust, TypeScript."
topics = ["qr-code", "qr-generator", "c-plus-plus"]
license = "MIT"
settings = "os", "compiler", "arch", "build_type"
options = {"shared": [True, False],
"fPIC": [True, False]}
default_options = {'shared': False,
'fPIC': True}
exports_sources = ["CMakeLists.txt", "patches/**"]
generators = "cmake"
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
def configure(self):
if self.settings.os == "Windows":
del self.options.fPIC
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("QR-Code-generator-{}".format(self.version),
self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.configure()
return self._cmake
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def _extract_license(self):
header = tools.load(os.path.join(
self._source_subfolder, "cpp", "QrCode.hpp"))
license_contents = header[2:header.find("*/", 1)]
tools.save("LICENSE", license_contents)
def package(self):
self._extract_license()
self.copy("LICENSE", dst="licenses")
cmake = self._configure_cmake()
cmake.install()
def package_info(self):
self.cpp_info.libs.append("qrcodegen")
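# --- Illustrative sketch (not part of the original source): with a matching
# conandata.yml entry in place, a recipe like this is typically exercised
# locally with the Conan v1 client, e.g.:
#
#   conan create . qr-code-generator/1.7.0@ -o qr-code-generator:shared=True
#
# (the version string above is only an example).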
| 31.83871
| 113
| 0.60689
|
cedd78fd1eea1ef09d8cb7326298efa373c8bc61
| 60,170
|
py
|
Python
|
scripts/automation/trex_control_plane/interactive/trex/utils/parsing_opts.py
|
bdollma/trex-core
|
c052590f3842c117c46b0498c40455cbb011ceb1
|
[
"Apache-2.0"
] | null | null | null |
scripts/automation/trex_control_plane/interactive/trex/utils/parsing_opts.py
|
bdollma/trex-core
|
c052590f3842c117c46b0498c40455cbb011ceb1
|
[
"Apache-2.0"
] | null | null | null |
scripts/automation/trex_control_plane/interactive/trex/utils/parsing_opts.py
|
bdollma/trex-core
|
c052590f3842c117c46b0498c40455cbb011ceb1
|
[
"Apache-2.0"
] | null | null | null |
import argparse
from collections import namedtuple, OrderedDict
from .common import list_intersect, list_difference, is_valid_ipv4, is_valid_ipv6, is_valid_mac, list_remove_dup
from .text_opts import format_text
from trex.emu.trex_emu_validator import Ipv4, Ipv6, Mac
from ..common.trex_vlan import VLAN
from ..common.trex_types import *
from ..common.trex_types import PortProfileID, DEFAULT_PROFILE_ID, ALL_PROFILE_ID
from ..common.trex_exceptions import TRexError, TRexConsoleNoAction, TRexConsoleError
from ..common.trex_psv import PSV_ACQUIRED
from .constants import ON_OFF_DICT, UP_DOWN_DICT, FLOW_CTRL_DICT
import sys
import re
import os
import inspect
ArgumentPack = namedtuple('ArgumentPack', ['name_or_flags', 'options'])
ArgumentGroup = namedtuple('ArgumentGroup', ['type', 'args', 'options'])
MUTEX, NON_MUTEX = range(2)
def check_negative(value):
ivalue = int(value)
if ivalue < 0:
raise argparse.ArgumentTypeError("non positive value provided: '{0}'".format(value))
return ivalue
def match_time_unit(val):
'''match some val against time shortcut inputs '''
    match = re.match(r"^(\d+(\.\d+)?)([mh]?)$", val)
if match:
digit = float(match.group(1))
unit = match.group(3)
if not unit:
return digit
elif unit == 'm':
return digit*60
else:
return digit*60*60
else:
raise argparse.ArgumentTypeError("Duration should be passed in the following format: \n"
"-d 100 : in sec \n"
"-d 10m : in min \n"
"-d 1h : in hours")
match_multiplier_help = """Multiplier should be passed in the following format:
[number][<empty> | bps | kbps | mbps | gbps | pps | kpps | mpps | %% ].
no suffix will provide an absolute factor and percentage
will provide a percentage of the line rate. Examples:
'-m 10',
'-m 10kbps',
'-m 10kbpsl1',
'-m 10mpps',
'-m 23%% '
'-m 23%%' : is 23%% L1 bandwidth
'-m 23mbps': is 23mbps in L2 bandwidth (including FCS+4)
'-m 23mbpsl1': is 23mbps in L1 bandwidth
"""
dynamic_profile_help = """A list of profiles on which to apply the command.
Multiple profile IDs are allocated dynamically on the same port.
Profile expression is used as <port id>.<profile id>.
Default profile id is \"_\" when not specified.
"""
astf_profile_help = """A list of profiles on which to apply the command.
Default profile id is \"_\" when not specified.
"""
# decodes multiplier
# if allow_update is False, the +/- update suffix is not allowed
# divide_count states between how many entities the
# value should be divided
def decode_multiplier(val, allow_update = False, divide_count = 1):
factor_table = {None: 1, 'k': 1e3, 'm': 1e6, 'g': 1e9}
    pattern = r"^(\d+(\.\d+)?)(((k|m|g)?(bpsl1|pps|bps))|%)?"
# do we allow updates ? +/-
if not allow_update:
pattern += "$"
match = re.match(pattern, val)
op = None
else:
        pattern += r"([\+\-])?$"
match = re.match(pattern, val)
if match:
op = match.group(7)
else:
op = None
result = {}
if not match:
return None
# value in group 1
value = float(match.group(1))
# decode unit as whole
unit = match.group(3)
# k,m,g
factor = match.group(5)
# type of multiplier
m_type = match.group(6)
# raw type (factor)
if not unit:
result['type'] = 'raw'
result['value'] = value
# percentage
elif unit == '%':
result['type'] = 'percentage'
result['value'] = value
elif m_type == 'bps':
result['type'] = 'bps'
result['value'] = value * factor_table[factor]
elif m_type == 'pps':
result['type'] = 'pps'
result['value'] = value * factor_table[factor]
elif m_type == 'bpsl1':
result['type'] = 'bpsl1'
result['value'] = value * factor_table[factor]
if op == "+":
result['op'] = "add"
elif op == "-":
result['op'] = "sub"
else:
result['op'] = "abs"
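    # note: 'percentage' only ever appears under result['type'], never under
    # result['op'], so the check below is always true and every decoded value
    # is divided by divide_count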
if result['op'] != 'percentage':
result['value'] = result['value'] / divide_count
return result
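# --- Illustrative sketch (not part of the original source): a few inputs and
# the dicts decode_multiplier() produces for them.
def _demo_decode_multiplier():
    assert decode_multiplier('10kbps') == {'type': 'bps', 'value': 10000.0, 'op': 'abs'}
    assert decode_multiplier('25%') == {'type': 'percentage', 'value': 25.0, 'op': 'abs'}
    assert decode_multiplier('3mpps+', allow_update=True) == \
        {'type': 'pps', 'value': 3000000.0, 'op': 'add'}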
def match_multiplier(val):
'''match some val against multiplier shortcut inputs '''
result = decode_multiplier(val, allow_update = True)
if not result:
raise argparse.ArgumentTypeError(match_multiplier_help)
return val
def match_multiplier_strict(val):
'''match some val against multiplier shortcut inputs '''
result = decode_multiplier(val, allow_update = False)
if not result:
raise argparse.ArgumentTypeError(match_multiplier_help)
return val
def hex_int (val):
pattern = r"0x[1-9a-fA-F][0-9a-fA-F]*"
if not re.match(pattern, val):
raise argparse.ArgumentTypeError("{0} is not a valid positive HEX formatted number".format(val))
return int(val, 16)
def action_conv_type_to_bytes():
class IPConv(argparse.Action):
def __init__(self, conv_type, mc = False, *args, **kwargs):
super(IPConv, self).__init__(*args, **kwargs)
self.conv_type = conv_type
self.mc = mc
def __call__(self, parser, args, values, option_string=None):
if self.conv_type == 'ipv4':
res = Ipv4(values, mc = self.mc)
elif self.conv_type == 'ipv6':
res = Ipv6(values, mc = self.mc)
elif self.conv_type == 'mac':
res = Mac(values)
setattr(args, self.dest, res.V())
return IPConv
def action_check_min_max():
class MinMaxValidate(argparse.Action):
def __init__(self, min_val = float('-inf'), max_val = float('inf'), *args, **kwargs):
super(MinMaxValidate, self).__init__(*args, **kwargs)
self.min_val = min_val
self.max_val = max_val
def __call__(self, parser, args, values, option_string=None):
try:
values = int(values)
except ValueError:
parser.error('Value "%s" must be an integer' % values)
if self.min_val <= values <= self.max_val:
setattr(args, self.dest, values)
else:
                parser.error('Value "%s" for %s must be between %s and %s' % (values, option_string, self.min_val, self.max_val))
return MinMaxValidate
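# --- Illustrative sketch (not part of the original source): wiring the
# factory-produced action into a plain argparse parser; the extra min_val and
# max_val keywords are consumed by MinMaxValidate.__init__.
def _demo_min_max_action():
    parser = argparse.ArgumentParser()
    parser.add_argument('--mtu', action=action_check_min_max(),
                        min_val=256, max_val=9000)
    args = parser.parse_args(['--mtu', '1500'])
    assert args.mtu == 1500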
def action_check_vlan():
class VLANCheck(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
try:
vlan = VLAN(values)
except TRexError as e:
parser.error(e.brief())
setattr(args, self.dest, vlan.get_tags())
return
return VLANCheck
def action_check_tpid():
class TPIDCheck(argparse.Action):
TPIDS = ["0x8100", "0x88A8"]
def __call__(self, parser, args, values, option_string=None):
if any(v not in TPIDCheck.TPIDS for v in values):
err = 'tpid value is not one of the valid tpids: %s' % TPIDCheck.TPIDS
parser.error(err)
values = [int(v, 16) for v in values]
setattr(args, self.dest, values)
return TPIDCheck
def action_bpf_filter_merge():
class BPFFilterMerge(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
setattr(args, self.dest, ' '.join(values).strip("'\""))
return
return BPFFilterMerge
def is_valid_file(filename):
if os.path.isdir(filename):
raise argparse.ArgumentTypeError("Given path '%s' is a directory" % filename)
if not os.path.isfile(filename):
raise argparse.ArgumentTypeError("The file '%s' does not exist" % filename)
return filename
# scapy decoder class for parsing opts
class ScapyDecoder(object):
scapy_layers = None
@staticmethod
def init ():
# one time
if ScapyDecoder.scapy_layers:
return
import scapy.all
import scapy.layers.dhcp
raw = {}
# default layers
raw.update(scapy.all.__dict__)
# extended layers - add here
raw.update(scapy.layers.dhcp.__dict__)
ScapyDecoder.scapy_layers = {k: v for k, v in raw.items() if inspect.isclass(v) and issubclass(v, scapy.all.Packet)}
@staticmethod
def to_scapy(scapy_str):
ScapyDecoder.init()
try:
scapy_obj = eval(scapy_str, {'__builtins__': {}, 'True': True, 'False': False}, ScapyDecoder.scapy_layers)
len(scapy_obj)
return scapy_obj
except Exception as e:
raise argparse.ArgumentTypeError("invalid scapy expression: '{0}' - {1}".format(scapy_str, str(e)))
@staticmethod
def formatted_layers ():
ScapyDecoder.init()
output = ''
for k, v in sorted(ScapyDecoder.scapy_layers.items()):
name = format_text("'{}'".format(k), 'bold')
descr = v.name
#fields = ", ".join(["'%s'" % f.name for f in v.fields_desc])
#print("{:<50} - {} ({})".format(name ,descr, fields))
output += "{:<50} - {}\n".format(name ,descr)
return output
def check_ipv4_addr (ipv4_str):
if not is_valid_ipv4(ipv4_str):
raise argparse.ArgumentTypeError("invalid IPv4 address: '{0}'".format(ipv4_str))
return ipv4_str
def check_ipv6_addr(ipv6_str):
if not is_valid_ipv6(ipv6_str):
raise argparse.ArgumentTypeError("invalid IPv6 address: '{0}'".format(ipv6_str))
return ipv6_str
def check_ip_addr(addr):
if not (is_valid_ipv4(addr) or is_valid_ipv6(addr)):
raise argparse.ArgumentTypeError("invalid IPv4/6 address: '{0}'".format(addr))
return addr
def check_pkt_size (pkt_size):
try:
pkt_size = int(pkt_size)
except ValueError:
raise argparse.ArgumentTypeError("invalid packet size type: '{0}'".format(pkt_size))
if (pkt_size < 64) or (pkt_size > 9216):
raise argparse.ArgumentTypeError("invalid packet size: '{0}' - valid range is 64 to 9216".format(pkt_size))
return pkt_size
def check_mac_addr (addr):
def _check_one_mac(m):
if not is_valid_mac(m):
raise argparse.ArgumentTypeError("Not a valid MAC address: '{0}'".format(m))
if isinstance(addr, list):
for m in addr:
_check_one_mac(m)
else:
_check_one_mac(addr)
return addr
def check_valid_port(port_str):
try:
port = int(port_str)
except ValueError:
raise argparse.ArgumentTypeError("invalid port type: '{0}'".format(port_str))
    if not (1 <= port <= 65535):
raise argparse.ArgumentTypeError("invalid port number: '{0}' - valid range is 1 to 65535".format(port))
return port
def decode_tunables (tunable_str):
tunables = {}
# split by comma to tokens
tokens = tunable_str.split(',')
# each token is of form X=Y
for token in tokens:
        m = re.search(r'(\S+)=(.+)', token)
if not m:
raise argparse.ArgumentTypeError("bad syntax for tunables: {0}".format(token))
val = m.group(2) # string
if val.startswith(("'", '"')) and val.endswith(("'", '"')) and len(val) > 1: # need to remove the quotes from value
val = val[1:-1]
elif val.startswith('0x'): # hex
val = int(val, 16)
else:
try:
if '.' in val: # float
val = float(val)
else: # int
val = int(val)
except:
pass
tunables[m.group(1)] = val
return tunables
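# --- Illustrative sketch (not part of the original source): quoting, hex and
# numeric coercion as applied by decode_tunables().
def _demo_decode_tunables():
    assert decode_tunables('fsize=100,pg_id=0x7,name="http"') == \
        {'fsize': 100, 'pg_id': 7, 'name': 'http'}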
class TunnelType:
NONE = 0
GTPU = 1
tunnel_types = TunnelType()
supported_tunnels = [attr for attr in dir(tunnel_types) if not callable(getattr(tunnel_types, attr)) and not attr.startswith("__") and attr != 'NONE']
supported_tunnels = str(supported_tunnels).replace('[', '').replace(']', '')
def get_tunnel_type(tunnel_type_str):
tunnel_type_str = tunnel_type_str.lower()
if tunnel_type_str == "gtpu":
return TunnelType.GTPU
else:
raise argparse.ArgumentTypeError("bad tunnel type : {0}".format(tunnel_type_str))
def convert_old_tunables_to_new_tunables(tunable_str, help=False):
try:
tunable_dict = decode_tunables(tunable_str)
except argparse.ArgumentTypeError as e:
raise TRexError(e)
tunable_list = []
# converting from tunables dictionary to list
for tunable_key in tunable_dict:
tunable_list.extend(["--{}".format(tunable_key), str(tunable_dict[tunable_key])])
if help:
tunable_list.append("--help")
return tunable_list
class OPTIONS_DB_ARGS:
MULTIPLIER = ArgumentPack(
['-m', '--multiplier'],
{'help': match_multiplier_help,
'dest': "mult",
'default': "1",
'type': match_multiplier})
MULTIPLIER_STRICT = ArgumentPack(
['-m', '--multiplier'],
{'help': match_multiplier_help,
'dest': "mult",
'default': "1",
'type': match_multiplier_strict})
MULTIPLIER_NUM = ArgumentPack(
['-m'],
{'help': 'Sent traffic numeric multiplier',
'dest': 'mult',
'default': 1,
'type': float})
TOTAL = ArgumentPack(
['-t', '--total'],
{'help': "traffic will be divided between all ports specified",
'dest': "total",
'default': False,
'action': "store_true"})
IPG = ArgumentPack(
['-i', '--ipg'],
{'help': "IPG value in usec between packets. default will be from the pcap",
'dest': "ipg_usec",
'default': None,
'type': float})
MIN_IPG = ArgumentPack(
['--min-ipg'],
{'help': "Minimal IPG value in usec between packets. Used to guard from too small IPGs.",
'dest': "min_ipg_usec",
'default': None,
'type': float})
SPEEDUP = ArgumentPack(
['-s', '--speedup'],
{'help': "Factor to accelerate the injection. effectively means IPG = IPG / SPEEDUP",
'dest': "speedup",
'default': 1.0,
'type': float})
COUNT = ArgumentPack(
['-c', '--count'],
{'help': "How many times to perform action [default is 1, 0 means forever]",
'dest': "count",
'default': 1,
'type': int})
PROMISCUOUS = ArgumentPack(
['--prom'],
{'help': "Set port promiscuous on/off",
'choices': ON_OFF_DICT})
MULTICAST = ArgumentPack(
['--mult'],
{'help': "Set port multicast on/off",
'choices': ON_OFF_DICT})
LINK_STATUS = ArgumentPack(
['--link'],
{'help': 'Set link status up/down',
'choices': UP_DOWN_DICT})
LED_STATUS = ArgumentPack(
['--led'],
{'help': 'Set LED status on/off',
'choices': ON_OFF_DICT})
FLOW_CTRL = ArgumentPack(
['--fc'],
{'help': 'Set Flow Control type',
'dest': 'flow_ctrl',
'choices': FLOW_CTRL_DICT})
VXLAN_FS = ArgumentPack(
['--vxlan-fs'],
{'help': 'UDP ports for which HW flow stats will be read from layers after VXLAN',
'nargs': '*',
'action': 'merge',
'type': int})
SRC_IPV4 = ArgumentPack(
['--src'],
{'help': 'Configure source IPv4 address',
'dest': 'src_ipv4',
'required': True,
'type': check_ipv4_addr})
DST_IPV4 = ArgumentPack(
['--dst'],
{'help': 'Configure destination IPv4 address',
'dest': 'dst_ipv4',
'required': True,
'type': check_ipv4_addr})
DST_IPV4_NOT_REQ = ArgumentPack(
['--dst'],
{'help': 'Configure destination IPv4 address',
'dest': 'dst_ipv4',
'required': False,
'type': check_ipv4_addr})
DUAL_IPV4 = ArgumentPack(
['--dual-ip'],
{'help': 'IP address to be added for each pair of ports (starting from second pair)',
'default': '1.0.0.0',
'type': check_ipv4_addr})
DST_MAC = ArgumentPack(
['--dst'],
{'help': 'Configure destination MAC address',
'dest': 'dst_mac',
'required': True,
'type': check_mac_addr})
NODE_MAC = ArgumentPack(
['--mac'],
{'help': 'Configure node MAC address',
'dest': 'mac',
'required': True,
'type': check_mac_addr})
RETRIES = ArgumentPack(
['-r', '--retries'],
{'help': 'retries count [default is zero]',
'dest': 'retries',
'default': 0,
'type': int})
OUTPUT_FILENAME = ArgumentPack(
['-o', '--output'],
{'help': 'Output PCAP filename',
'dest': 'output_filename',
'default': None,
'type': str})
ASTF_PROFILE_LIST = ArgumentPack(
['--pid'],
{"nargs": 1,
'dest':'profiles',
'metavar': 'PROFILE',
'type': str,
'help': astf_profile_help,
'default': [DEFAULT_PROFILE_ID]})
ASTF_PROFILE_DEFAULT_LIST = ArgumentPack(
['--pid'],
{"nargs": '+',
'dest':'profiles',
'metavar': 'PROFILE',
'type': str,
'help': astf_profile_help,
'default': [DEFAULT_PROFILE_ID]})
ASTF_NC = ArgumentPack(
['--nc'],
{'help': 'Faster flow termination at the end of the test, see --nc in the manual',
'action': 'store_true'})
ASTF_IPV6 = ArgumentPack(
['--ipv6'],
{'help': 'Convert traffic to IPv6',
'action': 'store_true'})
ASTF_CLIENTS = ArgumentPack(
['--clients'],
{'nargs': '+',
'action': 'merge',
'type': int,
'help': 'Only those client interfaces will send traffic.',
'default': []})
ASTF_SERVERS_ONLY = ArgumentPack(
['--servers-only'],
{'help': 'All client interfaces will be disabled.',
'action': 'store_true'})
ASTF_LATENCY = ArgumentPack(
['-l'],
{'dest': 'latency_pps',
'default': 0,
'type': int,
'help': "start latency streams"})
PORT_RESTART = ArgumentPack(
['-r', '--restart'],
{'help': 'hard restart port(s)',
'dest': 'restart',
'default': False,
'action': 'store_true'})
LIMIT = ArgumentPack(
['-l', '--limit'],
{'help': 'Limit the packet count to be written to the file',
'dest': 'limit',
'default': 1000,
'type': int})
SUPPORTED = ArgumentPack(
['--supp'],
{'help': 'Show which attributes are supported by current NICs',
'default': None,
'action': 'store_true'})
TUNABLES = ArgumentPack(
['-t'],
{'help': "Sets tunables for a profile. Example: '-t fsize=100,pg_id=7'",
'metavar': 'T1=VAL[,T2=VAL ...]',
'dest': "tunables",
'default': {},
'action': 'merge',
'type': decode_tunables})
PROFILE_LIST = ArgumentPack(
['-p', '--port'],
{"nargs": '+',
'dest':'ports',
'metavar': 'PORT[.PROFILE]',
'action': 'merge',
'type': PortProfileID,
'help': dynamic_profile_help,
'default': []})
PORT_LIST = ArgumentPack(
['-p', '--port'],
{"nargs": '+',
'dest':'ports',
'metavar': 'PORTS',
'action': 'merge',
'type': int,
'help': "A list of ports on which to apply the command",
'default': []})
PORT_LIST_NO_DEFAULT = ArgumentPack(
['-p', '--port'],
{"nargs": '+',
'dest':'ports_no_default',
'metavar': 'PORTS',
'action': 'merge',
'type': int,
'help': "A list of ports on which to apply the command"})
SINGLE_PORT = ArgumentPack(
['-p', '--port'],
{'dest':'ports',
'type': int,
'metavar': 'PORT',
'help': 'source port for the action',
'required': True})
PING_IP = ArgumentPack(
['-d'],
{'help': 'which IPv4/6 to ping',
'dest': 'ping_ip',
'required': True,
'type': check_ip_addr})
PING_COUNT = ArgumentPack(
['-n', '--count'],
{'help': 'How many times to ping [default is 5]',
'dest': 'count',
'default': 5,
'type': int})
PKT_SIZE = ArgumentPack(
['-s'],
{'dest':'pkt_size',
'help': 'packet size to use',
'default': 64,
'type': check_pkt_size})
ALL_PORTS = ArgumentPack(
['-a'],
{"action": "store_true",
"dest": "all_ports",
'help': "Set this flag to apply the command on all available ports",
'default': False})
ALL_PROFILES = ArgumentPack(
['-a'],
{"action": "store_true",
"dest": "all_profiles",
'help': "Set this flag to apply the command on all available dynamic profiles",
'default': False})
DURATION = ArgumentPack(
['-d'],
{'action': "store",
'metavar': 'TIME',
'dest': 'duration',
'type': match_time_unit,
'default': -1.0,
'help': "Set duration time for job."})
ESTABLISH_DURATION = ArgumentPack(
['--e_duration'],
{'action': "store",
'metavar': 'TIME',
'dest': 'e_duration',
'type': match_time_unit,
'default': 0.0,
'help': "Set time limit for the first flow establishment."})
TERMINATE_DURATION = ArgumentPack(
['--t_duration'],
{'action': "store",
'metavar': 'TIME',
'dest': 't_duration',
'type': match_time_unit,
'default': 0.0,
         'help': "Set time limit waiting for all the flows to terminate gracefully."})
TIMEOUT = ArgumentPack(
['-t'],
{'action': "store",
'metavar': 'TIMEOUT',
'dest': 'timeout',
'type': int,
'default': None,
'help': "Timeout for operation in seconds."})
FORCE = ArgumentPack(
['--force'],
{"action": "store_true",
'default': False,
         'help': "Set if you want to stop active ports before applying command."})
LOOPBACK = ArgumentPack(
['--loopback'],
{"action": "store_true",
'default': False,
'help': "Set if you want to enable tunnel-loopback mode."})
TUNNEL_OFF = ArgumentPack(
['--off'],
{"action": "store_true",
'default': False,
'help': "Set if you want to deactivate tunnel mode."})
TUNNEL_TYPE = ArgumentPack(
['--type'],
{'required': True,
'type': get_tunnel_type,
'help': "The tunnel type for example --type gtpu. " +
"Currently the supported tunnels are: " + supported_tunnels + "."})
CLIENT_START = ArgumentPack(
['--c_start'],
{"required": True,
'type': check_ipv4_addr,
'help': "The first client that you want to update its tunnel."})
CLIENT_END = ArgumentPack(
['--c_end'],
{"required": True,
'type': check_ipv4_addr,
'help': "The last client that you want to update its tunnel."})
VERSION = ArgumentPack(
['--ipv6'],
{"action": "store_true",
'default': False,
'help': "Set if you want ipv6 instead of ipv4."})
TEID = ArgumentPack(
['--teid'],
{'type' : int,
'required': True,
'help': "The tunnel teid of the first client. The teid of the second client is going to be teid+1"})
SRC_IP = ArgumentPack(
['--src_ip'],
{'type' : str,
'required': True,
'help': "The tunnel src ip."})
DST_IP = ArgumentPack(
['--dst_ip'],
{'type' : str,
'required': True,
'help': "The tunnel dst ip."})
SPORT = ArgumentPack(
['--sport'],
{'type' : check_valid_port,
'required': True,
'help': "The source port of the tunnel."})
REMOVE = ArgumentPack(
['--remove'],
{"action": "store_true",
'default': False,
'help': "Set if you want to remove the active profiles after stopping them."})
READONLY = ArgumentPack(
['-r'],
{'action': 'store_true',
'dest': 'readonly',
'help': 'Do not acquire ports, connect as read-only.'})
REMOTE_FILE = ArgumentPack(
['-r', '--remote'],
{"action": "store_true",
'default': False,
         'help': "file path should be interpreted by the server (remote file)"})
DUAL = ArgumentPack(
['--dual'],
{"action": "store_true",
'default': False,
'help': "Transmit in a dual mode - requires ownership on the adjacent port"})
SRC_MAC_PCAP = ArgumentPack(
['--src-mac-pcap'],
{"action": "store_true",
"default": False,
"help": "Source MAC address will be taken from pcap file"})
DST_MAC_PCAP = ArgumentPack(
['--dst-mac-pcap'],
{"action": "store_true",
"default": False,
"help": "Destination MAC address will be taken from pcap file"})
FILE_PATH = ArgumentPack(
['-f'],
{'metavar': 'FILE',
'dest': 'file',
'nargs': 1,
'required': True,
'type': is_valid_file,
'help': "File path to use"})
FILE_PATH_NO_CHECK = ArgumentPack(
['-f'],
{'metavar': 'FILE',
'dest': 'file',
'nargs': 1,
'required': True,
'type': str,
'help': "File path to use"})
FILE_FROM_DB = ArgumentPack(
['--db'],
{'metavar': 'LOADED_STREAM_PACK',
'help': "A stream pack which already loaded into console cache."})
SERVER_IP = ArgumentPack(
['--server'],
{'metavar': 'SERVER',
'help': "server IP"})
DRY_RUN = ArgumentPack(
['-n', '--dry'],
{'action': 'store_true',
'dest': 'dry',
'default': False,
'help': "Dry run - no traffic will be injected"})
SYNCHRONIZED = ArgumentPack(
['--sync'],
{'action': 'store_true',
'dest': 'sync',
'default': False,
         'help': 'Run the traffic with synchronized time at adjacent ports. Need to ensure the effective IPG is at least 1000 usec.'})
XTERM = ArgumentPack(
['-x', '--xterm'],
{'action': 'store_true',
'dest': 'xterm',
'default': False,
'help': "Starts TUI in xterm window"})
LOCKED = ArgumentPack(
['-l', '--locked'],
{'action': 'store_true',
'dest': 'locked',
'default': False,
'help': "Locks TUI on legend mode"})
FULL_OUTPUT = ArgumentPack(
['--full'],
{'action': 'store_true',
'help': "Prompt full info in a JSON format"})
GLOBAL_STATS = ArgumentPack(
['-g'],
{'action': 'store_const',
'dest': 'stats',
'const': 'global',
'help': "Fetch only global statistics"})
PORT_STATS = ArgumentPack(
['-p'],
{'action': 'store_const',
'dest': 'stats',
'const': 'ports',
'help': "Fetch only port statistics"})
PORT_STATUS = ArgumentPack(
['--ps'],
{'action': 'store_const',
'dest': 'stats',
'const': 'status',
'help': "Fetch only port status data"})
STREAMS_STATS = ArgumentPack(
['-s'],
{'action': 'store_const',
'dest': 'stats',
'const': 'streams',
'help': "Fetch only streams stats"})
LATENCY_STATS = ArgumentPack(
['-l'],
{'action': 'store_const',
'dest': 'stats',
'const': 'latency',
'help': "Fetch only latency stats"})
LATENCY_HISTOGRAM = ArgumentPack(
['--lh'],
{'action': 'store_const',
'dest': 'stats',
'const': 'latency_histogram',
'help': "Fetch only latency histogram"})
LATENCY_COUNTERS = ArgumentPack(
['--lc'],
{'action': 'store_const',
'dest': 'stats',
'const': 'latency_counters',
'help': "Fetch only latency counters"})
CPU_STATS = ArgumentPack(
['-c'],
{'action': 'store_const',
'dest': 'stats',
'const': 'cpu',
'help': "Fetch only CPU utilization stats"})
MBUF_STATS = ArgumentPack(
['-m'],
{'action': 'store_const',
'dest': 'stats',
'const': 'mbuf',
'help': "Fetch only MBUF utilization stats"})
EXTENDED_STATS = ArgumentPack(
['-x'],
{'action': 'store_const',
'dest': 'stats',
'const': 'xstats',
'help': "Fetch xstats of port, excluding lines with zero values"})
EXTENDED_INC_ZERO_STATS = ArgumentPack(
['--xz', '--zx'],
{'action': 'store_const',
'dest': 'stats',
'const': 'xstats_inc_zero',
'help': "Fetch xstats of port, including lines with zero values"})
ASTF_STATS = ArgumentPack(
['-a'],
{'action': 'store_const',
'dest': 'stats',
'const': 'astf',
'help': "Fetch ASTF counters, excluding lines with zero values"})
ASTF_INC_ZERO_STATS = ArgumentPack(
['--za', '--az'],
{'action': 'store_const',
'dest': 'stats',
'const': 'astf_inc_zero',
'help': "Fetch ASTF counters, including lines with zero values"})
ASTF_PROFILE_STATS = ArgumentPack(
['--pid'],
{"nargs": 1,
'dest':'pfname',
'metavar': 'PROFILE',
'type': str,
'default': None,
         'help': "ASTF Profile ID: when using the --pid option, use it together with -a or --za"})
STREAMS_MASK = ArgumentPack(
['-i', '--id'],
{"nargs": '+',
'dest':'ids',
'metavar': 'ID',
'type': int,
'help': 'Filter by those stream IDs (default is all streams).',
'default': []})
STREAMS_CODE = ArgumentPack(
['--code'],
{'type': str,
'nargs': '?',
'const': '',
'metavar': 'FILE',
'help': 'Get Python code that creates the stream(s). Provided argument is filename to save, or by default prints to stdout.'})
PIN_CORES = ArgumentPack(
['--pin'],
{'action': 'store_true',
'dest': 'pin_cores',
'default': False,
         'help': "Pin cores to interfaces - cores will be divided between interfaces (performance boost for symmetric profiles)"})
CORE_MASK = ArgumentPack(
['--core_mask'],
{'action': 'store',
'nargs': '+',
'type': hex_int,
'dest': 'core_mask',
'default': None,
'help': "Core mask - only cores responding to the bit mask will be active"})
SERVICE_BGP_FILTERED = ArgumentPack(
['--bgp'],
{'action': 'store_true',
'default': False,
'dest': 'allow_bgp',
'help': 'filter mode with bgp packets forward to rx'})
SERVICE_TRAN_FILTERED = ArgumentPack(
['--tran'],
{'action': 'store_true',
'default': False,
'dest': 'allow_transport',
'help': 'filter mode with tcp/udp packets forward to rx (generated by emu)' })
SERVICE_DHCP_FILTERED = ArgumentPack(
['--dhcp'],
{'action': 'store_true',
'default': False,
'dest': 'allow_dhcp',
'help': 'filter mode with dhcpv4/dhcpv6 packets forward to rx'})
SERVICE_MDNS_FILTERED = ArgumentPack(
['--mdns'],
{'action': 'store_true',
'default': False,
'dest': 'allow_mdns',
'help': 'filter mode with mDNS packets forward to rx'})
SERVICE_EMU_FILTERED = ArgumentPack(
['--emu'],
{'action': 'store_true',
'default': False,
'dest': 'allow_emu',
'help': 'filter mode for all emu services rx'})
SERVICE_NO_TCP_UDP_FILTERED = ArgumentPack(
['--no-tcp-udp'],
{'action': 'store_true',
'default': False,
'dest': 'allow_no_tcp_udp',
'help': 'filter mode with no_tcp_udp packets forward to rx'})
SERVICE_ALL_FILTERED = ArgumentPack(
['--all'],
{'action': 'store_true',
'default': False,
'dest': 'allow_all',
'help': 'Allow every filter possible'})
SERVICE_OFF = ArgumentPack(
['--off'],
{'action': 'store_false',
'dest': 'enabled',
'default': True,
'help': 'Deactivates services on port(s)'})
TX_PORT_LIST = ArgumentPack(
['--tx'],
{'nargs': '+',
'dest':'tx_port_list',
'metavar': 'TX',
'action': 'merge',
'type': int,
'help': 'A list of ports to capture on the TX side',
'default': []})
RX_PORT_LIST = ArgumentPack(
['--rx'],
{'nargs': '+',
'dest':'rx_port_list',
'metavar': 'RX',
'action': 'merge',
'type': int,
'help': 'A list of ports to capture on the RX side',
'default': []})
MONITOR_TYPE_VERBOSE = ArgumentPack(
['-v', '--verbose'],
{'action': 'store_true',
'dest': 'verbose',
'default': False,
'help': 'output to screen as verbose'})
MONITOR_TYPE_PIPE = ArgumentPack(
['-p', '--pipe'],
{'action': 'store_true',
'dest': 'pipe',
'default': False,
'help': 'forward packets to a pipe'})
BPF_FILTER = ArgumentPack(
['-f', '--filter'],
{'type': str,
'nargs': '+',
'action': action_bpf_filter_merge(),
'dest': 'filter',
'default': '',
'help': 'BPF filter'})
CAPTURE_ID = ArgumentPack(
['-i', '--id'],
{'help': "capture ID to remove",
'dest': "capture_id",
'type': int,
'required': True})
SCAPY_PKT = ArgumentPack(
['-s'],
{'dest':'scapy_pkt',
'metavar': 'PACKET',
'type': ScapyDecoder.to_scapy,
'help': 'A scapy notation packet (e.g.: Ether()/IP())'})
SHOW_LAYERS = ArgumentPack(
['--layers', '-l'],
{'action': 'store_true',
'dest': 'layers',
'help': "Show all registered layers / inspect a specific layer"})
VLAN_TAGS = ArgumentPack(
['--vlan'],
{'dest':'vlan',
'action': action_check_vlan(),
'type': int,
'nargs': '*',
'metavar': 'VLAN',
'help': 'single or double VLAN tags'})
VLAN_TPIDS = ArgumentPack(
['--tpid'],
{'dest':'tpid',
'action': action_check_tpid(),
'type': str,
'nargs': '*',
'metavar': 'TPID',
'help': 'single or double VLAN tpids'})
CLEAR_VLAN = ArgumentPack(
['-c'],
{'action': 'store_true',
'dest': 'clear_vlan',
'default': False,
'help': "clear any VLAN configuration"})
PLUGIN_NAME = ArgumentPack(
['plugin_name'],
{'type': str,
'metavar': 'name',
'help': 'Name of plugin'})
IPV6_OFF = ArgumentPack(
['--off'],
{'help': 'Disable IPv6 on port.',
'action': 'store_true'})
IPV6_AUTO = ArgumentPack(
['--auto-ipv6'],
{'help': 'Enable IPv6 on port with automatic address.',
'action': 'store_true'})
IPV6_SRC = ArgumentPack(
['-s', '--src'],
{'help': 'Enable IPv6 on port with specific address.',
'dest': 'src_ipv6',
'type': check_ipv6_addr})
TG_NAME_START = ArgumentPack(
['--start'],
{'help': 'Starting index to print template group names',
'dest': 'start',
'type': int,
'default': 0
})
TG_NAME_AMOUNT = ArgumentPack(
['--amount'],
{'help': 'Amount of template group names to print',
'dest': 'amount',
'type': int,
'default': 50
})
TG_STATS = ArgumentPack(
['--name'],
{'dest': 'name',
'required': True,
'type': str,
'help': "Template group name"})
# Emu Args
SINGLE_PORT_REQ = ArgumentPack(
['-p', '--port'],
{'type': int,
'metavar': 'PORT',
'help': 'Port for the action',
'required': True})
SINGLE_PORT_NOT_REQ = ArgumentPack(
['-p', '--port'],
{'type': int,
'metavar': 'PORT',
'help': 'Port for the action',
'required': False})
MAX_CLIENT_RATE = ArgumentPack(
['-m', '--max-rate'],
{'dest': 'max_rate',
'type': int,
'help': "Max clients rate, clients per second"})
TO_JSON = ArgumentPack(
['--json'],
{'action': 'store_true',
'help': "Prompt info into a JSON format"})
TO_YAML = ArgumentPack(
['--yaml'],
{'action': 'store_true',
'help': "Prompt info into a YAML format"})
SHARED_NS = ArgumentPack(
['--shared-ns'],
{'type': str,
'default': None,
'help': "Create a node in this shared namespace"})
SUBNET = ArgumentPack(
['--subnet'],
{'type': int,
'default': None,
'action': action_check_min_max(),
'min_val': 1, 'max_val': 32,
'help': "IPv4 subnet mask for shared ns as a CIDR number [1-32]"})
SHOW_MAX_CLIENTS = ArgumentPack(
['--max-clients'],
{'default': 15,
'action': action_check_min_max(),
'min_val': 1, 'max_val': 255,
'help': "Max clients to show each time"})
SHOW_MAX_NS = ArgumentPack(
['--max-ns'],
{'default': 1,
'action': action_check_min_max(),
'min_val': 1, 'max_val': 255,
'help': "Max namespaces to show each time"})
SHOW_IPV6_DATA = ArgumentPack(
['--6'],
{'default': False,
'dest': 'ipv6',
'action': 'store_true',
'help': "Show ipv6 information in table, i.e: IPv6 Local, IPv6 Slaac.."})
SHOW_IPV6_ROUTER = ArgumentPack(
['--6-router'],
{'default': False,
'dest': 'ipv6_router',
'action': 'store_true',
'help': "Show ipv6 router table"})
SHOW_IPV4_DG = ArgumentPack(
['--4-dg'],
{'default': False,
'dest': 'ipv4_dg',
'action': 'store_true',
'help': "Show ipv4 default gateway table"})
SHOW_IPV6_DG = ArgumentPack(
['--6-dg'],
{'default': False,
'dest': 'ipv6_dg',
'action': 'store_true',
'help': "Show ipv6 default gateway table"})
ARGPARSE_TUNABLES = ArgumentPack(
['-t', '--tunables'],
{'default': [],
'type': str,
'nargs': argparse.REMAINDER,
'help': 'Sets tunables for a profile. -t MUST be the last flag. Example: "load_profile -f emu/simple_emu.py -t -h" to see tunables help'})
EMU_DRY_RUN_JSON = ArgumentPack(
['-d', '--dry-run'],
{'default': False,
'action': 'store_true',
'dest': 'dry',
'help': 'Dry run, only prints profile as JSON'})
EMU_SHUTDOWN_TIME = ArgumentPack(
['-t', '--time'],
{'default': 0,
'type': int,
'dest': 'time',
'help': 'shutdown Emu server in time seconds'
}
)
#Emu Client Args
MAC_ADDRESS = ArgumentPack(
['--mac'],
{'help': "MAC address",
'required': True,
'conv_type': 'mac',
'action': action_conv_type_to_bytes()})
MAC_ADDRESSES = ArgumentPack(
['--macs'],
{'help': "MAC addresses",
'required': True,
'dest': 'macs',
'nargs': '+',
'type': check_mac_addr})
CLIENT_IPV4 = ArgumentPack(
['-4'],
{'help': "Client's destination IPv4 address",
'dest': 'ipv4',
'type': check_ipv4_addr})
CLIENT_DG = ArgumentPack(
['--dg'],
{'help': "Client's default gateway IPv4 address",
'type': check_ipv4_addr})
CLIENT_IPV6 = ArgumentPack(
['-6'],
{'help': "Client's IPv6 address",
'dest': 'ipv6',
'type': check_ipv6_addr})
# Emu counters args
COUNTERS_TABLES = ArgumentPack(
['--tables'],
{'type': str,
         'help': 'Tables to show, given as a regular expression, e.g. --tables mbuf-*'})
COUNTERS_HEADERS = ArgumentPack(
['--headers'],
{'action': 'store_true',
'help': 'Only show the counters headers names and exit'})
COUNTERS_CLEAR = ArgumentPack(
['--clear'],
{'help': 'Clear all counters',
'action': 'store_true'})
COUNTERS_TYPE = ArgumentPack(
['--types'],
{'nargs': '*',
'type': str.upper,
'dest': 'cnt_types',
         'help': 'Filters counters by their type. Example: "--types info warning"'})
COUNTERS_SHOW_ZERO = ArgumentPack(
['--zero'],
{'action': 'store_true',
'default': False,
'help': 'Show all the zero values'})
EMU_ALL_NS = ArgumentPack(
['--all-ns'],
{'action': 'store_true',
'help': 'Use all namespaces for the action'})
# Plugins cfg args
ARP_ENABLE = ArgumentPack(
['--enable'],
{'choices': ON_OFF_DICT,
'required': True,
'help': 'Enable ARP'})
ARP_GARP = ArgumentPack(
['--garp'],
{'choices': ON_OFF_DICT,
'required': True,
'help': 'Enable gratuitous ARP'})
GEN_NAME = ArgumentPack(
['-g', '--gen'],
{'type': str,
'dest': 'gen_name',
'required': True,
'help': 'Name of IPFix generator'})
GEN_RATE = ArgumentPack(
['-r', '--rate'],
{'type': float,
'dest': 'rate',
'required': True,
'help': 'New rate (in pps) for template/data packets of an IPFix generator'})
MTU = ArgumentPack(
['--mtu'],
{'type': int,
'action': action_check_min_max(),
'min_val': 256, 'max_val': 9000,
'required': True,
'help': 'Maximum transmission unit'})
IPV4_START = ArgumentPack(
['--4'],
{'help': "IPv4 start address",
'dest': 'ipv4_start',
'required': True,
'mc': True, 'conv_type': 'ipv4', # params for action
'action': action_conv_type_to_bytes()})
IPV4_COUNT = ArgumentPack(
['--4-count'],
{'help': "Number of IPv4 addresses to generate from start",
'dest': 'ipv4_count',
'required': True,
'action': action_check_min_max(),
'min_val': 0})
IPV6_START = ArgumentPack(
['--6'],
{'help': "IPv6 start address",
'dest': 'ipv6_start',
'required': True,
'mc': True, 'conv_type': 'ipv6', # params for action
'action': action_conv_type_to_bytes()})
IPV6_COUNT = ArgumentPack(
['--6-count'],
{'help': "Number of IPv6 addresses to generate from start",
'dest': 'ipv6_count',
'required': True,
'action': action_check_min_max(),
'min_val': 0})
IPV4_G_START = ArgumentPack(
['--4g'],
{'help': "IPv4 group start address",
'dest': 'g_start',
'required': True,
'mc': True, 'conv_type': 'ipv4', # params for action
'action': action_conv_type_to_bytes()})
IPV6_G_START = ArgumentPack(
['--6g'],
{'help': "IPv6 group start address",
'dest': 'g6_start',
'required': True,
'mc': True, 'conv_type': 'ipv6', # params for action
'action': action_conv_type_to_bytes()})
IPV6_G_COUNT = ArgumentPack(
['--6g-count'],
{'help': "Number of IPv6 group addresses to generate from start",
'dest': 'g6_count',
'default': 1,
'action': action_check_min_max(),
'min_val': 0})
IPV6_S_START = ArgumentPack(
['--6s'],
{'help': "IPv6 sources start address",
'dest': 's6_start',
'required': True,
'conv_type': 'ipv6', # params for action
'action': action_conv_type_to_bytes()})
IPV6_S_COUNT = ArgumentPack(
['--6s-count'],
{'help': "Number of IPv6 sources to generate from start",
'dest': 's6_count',
'default': 1,
'action': action_check_min_max(),
'min_val': 0})
IPV4_S_START = ArgumentPack(
['--4s'],
{'help': "IPv4 source start address",
'dest': 's_start',
'required': True,
'conv_type': 'ipv4', # params for action
'action': action_conv_type_to_bytes()})
IPV4_S_COUNT = ArgumentPack(
['--4s-count'],
{'help': "Number of IPv4 source addresses to generate from start",
'dest': 's_count',
'default': 1,
'action': action_check_min_max(),
'min_val': 0})
IPV4_G_COUNT = ArgumentPack(
['--4g-count'],
{'help': "Number of IPv4 group addresses to generate from start",
'dest': 'g_count',
'default': 1,
'action': action_check_min_max(),
'min_val': 0})
# ICMP Start Ping
PING_AMOUNT = ArgumentPack(
['--amount'],
        {'help': 'Number of pings to send. Default is 5.',
'dest': 'ping_amount',
'required': False,
'action': action_check_min_max(),
'min_val': 0})
PING_PACE = ArgumentPack(
['--pace'],
{'help': 'Pace of pings, in pps. Default is 1.',
'dest': 'ping_pace',
'required': False,
'type': float})
PING_DST = ArgumentPack(
['--dst'],
{'help': 'Destination address. Default is Default Gateway.',
'dest': 'ping_dst',
'required': False,
'conv_type': 'ipv4',
'action': action_conv_type_to_bytes()})
PING_DST_V6 = ArgumentPack(
['--dst'],
{'help': 'Destination address. Default is Default Gateway.',
'dest': 'pingv6_dst',
'required': False,
'conv_type': 'ipv6',
'action': action_conv_type_to_bytes()})
PING_SRC_V6 = ArgumentPack(
['--src'],
{'help': 'Source address.',
'dest': 'pingv6_src',
'required': False,
'conv_type': 'ipv6',
'action': action_conv_type_to_bytes()})
PING_SIZE = ArgumentPack(
['--size'],
{'help': 'Size of the ICMPv4/v6 payload, in bytes. Minimal and default is 16.',
'dest': 'ping_size',
'required': False,
'action': action_check_min_max(),
'min_val': 0})
#DNS Query
DNS_DOMAIN_NAME = ArgumentPack(
['-d', '--domain'],
{'help': 'Dns Domain',
'required': True,
'type': str})
DNS_QUERY_NAME = ArgumentPack(
['-n', '--name'],
{'help': 'Hostname/Domain to query',
'required': True,
'type': str})
DNS_QUERY_TYPE = ArgumentPack(
['-t', '--type'],
{'help': 'DNS/mDNS query type',
'dest': 'dns_type',
'default': "A",
'choices': ["A", "AAAA","TXT", "PTR"],
'type': str})
DNS_QUERY_CLASS = ArgumentPack(
['-c', '--class'],
{'help': 'DNS/mDNS class type',
'dest': 'dns_class',
'default': "IN",
'type': str})
MDNS_QUERY_IPV6 = ArgumentPack(
['-6', '--ipv6'],
{'help': 'Send query using Ipv6',
'dest': 'ipv6',
'action': 'store_true'}
)
MDNS_HOSTS_LIST = ArgumentPack(
['--hosts'], # -h is taken by help
{'help': 'List of hosts to add/remove from mDNS client',
'dest': 'hosts',
'type': str,
         'nargs': '+', # at least one argument must be provided, e.g. --hosts Host1 Host2 Host3
'required': True
}
)
# Flatten the option definitions into OPTIONS_DB, numbering each ArgumentPack
# and exporting that number under the option's name at module level (via
# exec), so other modules can reference options as e.g. parsing_opts.MULTIPLIER.
OPTIONS_DB = {}
opt_index = 0
for var_name in dir(OPTIONS_DB_ARGS):
    var = getattr(OPTIONS_DB_ARGS, var_name)
    if type(var) is ArgumentPack:
        opt_index += 1
        OPTIONS_DB[opt_index] = var
        exec('%s = %d' % (var_name, opt_index))
class OPTIONS_DB_GROUPS:
ASTF_CLIENT_CTRL = ArgumentGroup(
MUTEX,
[
ASTF_CLIENTS,
ASTF_SERVERS_ONLY,
],
{'required': False})
SCAPY_PKT_CMD = ArgumentGroup(
MUTEX,
[
SCAPY_PKT,
SHOW_LAYERS
],
{'required': True})
IPV6_OPTS_CMD = ArgumentGroup(
MUTEX,
[
IPV6_OFF,
IPV6_AUTO,
IPV6_SRC
],
{'required': True})
# advanced options
PORT_LIST_WITH_ALL = ArgumentGroup(
MUTEX,
[
PORT_LIST,
ALL_PORTS
],
{'required': False})
# advanced options
PROFILE_LIST_WITH_ALL = ArgumentGroup(
MUTEX,
[
PROFILE_LIST,
ALL_PROFILES
],
{'required': False})
VLAN_CFG = ArgumentGroup(
MUTEX,
[
VLAN_TAGS,
CLEAR_VLAN
],
{'required': True})
STREAM_FROM_PATH_OR_FILE = ArgumentGroup(
MUTEX,
[
FILE_PATH,
FILE_FROM_DB
],
{'required': True})
STL_STATS = ArgumentGroup(
MUTEX,
[
GLOBAL_STATS,
PORT_STATS,
PORT_STATUS,
STREAMS_STATS,
LATENCY_STATS,
LATENCY_HISTOGRAM,
CPU_STATS,
MBUF_STATS,
EXTENDED_STATS,
EXTENDED_INC_ZERO_STATS,
],
{})
ASTF_STATS_GROUP = ArgumentGroup(
MUTEX,
[
GLOBAL_STATS,
PORT_STATS,
PORT_STATUS,
LATENCY_STATS,
LATENCY_HISTOGRAM,
LATENCY_COUNTERS,
CPU_STATS,
MBUF_STATS,
EXTENDED_STATS,
EXTENDED_INC_ZERO_STATS,
ASTF_STATS,
ASTF_INC_ZERO_STATS,
],
{})
CORE_MASK_GROUP = ArgumentGroup(
MUTEX,
[
PIN_CORES,
CORE_MASK
],
{'required': False})
CAPTURE_PORTS_GROUP = ArgumentGroup(
NON_MUTEX,
[
TX_PORT_LIST,
RX_PORT_LIST
],
{})
MONITOR_TYPE = ArgumentGroup(
MUTEX,
[
MONITOR_TYPE_VERBOSE,
MONITOR_TYPE_PIPE],
{'required': False})
SERVICE_GROUP = ArgumentGroup(
NON_MUTEX,
[
SERVICE_BGP_FILTERED,
SERVICE_DHCP_FILTERED,
SERVICE_MDNS_FILTERED,
SERVICE_EMU_FILTERED,
SERVICE_TRAN_FILTERED,
SERVICE_NO_TCP_UDP_FILTERED,
SERVICE_ALL_FILTERED,
SERVICE_OFF
],{})
# EMU Groups
EMU_NS_GROUP = ArgumentGroup(
NON_MUTEX,
[
SINGLE_PORT_REQ,
VLAN_TAGS,
VLAN_TPIDS,
],{})
EMU_NS_GROUP_NOT_REQ = ArgumentGroup(
NON_MUTEX,
[
SINGLE_PORT_NOT_REQ,
VLAN_TAGS,
VLAN_TPIDS,
],{})
EMU_CLIENT_GROUP = ArgumentGroup(
NON_MUTEX,
[
MAC_ADDRESS,
CLIENT_IPV4,
CLIENT_DG,
CLIENT_IPV6,
],{})
EMU_MAX_SHOW = ArgumentGroup(
NON_MUTEX,
[
SHOW_MAX_CLIENTS,
SHOW_MAX_NS,
],{})
EMU_SHOW_CLIENT_OPTIONS = ArgumentGroup(
NON_MUTEX,
[
SHOW_IPV6_DATA,
SHOW_IPV6_ROUTER,
SHOW_IPV4_DG,
SHOW_IPV6_DG
],{})
EMU_SHOW_CNT_GLOBAL_GROUP = ArgumentGroup(
NON_MUTEX,
[
MONITOR_TYPE_VERBOSE,
COUNTERS_TABLES,
COUNTERS_HEADERS,
COUNTERS_CLEAR,
COUNTERS_TYPE,
COUNTERS_SHOW_ZERO,
], {}
)
EMU_SHOW_CNT_GROUP = ArgumentGroup(
NON_MUTEX,
[
MONITOR_TYPE_VERBOSE,
COUNTERS_TABLES,
COUNTERS_HEADERS,
COUNTERS_CLEAR,
COUNTERS_TYPE,
COUNTERS_SHOW_ZERO,
], {}
)
EMU_DUMPS_OPT = ArgumentGroup(
MUTEX,
[
TO_JSON,
TO_YAML
], {}
)
EMU_ICMP_PING_PARAMS = ArgumentGroup(
NON_MUTEX,
[
PING_AMOUNT,
PING_PACE,
PING_DST,
PING_SIZE,
], {}
)
EMU_ICMPv6_PING_PARAMS = ArgumentGroup(
NON_MUTEX,
[
PING_AMOUNT,
PING_PACE,
PING_DST_V6,
PING_SRC_V6,
PING_SIZE,
], {}
)
for var_name in dir(OPTIONS_DB_GROUPS):
var = getattr(OPTIONS_DB_GROUPS, var_name)
if type(var) is ArgumentGroup:
opt_index += 1
OPTIONS_DB[opt_index] = var
exec('%s = %d' % (var_name, opt_index))
class _MergeAction(argparse._AppendAction):
def __call__(self, parser, namespace, values, option_string=None):
items = getattr(namespace, self.dest)
if not items:
items = values
elif type(items) is list and type(values) is list:
items.extend(values)
elif type(items) is dict and type(values) is dict: # tunables are dict
items.update(values)
else:
raise Exception("Argparser 'merge' option should be used on dict or list.")
setattr(namespace, self.dest, items)
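# A minimal usage sketch (hypothetical flags, not part of the TRex sources):
# the 'merge' action extends lists / updates dicts across repeated flags
# instead of nesting them the way argparse's 'append' would:
#
#     parser = argparse.ArgumentParser()
#     parser.register('action', 'merge', _MergeAction)
#     parser.add_argument('-p', action='merge', type=int, nargs='+')
#     opts = parser.parse_args('-p 1 2 -p 3'.split())
#     assert opts.p == [1, 2, 3]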
class CCmdArgParser(argparse.ArgumentParser):
def __init__(self, client = None, *args, **kwargs):
super(CCmdArgParser, self).__init__(*args, **kwargs)
self.client = client
self.cmd_name = kwargs.get('prog')
self.register('action', 'merge', _MergeAction)
def add_arg_list (self, *args):
populate_parser(self, *args)
    # a simple hook on add_subparsers that attaches the client to each subparser
def add_subparsers(self, *args, **kwargs):
sub = super(CCmdArgParser, self).add_subparsers(*args, **kwargs)
# save pointer to the original add parser method
add_parser = sub.add_parser
client = self.client
def add_parser_hook (self, *args, **kwargs):
parser = add_parser(self, *args, **kwargs)
parser.client = client
return parser
# override with the hook
sub.add_parser = add_parser_hook
def remove_parser(name):
if name in sub._name_parser_map:
del sub._name_parser_map[name]
for action in sub._choices_actions:
if action.dest == name:
sub._choices_actions.remove(action)
else:
self._print_message(bold('Subparser "%s" does not exist!' % name))
sub.remove_parser = remove_parser
sub.has_parser = lambda name: name in sub._name_parser_map
sub.get_parser = lambda name: sub._name_parser_map.get(name)
return sub
# hook this to the logger
def _print_message(self, message, file=None):
self.client.logger.info(message)
def error(self, message):
self.print_usage()
self._print_message(('%s: error: %s\n') % (self.prog, message))
raise ValueError(message)
def has_ports_cfg (self, opts):
return hasattr(opts, "all_ports") or hasattr(opts, "ports") or hasattr(opts, "all_profiles")
def parse_args(self, args=None, namespace=None, default_ports=None, verify_acquired=False, allow_empty=True):
try:
opts = super(CCmdArgParser, self).parse_args(args, namespace)
if opts is None:
raise TRexError("'{0}' - invalid arguments".format(self.cmd_name))
if not self.has_ports_cfg(opts):
return opts
opts.ports = listify(opts.ports)
# explicit -a means ALL ports
if (getattr(opts, "all_ports", None) == True):
opts.ports = self.client.get_all_ports()
# explicit -a means ALL profiles
elif (getattr(opts, "all_profiles", None) == True):
opts.ports = self.client.get_profiles_with_state("all")
# default ports
elif (getattr(opts, "ports", None) == []):
opts.ports = self.client.get_acquired_ports() if default_ports is None else default_ports
opts.ports = list_remove_dup(opts.ports)
# validate the ports state
if verify_acquired:
self.client.psv.validate(self.cmd_name, opts.ports, PSV_ACQUIRED, allow_empty = allow_empty)
else:
self.client.psv.validate(self.cmd_name, opts.ports, allow_empty = allow_empty)
return opts
except ValueError as e:
raise TRexConsoleError(str(e))
except SystemExit:
# recover from system exit scenarios, such as "help", or bad arguments.
raise TRexConsoleNoAction()
def formatted_error (self, msg):
self.print_usage()
self._print_message(('%s: error: %s\n') % (self.prog, msg))
def get_flags (opt):
return OPTIONS_DB[opt].name_or_flags
def populate_parser (parser, *args):
for param in args:
try:
if isinstance(param, int):
argument = OPTIONS_DB[param]
else:
argument = param
if isinstance(argument, ArgumentGroup):
if argument.type == MUTEX:
# handle as mutually exclusive group
group = parser.add_mutually_exclusive_group(**argument.options)
for sub_argument in argument.args:
group.add_argument(*OPTIONS_DB[sub_argument].name_or_flags,
**OPTIONS_DB[sub_argument].options)
elif argument.type == NON_MUTEX:
group = parser.add_argument_group(**argument.options)
for sub_argument in argument.args:
group.add_argument(*OPTIONS_DB[sub_argument].name_or_flags,
**OPTIONS_DB[sub_argument].options)
else:
raise Exception('Invalid ArgumentGroup type, should be either MUTEX or NON_MUTEX')
elif isinstance(argument, ArgumentPack):
parser.add_argument(*argument.name_or_flags,
**argument.options)
else:
raise Exception('Invalid arg object, should be ArgumentGroup or ArgumentPack, got: %s' % type(argument))
except KeyError as e:
cause = e.args[0]
raise KeyError("The attribute '{0}' is missing as a field of the {1} option.\n".format(cause, param))
def gen_parser(client, op_name, description, *args, **kw):
parser = CCmdArgParser(client, prog=op_name, conflict_handler='resolve',
description=description, **kw)
populate_parser(parser, *args)
return parser
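# Usage sketch (hypothetical invocation; assumes `client` is a connected
# console client and that the option actions convert/validate the values):
#
#     parser = gen_parser(client, 'ping', 'ping a remote host',
#                         PING_AMOUNT, PING_PACE)
#     opts = parser.parse_args(['--amount', '3', '--pace', '0.5'])
#
# Integer arguments are resolved through OPTIONS_DB, so commands only list
# the option handles they need.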
if __name__ == "__main__":
pass
| 29.166263
| 150
| 0.527588
|
cc6cdec04e4e2af803177dfe43e73c8980dbd3b6
| 803
|
py
|
Python
|
tests/test_utils.py
|
PGBI/Surprise
|
76e47037675afc6c0fb017490a88d1b2b2dff0f7
|
[
"BSD-3-Clause"
] | 5,572
|
2016-11-24T08:21:53.000Z
|
2022-03-31T20:35:00.000Z
|
tests/test_utils.py
|
daihui-lu/Surprise
|
46b9914995e6c8c7d227b46f2eaeef2d4600580f
|
[
"BSD-3-Clause"
] | 393
|
2016-11-22T12:48:00.000Z
|
2022-03-26T15:09:53.000Z
|
tests/test_utils.py
|
daihui-lu/Surprise
|
46b9914995e6c8c7d227b46f2eaeef2d4600580f
|
[
"BSD-3-Clause"
] | 1,096
|
2016-12-08T22:01:57.000Z
|
2022-03-29T03:55:54.000Z
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pytest
from surprise.utils import get_rng
def test_get_rng():
# assert two RNG with same int are the same
rng_a = get_rng(12)
rng_b = get_rng(12)
a = [rng_a.rand() for _ in range(10)]
b = [rng_b.rand() for _ in range(10)]
assert a == b
# assert passing an int returns the corresponding numpy rng instance
rng_a = get_rng(12)
rng_b = np.random.RandomState(12)
a = [rng_a.rand() for _ in range(10)]
b = [rng_b.rand() for _ in range(10)]
assert a == b
# Make sure this is ok
get_rng(None)
with pytest.raises(ValueError):
get_rng(23.2)
with pytest.raises(ValueError):
get_rng('bad')
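# Note (inferred from the assertions above): get_rng(seed) mirrors
# np.random.RandomState(seed) for int seeds, accepts None for an unseeded
# RNG, and raises ValueError for anything else, e.g.:
#
#     rng = get_rng(12)
#     rng.rand()  # same stream as np.random.RandomState(12).rand()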
| 23.617647
| 72
| 0.635118
|
70836ca1748caea3d326c0101f9ea4804dc8b95e
| 9,125
|
py
|
Python
|
shops/visualize_gradient_descent/utilities/viz_utils.py
|
vb690/machine_learning_exercises
|
9c5473652b576a3b0f0bd1df81e24166ca01c2b7
|
[
"MIT"
] | null | null | null |
shops/visualize_gradient_descent/utilities/viz_utils.py
|
vb690/machine_learning_exercises
|
9c5473652b576a3b0f0bd1df81e24166ca01c2b7
|
[
"MIT"
] | 1
|
2021-04-26T19:06:06.000Z
|
2021-04-26T19:06:06.000Z
|
shops/visualize_gradient_descent/utilities/viz_utils.py
|
vb690/machine_learning_exercises
|
9c5473652b576a3b0f0bd1df81e24166ca01c2b7
|
[
"MIT"
] | 1
|
2021-04-22T12:14:59.000Z
|
2021-04-22T12:14:59.000Z
|
import os
from tqdm import tqdm
import numpy as np
from scipy.interpolate import griddata
from sklearn.preprocessing import KBinsDiscretizer
import imageio
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib
def save_3D_animation(embeddings, emb_space_sizes, train_losses, test_losses,
opt_name, n_bins=10, horizon_size=10, cmap_name='jet',
**plotting_kwargs):
"""Utility function for visualizing the changes in weights over time in
UMAP space. The visualization is in 3D for better appreciating the descent
on the error surface.
Args:
        - embeddings: list of embeddings, result of aligned UMAP.
- emb_space_sizes: list of arrays, define the limits of the
embedding space for the three layers of the MLP.
- train_losses: list, training losses history.
- test_losses: list, test losses.
- opt_name: string, name of the optimizer used.
- n_bins: int, number of bins for discretizing the training loss.
- horizon_size: int, maximum number of points simultaneously
on screen.
- cmap_name: string, name of the colormap used for representing
the change in train losses.
- **plotting_kwargs: keyword arguments, keyword arguments for the
plotting function.
Returns:
- None
"""
discretizer = KBinsDiscretizer(
n_bins=n_bins,
encode='ordinal',
strategy='uniform'
)
cmap = matplotlib.cm.get_cmap(cmap_name)
colors = np.array(train_losses)
colors = discretizer.fit_transform(colors.reshape(-1, 1)).flatten()
norm = plt.Normalize(colors.min(), colors.max())
for i in tqdm(range(embeddings[0].shape[0])):
fig, axs = plt.subplots(
1,
3,
figsize=(30, 10),
subplot_kw=dict(projection='3d')
)
for index, emb in enumerate(embeddings):
min_sizes, max_sizes = emb_space_sizes[index]
past_horizon = max(0, i - horizon_size)
axs[index].scatter(
emb[past_horizon:i, 0],
emb[past_horizon:i, 1],
train_losses[past_horizon:i],
c=[cmap(norm(color)) for color in colors[past_horizon:i]],
**plotting_kwargs
)
# PLOT ON THE 2D FACES
axs[index].plot(
xs=emb[past_horizon:i, 0], # x=x
ys=train_losses[past_horizon:i], # y=z
c='grey',
zdir='y',
zs=max_sizes[1],
linewidth=5,
alpha=0.25
)
axs[index].plot(
xs=emb[past_horizon:i, 1], # x=y
ys=train_losses[past_horizon:i], # y=z
c='grey',
zdir='x',
linewidth=5,
alpha=0.25,
zs=min_sizes[0]
)
axs[index].plot(
xs=emb[past_horizon:i, 0], # x=x
ys=emb[past_horizon:i, 1], # y=y
c='grey',
zdir='z',
linewidth=5,
alpha=0.25,
zs=min_sizes[2]
)
axs[index].text2D(
0.05,
0.95,
f'Layer {index+1}',
transform=axs[index].transAxes
)
if index == 1:
axs[index].text2D(
0.5,
1.1,
f'Optimizer: {opt_name} \
\nTrain Loss: {round(train_losses[i], 3)} \
\n Test Loss: {round(test_losses[i], 3)}',
transform=axs[index].transAxes
)
elif index == 2:
axs[index].set_xlabel('Weights Space \n UMAP 1')
axs[index].set_ylabel('Weights Space \n UMAP 2')
                axs[index].set_zlabel('Training Loss')
if not os.path.exists(f'results\\3D_{opt_name}'):
os.makedirs(f'results\\3D_{opt_name}')
plt.savefig(f'results\\3D_{opt_name}\\{i}.png', bbox_inches='tight')
plt.close('all')
return None
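# Call sketch (hypothetical names; assumes `embeddings` holds one
# (n_epochs, 2) array per layer from an aligned UMAP fit over per-epoch
# weight snapshots):
#
#     save_3D_animation(embeddings, emb_space_sizes, train_losses,
#                       test_losses, opt_name='SGD', n_bins=10,
#                       horizon_size=10, s=40)
#
# Extra keyword arguments (here `s=40`) are forwarded to Axes3D.scatter.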
def save_2D_animation(embeddings, target_optimizers, emb_space_sizes,
total_train_losses, total_test_losses,
n_bins=100, cmap_name='jet', **plotting_kwargs):
"""Utility function for visualizing the changes in weights over time in
UMAP space. The visualization is in 2D for better appreciating the global
loss surface.
Args:
        - embeddings: list of embeddings, result of aligned UMAP.
- target_optimizers: list of strings, name of the optimizers
considered.
- emb_space_sizes: list of arrays, define the limits of the
embedding space for the three layers of the MLP.
- total_train_losses: list, training losses history.
- total_test_losses: list, test losses.
- n_bins: int, number of bins for discretizing the training loss.
- cmap_name: string, name of the colormap used for representing
the change in train losses.
- **plotting_kwargs: keyword arguments, keyword arguments for the
plotting function.
Returns:
- None
"""
fig, axs = plt.subplots(1, 3, figsize=(15, 5))
axs = axs.flatten()
Z = np.array(total_train_losses).flatten()
for layer, emb in enumerate(embeddings):
x = emb[:, 0]
y = emb[:, 1]
xi = np.linspace(
x.min(),
x.max(),
1000
)
yi = np.linspace(
y.min(),
y.max(),
1000
)
        x_grid, y_grid = np.meshgrid(xi, yi)
zi = griddata(
(x, y),
Z,
(xi[None, :], yi[:, None]),
method='linear'
)
zi = np.nan_to_num(zi, nan=Z.mean())
cont = axs[layer].contourf(
x_grid,
            y_grid,
zi,
cmap=cmap_name,
levels=n_bins,
vmin=Z.min(),
vmax=Z.max()
)
fig.subplots_adjust(right=0.85)
cbar_ax = fig.add_axes([0.88, 0.15, 0.04, 0.7])
fig.colorbar(
cont,
cax=cbar_ax,
label='Training Loss'
)
for index, opt_name in enumerate(target_optimizers):
print(f'Saving Optimizer {opt_name}')
emb_size = len(total_test_losses[index])
start = emb_size * index
stop = start + emb_size
embs = [emb[start:stop] for emb in embeddings]
for ax_idx, ax in enumerate(axs):
ax.set_title(
f'Layer {ax_idx + 1} \
\nOptimizer: {opt_name}'
)
if ax_idx == 0:
ax.set_ylabel('Weights Space \n UMAP 2')
ax.set_xlabel('Weights Space \n UMAP 1')
else:
ax.set_xlabel('Weights Space \n UMAP 1')
for i in tqdm(range(embs[0].shape[0])):
point_1 = axs[0].scatter(
embs[0][i, 0],
embs[0][i, 1],
marker="*",
c='white',
edgecolor='k',
s=60
)
point_2 = axs[1].scatter(
embs[1][i, 0],
embs[1][i, 1],
c='white',
marker="*",
edgecolor='k',
s=60
)
point_3 = axs[2].scatter(
embs[2][i, 0],
embs[2][i, 1],
c='white',
marker="*",
edgecolor='k',
s=60
)
if not os.path.exists(f'results\\2D_{opt_name}'):
os.makedirs(f'results\\2D_{opt_name}')
plt.savefig(
f'results\\2D_{opt_name}\\{i}.png',
bbox_inches='tight'
)
point_1.remove()
point_2.remove()
point_3.remove()
return None
def generating_movies(optimizers):
"""Generating MP4 movies from images visualized by various optimizers.
Args:
        optimizers: list of strings, names of the optimizers for which movies should be generated.
"""
for optimizer in optimizers:
for modality in ['2D', '3D']:
writer = imageio.get_writer(
f'results//{modality}_{optimizer}.mp4',
format='FFMPEG',
mode='I',
fps=30
)
print(f'Generating movie for {modality}_{optimizer}')
n_frames = len(os.listdir(f'results//{modality}_{optimizer}'))
for frame in tqdm(range(n_frames)):
writer.append_data(
imageio.imread(
                        f'results//{modality}_{optimizer}//{frame}.png'
)
)
writer.close()
return None
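# Usage sketch (hypothetical optimizer names; assumes the frame folders were
# produced by the two saving functions above and that imageio's FFMPEG
# backend is available):
#
#     generating_movies(['SGD', 'Adam', 'RMSprop'])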
| 30.723906
| 79
| 0.50137
|
154c38edaf6b52d0bb1f0a5d92cf27de97e3f13a
| 674
|
py
|
Python
|
Other_Hackings/useful_scripts/binary_dot_symbols.py
|
hacky1997/My-Gray-Hacker-Resources
|
e9b10ac7b0e557a9e624a5a6e761f9af4488d777
|
[
"MIT"
] | 14
|
2017-06-14T06:10:07.000Z
|
2019-02-22T03:21:15.000Z
|
Other_Hackings/useful_scripts/binary_dot_symbols.py
|
rookie-12/My-Gray-Hacker-Resources
|
e9b10ac7b0e557a9e624a5a6e761f9af4488d777
|
[
"MIT"
] | 1
|
2021-04-30T21:19:32.000Z
|
2021-04-30T21:19:32.000Z
|
Other_Hackings/useful_scripts/binary_dot_symbols.py
|
rookie-12/My-Gray-Hacker-Resources
|
e9b10ac7b0e557a9e624a5a6e761f9af4488d777
|
[
"MIT"
] | 7
|
2019-02-18T10:19:43.000Z
|
2020-05-15T16:15:39.000Z
|
'''
.. . .. . . .. ... ... . . ..... .. . .. .. ... .
....... ....... ....... ....... ....... ....... ....... .......
.. . .. . . .. . . ..... .. .... .. .. . ..... .. ...
....... ....... ....... ....... ....... ....... ....... .......
.. .... . ..... .. ... .. . .. .... ... .. ... . ... ..
....... ....... ....... ....... ....... ....... ....... .......
'''
s="""1100001 1101001 1101110 1110100 1011111 1100001 1100110 1110010 1100001 1101001 1100100 1011111 1101111 1100110 1011111 1101110 1101111 1011111 1100111 1101000 1101111 1110011 1110100 1110011"""
a = ''.join(chr(int(i, 2)) for i in s.replace("\n"," ").split(' '))
print(a)
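# Each space-separated group is a 7-bit ASCII code point; for instance the
# first group decodes as int('1100001', 2) == 97 == ord('a').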
| 37.444444
| 199
| 0.308605
|
6d47bf9dc8bdc8db531deaa91251fb39edb927dc
| 44,622
|
py
|
Python
|
setup.py
|
pydsigner/kivy
|
36dab10b896bd3c442fb0a66416033104ec46f46
|
[
"MIT"
] | 1
|
2020-10-27T13:44:54.000Z
|
2020-10-27T13:44:54.000Z
|
setup.py
|
pydsigner/kivy
|
36dab10b896bd3c442fb0a66416033104ec46f46
|
[
"MIT"
] | null | null | null |
setup.py
|
pydsigner/kivy
|
36dab10b896bd3c442fb0a66416033104ec46f46
|
[
"MIT"
] | null | null | null |
#
# Kivy - Cross-platform UI framework
# https://kivy.org/
#
import sys
build_examples = False
if "--build_examples" in sys.argv:
build_examples = True
sys.argv.remove("--build_examples")
from kivy.utils import pi_version
from copy import deepcopy
import os
from os.path import join, dirname, sep, exists, basename, isdir
from os import walk, environ, makedirs
from distutils.command.build_ext import build_ext
from distutils.version import LooseVersion
from distutils.sysconfig import get_python_inc
from collections import OrderedDict
from time import sleep
from sysconfig import get_paths
from pathlib import Path
import logging
from setuptools import setup, Extension, find_packages
if sys.version_info[0] == 2:
logging.critical(
'Unsupported Python version detected!: Kivy 2.0.0 and higher does not '
'support Python 2. Please upgrade to Python 3, or downgrade Kivy to '
'1.11.1 - the last Kivy release that still supports Python 2.')
def ver_equal(self, other):
return self.version == other
# fix error with py3's LooseVersion comparisons
LooseVersion.__eq__ = ver_equal
def get_description():
with open(join(dirname(__file__), 'README.md'), 'rb') as fileh:
return fileh.read().decode("utf8").replace('\r\n', '\n')
def getoutput(cmd, env=None):
import subprocess
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
p.wait()
if p.returncode: # if not returncode == 0
print('WARNING: A problem occurred while running {0} (code {1})\n'
.format(cmd, p.returncode))
stderr_content = p.stderr.read()
if stderr_content:
print('{0}\n'.format(stderr_content))
return ""
return p.stdout.read()
def pkgconfig(*packages, **kw):
flag_map = {'-I': 'include_dirs', '-L': 'library_dirs', '-l': 'libraries'}
lenviron = None
pconfig = join(sys.prefix, 'libs', 'pkgconfig')
if isdir(pconfig):
lenviron = environ.copy()
lenviron['PKG_CONFIG_PATH'] = '{};{}'.format(
environ.get('PKG_CONFIG_PATH', ''), pconfig)
cmd = 'pkg-config --libs --cflags {}'.format(' '.join(packages))
results = getoutput(cmd, lenviron).split()
for token in results:
ext = token[:2].decode('utf-8')
flag = flag_map.get(ext)
if not flag:
continue
kw.setdefault(flag, []).append(token[2:].decode('utf-8'))
return kw
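# Shape sketch (hypothetical package): pkgconfig('sdl2') returns something
# like {'include_dirs': ['/usr/include/SDL2'], 'libraries': ['SDL2']},
# i.e. the -I/-L/-l tokens of `pkg-config --libs --cflags` bucketed through
# flag_map; any other tokens (e.g. -D defines) are dropped.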
def get_isolated_env_paths():
try:
# sdl2_dev is installed before setup.py is run, when installing from
# source due to pyproject.toml. However, it is installed to a
# pip isolated env, which we need to add to compiler
import kivy_deps.sdl2_dev as sdl2_dev
except ImportError:
return [], []
root = os.path.abspath(join(sdl2_dev.__path__[0], '../../../..'))
includes = [join(root, 'Include')] if isdir(join(root, 'Include')) else []
libs = [join(root, 'libs')] if isdir(join(root, 'libs')) else []
return includes, libs
# -----------------------------------------------------------------------------
# Determine on which platform we are
build_examples = build_examples or \
os.environ.get('KIVY_BUILD_EXAMPLES', '0') == '1'
platform = sys.platform
# Detect 32/64bit for OSX (http://stackoverflow.com/a/1405971/798575)
if sys.platform == 'darwin':
if sys.maxsize > 2 ** 32:
osx_arch = 'x86_64'
else:
osx_arch = 'i386'
# Detect Python for android project (http://github.com/kivy/python-for-android)
ndkplatform = environ.get('NDKPLATFORM')
if ndkplatform is not None and environ.get('LIBLINK'):
platform = 'android'
kivy_ios_root = environ.get('KIVYIOSROOT', None)
if kivy_ios_root is not None:
platform = 'ios'
# proprietary broadcom video core drivers
if exists('/opt/vc/include/bcm_host.h'):
# The proprietary broadcom video core drivers are not available on the
# Raspberry Pi 4
if (pi_version or 4) < 4:
platform = 'rpi'
# use mesa video core drivers
if environ.get('VIDEOCOREMESA', None) == '1':
platform = 'vc'
mali_paths = (
'/usr/lib/arm-linux-gnueabihf/libMali.so',
'/usr/lib/arm-linux-gnueabihf/mali-egl/libmali.so',
'/usr/local/mali-egl/libmali.so')
if any((exists(path) for path in mali_paths)):
platform = 'mali'
# Needed when cross-compiling
if environ.get('KIVY_CROSS_PLATFORM'):
platform = environ.get('KIVY_CROSS_PLATFORM')
# -----------------------------------------------------------------------------
# Detect options
#
c_options = OrderedDict()
c_options['use_rpi'] = platform == 'rpi'
c_options['use_egl'] = False
c_options['use_opengl_es2'] = None
c_options['use_opengl_mock'] = environ.get('READTHEDOCS', None) == 'True'
c_options['use_sdl2'] = None
c_options['use_pangoft2'] = None
c_options['use_ios'] = False
c_options['use_android'] = False
c_options['use_mesagl'] = False
c_options['use_x11'] = False
c_options['use_wayland'] = False
c_options['use_gstreamer'] = None
c_options['use_avfoundation'] = platform in ['darwin', 'ios']
c_options['use_osx_frameworks'] = platform == 'darwin'
c_options['debug_gl'] = False
# Set the alpha size: 0 on the Raspberry Pi (so SDL2 works without X11)
# and 8 on all other platforms
c_options['kivy_sdl_gl_alpha_size'] = 8 if pi_version is None else 0
# now check if environ is changing the default values
for key in list(c_options.keys()):
ukey = key.upper()
if ukey in environ:
# kivy_sdl_gl_alpha_size should be an integer, the rest are booleans
value = int(environ[ukey])
if key != 'kivy_sdl_gl_alpha_size':
value = bool(value)
print('Environ change {0} -> {1}'.format(key, value))
c_options[key] = value
use_embed_signature = environ.get('USE_EMBEDSIGNATURE', '0') == '1'
use_embed_signature = use_embed_signature or bool(
platform not in ('ios', 'android'))
# -----------------------------------------------------------------------------
# We want to be able to install kivy as a wheel without a dependency
# on cython, but we also want to use cython where possible as a setup
# time dependency through `pyproject.toml` if building from source.
# There are issues with using cython at all on some platforms;
# exclude them from using or declaring cython.
# This determines whether Cython specific functionality may be used.
can_use_cython = True
if platform in ('ios', 'android'):
# NEVER use or declare cython on these platforms
print('Not using cython on %s' % platform)
can_use_cython = False
# -----------------------------------------------------------------------------
# Setup classes
# the build path where kivy is being compiled
src_path = build_path = dirname(__file__)
print("Current directory is: {}".format(os.getcwd()))
print("Source and initial build directory is: {}".format(src_path))
# __version__ is set by the exec below; declare it here so linters don't complain
__version__ = None
with open(join(src_path, 'kivy', '_version.py'), encoding="utf-8") as f:
exec(f.read())
class KivyBuildExt(build_ext, object):
def __new__(cls, *a, **kw):
# Note how this class is declared as a subclass of distutils
# build_ext as the Cython version may not be available in the
# environment it is initially started in. However, if Cython
# can be used, setuptools will bring Cython into the environment
# thus its version of build_ext will become available.
# The reason why this is done as a __new__ rather than through a
# factory function is because there are distutils functions that check
        # the values provided by cmdclass with issubclass, and so it would
# result in an exception.
# The following essentially supply a dynamically generated subclass
# that mix in the cython version of build_ext so that the
# functionality provided will also be executed.
if can_use_cython:
from Cython.Distutils import build_ext as cython_build_ext
build_ext_cls = type(
'KivyBuildExt', (KivyBuildExt, cython_build_ext), {})
return super(KivyBuildExt, cls).__new__(build_ext_cls)
else:
return super(KivyBuildExt, cls).__new__(cls)
def finalize_options(self):
retval = super(KivyBuildExt, self).finalize_options()
# Build the extensions in parallel if the options has not been set
if hasattr(self, 'parallel') and self.parallel is None:
# Use a maximum of 4 cores. If cpu_count returns None, then parallel
# build will be disabled
self.parallel = min(4, os.cpu_count() or 0)
if self.parallel:
print('Building extensions in parallel using {} cores'.format(
self.parallel))
global build_path
if (self.build_lib is not None and exists(self.build_lib) and
not self.inplace):
build_path = self.build_lib
print("Updated build directory to: {}".format(build_path))
return retval
def build_extensions(self):
# build files
config_h_fn = ('include', 'config.h')
config_pxi_fn = ('include', 'config.pxi')
config_py_fn = ('setupconfig.py', )
# generate headers
config_h = '// Autogenerated file for Kivy C configuration\n'
config_h += '#define __PY3 1\n'
config_pxi = '# Autogenerated file for Kivy Cython configuration\n'
config_pxi += 'DEF PY3 = 1\n'
config_py = '# Autogenerated file for Kivy configuration\n'
config_py += 'PY3 = 1\n'
config_py += 'CYTHON_MIN = {0}\nCYTHON_MAX = {1}\n'.format(
repr(MIN_CYTHON_STRING), repr(MAX_CYTHON_STRING))
config_py += 'CYTHON_BAD = {0}\n'.format(repr(', '.join(map(
str, CYTHON_UNSUPPORTED))))
# generate content
print('Build configuration is:')
for opt, value in c_options.items():
# kivy_sdl_gl_alpha_size is already an integer
if opt != 'kivy_sdl_gl_alpha_size':
value = int(bool(value))
print(' * {0} = {1}'.format(opt, value))
opt = opt.upper()
config_h += '#define __{0} {1}\n'.format(opt, value)
config_pxi += 'DEF {0} = {1}\n'.format(opt, value)
config_py += '{0} = {1}\n'.format(opt, value)
debug = bool(self.debug)
print(' * debug = {0}'.format(debug))
config_pxi += 'DEF DEBUG = {0}\n'.format(debug)
config_py += 'DEBUG = {0}\n'.format(debug)
config_pxi += 'DEF PLATFORM = "{0}"\n'.format(platform)
config_py += 'PLATFORM = "{0}"\n'.format(platform)
for fn, content in (
(config_h_fn, config_h), (config_pxi_fn, config_pxi),
(config_py_fn, config_py)):
build_fn = expand(build_path, *fn)
if self.update_if_changed(build_fn, content):
print('Updated {}'.format(build_fn))
src_fn = expand(src_path, *fn)
if src_fn != build_fn and self.update_if_changed(src_fn, content):
print('Updated {}'.format(src_fn))
c = self.compiler.compiler_type
print('Detected compiler is {}'.format(c))
if c != 'msvc':
for e in self.extensions:
e.extra_link_args += ['-lm']
super(KivyBuildExt, self).build_extensions()
def update_if_changed(self, fn, content):
need_update = True
if exists(fn):
with open(fn) as fd:
need_update = fd.read() != content
if need_update:
directory_name = dirname(fn)
if not exists(directory_name):
makedirs(directory_name)
with open(fn, 'w') as fd:
fd.write(content)
return need_update
def _check_and_fix_sdl2_mixer(f_path):
# Between SDL_mixer 2.0.1 and 2.0.4, the included frameworks changed
    # smpeg2 has been replaced with mpg123, which requires no fix.
smpeg2_path = ("{}/Versions/A/Frameworks/smpeg2.framework"
"/Versions/A/smpeg2").format(f_path)
if not exists(smpeg2_path):
return
print("Check if SDL2_mixer smpeg2 have an @executable_path")
rpath_from = ("@executable_path/../Frameworks/SDL2.framework"
"/Versions/A/SDL2")
rpath_to = "@rpath/../../../../SDL2.framework/Versions/A/SDL2"
output = getoutput(("otool -L '{}'").format(smpeg2_path)).decode('utf-8')
if "@executable_path" not in output:
return
print("WARNING: Your SDL2_mixer version is invalid")
print("WARNING: The smpeg2 framework embedded in SDL2_mixer contains a")
print("WARNING: reference to @executable_path that will fail the")
print("WARNING: execution of your application.")
print("WARNING: We are going to change:")
print("WARNING: from: {}".format(rpath_from))
print("WARNING: to: {}".format(rpath_to))
getoutput("install_name_tool -change {} {} {}".format(
rpath_from, rpath_to, smpeg2_path))
output = getoutput(("otool -L '{}'").format(smpeg2_path))
if b"@executable_path" not in output:
print("WARNING: Change successfully applied!")
print("WARNING: You'll never see this message again.")
else:
print("WARNING: Unable to apply the changes, sorry.")
# -----------------------------------------------------------------------------
print("Python path is:\n{}\n".format('\n'.join(sys.path)))
# extract version (simulate doc generation so kivy is not fully initialized)
environ['KIVY_DOC_INCLUDE'] = '1'
import kivy
# Cython check
# on python-for-android and kivy-ios, cython usage is external
from kivy.tools.packaging.cython_cfg import get_cython_versions, get_cython_msg
CYTHON_REQUIRES_STRING, MIN_CYTHON_STRING, MAX_CYTHON_STRING, \
CYTHON_UNSUPPORTED = get_cython_versions()
cython_min_msg, cython_max_msg, cython_unsupported_msg = get_cython_msg()
if can_use_cython:
import Cython
print('\nFound Cython at', Cython.__file__)
cy_version_str = Cython.__version__
cy_ver = LooseVersion(cy_version_str)
print('Detected supported Cython version {}'.format(cy_version_str))
if cy_ver < LooseVersion(MIN_CYTHON_STRING):
print(cython_min_msg)
elif cy_ver in CYTHON_UNSUPPORTED:
print(cython_unsupported_msg)
elif cy_ver > LooseVersion(MAX_CYTHON_STRING):
print(cython_max_msg)
sleep(1)
# extra build commands go in the cmdclass dict {'command-name': CommandClass}
# see tools.packaging.{platform}.build.py for custom build commands for
# portable packages. Also, e.g., we use the build_ext command from cython if
# it's installed, for c extensions.
from kivy.tools.packaging.factory import FactoryBuild
cmdclass = {
'build_factory': FactoryBuild,
'build_ext': KivyBuildExt}
try:
# add build rules for portable packages to cmdclass
if platform == 'win32':
from kivy.tools.packaging.win32.build import WindowsPortableBuild
cmdclass['build_portable'] = WindowsPortableBuild
elif platform == 'darwin':
from kivy.tools.packaging.osx.build import OSXPortableBuild
cmdclass['build_portable'] = OSXPortableBuild
except ImportError:
print('User distribution detected, avoid portable command.')
# Detect which opengl version headers to use
if platform in ('android', 'darwin', 'ios', 'rpi', 'mali', 'vc'):
c_options['use_opengl_es2'] = True
elif c_options['use_opengl_es2'] is None:
c_options['use_opengl_es2'] = \
environ.get('KIVY_GRAPHICS', '').lower() == 'gles'
print('Using this graphics system: {}'.format(
['OpenGL', 'OpenGL ES 2'][int(c_options['use_opengl_es2'] or False)]))
# check if we are in a kivy-ios build
if platform == 'ios':
    print('Kivy-IOS project environment detected, using it.')
print('Kivy-IOS project located at {0}'.format(kivy_ios_root))
c_options['use_ios'] = True
c_options['use_sdl2'] = True
elif platform == 'android':
c_options['use_android'] = True
elif platform == 'darwin':
if c_options['use_osx_frameworks']:
if osx_arch == "i386":
print("Warning: building with frameworks fail on i386")
else:
print("OSX framework used, force to x86_64 only")
environ["ARCHFLAGS"] = environ.get("ARCHFLAGS", "-arch x86_64")
print("OSX ARCHFLAGS are: {}".format(environ["ARCHFLAGS"]))
# detect gstreamer, only on desktop
# works if the options were forced or during autodetection
if platform not in ('ios', 'android') and (c_options['use_gstreamer']
in (None, True)):
gstreamer_valid = False
if c_options['use_osx_frameworks'] and platform == 'darwin':
# check the existence of frameworks
f_path = '/Library/Frameworks/GStreamer.framework'
if not exists(f_path):
c_options['use_gstreamer'] = False
print('GStreamer framework not found, fallback on pkg-config')
else:
print('GStreamer framework found')
gstreamer_valid = True
c_options['use_gstreamer'] = True
gst_flags = {
'extra_link_args': [
'-F/Library/Frameworks',
'-Xlinker', '-rpath',
'-Xlinker', '/Library/Frameworks',
'-Xlinker', '-headerpad',
'-Xlinker', '190',
'-framework', 'GStreamer'],
'include_dirs': [join(f_path, 'Headers')]}
elif platform == 'win32':
gst_flags = pkgconfig('gstreamer-1.0')
if 'libraries' in gst_flags:
print('GStreamer found via pkg-config')
gstreamer_valid = True
c_options['use_gstreamer'] = True
else:
_includes = get_isolated_env_paths()[0] + [get_paths()['include']]
for include_dir in _includes:
if exists(join(include_dir, 'gst', 'gst.h')):
print('GStreamer found via gst.h')
gstreamer_valid = True
c_options['use_gstreamer'] = True
gst_flags = {
'libraries':
['gstreamer-1.0', 'glib-2.0', 'gobject-2.0']}
break
if not gstreamer_valid:
# use pkg-config approach instead
gst_flags = pkgconfig('gstreamer-1.0')
if 'libraries' in gst_flags:
print('GStreamer found via pkg-config')
c_options['use_gstreamer'] = True
# detect SDL2, only on desktop and iOS, or android if explicitly enabled
# works if the options were forced or during autodetection
sdl2_flags = {}
if platform == 'win32' and c_options['use_sdl2'] is None:
c_options['use_sdl2'] = True
if c_options['use_sdl2'] or (
platform not in ('android',) and c_options['use_sdl2'] is None):
sdl2_valid = False
if c_options['use_osx_frameworks'] and platform == 'darwin':
# check the existence of frameworks
sdl2_valid = True
sdl2_flags = {
'extra_link_args': [
'-F/Library/Frameworks',
'-Xlinker', '-rpath',
'-Xlinker', '/Library/Frameworks',
'-Xlinker', '-headerpad',
'-Xlinker', '190'],
'include_dirs': [],
'extra_compile_args': ['-F/Library/Frameworks']
}
for name in ('SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer'):
f_path = '/Library/Frameworks/{}.framework'.format(name)
if not exists(f_path):
print('Missing framework {}'.format(f_path))
sdl2_valid = False
continue
sdl2_flags['extra_link_args'] += ['-framework', name]
sdl2_flags['include_dirs'] += [join(f_path, 'Headers')]
print('Found sdl2 frameworks: {}'.format(f_path))
if name == 'SDL2_mixer':
_check_and_fix_sdl2_mixer(f_path)
if not sdl2_valid:
c_options['use_sdl2'] = False
print('SDL2 frameworks not found, fallback on pkg-config')
else:
c_options['use_sdl2'] = True
print('Activate SDL2 compilation')
if not sdl2_valid and platform != "ios":
# use pkg-config approach instead
sdl2_flags = pkgconfig('sdl2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer')
if 'libraries' in sdl2_flags:
print('SDL2 found via pkg-config')
c_options['use_sdl2'] = True
# -----------------------------------------------------------------------------
# declare flags
def get_modulename_from_file(filename):
filename = filename.replace(sep, '/')
pyx = '.'.join(filename.split('.')[:-1])
pyxl = pyx.split('/')
while pyxl[0] != 'kivy':
pyxl.pop(0)
if pyxl[1] == 'kivy':
pyxl.pop(0)
return '.'.join(pyxl)
def expand(root, *args):
return join(root, 'kivy', *args)
class CythonExtension(Extension):
def __init__(self, *args, **kwargs):
Extension.__init__(self, *args, **kwargs)
self.cython_directives = {
'c_string_encoding': 'utf-8',
'profile': 'USE_PROFILE' in environ,
'embedsignature': use_embed_signature,
'language_level': 3,
'unraisable_tracebacks': True,
}
        # XXX with pip, setuptools is imported before distutils and changes
        # our .pyx to .c, so cythonize doesn't happen. Force our sources
        # again.
self.sources = args[1]
def merge(d1, *args):
d1 = deepcopy(d1)
for d2 in args:
for key, value in d2.items():
value = deepcopy(value)
if key in d1:
d1[key].extend(value)
else:
d1[key] = value
return d1
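# Semantics sketch: merge() deep-copies its first argument and extends
# (never replaces) existing list values, e.g.
#
#     merge({'libraries': ['GL']},
#           {'libraries': ['m'], 'include_dirs': ['inc']})
#     == {'libraries': ['GL', 'm'], 'include_dirs': ['inc']}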
def determine_base_flags():
includes, libs = get_isolated_env_paths()
flags = {
'libraries': [],
'include_dirs': [join(src_path, 'kivy', 'include')] + includes,
'library_dirs': [] + libs,
'extra_link_args': [],
'extra_compile_args': []}
if c_options['use_ios']:
sysroot = environ.get('IOSSDKROOT', environ.get('SDKROOT'))
if not sysroot:
raise Exception('IOSSDKROOT is not set')
flags['include_dirs'] += [sysroot]
flags['extra_compile_args'] += ['-isysroot', sysroot]
flags['extra_link_args'] += ['-isysroot', sysroot]
elif platform.startswith('freebsd'):
flags['include_dirs'] += [join(
environ.get('LOCALBASE', '/usr/local'), 'include')]
flags['library_dirs'] += [join(
environ.get('LOCALBASE', '/usr/local'), 'lib')]
elif platform == 'darwin' and c_options['use_osx_frameworks']:
v = os.uname()
if v[2] >= '13.0.0':
if 'SDKROOT' in environ:
sysroot = join(environ['SDKROOT'], 'System/Library/Frameworks')
else:
# use xcode-select to search on the right Xcode path
# XXX use the best SDK available instead of a specific one
import platform as _platform
xcode_dev = getoutput('xcode-select -p').splitlines()[0]
sdk_mac_ver = '.'.join(_platform.mac_ver()[0].split('.')[:2])
print('Xcode detected at {}, and using OS X{} sdk'.format(
xcode_dev, sdk_mac_ver))
sysroot = join(
xcode_dev.decode('utf-8'),
'Platforms/MacOSX.platform/Developer/SDKs',
'MacOSX{}.sdk'.format(sdk_mac_ver),
'System/Library/Frameworks')
else:
sysroot = ('/System/Library/Frameworks/'
'ApplicationServices.framework/Frameworks')
flags['extra_compile_args'] += ['-F%s' % sysroot]
flags['extra_link_args'] += ['-F%s' % sysroot]
elif platform == 'win32':
flags['include_dirs'] += [get_python_inc(prefix=sys.prefix)]
flags['library_dirs'] += [join(sys.prefix, "libs")]
return flags
def determine_gl_flags():
kivy_graphics_include = join(src_path, 'kivy', 'include')
flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}
base_flags = {'include_dirs': [kivy_graphics_include], 'libraries': []}
cross_sysroot = environ.get('KIVY_CROSS_SYSROOT')
if c_options['use_opengl_mock']:
return flags, base_flags
if platform == 'win32':
flags['libraries'] = ['opengl32', 'glew32']
elif platform == 'ios':
flags['libraries'] = ['GLESv2']
flags['extra_link_args'] = ['-framework', 'OpenGLES']
elif platform == 'darwin':
flags['extra_link_args'] = ['-framework', 'OpenGL', '-arch', osx_arch]
flags['extra_compile_args'] = ['-arch', osx_arch]
elif platform.startswith('freebsd'):
flags['libraries'] = ['GL']
elif platform.startswith('openbsd'):
flags['include_dirs'] = ['/usr/X11R6/include']
flags['library_dirs'] = ['/usr/X11R6/lib']
flags['libraries'] = ['GL']
elif platform == 'android':
flags['include_dirs'] = [join(ndkplatform, 'usr', 'include')]
flags['library_dirs'] = [join(ndkplatform, 'usr', 'lib')]
flags['libraries'] = ['GLESv2']
elif platform == 'rpi':
if not cross_sysroot:
flags['include_dirs'] = [
'/opt/vc/include',
'/opt/vc/include/interface/vcos/pthreads',
'/opt/vc/include/interface/vmcs_host/linux']
flags['library_dirs'] = ['/opt/vc/lib']
brcm_lib_files = (
'/opt/vc/lib/libbrcmEGL.so',
'/opt/vc/lib/libbrcmGLESv2.so')
else:
print("KIVY_CROSS_SYSROOT: " + cross_sysroot)
flags['include_dirs'] = [
cross_sysroot + '/usr/include',
cross_sysroot + '/usr/include/interface/vcos/pthreads',
cross_sysroot + '/usr/include/interface/vmcs_host/linux']
flags['library_dirs'] = [cross_sysroot + '/usr/lib']
brcm_lib_files = (
cross_sysroot + '/usr/lib/libbrcmEGL.so',
cross_sysroot + '/usr/lib/libbrcmGLESv2.so')
if all((exists(lib) for lib in brcm_lib_files)):
print('Found brcmEGL and brcmGLES library files '
'for rpi platform at ' + dirname(brcm_lib_files[0]))
gl_libs = ['brcmEGL', 'brcmGLESv2']
else:
print(
'Failed to find brcmEGL and brcmGLESv2 library files '
'for rpi platform, falling back to EGL and GLESv2.')
gl_libs = ['EGL', 'GLESv2']
flags['libraries'] = ['bcm_host'] + gl_libs
elif platform in ['mali', 'vc']:
flags['include_dirs'] = ['/usr/include/']
flags['library_dirs'] = ['/usr/lib/arm-linux-gnueabihf']
flags['libraries'] = ['GLESv2']
c_options['use_x11'] = True
c_options['use_egl'] = True
else:
flags['libraries'] = ['GL']
return flags, base_flags
def determine_sdl2():
flags = {}
if not c_options['use_sdl2']:
return flags
sdl2_path = environ.get('KIVY_SDL2_PATH', None)
if sdl2_flags and not sdl2_path and platform == 'darwin':
return sdl2_flags
includes, _ = get_isolated_env_paths()
# no pkgconfig info, or we want to use a specific sdl2 path, so perform
# manual configuration
flags['libraries'] = ['SDL2', 'SDL2_ttf', 'SDL2_image', 'SDL2_mixer']
split_chr = ';' if platform == 'win32' else ':'
sdl2_paths = sdl2_path.split(split_chr) if sdl2_path else []
if not sdl2_paths:
sdl2_paths = []
for include in includes + [join(sys.prefix, 'include')]:
sdl_inc = join(include, 'SDL2')
if isdir(sdl_inc):
sdl2_paths.append(sdl_inc)
sdl2_paths.extend(['/usr/local/include/SDL2', '/usr/include/SDL2'])
flags['include_dirs'] = sdl2_paths
flags['extra_link_args'] = []
flags['extra_compile_args'] = []
flags['library_dirs'] = (
sdl2_paths if sdl2_paths else
['/usr/local/lib/'])
if sdl2_flags:
flags = merge(flags, sdl2_flags)
# ensure headers for all the SDL2 and sub libraries are available
libs_to_check = ['SDL', 'SDL_mixer', 'SDL_ttf', 'SDL_image']
can_compile = True
for lib in libs_to_check:
found = False
for d in flags['include_dirs']:
fn = join(d, '{}.h'.format(lib))
if exists(fn):
found = True
print('SDL2: found {} header at {}'.format(lib, fn))
break
if not found:
print('SDL2: missing sub library {}'.format(lib))
can_compile = False
if not can_compile:
c_options['use_sdl2'] = False
return {}
return flags
base_flags = determine_base_flags()
gl_flags, gl_flags_base = determine_gl_flags()
# -----------------------------------------------------------------------------
# sources to compile
# all the dependencies have been found manually with:
# grep -inr -E '(cimport|include)' kivy/graphics/context_instructions.{pxd,pyx}
graphics_dependencies = {
'buffer.pyx': ['common.pxi'],
'context.pxd': ['instructions.pxd', 'texture.pxd', 'vbo.pxd', 'cgl.pxd'],
'cgl.pxd': ['common.pxi', 'config.pxi', 'gl_redirect.h'],
'compiler.pxd': ['instructions.pxd'],
'compiler.pyx': ['context_instructions.pxd'],
'cgl.pyx': ['cgl.pxd'],
'cgl_mock.pyx': ['cgl.pxd'],
'cgl_sdl2.pyx': ['cgl.pxd'],
'cgl_gl.pyx': ['cgl.pxd'],
'cgl_glew.pyx': ['cgl.pxd'],
'context_instructions.pxd': [
'transformation.pxd', 'instructions.pxd', 'texture.pxd'],
'fbo.pxd': ['cgl.pxd', 'instructions.pxd', 'texture.pxd'],
'fbo.pyx': [
'config.pxi', 'opcodes.pxi', 'transformation.pxd', 'context.pxd'],
'gl_instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd', 'instructions.pxd'],
'instructions.pxd': [
'vbo.pxd', 'context_instructions.pxd', 'compiler.pxd', 'shader.pxd',
'texture.pxd', '../_event.pxd'],
'instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd',
'context.pxd', 'common.pxi', 'vertex.pxd', 'transformation.pxd'],
'opengl.pyx': [
'config.pxi', 'common.pxi', 'cgl.pxd', 'gl_redirect.h'],
'opengl_utils.pyx': [
'opengl_utils_def.pxi', 'cgl.pxd', ],
'shader.pxd': ['cgl.pxd', 'transformation.pxd', 'vertex.pxd'],
'shader.pyx': [
'config.pxi', 'common.pxi', 'cgl.pxd',
'vertex.pxd', 'transformation.pxd', 'context.pxd',
'gl_debug_logger.pxi'],
'stencil_instructions.pxd': ['instructions.pxd'],
'stencil_instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd',
'gl_debug_logger.pxi'],
'scissor_instructions.pyx': [
'config.pxi', 'opcodes.pxi', 'cgl.pxd'],
'svg.pyx': ['config.pxi', 'common.pxi', 'texture.pxd', 'instructions.pxd',
'vertex_instructions.pxd', 'tesselator.pxd'],
'texture.pxd': ['cgl.pxd'],
'texture.pyx': [
'config.pxi', 'common.pxi', 'opengl_utils_def.pxi', 'context.pxd',
'cgl.pxd', 'opengl_utils.pxd',
'img_tools.pxi', 'gl_debug_logger.pxi'],
'vbo.pxd': ['buffer.pxd', 'cgl.pxd', 'vertex.pxd'],
'vbo.pyx': [
'config.pxi', 'common.pxi', 'context.pxd',
'instructions.pxd', 'shader.pxd', 'gl_debug_logger.pxi'],
'vertex.pxd': ['cgl.pxd'],
'vertex.pyx': ['config.pxi', 'common.pxi'],
'vertex_instructions.pyx': [
'config.pxi', 'common.pxi', 'vbo.pxd', 'vertex.pxd',
'instructions.pxd', 'vertex_instructions.pxd',
'cgl.pxd', 'texture.pxd', 'vertex_instructions_line.pxi'],
'vertex_instructions_line.pxi': ['stencil_instructions.pxd']}
sources = {
'_event.pyx': merge(base_flags, {'depends': ['properties.pxd']}),
'_clock.pyx': {},
'weakproxy.pyx': {},
'properties.pyx': merge(base_flags, {'depends': ['_event.pxd']}),
'graphics/buffer.pyx': merge(base_flags, gl_flags_base),
'graphics/context.pyx': merge(base_flags, gl_flags_base),
'graphics/compiler.pyx': merge(base_flags, gl_flags_base),
'graphics/context_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/fbo.pyx': merge(base_flags, gl_flags_base),
'graphics/gl_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/opengl.pyx': merge(base_flags, gl_flags_base),
'graphics/opengl_utils.pyx': merge(base_flags, gl_flags_base),
'graphics/shader.pyx': merge(base_flags, gl_flags_base),
'graphics/stencil_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/scissor_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/texture.pyx': merge(base_flags, gl_flags_base),
'graphics/transformation.pyx': merge(base_flags, gl_flags_base),
'graphics/vbo.pyx': merge(base_flags, gl_flags_base),
'graphics/vertex.pyx': merge(base_flags, gl_flags_base),
'graphics/vertex_instructions.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl_backend/cgl_mock.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl_backend/cgl_gl.pyx': merge(base_flags, gl_flags),
'graphics/cgl_backend/cgl_glew.pyx': merge(base_flags, gl_flags),
'graphics/cgl_backend/cgl_sdl2.pyx': merge(base_flags, gl_flags_base),
'graphics/cgl_backend/cgl_debug.pyx': merge(base_flags, gl_flags_base),
'core/text/text_layout.pyx': base_flags,
'core/window/window_info.pyx': base_flags,
'graphics/tesselator.pyx': merge(base_flags, {
'include_dirs': ['kivy/lib/libtess2/Include'],
'c_depends': [
'lib/libtess2/Source/bucketalloc.c',
'lib/libtess2/Source/dict.c',
'lib/libtess2/Source/geom.c',
'lib/libtess2/Source/mesh.c',
'lib/libtess2/Source/priorityq.c',
'lib/libtess2/Source/sweep.c',
'lib/libtess2/Source/tess.c'
]
}),
'graphics/svg.pyx': merge(base_flags, gl_flags_base)
}
if c_options["use_sdl2"]:
sdl2_flags = determine_sdl2()
if c_options['use_sdl2'] and sdl2_flags:
sources['graphics/cgl_backend/cgl_sdl2.pyx'] = merge(
sources['graphics/cgl_backend/cgl_sdl2.pyx'], sdl2_flags)
sdl2_depends = {'depends': ['lib/sdl2.pxi']}
for source_file in ('core/window/_window_sdl2.pyx',
'core/image/_img_sdl2.pyx',
'core/text/_text_sdl2.pyx',
'core/audio/audio_sdl2.pyx',
'core/clipboard/_clipboard_sdl2.pyx'):
sources[source_file] = merge(
base_flags, sdl2_flags, sdl2_depends)
if c_options['use_pangoft2'] in (None, True) and platform not in (
'android', 'ios', 'win32'):
pango_flags = pkgconfig('pangoft2')
if pango_flags and 'libraries' in pango_flags:
print('Pango: pangoft2 found via pkg-config')
c_options['use_pangoft2'] = True
pango_depends = {'depends': [
'lib/pango/pangoft2.pxi',
'lib/pango/pangoft2.h']}
sources['core/text/_text_pango.pyx'] = merge(
base_flags, pango_flags, pango_depends)
print(sources['core/text/_text_pango.pyx'])
if platform in ('darwin', 'ios'):
# activate ImageIO provider for our core image
if platform == 'ios':
osx_flags = {'extra_link_args': [
'-framework', 'Foundation',
'-framework', 'UIKit',
'-framework', 'AudioToolbox',
'-framework', 'CoreGraphics',
'-framework', 'QuartzCore',
'-framework', 'ImageIO',
'-framework', 'Accelerate']}
else:
osx_flags = {'extra_link_args': [
'-framework', 'ApplicationServices']}
sources['core/image/img_imageio.pyx'] = merge(
base_flags, osx_flags)
if c_options['use_avfoundation']:
import platform as _platform
mac_ver = [int(x) for x in _platform.mac_ver()[0].split('.')[:2]]
if mac_ver >= [10, 7] or platform == 'ios':
osx_flags = {
'extra_link_args': ['-framework', 'AVFoundation'],
'extra_compile_args': ['-ObjC++'],
'depends': ['core/camera/camera_avfoundation_implem.m']}
sources['core/camera/camera_avfoundation.pyx'] = merge(
base_flags, osx_flags)
else:
print('AVFoundation cannot be used, OSX >= 10.7 is required')
if c_options['use_rpi']:
sources['lib/vidcore_lite/egl.pyx'] = merge(
base_flags, gl_flags)
sources['lib/vidcore_lite/bcm.pyx'] = merge(
base_flags, gl_flags)
if c_options['use_x11']:
libs = ['Xrender', 'X11']
if c_options['use_egl']:
libs += ['EGL']
else:
libs += ['GL']
sources['core/window/window_x11.pyx'] = merge(
base_flags, gl_flags, {
# FIXME add an option to depend on them but not compile them
            # because keytab is included in core, and core is included in
# window_x11
#
# 'depends': [
# 'core/window/window_x11_keytab.c',
# 'core/window/window_x11_core.c'],
'libraries': libs})
if c_options['use_gstreamer']:
sources['lib/gstplayer/_gstplayer.pyx'] = merge(
base_flags, gst_flags, {
'depends': ['lib/gstplayer/_gstplayer.h']})
# -----------------------------------------------------------------------------
# extension modules
def get_dependencies(name, deps=None):
if deps is None:
deps = []
for dep in graphics_dependencies.get(name, []):
if dep not in deps:
deps.append(dep)
get_dependencies(dep, deps)
return deps
def resolve_dependencies(fn, depends):
fn = basename(fn)
deps = []
get_dependencies(fn, deps)
get_dependencies(fn.replace('.pyx', '.pxd'), deps)
deps_final = []
paths_to_test = ['graphics', 'include']
for dep in deps:
found = False
for path in paths_to_test:
filename = expand(src_path, path, dep)
if exists(filename):
deps_final.append(filename)
found = True
break
if not found:
print('ERROR: Dependency for {} not resolved: {}'.format(
fn, dep
))
return deps_final
def get_extensions_from_sources(sources):
ext_modules = []
if environ.get('KIVY_FAKE_BUILDEXT'):
print('Fake build_ext asked, will generate only .h/.c')
return ext_modules
for pyx, flags in sources.items():
is_graphics = pyx.startswith('graphics')
pyx = expand(src_path, pyx)
depends = [expand(src_path, x) for x in flags.pop('depends', [])]
c_depends = [expand(src_path, x) for x in flags.pop('c_depends', [])]
if not can_use_cython:
# can't use cython, so use the .c files instead.
pyx = '%s.c' % pyx[:-4]
if is_graphics:
depends = resolve_dependencies(pyx, depends)
f_depends = [x for x in depends if x.rsplit('.', 1)[-1] in (
'c', 'cpp', 'm')]
module_name = get_modulename_from_file(pyx)
flags_clean = {'depends': depends}
for key, value in flags.items():
if len(value):
flags_clean[key] = value
ext_modules.append(CythonExtension(
module_name, [pyx] + f_depends + c_depends, **flags_clean))
return ext_modules
ext_modules = get_extensions_from_sources(sources)
# -----------------------------------------------------------------------------
# automatically detect data files
split_examples = int(environ.get('KIVY_SPLIT_EXAMPLES', '0'))
data_file_prefix = 'share/kivy-'
examples = {}
examples_allowed_ext = ('readme', 'py', 'wav', 'png', 'jpg', 'svg', 'json',
'avi', 'gif', 'txt', 'ttf', 'obj', 'mtl', 'kv', 'mpg',
'glsl', 'zip')
for root, subFolders, files in walk('examples'):
for fn in files:
ext = fn.split('.')[-1].lower()
if ext not in examples_allowed_ext:
continue
filename = join(root, fn)
directory = '%s%s' % (data_file_prefix, dirname(filename))
if directory not in examples:
examples[directory] = []
examples[directory].append(filename)
binary_deps = []
binary_deps_path = join(src_path, 'kivy', 'binary_deps')
if isdir(binary_deps_path):
for root, dirnames, filenames in walk(binary_deps_path):
for fname in filenames:
binary_deps.append(
join(root.replace(binary_deps_path, 'binary_deps'), fname))
def glob_paths(*patterns, excludes=('.pyc', )):
files = []
base = Path(join(src_path, 'kivy'))
for pat in patterns:
for f in base.glob(pat):
if f.suffix in excludes:
continue
files.append(str(f.relative_to(base)))
return files
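# Example (paths are relative to the kivy/ package; actual results depend on
# the checked-out tree):
#
#     glob_paths('*.pxd')        # e.g. ['_event.pxd', 'properties.pxd']
#     glob_paths('data/**/*.*')  # every data file, '.pyc' suffixes excluded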
# -----------------------------------------------------------------------------
# setup !
if not build_examples:
setup(
name='Kivy',
version=__version__,
author='Kivy Team and other contributors',
author_email='kivy-dev@googlegroups.com',
url='http://kivy.org',
license='MIT',
description=(
'A software library for rapid development of '
'hardware-accelerated multitouch applications.'),
long_description=get_description(),
long_description_content_type='text/markdown',
ext_modules=ext_modules,
cmdclass=cmdclass,
packages=find_packages(include=['kivy*']),
package_dir={'kivy': 'kivy'},
package_data={
'kivy':
glob_paths('*.pxd', '*.pxi') +
glob_paths('**/*.pxd', '**/*.pxi') +
glob_paths('data/**/*.*') +
glob_paths('include/**/*.*') +
glob_paths('tools/**/*.*', excludes=('.pyc', '.enc')) +
glob_paths('graphics/**/*.h') +
glob_paths('tests/**/*.*') +
[
'setupconfig.py',
] + binary_deps
},
data_files=[] if split_examples else list(examples.items()),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Artistic Software',
'Topic :: Games/Entertainment',
'Topic :: Multimedia :: Graphics :: 3D Rendering',
'Topic :: Multimedia :: Graphics :: Capture :: Digital Camera',
'Topic :: Multimedia :: Graphics :: Presentation',
'Topic :: Multimedia :: Graphics :: Viewers',
'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',
'Topic :: Multimedia :: Video :: Display',
'Topic :: Scientific/Engineering :: Human Machine Interfaces',
'Topic :: Scientific/Engineering :: Visualization',
('Topic :: Software Development :: Libraries :: '
'Application Frameworks'),
'Topic :: Software Development :: User Interfaces'])
else:
setup(
name='Kivy-examples',
version=__version__,
author='Kivy Team and other contributors',
author_email='kivy-dev@googlegroups.com',
url='http://kivy.org',
license='MIT',
description=('Kivy examples.'),
long_description_content_type='text/markdown',
long_description=get_description(),
data_files=list(examples.items()))
| 39.073555
| 80
| 0.597105
|
8cd35eae6c57d8b15e8d3ab699d98750dfc1fdc5
| 4,560
|
py
|
Python
|
src/fairtest/modules/metrics/metric.py
|
columbia/fairtest
|
8696051c9276f127ab8b2f437850f845ff0ca786
|
[
"Apache-2.0"
] | 42
|
2017-01-12T13:59:23.000Z
|
2022-03-01T01:44:12.000Z
|
src/fairtest/modules/metrics/metric.py
|
columbia/fairtest
|
8696051c9276f127ab8b2f437850f845ff0ca786
|
[
"Apache-2.0"
] | 3
|
2019-05-24T21:02:51.000Z
|
2019-11-15T15:36:17.000Z
|
src/fairtest/modules/metrics/metric.py
|
columbia/fairtest
|
8696051c9276f127ab8b2f437850f845ff0ca786
|
[
"Apache-2.0"
] | 20
|
2017-01-12T23:07:10.000Z
|
2021-08-11T09:13:50.000Z
|
"""
Abstract Fairness Metric.
"""
import abc
import numpy as np
class Metric(object):
"""
An abstract fairness metric.
"""
__metaclass__ = abc.ABCMeta
# Types of metrics
DATATYPE_CT = 'ct' # Metrics over a contingency table
DATATYPE_CORR = 'corr' # Correlation metrics
DATATYPE_REG = 'reg' # Regression metrics
# this Metric's data type
dataType = None
    # max data size up to which an exact test is used (approximate above)
    approx_LIMIT_P = None
    # max data size up to which an exact confidence interval is used
    approx_LIMIT_CI = None
def __init__(self):
self.stats = None
def get_size(self, data):
"""
Returns the size of the data for this metric.
Parameters
----------
data :
the data to be evaluated
Returns
-------
size :
the size of the data
"""
if self.dataType == self.DATATYPE_CT:
size = np.array(data).sum()
elif self.dataType == self.DATATYPE_CORR:
if np.array(data).shape == (6,):
size = data[5]
else:
size = len(data)
else:
size = len(data)
return size
def compute(self, data, conf, exact=True):
"""
Computes a confidence interval and p-value for given data.
Exact methods are used for confidence intervals and p-values when
        `exact' is set to `True' and the size of the data is no larger than
        the respective class attributes `approx_LIMIT_CI' and `approx_LIMIT_P'.
Parameters
----------
data :
the data to be evaluated
conf :
the confidence level for confidence intervals
exact :
indicates whether exact methods should be used
Returns
-------
self :
a pointer to the current Metric object. The computed statistics
are stored as an attribute `stats'
"""
size = self.get_size(data)
if not exact or size > min(self.approx_LIMIT_P, self.approx_LIMIT_CI):
try:
ci_low, ci_high, pval = self.approx_stats(data, conf)
except ValueError:
ci_low, ci_high, pval = 0, 0, 10*10
if exact and size <= self.approx_LIMIT_P:
pval = self.exact_test(data)
if exact and size <= self.approx_LIMIT_CI:
ci_low, ci_high = self.exact_ci(data, conf)
self.stats = [ci_low, ci_high, pval]
return self
@abc.abstractmethod
def abs_effect(self):
"""
Converts a confidence interval into an absolute effect size that can
be compared over different contexts.
Returns
-------
effect :
the absolute effect of this Metric
"""
return
@staticmethod
@abc.abstractmethod
def exact_test(data):
"""
Performs an exact test of independence.
Parameters
----------
data :
the data to be evaluated
Returns
-------
pval :
the p-value
"""
return
@staticmethod
@abc.abstractmethod
def validate(sens, output, expl):
"""
Validates the use of this metric for the current investigation.
Parameters
----------
sens :
the sensitive feature
output :
the target feature
expl :
the explanatory feature
"""
return
@staticmethod
@abc.abstractmethod
def exact_ci(data, conf):
"""
Computes an exact confidence interval.
Parameters
----------
data :
the data to be evaluated
conf :
the confidence level
Returns
-------
ci_low :
the lower end of the confidence interval
ci_high :
the higher end of the confidence interval
"""
return
@staticmethod
@abc.abstractmethod
def approx_stats(data, conf):
"""
Computes an approximate confidence interval and p-value.
Parameters
----------
data :
the data to be evaluated
conf :
the confidence level
Returns
-------
ci_low :
the lower end of the confidence interval
ci_high :
the higher end of the confidence interval
pval :
the p-value
"""
return
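# --- Illustrative only: a minimal concrete subclass, sketched here to make
# the abstract interface above concrete. It is NOT part of FairTest; the
# limits and every method body below are placeholder assumptions.
class ExampleCorrMetric(Metric):
    dataType = Metric.DATATYPE_CORR
    approx_LIMIT_P = 1000
    approx_LIMIT_CI = 1000
    def abs_effect(self):
        # One common convention: zero effect if the interval straddles zero,
        # otherwise the CI endpoint closest to zero.
        (ci_low, ci_high, _) = self.stats
        if ci_low <= 0 <= ci_high:
            return 0
        return min(abs(ci_low), abs(ci_high))
    @staticmethod
    def exact_test(data):
        return 1.0  # placeholder p-value
    @staticmethod
    def validate(sens, output, expl):
        pass
    @staticmethod
    def exact_ci(data, conf):
        return 0.0, 0.0  # placeholder interval
    @staticmethod
    def approx_stats(data, conf):
        return 0.0, 0.0, 1.0  # placeholder (ci_low, ci_high, pval)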
| 23.626943
| 78
| 0.533553
|
eddd5ad0ad4b59a8bea036f54f43fbf737e9bab7
| 1,379
|
py
|
Python
|
Validation/RecoParticleFlow/python/PFValidationClient_cff.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
Validation/RecoParticleFlow/python/PFValidationClient_cff.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
Validation/RecoParticleFlow/python/PFValidationClient_cff.py
|
Purva-Chaudhari/cmssw
|
32e5cbfe54c4d809d60022586cf200b7c3020bcf
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
from DQMOffline.PFTau.PFClient_cfi import pfClient, pfClientJetRes
#from DQMOffline.PFTau.PFClient_cfi import *
pfJetClient = pfClient.clone(
FolderNames = ['PFJetValidation/CompWithGenJet'],
HistogramNames = ['delta_et_Over_et_VS_et_'],
CreateProfilePlots = True,
HistogramNamesForProfilePlots = ['delta_et_Over_et_VS_et_','delta_et_VS_et_','delta_eta_VS_et_','delta_phi_VS_et_']
)
pfMETClient = pfClient.clone(
FolderNames = ['PFMETValidation/CompWithGenMET'],
HistogramNames = ['delta_et_Over_et_VS_et_'],
CreateProfilePlots = True,
HistogramNamesForProfilePlots = ['delta_et_Over_et_VS_et_','delta_et_VS_et_','delta_eta_VS_et_','delta_phi_VS_et_']
)
pfJetResClient = pfClientJetRes.clone(
FolderNames = ['PFJetResValidation/JetPtRes'],
HistogramNames = ['delta_et_Over_et_VS_et_', 'BRdelta_et_Over_et_VS_et_', 'ERdelta_et_Over_et_VS_et_'],
CreateEfficiencyPlots = False,
HistogramNamesForEfficiencyPlots = ['pt_', 'eta_', 'phi_']
)
pfElectronClient = pfClient.clone(
FolderNames = ['PFElectronValidation/CompWithGenElectron'],
HistogramNames = [''],
CreateEfficiencyPlots = True,
HistogramNamesForEfficiencyPlots = ['pt_', 'eta_', 'phi_'],
HistogramNamesForProjectionPlots = ['delta_et_Over_et_VS_et_','delta_et_VS_et_','delta_eta_VS_et_','delta_phi_VS_et_']
)
| 40.558824
| 122
| 0.771574
|
101a80d28231761022640160a30e1afe18abe3ae
| 2,497
|
py
|
Python
|
src/easyurl/url_db_update.py
|
liu00david/easyurl
|
485bfb7dab9e78ae775b3b2e2a387a70ab1227b0
|
[
"MIT"
] | null | null | null |
src/easyurl/url_db_update.py
|
liu00david/easyurl
|
485bfb7dab9e78ae775b3b2e2a387a70ab1227b0
|
[
"MIT"
] | null | null | null |
src/easyurl/url_db_update.py
|
liu00david/easyurl
|
485bfb7dab9e78ae775b3b2e2a387a70ab1227b0
|
[
"MIT"
] | null | null | null |
import math
import pathlib
import json
import os
def pair(k1, k2):
"""
Cantor pairing function
"""
z = int(0.5 * (k1 + k2) * (k1 + k2 + 1) + k2)
return z
def depair(z):
"""
Inverse of Cantor pairing function
"""
w = math.floor((math.sqrt(8 * z + 1) - 1) / 2)
t = (w**2 + w) / 2
y = int(z - t)
x = int(w - y)
return x, y
def get_wordlist_path(pos):
curdir_path = pathlib.Path(__file__).parent.resolve()
wordlist_path = os.path.join(curdir_path, ("wordlists/" + pos + '.json'))
return wordlist_path
class UrlDatabaseFuncs:
"""
Called when new URL needs a shortname.
"""
def __init__(self, url):
self.url = url
i = 0
while i < 100000:
shortname_tuple = self.get_shortname_tuple(i)
shortname_string = self.get_shortname_string(shortname_tuple)
print(shortname_string)
i += 1
def db_get_index(self):
"""
Gets next avail index in DB
Input: none, output: index
"""
def get_shortname_tuple(self, index):
"""
Given index, find shortname tuple (int,int,int,int)
Input: index, output: tuple
"""
I_pair = depair(index)
A_pair = depair(I_pair[0])
B_pair = depair(I_pair[1])
return (A_pair[0], A_pair[1], B_pair[0], B_pair[1])
def get_shortname_string(self, shortname_tuple):
"""
Given shortname_tuple, give shortname string
adj(shortname_tuple[0])
nouns(shortname_tuple[1])
adj(shortname_tuple[2])
nouns(shortname_tuple[3])
Input: tuple, output: string
"""
nouns_wordlist_path = get_wordlist_path("nouns")
with open(nouns_wordlist_path) as f1:
nouns_data = json.load(f1)
adjectives_wordlist_path = get_wordlist_path("adjectives")
with open(adjectives_wordlist_path) as f2:
adjectives_data = json.load(f2)
adjective1 = adjectives_data["words"][str(shortname_tuple[0])]
noun1 = nouns_data["words"][str(shortname_tuple[1])]
adjective2 = adjectives_data["words"][str(shortname_tuple[2])]
noun2 = nouns_data["words"][str(shortname_tuple[3])]
shortname_string = adjective1 + noun1 + adjective2 + noun2
return shortname_string
def db_add_entry(self, index, url):
"""
Given index, url, entry into DB
Input: self index, output: none
"""
| 26.56383
| 77
| 0.592311
|
aada8ec58b5d2e110fd82a63c6b3c378c9431c12
| 1,085
|
py
|
Python
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/keyword_plan_ad_group_service/transports/__init__.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/keyword_plan_ad_group_service/transports/__init__.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/keyword_plan_ad_group_service/transports/__init__.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import KeywordPlanAdGroupServiceTransport
from .grpc import KeywordPlanAdGroupServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[KeywordPlanAdGroupServiceTransport]]
_transport_registry['grpc'] = KeywordPlanAdGroupServiceGrpcTransport
__all__ = (
'KeywordPlanAdGroupServiceTransport',
'KeywordPlanAdGroupServiceGrpcTransport',
)
| 33.90625
| 96
| 0.784332
|
13ca41029388e3bbe489c23f93778768cb4ebf03
| 5,641
|
py
|
Python
|
configuration/coding_plans.py
|
AfricasVoices/Test-Pipeline---Daniel
|
285f608fa085b3a1e52da33b27eaab53fa673b0b
|
[
"MIT"
] | null | null | null |
configuration/coding_plans.py
|
AfricasVoices/Test-Pipeline---Daniel
|
285f608fa085b3a1e52da33b27eaab53fa673b0b
|
[
"MIT"
] | 2
|
2021-02-11T09:22:50.000Z
|
2021-02-16T16:55:27.000Z
|
configuration/coding_plans.py
|
AfricasVoices/Test-Pipeline---Daniel
|
285f608fa085b3a1e52da33b27eaab53fa673b0b
|
[
"MIT"
] | 1
|
2021-04-09T06:38:36.000Z
|
2021-04-09T06:38:36.000Z
|
from core_data_modules.cleaners import somali, swahili, Codes
from core_data_modules.cleaners.cleaning_utils import CleaningUtils
from core_data_modules.traced_data import Metadata
from core_data_modules.traced_data.util.fold_traced_data import FoldStrategies
from configuration import code_imputation_functions
from configuration.code_schemes import CodeSchemes
from src.lib.configuration_objects import CodingConfiguration, CodingModes, CodingPlan
def clean_age_with_range_filter(text):
"""
Cleans age from the given `text`, setting to NC if the cleaned age is not in the range 10 <= age < 100.
"""
age = swahili.DemographicCleaner.clean_age(text)
if type(age) == int and 10 <= age < 100:
return str(age)
# TODO: Once the cleaners are updated to not return Codes.NOT_CODED, this should be updated to still return
# NC in the case where age is an int but is out of range
else:
return Codes.NOT_CODED
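# Illustrative behaviour, assuming the upstream cleaner parses plain digits:
#   clean_age_with_range_filter("25")  -> "25"
#   clean_age_with_range_filter("150") -> Codes.NOT_CODED  (out of range)
#   clean_age_with_range_filter("abc") -> Codes.NOT_CODED  (not parseable)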
def get_rqa_coding_plans(pipeline_name):
return [
CodingPlan(raw_field="rqa_s01e01_raw",
time_field="sent_on",
run_id_field="rqa_s01e01_run_id",
coda_filename="TEST_PIPELINE_DANIEL_s01e01.json",
icr_filename="s01e01.csv",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.MULTIPLE,
code_scheme=CodeSchemes.S01E01,
coded_field="rqa_s01e01_coded",
analysis_file_key="rqa_s01e01",
fold_strategy=lambda x, y: FoldStrategies.list_of_labels(CodeSchemes.S01E01, x, y)
)
],
ws_code=CodeSchemes.WS_CORRECT_DATASET.get_code_with_match_value("s01e01"),
raw_field_fold_strategy=FoldStrategies.concatenate),
]
def get_demog_coding_plans(pipeline_name):
return [
CodingPlan(raw_field="gender_raw",
time_field="gender_time",
coda_filename="TEST_PIPELINE_DANIEL_gender.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.GENDER,
cleaner=somali.DemographicCleaner.clean_gender,
coded_field="gender_coded",
analysis_file_key="gender",
fold_strategy=FoldStrategies.assert_label_ids_equal
)
],
ws_code=CodeSchemes.WS_CORRECT_DATASET.get_code_with_match_value("gender"),
raw_field_fold_strategy=FoldStrategies.assert_equal),
CodingPlan(raw_field="age_raw",
time_field="age_time",
coda_filename="TEST_PIPELINE_DANIEL_age.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.AGE,
cleaner=clean_age_with_range_filter,
coded_field="age_coded",
analysis_file_key="age",
include_in_theme_distribution=Codes.FALSE,
fold_strategy=FoldStrategies.assert_label_ids_equal
),
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.AGE_CATEGORY,
coded_field="age_category_coded",
analysis_file_key="age_category",
fold_strategy=FoldStrategies.assert_label_ids_equal
)
],
code_imputation_function=code_imputation_functions.impute_age_category,
ws_code=CodeSchemes.WS_CORRECT_DATASET.get_code_with_match_value("age"),
raw_field_fold_strategy=FoldStrategies.assert_equal),
CodingPlan(raw_field="location_raw",
time_field="location_time",
coda_filename="TEST_PIPELINE_DANIEL_location.json",
coding_configurations=[
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.KENYA_COUNTY,
coded_field="county_coded",
analysis_file_key="county",
fold_strategy=FoldStrategies.assert_label_ids_equal
),
CodingConfiguration(
coding_mode=CodingModes.SINGLE,
code_scheme=CodeSchemes.KENYA_CONSTITUENCY,
coded_field="constituency_coded",
analysis_file_key="constituency",
fold_strategy=FoldStrategies.assert_label_ids_equal
)
],
code_imputation_function=code_imputation_functions.impute_kenya_location_codes,
ws_code=CodeSchemes.WS_CORRECT_DATASET.get_code_with_match_value("location"),
raw_field_fold_strategy=FoldStrategies.assert_equal)
]
def get_follow_up_coding_plans(pipeline_name):
return []
def get_ws_correct_dataset_scheme(pipeline_name):
return CodeSchemes.WS_CORRECT_DATASET
| 47.403361
| 115
| 0.579684
|
1400f957a609ef78e81de88c1e6e6a23712aace4
| 564
|
py
|
Python
|
generators/app/templates/resources/lib/context.py
|
xbmc/generator-kodi
|
b5a70a4969c7eac339008ba7fc02b1cbdff082a4
|
[
"Apache-2.0"
] | 70
|
2017-02-27T19:38:07.000Z
|
2022-02-11T16:30:25.000Z
|
generators/app/templates/resources/lib/context.py
|
xbmc/generator-kodi
|
b5a70a4969c7eac339008ba7fc02b1cbdff082a4
|
[
"Apache-2.0"
] | 41
|
2017-01-16T07:42:15.000Z
|
2020-07-19T09:28:06.000Z
|
generators/app/templates/resources/lib/context.py
|
Razzeee/generator-kodi
|
b5a70a4969c7eac339008ba7fc02b1cbdff082a4
|
[
"Apache-2.0"
] | 30
|
2017-01-21T01:07:12.000Z
|
2022-02-09T09:31:47.000Z
|
# -*- coding: utf-8 -*-
import xbmcaddon
import xbmcgui
def run():
# Implement what your contextmenu aims to do here
# For example you could call executebuiltin to call another addon
# xbmc.executebuiltin("RunScript(script.example,action=show)")
# You might want to check your addon.xml for the visible condition of your contextmenu
# Read more here http://kodi.wiki/view/Context_Item_Add-ons
addon = xbmcaddon.Addon()
addon_name = addon.getAddonInfo('name')
line1 = "Hello World!"
xbmcgui.Dialog().ok(addon_name, line1)
| 29.684211
| 90
| 0.70922
|
27fc0287a1f6f179656962516d0d9b24da8380a1
| 1,685
|
py
|
Python
|
release/scripts/UpdateReleasedCohorts.py
|
HDRUK/PGS_Catalog
|
d59067fc61961770d1e0f8bb6081d10d8bbea3e9
|
[
"Apache-2.0"
] | 5
|
2020-01-29T18:04:08.000Z
|
2022-01-04T18:04:05.000Z
|
release/scripts/UpdateReleasedCohorts.py
|
PGScatalog/PGS_Catalog
|
d59067fc61961770d1e0f8bb6081d10d8bbea3e9
|
[
"Apache-2.0"
] | 37
|
2020-02-25T08:50:04.000Z
|
2022-02-15T10:11:34.000Z
|
release/scripts/UpdateReleasedCohorts.py
|
HDRUK/PGS_Catalog
|
d59067fc61961770d1e0f8bb6081d10d8bbea3e9
|
[
"Apache-2.0"
] | 3
|
2020-01-14T10:19:14.000Z
|
2020-09-08T20:11:34.000Z
|
from catalog.models import Cohort, Score
from pgs_web import constants
class UpdateReleasedCohorts:
cohorts_released = []
cohorts_not_released = []
def __init__(self):
self.cohorts = Cohort.objects.only('id','released').all()
self.score_id_list = Score.objects.values_list('id', flat=True).filter(date_released__isnull=False)
def update_cohorts(self):
count_all = 0
for cohort in self.cohorts:
c_associations = cohort.associated_pgs_ids
associations_list = set( c_associations['development'] + c_associations['evaluation'] )
released = False
if (set(self.score_id_list).intersection(associations_list)):
released = True
            if not released:
self.cohorts_not_released.append(cohort.id)
else:
self.cohorts_released.append(cohort.id)
# Update DB with new values
Cohort.objects.filter(id__in=self.cohorts_released).update(released=True)
Cohort.objects.filter(id__in=self.cohorts_not_released).update(released=False)
db_count_released = Cohort.objects.filter(released=True).count()
db_count_not_released = Cohort.objects.filter(released=False).count()
print(f'> Cohorts - all: {len(self.cohorts)}')
print(f'> Cohorts - released: {len(self.cohorts_released)} | DB: {db_count_released}')
print(f'> Cohorts - not released: {len(self.cohorts_not_released)} | DB: {db_count_not_released}')
def run():
""" Update the Cohort entries, setting the flag 'released'."""
released_cohorts = UpdateReleasedCohorts()
released_cohorts.update_cohorts()
| 40.119048
| 107
| 0.672997
|
7989581b7ce44ff65482bc210f5de0ff8f3a7d8e
| 1,877
|
py
|
Python
|
setup.py
|
AsciiShell/raspberrypi_omxplayer_control
|
ba1ae9c23e3050fada2c475af77adfe71ad7c2b7
|
[
"MIT"
] | null | null | null |
setup.py
|
AsciiShell/raspberrypi_omxplayer_control
|
ba1ae9c23e3050fada2c475af77adfe71ad7c2b7
|
[
"MIT"
] | null | null | null |
setup.py
|
AsciiShell/raspberrypi_omxplayer_control
|
ba1ae9c23e3050fada2c475af77adfe71ad7c2b7
|
[
"MIT"
] | null | null | null |
from pkg_resources import parse_requirements
from setuptools import find_packages, setup
def load_requirements(filename: str) -> list:
requirements = []
with open(filename, 'r') as f:
for requirement in parse_requirements(f.read()):
extras = '[{}]'.format(','.join(requirement.extras)) if requirement.extras else ''
requirements.append(
'{}{}{}'.format(requirement.name, extras, requirement.specifier)
)
return requirements
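# Illustrative output for a hypothetical requirements.txt, showing how each
# parsed requirement is rebuilt as "name[extras]specifier":
#   requests[security]>=2.20  ->  "requests[security]>=2.20"
#   flask                     ->  "flask"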
module_name = 'raspberrypi_omxplayer_control'
with open('README.md', 'rt') as f:
long_description = f.read()
setup(
name=module_name,
version='0.1.18',
description='Web remote control for raspberrypi omxplayer',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/AsciiShell/raspberrypi_omxplayer_control',
author='asciishell (Aleksey Podchezertsev)',
author_email='dev@asciishell.ru',
license='MIT',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
],
keywords=['raspberry-pi', 'omxplayer', 'remote-control', 'python3'],
packages=find_packages(exclude=['tests']),
python_requires='>=3.5',
entry_points={
'console_scripts': [
'{0} = {0}.__main__:main'.format(module_name),
]
},
include_package_data=True,
zip_safe=False,
install_requires=load_requirements('requirements.txt'),
)
| 34.127273
| 94
| 0.641982
|
aaa54687c4820c0e96b0d69911f46f5fab478635
| 37,063
|
py
|
Python
|
policy_sentry/writing/sid_group.py
|
agilgur5/policy_sentry
|
39c9e926488376cee671b2c5b5cbdc15b215b852
|
[
"MIT"
] | null | null | null |
policy_sentry/writing/sid_group.py
|
agilgur5/policy_sentry
|
39c9e926488376cee671b2c5b5cbdc15b215b852
|
[
"MIT"
] | null | null | null |
policy_sentry/writing/sid_group.py
|
agilgur5/policy_sentry
|
39c9e926488376cee671b2c5b5cbdc15b215b852
|
[
"MIT"
] | null | null | null |
"""
sid_group indicates that this is a collection of policy-related data organized by their SIDs
"""
import logging
import re
from policy_sentry.analysis.expand import determine_actions_to_expand
from policy_sentry.querying.all import get_all_actions
from policy_sentry.querying.actions import (
get_action_data,
get_actions_with_arn_type_and_access_level,
get_dependent_actions,
get_actions_that_support_wildcard_arns_only,
get_actions_at_access_level_that_support_wildcard_arns_only,
)
from policy_sentry.querying.arns import get_resource_type_name_with_raw_arn
from policy_sentry.util.arns import does_arn_match, get_service_from_arn, parse_arn
from policy_sentry.util.text import capitalize_first_character, strip_special_characters
from policy_sentry.writing.minimize import minimize_statement_actions
from policy_sentry.writing.validate import check_actions_schema, check_crud_schema
from policy_sentry.shared.constants import POLICY_LANGUAGE_VERSION
from policy_sentry.util.actions import get_lowercase_action_list
logger = logging.getLogger(__name__)
# pylint: disable=too-many-instance-attributes
class SidGroup:
"""
This class is critical to the creation of least privilege policies.
It uses the SIDs as namespaces. The namespaces follow this format:
`{Servicename}{Accesslevel}{Resourcetypename}`
So, a resulting statement's SID might look like 'S3ListBucket'
If a condition key is supplied (like `s3:RequestJob`), the SID string will be significantly longer.
It will resemble this format:
`{Servicename}{Accesslevel}{Resourcetypename}{Conditionkeystring}{Conditiontypestring}{Conditionkeyvalue}`
For example: EC2 write actions on the security-group resource, using the following condition map:
```
"Condition": {
"StringEquals": {"ec2:ResourceTag/Owner": "${aws:username}"}
}
```
The resulting SID would be:
`Ec2WriteSecuritygroupResourcetagownerStringequalsAwsusername`
Or, for actions that support wildcard ARNs only, an example could be:
`Ec2WriteMultResourcetagownerStringequalsAwsusername`
"""
def __init__(self):
# Dict instead of list
# sids instead of ARN
self.sids = {}
self.universal_conditions = {}
self.skip_resource_constraints = []
self.exclude_actions = []
self.wildcard_only_single_actions = []
# When a user requests all wildcard-only actions available under a service at a specific access level
self.wildcard_only_service_read = []
self.wildcard_only_service_write = []
self.wildcard_only_service_list = []
self.wildcard_only_service_tagging = []
self.wildcard_only_service_permissions_management = []
def get_sid_group(self):
"""
Get the whole SID group as JSON
"""
return self.sids
def get_sid(self, sid):
"""Get a single group by the SID identifier"""
        if sid in self.sids:
return self.sids[sid]
else:
raise Exception(f"No SID with the value of {sid}")
def list_sids(self):
"""
Get a list of all of the SIDs by their identifiers
Returns:
List: A list of SIDs in the SID group
"""
return self.sids.keys()
def add_exclude_actions(self, exclude_actions):
"""To exclude actions from the output"""
if exclude_actions:
expanded_actions = determine_actions_to_expand(exclude_actions)
self.exclude_actions = [x.lower() for x in expanded_actions]
else:
self.exclude_actions = []
def add_skip_resource_constraints(self, skip_resource_constraints_actions):
"""
        To override resource constraint requirements - i.e., to allow an action such as `s3:PutObject` against `*`
        resources instead of restricting it to a specific path, list that action here.
"""
if isinstance(skip_resource_constraints_actions, list):
self.skip_resource_constraints.extend(skip_resource_constraints_actions)
elif isinstance(skip_resource_constraints_actions, str):
            self.skip_resource_constraints.append(skip_resource_constraints_actions)
else:
raise Exception("Please provide 'skip_resource_constraints' as a list of IAM actions.")
def add_sts_actions(self, sts_actions):
"""
To add STS actions to the output from special YAML section
"""
if sts_actions:
# Hard coded for this special case
service_prefix = "sts"
access_level = "Write"
for action, arns in sts_actions.items():
                clean_action = action.replace('-', '')  # Template convention: dash-separated names instead of CamelCase
service_action_data = get_action_data(service_prefix, clean_action)
# Schema validation takes care of this, but just in case. No data returned for the action
if not service_action_data:
raise Exception(f"Could not find service action data for {service_prefix} - {clean_action}")
for row in service_action_data[service_prefix]:
for arn in arns:
if not arn: # skip the - '' situation
continue
if (
does_arn_match(arn, row["resource_arn_format"])
and row["access_level"] == access_level
):
raw_arn_format = row["resource_arn_format"]
# Each action will get its own namespace sts:AssumeRole -> AssumeRole
                            # The -1 index is a neat trick: if the colon ever goes away, we won't get an index error.
sid_namespace = row["action"].split(':')[-1]
temp_sid_dict = {
"arn": [arn],
"service": service_prefix,
"access_level": access_level,
"arn_format": raw_arn_format,
"actions": [row["action"]],
"conditions": [], # TODO: Add conditions
}
# Using a custom namespace and not gathering actions so no need to find
# dependent actions either, though we could do it here
if sid_namespace in self.sids.keys():
# If the ARN already exists there, skip it.
if arn not in self.sids[sid_namespace]["arn"]:
self.sids[sid_namespace]["arn"].append(arn)
else:
self.sids[sid_namespace] = temp_sid_dict
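    # The matching template section looks roughly like this (ARN is
    # illustrative; note the dash-separated action-name convention):
    #   sts:
    #     assume-role:
    #       - "arn:aws:iam::123456789012:role/demo"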
def add_requested_service_wide(self, service_prefixes, access_level):
"""
When a user requests all wildcard-only actions available under a service at a specific access level
Arguments:
service_prefixes: A list of service prefixes
access_level: The requested access level
"""
if access_level == "Read":
self.wildcard_only_service_read = service_prefixes
elif access_level == "Write":
self.wildcard_only_service_write = service_prefixes
elif access_level == "List":
self.wildcard_only_service_list = service_prefixes
elif access_level == "Tagging":
self.wildcard_only_service_tagging = service_prefixes
elif access_level == "Permissions management":
self.wildcard_only_service_permissions_management = service_prefixes
def process_wildcard_only_actions(self):
"""
After (1) the list of wildcard-only single actions have been added and (2) the list of wildcard-only service-wide actions have been added, process them and store them under the proper SID.
"""
provided_wildcard_actions = (
self.wildcard_only_single_actions
+ get_wildcard_only_actions_matching_services_and_access_level(self.wildcard_only_service_read, "Read")
+ get_wildcard_only_actions_matching_services_and_access_level(self.wildcard_only_service_list, "List")
+ get_wildcard_only_actions_matching_services_and_access_level(self.wildcard_only_service_write, "Write")
+ get_wildcard_only_actions_matching_services_and_access_level(self.wildcard_only_service_tagging, "Tagging")
+ get_wildcard_only_actions_matching_services_and_access_level(self.wildcard_only_service_permissions_management, "Permissions management")
)
self.add_wildcard_only_actions(
provided_wildcard_actions
)
def get_rendered_policy(self, minimize=None):
"""
Get the JSON rendered policy
Arguments:
minimize: Reduce the character count of policies without creating overlap with other action names
Returns:
Dictionary: The IAM Policy JSON
"""
statements = []
# Only set the actions to lowercase if minimize is provided
all_actions = get_all_actions(lowercase=True)
# render the policy
sids_to_be_changed = []
for sid in self.sids:
temp_actions = self.sids[sid]["actions"]
if len(temp_actions) == 0:
logger.debug(f"No actions for sid {sid}")
continue
actions = []
if self.exclude_actions:
for temp_action in temp_actions:
if temp_action.lower() in self.exclude_actions:
logger.debug(f"\tExcluded action: {temp_action}")
else:
if temp_action not in actions:
actions.append(temp_action)
else:
actions = temp_actions
# temp_actions.clear()
# Check if SID is empty of actions. Continue if yes.
if not actions:
continue
match_found = False
if minimize is not None and isinstance(minimize, int):
logger.debug("Minimizing statements...")
actions = minimize_statement_actions(
actions, all_actions, minchars=minimize
)
# searching in the existing statements
                # further minimizing the output
for stmt in statements:
if stmt["Resource"] == self.sids[sid]["arn"]:
stmt["Action"].extend(actions)
match_found = True
sids_to_be_changed.append(stmt["Sid"])
break
logger.debug(f"Adding statement with SID {sid}")
logger.debug(f"{sid} SID has the actions: {actions}")
logger.debug(f"{sid} SID has the resources: {self.sids[sid]['arn']}")
if not match_found:
statements.append(
{
"Sid": sid,
"Effect": "Allow",
"Action": actions,
"Resource": self.sids[sid]["arn"],
}
)
if sids_to_be_changed:
for stmt in statements:
if stmt['Sid'] in sids_to_be_changed:
arn_details = parse_arn(stmt['Resource'][0])
resource_path = arn_details.get("resource_path")
resource_sid_segment = strip_special_characters(
f"{arn_details['resource']}{resource_path}"
)
stmt['Sid'] = create_policy_sid_namespace(arn_details['service'], "Mult", resource_sid_segment)
policy = {"Version": POLICY_LANGUAGE_VERSION, "Statement": statements}
return policy
# pylint: disable=unused-argument
def add_by_arn_and_access_level(
self, arn_list, access_level, conditions_block=None
):
"""
This adds the user-supplied ARN(s), service prefixes, access levels, and condition keys (if applicable) given
by the user. It derives the list of IAM actions based on the user's requested ARNs and access levels.
Arguments:
arn_list: Just a list of resource ARNs.
access_level: "Read", "List", "Tagging", "Write", or "Permissions management"
conditions_block: Optionally, a condition block with one or more conditions
"""
for arn in arn_list:
service_prefix = get_service_from_arn(arn)
service_action_data = get_action_data(service_prefix, "*")
for service_prefix in service_action_data:
for row in service_action_data[service_prefix]:
if (
does_arn_match(arn, row["resource_arn_format"])
and row["access_level"] == access_level
):
raw_arn_format = row["resource_arn_format"]
resource_type_name = get_resource_type_name_with_raw_arn(
raw_arn_format
)
sid_namespace = create_policy_sid_namespace(
service_prefix, access_level, resource_type_name
)
actions = get_actions_with_arn_type_and_access_level(
service_prefix, resource_type_name, access_level
)
# Make supplied actions lowercase
# supplied_actions = [x.lower() for x in actions]
supplied_actions = actions.copy()
dependent_actions = get_dependent_actions(supplied_actions)
# List comprehension to get all dependent actions that are not in the supplied actions.
dependent_actions = [
x for x in dependent_actions if x not in supplied_actions
]
if len(dependent_actions) > 0:
for dep_action in dependent_actions:
self.add_action_without_resource_constraint(dep_action)
# self.add_action_without_resource_constraint(
# str.lower(dep_action)
# )
temp_sid_dict = {
"arn": [arn],
"service": service_prefix,
"access_level": access_level,
"arn_format": raw_arn_format,
"actions": actions,
"conditions": [], # TODO: Add conditions
}
if sid_namespace in self.sids.keys():
# If the ARN already exists there, skip it.
if arn not in self.sids[sid_namespace]["arn"]:
self.sids[sid_namespace]["arn"].append(arn)
# If it did not exist before at all, create it.
else:
self.sids[sid_namespace] = temp_sid_dict
def add_action_without_resource_constraint(
self, action, sid_namespace="MultMultNone"
):
"""
This handles the cases where certain actions do not handle resource constraints - either by AWS, or for
flexibility when adding dependent actions.
Arguments:
action: The single action to add to the SID namespace. For instance, s3:ListAllMyBuckets
sid_namespace: MultMultNone by default. Other valid option is "SkipResourceConstraints"
"""
if sid_namespace == "SkipResourceConstraints":
temp_sid_dict = {
"arn": ["*"],
"service": "Skip",
"access_level": "ResourceConstraints",
"arn_format": "*",
"actions": [action],
}
elif sid_namespace == "MultMultNone":
temp_sid_dict = {
"arn": ["*"],
"service": "Mult",
"access_level": "Mult",
"arn_format": "*",
"actions": [action],
}
else:
raise Exception(
"Please specify the sid_namespace as either 'SkipResourceConstraints' or "
"'MultMultNone'."
)
if isinstance(action, str):
if sid_namespace in self.sids.keys():
if action not in self.sids[sid_namespace]["actions"]:
self.sids[sid_namespace]["actions"].append(action)
else:
self.sids[sid_namespace] = temp_sid_dict
else:
raise Exception("Please provide the action as a string, not a list.")
return self.sids
def add_by_list_of_actions(self, supplied_actions):
"""
Takes a list of actions, queries the database for corresponding arns, adds them to the object.
Arguments:
supplied_actions: A list of supplied actions
"""
# actions_list = get_dependent_actions(supplied_actions)
dependent_actions = get_dependent_actions(supplied_actions)
dependent_actions = [x for x in dependent_actions if x not in supplied_actions]
logger.debug("Adding by list of actions")
logger.debug(f"Supplied actions: {str(supplied_actions)}")
logger.debug(f"Dependent actions: {str(dependent_actions)}")
arns_matching_supplied_actions = []
# arns_matching_supplied_actions is a list of dicts.
        # A list of dicts is used rather than a dictionary because duplicate
        # values occur by nature of how the entries in the IAM database are structured.
for action in supplied_actions:
service_name, action_name = action.split(":")
action_data = get_action_data(service_name, action_name)
for row in action_data[service_name]:
if row["resource_arn_format"] not in arns_matching_supplied_actions:
arns_matching_supplied_actions.append(
{
"resource_arn_format": row["resource_arn_format"],
"access_level": row["access_level"],
"action": row["action"],
}
)
# Identify the actions that do not support resource constraints
# If that's the case, add it to the wildcard namespace. Otherwise, don't add it.
actions_without_resource_constraints = []
for item in arns_matching_supplied_actions:
if item["resource_arn_format"] != "*":
self.add_by_arn_and_access_level(
[item["resource_arn_format"]], item["access_level"]
)
else:
actions_without_resource_constraints.append(item["action"])
# If there are any dependent actions, we need to add them without resource constraints.
# Otherwise, we get into issues where the amount of extra SIDs will balloon.
# Also, the user has no way of knowing what those dependent actions are beforehand.
# TODO: This is, in fact, a great opportunity to introduce conditions. But we aren't there yet.
if len(dependent_actions) > 0:
for dep_action in dependent_actions:
self.add_action_without_resource_constraint(dep_action)
# self.add_action_without_resource_constraint(str.lower(dep_action))
# Now, because add_by_arn_and_access_level() adds all actions under an access level, we have to
# remove all actions that do not match the supplied_actions. This is done in-place.
logger.debug(
"Purging actions that do not match the requested actions and dependent actions"
)
logger.debug(f"Supplied actions: {str(supplied_actions)}")
logger.debug(f"Dependent actions: {str(dependent_actions)}")
self.remove_actions_not_matching_these(supplied_actions + dependent_actions)
for action in actions_without_resource_constraints:
logger.debug(
f"Deliberately adding the action {action} without resource constraints"
)
self.add_action_without_resource_constraint(action)
logger.debug(
"Removing actions that are in the wildcard arn (Resources = '*') as well as other statements that have "
"resource constraints "
)
self.remove_actions_duplicated_in_wildcard_arn()
logger.debug("Getting the rendered policy")
rendered_policy = self.get_rendered_policy()
return rendered_policy
def process_template(self, cfg, minimize=None):
"""
        Process the Policy Sentry template as a dict. This auto-detects whether the file is in CRUD mode or
        Actions mode.
Arguments:
cfg: The loaded YAML as a dict. Must follow Policy Sentry dictated format.
minimize: Minimize the resulting statement with *safe* usage of wildcards to reduce policy length. Set this to the character length you want - for example, 0, or 4. Defaults to none.
Returns:
Dictionary: The rendered IAM JSON Policy
"""
if cfg.get("mode") == "crud":
logger.debug("CRUD mode selected")
check_crud_schema(cfg)
# EXCLUDE ACTIONS
if cfg.get("exclude-actions"):
if cfg.get("exclude-actions")[0] != "":
self.add_exclude_actions(cfg["exclude-actions"])
# WILDCARD ONLY SECTION
if cfg.get("wildcard-only"):
if cfg.get("wildcard-only").get("single-actions"):
if cfg["wildcard-only"]["single-actions"][0] != "":
provided_wildcard_actions = cfg["wildcard-only"]["single-actions"]
logger.debug(f"Requested wildcard-only actions: {str(provided_wildcard_actions)}")
self.wildcard_only_single_actions = provided_wildcard_actions
if cfg.get("wildcard-only").get("service-read"):
if cfg["wildcard-only"]["service-read"][0] != "":
service_read = cfg["wildcard-only"]["service-read"]
logger.debug(f"Requested wildcard-only actions: {str(service_read)}")
self.wildcard_only_service_read = service_read
if cfg.get("wildcard-only").get("service-write"):
if cfg["wildcard-only"]["service-write"][0] != "":
service_write = cfg["wildcard-only"]["service-write"]
logger.debug(f"Requested wildcard-only actions: {str(service_write)}")
self.wildcard_only_service_write = service_write
if cfg.get("wildcard-only").get("service-list"):
if cfg["wildcard-only"]["service-list"][0] != "":
service_list = cfg["wildcard-only"]["service-list"]
logger.debug(f"Requested wildcard-only actions: {str(service_list)}")
self.wildcard_only_service_list = service_list
if cfg.get("wildcard-only").get("service-tagging"):
if cfg["wildcard-only"]["service-tagging"][0] != "":
service_tagging = cfg["wildcard-only"]["service-tagging"]
logger.debug(f"Requested wildcard-only actions: {str(service_tagging)}")
self.wildcard_only_service_tagging = service_tagging
if cfg.get("wildcard-only").get("service-permissions-management"):
if cfg["wildcard-only"]["service-permissions-management"][0] != "":
service_permissions_management = cfg["wildcard-only"]["service-permissions-management"]
logger.debug(f"Requested wildcard-only actions: {str(service_permissions_management)}")
self.wildcard_only_service_permissions_management = service_permissions_management
# Process the wildcard-only section
self.process_wildcard_only_actions()
# Standard access levels
if cfg.get("read"):
if cfg["read"][0] != "":
logger.debug(f"Requested access to arns: {str(cfg['read'])}")
self.add_by_arn_and_access_level(cfg["read"], "Read")
if cfg.get("write"):
if cfg["write"][0] != "":
logger.debug(f"Requested access to arns: {str(cfg['write'])}")
self.add_by_arn_and_access_level(cfg["write"], "Write")
if cfg.get("list"):
if cfg["list"][0] != "":
logger.debug(f"Requested access to arns: {str(cfg['list'])}")
self.add_by_arn_and_access_level(cfg["list"], "List")
if cfg.get("tagging"):
if cfg["tagging"][0] != "":
logger.debug(f"Requested access to arns: {str(cfg['tagging'])}")
self.add_by_arn_and_access_level(cfg["tagging"], "Tagging")
if cfg.get("permissions-management"):
if cfg["permissions-management"][0] != "":
logger.debug(f"Requested access to arns: {str(cfg['permissions-management'])}")
self.add_by_arn_and_access_level(cfg["permissions-management"], "Permissions management")
# SKIP RESOURCE CONSTRAINTS
if cfg.get("skip-resource-constraints"):
if cfg["skip-resource-constraints"][0] != "":
logger.debug(
f"Requested override: the actions {str(cfg['skip-resource-constraints'])} will "
f"skip resource constraints."
)
self.add_skip_resource_constraints(cfg["skip-resource-constraints"])
for skip_resource_constraints_action in self.skip_resource_constraints:
self.add_action_without_resource_constraint(
skip_resource_constraints_action, "SkipResourceConstraints"
)
# STS Section
if cfg.get("sts"):
                logger.debug(
                    "STS section detected. Building assume role policy statement"
                )
self.add_sts_actions(cfg['sts'])
elif cfg.get("mode") == "actions":
check_actions_schema(cfg)
if "actions" in cfg.keys():
if cfg["actions"] is not None and cfg["actions"][0] != "":
self.add_by_list_of_actions(cfg["actions"])
rendered_policy = self.get_rendered_policy(minimize)
return rendered_policy
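    # A minimal CRUD-mode template this method accepts (ARNs illustrative):
    #   mode: crud
    #   read:
    #     - "arn:aws:s3:::example-bucket"
    #   write:
    #     - "arn:aws:ssm:us-east-1:123456789012:parameter/demo"
    # The other sections handled above (wildcard-only, sts,
    # skip-resource-constraints, exclude-actions) are all optional.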
def add_wildcard_only_actions(self, provided_wildcard_actions):
"""
Given a list of IAM actions, add individual IAM Actions that do not support resource constraints to the MultMultNone SID
Arguments:
provided_wildcard_actions: list actions provided by the user.
"""
if isinstance(provided_wildcard_actions, list):
verified_wildcard_actions = remove_actions_that_are_not_wildcard_arn_only(
provided_wildcard_actions
)
if len(verified_wildcard_actions) > 0:
logger.debug(
"Attempting to add the following actions to the policy: %s",
verified_wildcard_actions,
)
self.add_by_list_of_actions(verified_wildcard_actions)
logger.debug(
"Added the following wildcard-only actions to the policy: %s",
verified_wildcard_actions,
)
def add_wildcard_only_actions_matching_services_and_access_level(
self, services, access_level
):
"""
Arguments:
services: A list of AWS services
access_level: An access level as it is written in the database, such as 'Read', 'Write', 'List', 'Permissions management', or 'Tagging'
"""
wildcard_only_actions_to_add = []
for service in services:
actions = get_actions_at_access_level_that_support_wildcard_arns_only(
service, access_level
)
wildcard_only_actions_to_add.extend(actions)
self.add_wildcard_only_actions(wildcard_only_actions_to_add)
def remove_actions_not_matching_these(self, actions_to_keep):
"""
Arguments:
actions_to_keep: A list of actions to leave in the policy. All actions not in this list are removed.
"""
actions_to_keep = get_lowercase_action_list(actions_to_keep)
actions_deleted = []
for sid in self.sids:
placeholder_actions_list = []
for action in self.sids[sid]["actions"]:
# if the action is not in the list of selected actions, don't copy it to the placeholder list
if action.lower() in actions_to_keep:
placeholder_actions_list.append(action)
elif action.lower() not in actions_to_keep:
logger.debug("%s not found in list of actions to keep: %s", action.lower(), actions_to_keep)
actions_deleted.append(action)
# Clear the list and then extend it to include the updated actions only
self.sids[sid]["actions"].clear()
self.sids[sid]["actions"].extend(placeholder_actions_list.copy())
# Highlight the actions that you remove
logger.debug("Actions deleted: %s", str(actions_deleted))
# Now that we've removed a bunch of actions, if there are SID groups without any actions,
# remove them so we don't get SIDs with empty action lists
self.remove_sids_with_empty_action_lists()
def remove_sids_with_empty_action_lists(self):
"""
Now that we've removed a bunch of actions, if there are SID groups without any actions, remove them so we don't get SIDs with empty action lists
"""
sid_namespaces_to_delete = []
for sid in self.sids:
if len(self.sids[sid]["actions"]) > 0:
pass
# If the size is zero, add it to the indexes_to_delete list.
else:
sid_namespaces_to_delete.append(sid)
# Loop through sid_namespaces_to_delete in reverse order (so we delete index
# 10 before index 8, for example)
if len(sid_namespaces_to_delete) > 0:
for i in reversed(range(len(sid_namespaces_to_delete))):
del self.sids[sid_namespaces_to_delete[i]]
def remove_actions_duplicated_in_wildcard_arn(self):
"""
Removes actions from the object that are in a resource-specific ARN, as well as the `*` resource.
For example, if `ssm:GetParameter` is restricted to a specific parameter path, as well as `*`, then we want to
remove the `*` option to force least privilege.
"""
actions_under_wildcard_resources = []
actions_under_wildcard_resources_to_nuke = []
# Build a temporary list. Contains actions in MultMultNone SID (where resources = "*")
for sid in self.sids:
if self.sids[sid]["arn_format"] == "*":
actions_under_wildcard_resources.extend(self.sids[sid]["actions"])
# If the actions under the MultMultNone SID exist under other SIDs
if len(actions_under_wildcard_resources) > 0:
for sid in self.sids:
if "*" not in self.sids[sid]["arn_format"]:
for action in actions_under_wildcard_resources:
if action in self.sids[sid]["actions"]:
if action not in self.skip_resource_constraints:
# add it to a list of actions to nuke when they are under other SIDs
actions_under_wildcard_resources_to_nuke.append(action)
        # If there are actions to remove, strip them from the SIDs whose resources are "*"
if len(actions_under_wildcard_resources_to_nuke) > 0:
for sid in self.sids:
if "*" in self.sids[sid]["arn_format"]:
for action in actions_under_wildcard_resources_to_nuke:
try:
self.sids[sid]["actions"].remove(str(action))
except BaseException: # pylint: disable=broad-except
logger.debug("Removal not successful")
def remove_actions_that_are_not_wildcard_arn_only(actions_list):
"""
Given a list of actions, remove the ones that CAN be restricted to ARNs, leaving only the ones that cannot.
Arguments:
actions_list: A list of actions
Returns:
List: An updated list of actions
"""
# remove duplicates, if there are any
actions_list = list(dict.fromkeys(actions_list))
actions_list_placeholder = []
for action in actions_list:
try:
service_name, action_name = action.split(":")
except ValueError as v_e:
# We will skip the action because this likely means that the wildcard action provided is not valid.
logger.debug(v_e)
logger.debug(
"The value provided in wildcard-only section is not formatted properly."
)
continue
rows = get_actions_that_support_wildcard_arns_only(service_name)
for row in rows:
if row.lower() == action.lower():
actions_list_placeholder.append(f"{service_name}:{action_name}")
return actions_list_placeholder
def get_wildcard_only_actions_matching_services_and_access_level(services, access_level):
"""
Get a list of wildcard-only actions matching the services and access level
Arguments:
services: A list of AWS services
access_level: An access level as it is written in the database, such as 'Read', 'Write', 'List', 'Permissions management', or 'Tagging'
Returns:
List: A list of wildcard-only actions matching the services and access level
"""
wildcard_only_actions_to_add = []
for service in services:
actions = get_actions_at_access_level_that_support_wildcard_arns_only(
service, access_level
)
wildcard_only_actions_to_add.extend(actions)
return wildcard_only_actions_to_add
def create_policy_sid_namespace(
service, access_level, resource_type_name, condition_block=None
):
"""
Simply generates the SID name. The SID groups ARN types that share an access level.
For example, S3 objects vs. SSM Parameter have different ARN types - as do S3 objects vs S3 buckets. That's how we
choose to group them.
Arguments:
service: `ssm`
access_level: `Read`
resource_type_name: `parameter`
condition_block: `{"condition_key_string": "ec2:ResourceTag/purpose", "condition_type_string": "StringEquals", "condition_value": "test"}`
Returns:
String: A string like `SsmReadParameter`
"""
# Sanitize the resource_type_name; otherwise we hit some list conversion
# errors
resource_type_name = re.sub("[^A-Za-z0-9]+", "", resource_type_name)
# Also remove the space from the Access level, if applicable. This only
# applies for "Permissions management"
access_level = re.sub("[^A-Za-z0-9]+", "", access_level)
sid_namespace_prefix = (
capitalize_first_character(strip_special_characters(service))
+ capitalize_first_character(access_level)
+ capitalize_first_character(resource_type_name)
)
if condition_block:
condition_key_namespace = re.sub(
"[^A-Za-z0-9]+", "", condition_block["condition_key_string"]
)
condition_type_namespace = condition_block["condition_type_string"]
condition_value_namespace = re.sub(
"[^A-Za-z0-9]+", "", condition_block["condition_value"]
)
sid_namespace_condition_suffix = (
f"{capitalize_first_character(condition_key_namespace)}"
f"{capitalize_first_character(condition_type_namespace)}"
f"{capitalize_first_character(condition_value_namespace)}"
)
sid_namespace = sid_namespace_prefix + sid_namespace_condition_suffix
else:
sid_namespace = sid_namespace_prefix
return sid_namespace
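# Usage sketch for the function above:
#   create_policy_sid_namespace("ssm", "Read", "parameter")
#       -> "SsmReadParameter"
# A condition block appends a sanitized Key/Type/Value suffix, so the
# docstring's example yields roughly
# "SsmReadParameterEc2ResourceTagpurposeStringEqualsTest".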
| 48.322034
| 196
| 0.601381
|
daa9d7e135497cb5eef2726c6bab6863c6fa5433
| 2,475
|
py
|
Python
|
ml/rl/models/convolutional_network.py
|
joshrose/Horizon
|
a2eb407b31a16560ae78aa6751eb83672a122a7e
|
[
"BSD-3-Clause"
] | 2
|
2021-01-11T18:16:32.000Z
|
2021-11-30T09:34:58.000Z
|
ml/rl/models/convolutional_network.py
|
joshrose/Horizon
|
a2eb407b31a16560ae78aa6751eb83672a122a7e
|
[
"BSD-3-Clause"
] | null | null | null |
ml/rl/models/convolutional_network.py
|
joshrose/Horizon
|
a2eb407b31a16560ae78aa6751eb83672a122a7e
|
[
"BSD-3-Clause"
] | 2
|
2021-01-06T01:06:50.000Z
|
2021-06-24T01:12:52.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from ml.rl.models.fully_connected_network import FullyConnectedNetwork
logger = logging.getLogger(__name__)
class ConvolutionalNetwork(nn.Module):
def __init__(self, cnn_parameters, layers, activations) -> None:
super().__init__()
self.conv_dims = cnn_parameters.conv_dims
self.conv_height_kernels = cnn_parameters.conv_height_kernels
self.conv_width_kernels = cnn_parameters.conv_width_kernels
self.conv_layers: nn.ModuleList = nn.ModuleList()
self.pool_layers: nn.ModuleList = nn.ModuleList()
for i, _ in enumerate(self.conv_dims[1:]):
self.conv_layers.append(
nn.Conv2d(
self.conv_dims[i],
self.conv_dims[i + 1],
kernel_size=(
self.conv_height_kernels[i],
self.conv_width_kernels[i],
),
)
)
nn.init.kaiming_normal_(self.conv_layers[i].weight)
if cnn_parameters.pool_types[i] == "max":
self.pool_layers.append(
nn.MaxPool2d(kernel_size=cnn_parameters.pool_kernels_strides[i])
)
else:
assert False, "Unknown pooling type".format(layers)
input_size = (
cnn_parameters.num_input_channels,
cnn_parameters.input_height,
cnn_parameters.input_width,
)
conv_out = self.conv_forward(torch.ones(1, *input_size))
self.fc_input_dim = int(np.prod(conv_out.size()[1:]))
layers[0] = self.fc_input_dim
self.feed_forward = FullyConnectedNetwork(layers, activations)
def conv_forward(self, input):
x = input
for i, _ in enumerate(self.conv_layers):
x = F.relu(self.conv_layers[i](x))
x = self.pool_layers[i](x)
return x
def forward(self, input) -> torch.FloatTensor:
""" Forward pass for generic convnet DNNs. Assumes activation names
are valid pytorch activation names.
        :param input: image tensor
"""
x = self.conv_forward(input)
x = x.view(-1, self.fc_input_dim)
return self.feed_forward.forward(x)
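# --- Construction sketch (all values illustrative; assumes FullyConnectedNetwork
# accepts (layers, activations) as used above). cnn_parameters is normally a
# config object; types.SimpleNamespace stands in for it here:
#   from types import SimpleNamespace
#   params = SimpleNamespace(
#       conv_dims=[1, 16],           # input channels, then per-layer output channels
#       conv_height_kernels=[5],
#       conv_width_kernels=[5],
#       pool_types=["max"],
#       pool_kernels_strides=[2],
#       num_input_channels=1,
#       input_height=28,
#       input_width=28,
#   )
#   # layers[0] is a placeholder; __init__ overwrites it with fc_input_dim
#   net = ConvolutionalNetwork(params, layers=[-1, 64, 10], activations=["relu", "linear"])
#   out = net(torch.ones(4, 1, 28, 28))  # expected shape: (4, 10)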
| 34.859155
| 84
| 0.612929
|
0542edd1da53c81abaa7cd7ba80bc26660163f3b
| 536
|
py
|
Python
|
benchmarks/ERAN-MNIST/properties/pyt/property_64.py
|
dlshriver/dnnv-benchmarks
|
84b5bf1e046226d269da1cdbd7a7690fd90d024b
|
[
"MIT"
] | 1
|
2022-03-01T08:59:32.000Z
|
2022-03-01T08:59:32.000Z
|
benchmarks/ERAN-MNIST/properties/pyt/property_64.py
|
dlshriver/dnnv-benchmarks
|
84b5bf1e046226d269da1cdbd7a7690fd90d024b
|
[
"MIT"
] | null | null | null |
benchmarks/ERAN-MNIST/properties/pyt/property_64.py
|
dlshriver/dnnv-benchmarks
|
84b5bf1e046226d269da1cdbd7a7690fd90d024b
|
[
"MIT"
] | null | null | null |
from dnnv.properties import *
import numpy as np
N = Network("N")
mean = 0.1307
stddev = 0.3081
denormalize = lambda x: x * stddev + mean
# x is not normalized
x = Image(__path__.parent.parent / "inputs/input64.npy")
epsilon = Parameter("epsilon", type=float, default=(2.0 / 255))
true_class = 7
Forall(
x_, # x_ is assumed to be normalized, so denormalize before comparing to x
Implies(
((x - epsilon) < denormalize(x_) < (x + epsilon)) & (0 < denormalize(x_) < 1),
np.argmax(N(x_)) == true_class,
),
)
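# A property file like this is typically checked from the DNNV command line;
# the exact invocation below is an assumption (flags vary by DNNV version):
#   dnnv property_64.py --network N path/to/mnist_model.onnx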
| 25.52381
| 86
| 0.645522
|
00e46a13639454fcad8adb135ab8534dce9feab7
| 4,003
|
py
|
Python
|
src/oci/waf/models/collaborative_capability_weight.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/waf/models/collaborative_capability_weight.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/waf/models/collaborative_capability_weight.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CollaborativeCapabilityWeight(object):
"""
Defines how much a contributing capability contributes towards the action threshold of a collaborative protection capability.
"""
def __init__(self, **kwargs):
"""
Initializes a new CollaborativeCapabilityWeight object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param key:
The value to assign to the key property of this CollaborativeCapabilityWeight.
:type key: str
:param display_name:
The value to assign to the display_name property of this CollaborativeCapabilityWeight.
:type display_name: str
:param weight:
The value to assign to the weight property of this CollaborativeCapabilityWeight.
:type weight: int
"""
self.swagger_types = {
'key': 'str',
'display_name': 'str',
'weight': 'int'
}
self.attribute_map = {
'key': 'key',
'display_name': 'displayName',
'weight': 'weight'
}
self._key = None
self._display_name = None
self._weight = None
@property
def key(self):
"""
**[Required]** Gets the key of this CollaborativeCapabilityWeight.
Unique key of contributing protection capability.
:return: The key of this CollaborativeCapabilityWeight.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""
Sets the key of this CollaborativeCapabilityWeight.
Unique key of contributing protection capability.
:param key: The key of this CollaborativeCapabilityWeight.
:type: str
"""
self._key = key
@property
def display_name(self):
"""
**[Required]** Gets the display_name of this CollaborativeCapabilityWeight.
The display name of contributing protection capability.
:return: The display_name of this CollaborativeCapabilityWeight.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this CollaborativeCapabilityWeight.
The display name of contributing protection capability.
:param display_name: The display_name of this CollaborativeCapabilityWeight.
:type: str
"""
self._display_name = display_name
@property
def weight(self):
"""
**[Required]** Gets the weight of this CollaborativeCapabilityWeight.
The weight of contributing protection capability.
:return: The weight of this CollaborativeCapabilityWeight.
:rtype: int
"""
return self._weight
@weight.setter
def weight(self, weight):
"""
Sets the weight of this CollaborativeCapabilityWeight.
The weight of contributing protection capability.
:param weight: The weight of this CollaborativeCapabilityWeight.
:type: int
"""
self._weight = weight
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 30.097744
| 245
| 0.648264
|
c42f65e1d28b9438e4f21340df8fc2d44e0e5316
| 591
|
py
|
Python
|
FeatureServer/Exceptions/LayerNotFoundException.py
|
AstunTechnology/featureserver
|
0697730de12b7bc4c8d90bab829d95a865253e77
|
[
"BSD-3-Clause-Open-MPI",
"MIT"
] | 55
|
2015-01-20T14:29:59.000Z
|
2020-12-13T12:54:28.000Z
|
FeatureServer/Exceptions/LayerNotFoundException.py
|
makinacorpus/featureserver
|
379c1a7f51e75517ae7237751e1908f45c0c4d9a
|
[
"BSD-3-Clause-Open-MPI",
"MIT"
] | 3
|
2015-06-24T23:34:03.000Z
|
2017-02-05T02:16:19.000Z
|
FeatureServer/Exceptions/LayerNotFoundException.py
|
makinacorpus/featureserver
|
379c1a7f51e75517ae7237751e1908f45c0c4d9a
|
[
"BSD-3-Clause-Open-MPI",
"MIT"
] | 19
|
2015-02-08T12:32:25.000Z
|
2021-12-01T08:14:32.000Z
|
'''
Created on October 15, 2012
@author: michel
'''
from FeatureServer.Exceptions.BaseException import BaseException
class LayerNotFoundException(BaseException):
message="Could not find the layer '%s': Check your config file for the missing layer. (Available layers are: %s)."
def __init__(self, locator, layer, layers, code="", message="", dump = ""):
self.message = self.message % (layer, ", ".join(layers))
if len(message) > 0:
self.message = message
BaseException.__init__(self, self.message, self.code, locator, layer, dump)
| 32.833333
| 118
| 0.664975
|
967eaa6933d0aca4bb70141be3fcdfda8c8c1b88
| 14,558
|
py
|
Python
|
functions/source/GreengrassLambda/urllib3/util/ssl_.py
|
jieatelement/quickstart-aws-industrial-machine-connectivity
|
ca6af4dcbf795ce4a91adcbec4b206147ab26bfa
|
[
"Apache-2.0"
] | 40
|
2020-07-11T10:07:51.000Z
|
2021-12-11T17:09:20.000Z
|
functions/source/GreengrassLambda/urllib3/util/ssl_.py
|
jieatelement/quickstart-aws-industrial-machine-connectivity
|
ca6af4dcbf795ce4a91adcbec4b206147ab26bfa
|
[
"Apache-2.0"
] | 18
|
2020-07-20T18:54:31.000Z
|
2021-11-04T13:14:28.000Z
|
functions/source/GreengrassLambda/urllib3/util/ssl_.py
|
jieatelement/quickstart-aws-industrial-machine-connectivity
|
ca6af4dcbf795ce4a91adcbec4b206147ab26bfa
|
[
"Apache-2.0"
] | 37
|
2020-07-09T23:12:30.000Z
|
2022-03-16T11:15:58.000Z
|
from __future__ import absolute_import
import errno
import warnings
import hmac
import sys
from binascii import hexlify, unhexlify
from hashlib import md5, sha1, sha256
from .url import IPV4_RE, BRACELESS_IPV6_ADDRZ_RE
from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
from ..packages import six
SSLContext = None
HAS_SNI = False
IS_PYOPENSSL = False
IS_SECURETRANSPORT = False
# Maps the length of a digest to a possible hash function producing this digest
HASHFUNC_MAP = {32: md5, 40: sha1, 64: sha256}
def _const_compare_digest_backport(a, b):
"""
Compare two digests of equal length in constant time.
The digests must be of type str/bytes.
Returns True if the digests match, and False otherwise.
"""
result = abs(len(a) - len(b))
for l, r in zip(bytearray(a), bytearray(b)):
result |= l ^ r
return result == 0
_const_compare_digest = getattr(hmac, "compare_digest", _const_compare_digest_backport)
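# Illustrative contract of the comparison above (backport or hmac version):
#   _const_compare_digest(b"abcd", b"abcd") -> True
#   _const_compare_digest(b"abcd", b"abce") -> False
# The XOR-and-accumulate loop touches every byte pair, so the running time
# does not leak the position of the first mismatch.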
try: # Test for SSL features
import ssl
from ssl import wrap_socket, CERT_REQUIRED
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
try: # Platform-specific: Python 3.6
from ssl import PROTOCOL_TLS
PROTOCOL_SSLv23 = PROTOCOL_TLS
except ImportError:
try:
from ssl import PROTOCOL_SSLv23 as PROTOCOL_TLS
PROTOCOL_SSLv23 = PROTOCOL_TLS
except ImportError:
PROTOCOL_SSLv23 = PROTOCOL_TLS = 2
try:
from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
OP_NO_COMPRESSION = 0x20000
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
# security,
# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
# - disable NULL authentication, MD5 MACs, DSS, and other
# insecure ciphers for security reasons.
# - NOTE: TLS 1.3 cipher suites are managed through a different interface
# not exposed by CPython (yet!) and are enabled by default if they're available.
DEFAULT_CIPHERS = ":".join(
[
"ECDHE+AESGCM",
"ECDHE+CHACHA20",
"DHE+AESGCM",
"DHE+CHACHA20",
"ECDH+AESGCM",
"DH+AESGCM",
"ECDH+AES",
"DH+AES",
"RSA+AESGCM",
"RSA+AES",
"!aNULL",
"!eNULL",
"!MD5",
"!DSS",
]
)
try:
from ssl import SSLContext # Modern SSL?
except ImportError:
class SSLContext(object): # Platform-specific: Python 2
def __init__(self, protocol_version):
self.protocol = protocol_version
# Use default values from a real SSLContext
self.check_hostname = False
self.verify_mode = ssl.CERT_NONE
self.ca_certs = None
self.options = 0
self.certfile = None
self.keyfile = None
self.ciphers = None
def load_cert_chain(self, certfile, keyfile):
self.certfile = certfile
self.keyfile = keyfile
def load_verify_locations(self, cafile=None, capath=None):
self.ca_certs = cafile
if capath is not None:
raise SSLError("CA directories not supported in older Pythons")
def set_ciphers(self, cipher_suite):
self.ciphers = cipher_suite
def wrap_socket(self, socket, server_hostname=None, server_side=False):
warnings.warn(
"A true SSLContext object is not available. This prevents "
"urllib3 from configuring SSL appropriately and may cause "
"certain SSL connections to fail. You can upgrade to a newer "
"version of Python to solve this. For more information, see "
"https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
"#ssl-warnings",
InsecurePlatformWarning,
)
kwargs = {
"keyfile": self.keyfile,
"certfile": self.certfile,
"ca_certs": self.ca_certs,
"cert_reqs": self.verify_mode,
"ssl_version": self.protocol,
"server_side": server_side,
}
return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
fingerprint = fingerprint.replace(":", "").lower()
digest_length = len(fingerprint)
hashfunc = HASHFUNC_MAP.get(digest_length)
if not hashfunc:
raise SSLError("Fingerprint of invalid length: {0}".format(fingerprint))
    # We need encode() here for py32; works on py2 and py33.
fingerprint_bytes = unhexlify(fingerprint.encode())
cert_digest = hashfunc(cert).digest()
if not _const_compare_digest(cert_digest, fingerprint_bytes):
raise SSLError(
'Fingerprints did not match. Expected "{0}", got "{1}".'.format(
fingerprint, hexlify(cert_digest)
)
)
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_NONE`.
If given a string it is assumed to be the name of the constant in the
:mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_REQUIRED
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, "CERT_" + candidate)
return res
return candidate
def resolve_ssl_version(candidate):
"""
    Like resolve_cert_reqs(), but resolves ssl protocol versions.
"""
if candidate is None:
return PROTOCOL_TLS
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, "PROTOCOL_" + candidate)
return res
return candidate
def create_urllib3_context(
ssl_version=None, cert_reqs=None, options=None, ciphers=None
):
"""All arguments have the same meaning as ``ssl_wrap_socket``.
By default, this function does a lot of the same work that
``ssl.create_default_context`` does on Python 3.4+. It:
- Disables SSLv2, SSLv3, and compression
- Sets a restricted set of server ciphers
If you wish to enable SSLv3, you can do::
from urllib3.util import ssl_
context = ssl_.create_urllib3_context()
context.options &= ~ssl_.OP_NO_SSLv3
You can do the same to enable compression (substituting ``COMPRESSION``
for ``SSLv3`` in the last line above).
:param ssl_version:
The desired protocol version to use. This will default to
PROTOCOL_SSLv23 which will negotiate the highest protocol that both
the server and your installation of OpenSSL support.
:param cert_reqs:
Whether to require the certificate verification. This defaults to
``ssl.CERT_REQUIRED``.
:param options:
Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
:param ciphers:
Which cipher suites to allow the server to select.
:returns:
Constructed SSLContext object with specified options
:rtype: SSLContext
"""
context = SSLContext(ssl_version or PROTOCOL_TLS)
context.set_ciphers(ciphers or DEFAULT_CIPHERS)
# Setting the default here, as we may have no ssl module on import
cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue #309)
options |= OP_NO_COMPRESSION
context.options |= options
# Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is
# necessary for conditional client cert authentication with TLS 1.3.
# The attribute is None for OpenSSL <= 1.1.0 or does not exist in older
# versions of Python. We only enable on Python 3.7.4+ or if certificate
# verification is enabled to work around Python issue #37428
# See: https://bugs.python.org/issue37428
if (cert_reqs == ssl.CERT_REQUIRED or sys.version_info >= (3, 7, 4)) and getattr(
context, "post_handshake_auth", None
) is not None:
context.post_handshake_auth = True
context.verify_mode = cert_reqs
if (
getattr(context, "check_hostname", None) is not None
): # Platform-specific: Python 3.2
# We do our own verification, including fingerprints and alternative
# hostnames. So disable it here
context.check_hostname = False
return context
def ssl_wrap_socket(
sock,
keyfile=None,
certfile=None,
cert_reqs=None,
ca_certs=None,
server_hostname=None,
ssl_version=None,
ciphers=None,
ssl_context=None,
ca_cert_dir=None,
key_password=None,
):
"""
All arguments except for server_hostname, ssl_context, and ca_cert_dir have
the same meaning as they do when using :func:`ssl.wrap_socket`.
:param server_hostname:
When SNI is supported, the expected hostname of the certificate
:param ssl_context:
A pre-made :class:`SSLContext` object. If none is provided, one will
be created using :func:`create_urllib3_context`.
:param ciphers:
A string of ciphers we wish the client to support.
:param ca_cert_dir:
A directory containing CA certificates in multiple separate files, as
supported by OpenSSL's -CApath flag or the capath argument to
SSLContext.load_verify_locations().
:param key_password:
Optional password if the keyfile is encrypted.
"""
context = ssl_context
if context is None:
# Note: This branch of code and all the variables in it are no longer
# used by urllib3 itself. We should consider deprecating and removing
# this code.
context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers)
if ca_certs or ca_cert_dir:
try:
context.load_verify_locations(ca_certs, ca_cert_dir)
except IOError as e: # Platform-specific: Python 2.7
raise SSLError(e)
# Py33 raises FileNotFoundError which subclasses OSError
# These are not equivalent unless we check the errno attribute
except OSError as e: # Platform-specific: Python 3.3 and beyond
if e.errno == errno.ENOENT:
raise SSLError(e)
raise
elif ssl_context is None and hasattr(context, "load_default_certs"):
        # try to load OS default certs; works well on Windows (requires Python 3.4+)
context.load_default_certs()
# Attempt to detect if we get the goofy behavior of the
# keyfile being encrypted and OpenSSL asking for the
# passphrase via the terminal and instead error out.
if keyfile and key_password is None and _is_key_file_encrypted(keyfile):
raise SSLError("Client private key is encrypted, password is required")
if certfile:
if key_password is None:
context.load_cert_chain(certfile, keyfile)
else:
context.load_cert_chain(certfile, keyfile, key_password)
# If we detect server_hostname is an IP address then the SNI
# extension should not be used according to RFC3546 Section 3.1
# We shouldn't warn the user if SNI isn't available but we would
# not be using SNI anyways due to IP address for server_hostname.
if (
server_hostname is not None and not is_ipaddress(server_hostname)
) or IS_SECURETRANSPORT:
if HAS_SNI and server_hostname is not None:
return context.wrap_socket(sock, server_hostname=server_hostname)
warnings.warn(
"An HTTPS request has been made, but the SNI (Server Name "
"Indication) extension to TLS is not available on this platform. "
"This may cause the server to present an incorrect TLS "
"certificate, which can cause validation failures. You can upgrade to "
"a newer version of Python to solve this. For more information, see "
"https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
"#ssl-warnings",
SNIMissingWarning,
)
return context.wrap_socket(sock)
def is_ipaddress(hostname):
"""Detects whether the hostname given is an IPv4 or IPv6 address.
Also detects IPv6 addresses with Zone IDs.
:param str hostname: Hostname to examine.
:return: True if the hostname is an IP address, False otherwise.
"""
if not six.PY2 and isinstance(hostname, bytes):
# IDN A-label bytes are ASCII compatible.
hostname = hostname.decode("ascii")
return bool(IPV4_RE.match(hostname) or BRACELESS_IPV6_ADDRZ_RE.match(hostname))
def _is_key_file_encrypted(key_file):
"""Detects if a key file is encrypted or not."""
with open(key_file, "r") as f:
for line in f:
# Look for Proc-Type: 4,ENCRYPTED
if "ENCRYPTED" in line:
return True
return False
| 35.681373
| 88
| 0.64796
|
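assert_fingerprint() above reduces to hashing the DER certificate bytes and comparing digests in constant time; a self-contained sketch (the cert bytes below are a dummy placeholder, not real DER):

import hashlib
import hmac
from binascii import unhexlify

cert = b"dummy-der-certificate-bytes"  # stand-in for DER-encoded cert bytes
fingerprint = hashlib.sha256(cert).hexdigest()  # 64 hex chars -> sha256

# Same digest-length dispatch as HASHFUNC_MAP above.
hashfunc = {32: hashlib.md5, 40: hashlib.sha1, 64: hashlib.sha256}[len(fingerprint)]
expected = unhexlify(fingerprint.encode())
digest = hashfunc(cert).digest()

# hmac.compare_digest is the constant-time comparison the backport emulates.
assert hmac.compare_digest(digest, expected)
print("fingerprint matches")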
ab1c8e3e5169741ae0a9d89e32f5c76ab56adffd
| 6,793
|
py
|
Python
|
scripts/exp.py
|
tomhoper/scibert
|
3cc65f433808f7879c973dc4fc41bd25e465dc15
|
[
"Apache-2.0"
] | 1,143
|
2019-03-27T01:49:11.000Z
|
2022-03-24T10:43:47.000Z
|
scripts/exp.py
|
tomhoper/scibert
|
3cc65f433808f7879c973dc4fc41bd25e465dc15
|
[
"Apache-2.0"
] | 91
|
2019-03-27T17:20:27.000Z
|
2022-03-29T09:29:58.000Z
|
scripts/exp.py
|
tomhoper/scibert
|
3cc65f433808f7879c973dc4fc41bd25e465dc15
|
[
"Apache-2.0"
] | 206
|
2019-03-28T02:22:30.000Z
|
2022-03-30T07:07:05.000Z
|
"""
Alternative to `exp.sh` script because it doesn't seem to run properly on UNIX machines
"""
import subprocess
import plac
import sys
@plac.annotations(
desc=("description", "positional", None, str),
not_dry_run=("actually run it ?", "flag", "not_dry_run", bool),
)
def main(desc: str, not_dry_run: bool = False):
dataset_sizes = {
'NCBI-disease': 5424,
'bc5cdr': 4942,
'JNLPBA': 18607,
'sciie': 2211,
'chemprot': 4169,
'citation_intent': 1688,
'mag': 84000,
'rct-20k': 180040,
'sciie-relation-extraction': 3219,
'sci-cite': 7320,
'ebmnlp': 38124,
'genia': 14326,
}
for dataset in [
# 'NCBI-disease',
# 'bc5cdr',
# 'JNLPBA',
# 'sciie',
# 'chemprot',
'citation_intent',
# 'mag',
# #'rct-20k',
# 'sciie-relation-extraction',
# 'sci-cite',
# 'ebmnlp',
# 'genia',
]:
for seed in [
15370,
15570,
15680,
15780,
15210,
16210,
16310,
16410,
18210,
18310,
# 18410,
# 18510,
# 18610
]:
pytorch_seed = seed // 10
numpy_seed = pytorch_seed // 10
for model in [
# 'bertbase_basevocab_uncased',
# 'bertbase_basevocab_cased',
# 'biobert_pmc_basevocab_cased',
# 'biobert_pubmed_pmc_basevocab_cased',
# 'biobert_pubmed_basevocab_cased',
'scibert_basevocab_uncased',
# 'scibert_basevocab_cased',
'scibert_scivocab_uncased',
# 'scibert_scivocab_cased',
]:
for with_finetuning in [
'_finetune',
# ''
]:
for grad_accum_batch_size in [
32
]:
for num_epochs in [
# 75 # no-finetuning
2,
3,
4,
5
]:
for learning_rate in [
# 0.001, # no-finetuning
5e-6,
1e-5,
2e-5,
5e-5
]:
if dataset in ['NCBI-disease', 'bc5cdr', 'JNLPBA', 'sciie']:
task = 'ner'
elif dataset in ['chemprot', 'citation_intent', 'mag', 'rct-20k', 'sciie-relation-extraction', 'sci-cite']:
task = 'text_classification'
elif dataset in ['ebmnlp']:
task = 'pico'
elif dataset in ['genia']:
task = 'parsing'
else:
assert False
dataset_size = dataset_sizes[dataset]
# determine casing from model name
if 'uncased' in model:
is_lowercase = 'true'
else:
is_lowercase = 'false'
# config file
config_file = f'allennlp_config/{task}{with_finetuning}.json'
# bert files
bert_vocab = f'/scibert/{model}/vocab.txt'
bert_weights = f'/scibert/{model}/weights.tar.gz'
# data files
train_path = f'data/{task}/{dataset}/train.txt'
dev_path = f'data/{task}/{dataset}/dev.txt'
test_path = f'data/{task}/{dataset}/test.txt'
cmd = ' '.join(['python', 'scripts/run_with_beaker.py',
f'{config_file}',
'--source ds_7jfhvq3h6sad:/scibert/',
'--include-package scibert',
f'--desc {desc}',
'--env CUDA_DEVICE=0',
f'--env DATASET_SIZE={dataset_size}',
f'--env BERT_VOCAB={bert_vocab}',
f'--env BERT_WEIGHTS={bert_weights}',
f'--env TRAIN_PATH={train_path}',
f'--env DEV_PATH={dev_path}',
f'--env TEST_PATH={test_path}',
f'--env IS_LOWERCASE={is_lowercase}',
f'--env SEED={seed}',
f'--env PYTORCH_SEED={pytorch_seed}',
f'--env NUMPY_SEED={numpy_seed}',
f'--env GRAD_ACCUM_BATCH_SIZE={grad_accum_batch_size}',
f'--env NUM_EPOCHS={num_epochs}',
f'--env LEARNING_RATE={learning_rate}'
])
print('\n')
print(cmd)
if not_dry_run:
completed = subprocess.run(cmd, shell=True)
print(f'returncode: {completed.returncode}')
plac.call(main, sys.argv[1:])
| 42.192547
| 139
| 0.319299
|
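The nested loops above are a plain grid search over hyperparameters; the same sweep can be expressed with itertools.product, a sketch with the lists truncated and the command printed instead of submitted:

from itertools import product

datasets = ["citation_intent"]           # truncated for illustration
seeds = [15370, 15570]
num_epochs = [2, 3, 4, 5]
learning_rates = [5e-6, 1e-5, 2e-5, 5e-5]

for dataset, seed, epochs, lr in product(datasets, seeds, num_epochs, learning_rates):
    # Print instead of submitting to beaker.
    print(f"train dataset={dataset} seed={seed} epochs={epochs} lr={lr}")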
acedf682a38ba23fa0984b31b4a2e45edef39364
| 445
|
py
|
Python
|
stubs/micropython-esp32-1_14/ure.py
|
Josverl/micropython-stubs
|
3c32403ba2b57375f311ac0d023cd529340efe62
|
[
"MIT"
] | 38
|
2020-10-18T21:59:44.000Z
|
2022-03-17T03:03:28.000Z
|
all-stubs/micropython-esp32-1_14/ure.py
|
ks-tec/Hydroponic
|
d9347f82698841d85c0a45908e8671b36c50ffce
|
[
"MIT"
] | 176
|
2020-10-18T14:31:03.000Z
|
2022-03-30T23:22:39.000Z
|
all-stubs/micropython-esp32-1_14/ure.py
|
ks-tec/Hydroponic
|
d9347f82698841d85c0a45908e8671b36c50ffce
|
[
"MIT"
] | 6
|
2020-12-28T21:11:12.000Z
|
2022-02-06T04:07:50.000Z
|
"""
Module: 'ure' on micropython-esp32-1.14
"""
# MCU: {'ver': '1.14', 'port': 'esp32', 'arch': 'xtensawin', 'sysname': 'esp32', 'release': '1.14.0', 'name': 'micropython', 'mpy': 10757, 'version': '1.14.0', 'machine': 'ESP32 module (spiram) with ESP32', 'build': '', 'nodename': 'esp32', 'platform': 'esp32', 'family': 'micropython'}
# Stubber: 1.3.9
def compile():
pass
def match():
pass
def search():
pass
def sub():
pass
| 24.722222
| 286
| 0.579775
|
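MicroPython's ure module implements a subset of CPython's re, so the stubbed functions above correspond to familiar calls; an equivalent CPython sketch (assuming the subset covers these simple patterns):

import re  # MicroPython: import ure as re

pattern = re.compile(r"(\d+)-(\d+)")
m = pattern.match("12-34")
print(m.group(1), m.group(2))        # 12 34
print(re.sub(r"\d+", "#", "12-34"))  # #-#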
617c6868ce5790cd142ab736e8041cf5a7600c06
| 19,687
|
py
|
Python
|
python/pyserial-3.0/serial/serialutil.py
|
gotnone/hwa
|
4648cf6072a06552d22cbf6498b35f3e24ce38d5
|
[
"BSD-3-Clause"
] | 25
|
2015-08-05T12:36:24.000Z
|
2021-03-26T01:51:58.000Z
|
python/pyserial-3.0/serial/serialutil.py
|
gotnone/hwa
|
4648cf6072a06552d22cbf6498b35f3e24ce38d5
|
[
"BSD-3-Clause"
] | 3
|
2021-06-08T21:06:32.000Z
|
2022-01-13T02:22:38.000Z
|
python/pyserial-3.0/serial/serialutil.py
|
gotnone/hwa
|
4648cf6072a06552d22cbf6498b35f3e24ce38d5
|
[
"BSD-3-Clause"
] | 4
|
2016-09-18T08:58:35.000Z
|
2020-07-16T11:43:29.000Z
|
#! python
#
# Base class and support functions used by various backends.
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2001-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import io
import time
# ``memoryview`` was introduced in Python 2.7 and ``bytes(some_memoryview)``
# isn't returning the contents (very unfortunate). Therefore we need special
# cases and test for it. Ensure that there is a ``memoryview`` object for older
# Python versions. This is easier than making every test dependent on its
# existence.
try:
memoryview
except (NameError, AttributeError):
    # implementation does not matter as we do not really use it.
# it just must not inherit from something else we might care for.
class memoryview(object):
pass
try:
unicode
except (NameError, AttributeError):
unicode = str # for Python 3
# "for byte in data" fails for python3 as it returns ints instead of bytes
def iterbytes(b):
"""Iterate over bytes, returning bytes instead of ints (python3)"""
if isinstance(b, memoryview):
b = b.tobytes()
x = 0
while True:
a = b[x:x + 1]
x += 1
if a:
yield a
else:
break
# all Python versions prior 3.x convert ``str([17])`` to '[17]' instead of '\x11'
# so a simple ``bytes(sequence)`` doesn't work for all versions
def to_bytes(seq):
"""convert a sequence to a bytes type"""
if isinstance(seq, bytes):
return seq
elif isinstance(seq, bytearray):
return bytes(seq)
elif isinstance(seq, memoryview):
return seq.tobytes()
elif isinstance(seq, unicode):
raise TypeError('unicode strings are not supported, please encode to bytes: %r' % (seq,))
else:
b = bytearray()
for item in seq:
# this one handles int and bytes in Python 2.7
# add conversion in case of Python 3.x
if isinstance(item, bytes):
item = ord(item)
b.append(item)
return bytes(b)
# create control bytes
XON = to_bytes([17])
XOFF = to_bytes([19])
CR = to_bytes([13])
LF = to_bytes([10])
PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE = 'N', 'E', 'O', 'M', 'S'
STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO = (1, 1.5, 2)
FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS = (5, 6, 7, 8)
PARITY_NAMES = {
PARITY_NONE: 'None',
PARITY_EVEN: 'Even',
PARITY_ODD: 'Odd',
PARITY_MARK: 'Mark',
PARITY_SPACE: 'Space',
}
class SerialException(IOError):
"""Base class for serial port related exceptions."""
class SerialTimeoutException(SerialException):
"""Write timeouts give an exception"""
writeTimeoutError = SerialTimeoutException('Write timeout')
portNotOpenError = SerialException('Attempting to use a port that is not open')
class SerialBase(io.RawIOBase):
"""\
Serial port base class. Provides __init__ function and properties to
get/set port settings.
"""
# default values, may be overridden in subclasses that do not support all values
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200, 230400, 460800, 500000,
576000, 921600, 1000000, 1152000, 1500000, 2000000, 2500000,
3000000, 3500000, 4000000)
BYTESIZES = (FIVEBITS, SIXBITS, SEVENBITS, EIGHTBITS)
PARITIES = (PARITY_NONE, PARITY_EVEN, PARITY_ODD, PARITY_MARK, PARITY_SPACE)
STOPBITS = (STOPBITS_ONE, STOPBITS_ONE_POINT_FIVE, STOPBITS_TWO)
def __init__(self,
port=None, # number of device, numbering starts at
# zero. if everything fails, the user
# can specify a device string, note
# that this isn't portable anymore
# port will be opened if one is specified
baudrate=9600, # baud rate
bytesize=EIGHTBITS, # number of data bits
parity=PARITY_NONE, # enable parity checking
stopbits=STOPBITS_ONE, # number of stop bits
timeout=None, # set a timeout value, None to wait forever
xonxoff=False, # enable software flow control
rtscts=False, # enable RTS/CTS flow control
write_timeout=None, # set a timeout for writes
dsrdtr=False, # None: use rtscts setting, dsrdtr override if True or False
inter_byte_timeout=None # Inter-character timeout, None to disable
):
"""\
Initialize comm port object. If a port is given, then the port will be
opened immediately. Otherwise a Serial port object in closed state
is returned.
"""
self.is_open = False
self._port = None # correct value is assigned below through properties
self._baudrate = None # correct value is assigned below through properties
self._bytesize = None # correct value is assigned below through properties
self._parity = None # correct value is assigned below through properties
self._stopbits = None # correct value is assigned below through properties
self._timeout = None # correct value is assigned below through properties
self._write_timeout = None # correct value is assigned below through properties
self._xonxoff = None # correct value is assigned below through properties
self._rtscts = None # correct value is assigned below through properties
self._dsrdtr = None # correct value is assigned below through properties
self._inter_byte_timeout = None # correct value is assigned below through properties
self._rs485_mode = None # disabled by default
self._rts_state = True
self._dtr_state = True
self._break_state = False
# assign values using get/set methods using the properties feature
self.port = port
self.baudrate = baudrate
self.bytesize = bytesize
self.parity = parity
self.stopbits = stopbits
self.timeout = timeout
self.write_timeout = write_timeout
self.xonxoff = xonxoff
self.rtscts = rtscts
self.dsrdtr = dsrdtr
self.inter_byte_timeout = inter_byte_timeout
if port is not None:
self.open()
# - - - - - - - - - - - - - - - - - - - - - - - -
# to be implemented by subclasses:
# def open(self):
# def close(self):
# - - - - - - - - - - - - - - - - - - - - - - - -
@property
def port(self):
"""\
Get the current port setting. The value that was passed on init or using
setPort() is passed back. See also the attribute portstr which contains
the name of the port as a string.
"""
return self._port
@port.setter
def port(self, port):
"""\
Change the port. The attribute portstr is set to a string that
contains the name of the port.
"""
was_open = self.is_open
if was_open:
self.close()
self.portstr = port
self._port = port
self.name = self.portstr
if was_open:
self.open()
@property
def baudrate(self):
"""Get the current baud rate setting."""
return self._baudrate
@baudrate.setter
def baudrate(self, baudrate):
"""\
Change baud rate. It raises a ValueError if the port is open and the
baud rate is not possible. If the port is closed, then the value is
accepted and the exception is raised when the port is opened.
"""
try:
b = int(baudrate)
except TypeError:
raise ValueError("Not a valid baudrate: %r" % (baudrate,))
else:
if b <= 0:
raise ValueError("Not a valid baudrate: %r" % (baudrate,))
self._baudrate = b
if self.is_open:
self._reconfigure_port()
@property
def bytesize(self):
"""Get the current byte size setting."""
return self._bytesize
@bytesize.setter
def bytesize(self, bytesize):
"""Change byte size."""
if bytesize not in self.BYTESIZES:
raise ValueError("Not a valid byte size: %r" % (bytesize,))
self._bytesize = bytesize
if self.is_open:
self._reconfigure_port()
@property
def parity(self):
"""Get the current parity setting."""
return self._parity
@parity.setter
def parity(self, parity):
"""Change parity setting."""
if parity not in self.PARITIES:
raise ValueError("Not a valid parity: %r" % (parity,))
self._parity = parity
if self.is_open:
self._reconfigure_port()
@property
def stopbits(self):
"""Get the current stop bits setting."""
return self._stopbits
@stopbits.setter
def stopbits(self, stopbits):
"""Change stop bits size."""
if stopbits not in self.STOPBITS:
raise ValueError("Not a valid stop bit size: %r" % (stopbits,))
self._stopbits = stopbits
if self.is_open:
self._reconfigure_port()
@property
def timeout(self):
"""Get the current timeout setting."""
return self._timeout
@timeout.setter
def timeout(self, timeout):
"""Change timeout setting."""
if timeout is not None:
try:
timeout + 1 # test if it's a number, will throw a TypeError if not...
except TypeError:
raise ValueError("Not a valid timeout: %r" % (timeout,))
if timeout < 0:
raise ValueError("Not a valid timeout: %r" % (timeout,))
self._timeout = timeout
if self.is_open:
self._reconfigure_port()
@property
def write_timeout(self):
"""Get the current timeout setting."""
return self._write_timeout
@write_timeout.setter
def write_timeout(self, timeout):
"""Change timeout setting."""
if timeout is not None:
if timeout < 0:
raise ValueError("Not a valid timeout: %r" % (timeout,))
try:
timeout + 1 # test if it's a number, will throw a TypeError if not...
except TypeError:
raise ValueError("Not a valid timeout: %r" % timeout)
self._write_timeout = timeout
if self.is_open:
self._reconfigure_port()
@property
def inter_byte_timeout(self):
"""Get the current inter-character timeout setting."""
return self._inter_byte_timeout
@inter_byte_timeout.setter
def inter_byte_timeout(self, ic_timeout):
"""Change inter-byte timeout setting."""
if ic_timeout is not None:
if ic_timeout < 0:
raise ValueError("Not a valid timeout: %r" % ic_timeout)
try:
ic_timeout + 1 # test if it's a number, will throw a TypeError if not...
except TypeError:
raise ValueError("Not a valid timeout: %r" % ic_timeout)
self._inter_byte_timeout = ic_timeout
if self.is_open:
self._reconfigure_port()
@property
def xonxoff(self):
"""Get the current XON/XOFF setting."""
return self._xonxoff
@xonxoff.setter
def xonxoff(self, xonxoff):
"""Change XON/XOFF setting."""
self._xonxoff = xonxoff
if self.is_open:
self._reconfigure_port()
@property
def rtscts(self):
"""Get the current RTS/CTS flow control setting."""
return self._rtscts
@rtscts.setter
def rtscts(self, rtscts):
"""Change RTS/CTS flow control setting."""
self._rtscts = rtscts
if self.is_open:
self._reconfigure_port()
@property
def dsrdtr(self):
"""Get the current DSR/DTR flow control setting."""
return self._dsrdtr
@dsrdtr.setter
def dsrdtr(self, dsrdtr=None):
"""Change DsrDtr flow control setting."""
if dsrdtr is None:
# if not set, keep backwards compatibility and follow rtscts setting
self._dsrdtr = self._rtscts
else:
# if defined independently, follow its value
self._dsrdtr = dsrdtr
if self.is_open:
self._reconfigure_port()
@property
def rts(self):
return self._rts_state
@rts.setter
def rts(self, value):
self._rts_state = value
if self.is_open:
self._update_rts_state()
@property
def dtr(self):
return self._dtr_state
@dtr.setter
def dtr(self, value):
self._dtr_state = value
if self.is_open:
self._update_dtr_state()
@property
def break_condition(self):
return self._break_state
@break_condition.setter
def break_condition(self, value):
self._break_state = value
if self.is_open:
self._update_break_state()
# - - - - - - - - - - - - - - - - - - - - - - - -
# functions useful for RS-485 adapters
@property
def rs485_mode(self):
"""\
Enable RS485 mode and apply new settings, set to None to disable.
See serial.rs485.RS485Settings for more info about the value.
"""
return self._rs485_mode
@rs485_mode.setter
def rs485_mode(self, rs485_settings):
self._rs485_mode = rs485_settings
if self.is_open:
self._reconfigure_port()
# - - - - - - - - - - - - - - - - - - - - - - - -
_SAVED_SETTINGS = ('baudrate', 'bytesize', 'parity', 'stopbits', 'xonxoff',
'dsrdtr', 'rtscts', 'timeout', 'write_timeout',
'inter_byte_timeout')
def get_settings(self):
"""\
Get current port settings as a dictionary. For use with
apply_settings().
"""
return dict([(key, getattr(self, '_' + key)) for key in self._SAVED_SETTINGS])
def apply_settings(self, d):
"""\
Apply stored settings from a dictionary returned from
        get_settings(). It's allowed to delete keys from the dictionary. These
        values will simply be left unchanged.
"""
for key in self._SAVED_SETTINGS:
if key in d and d[key] != getattr(self, '_' + key): # check against internal "_" value
setattr(self, key, d[key]) # set non "_" value to use properties write function
# - - - - - - - - - - - - - - - - - - - - - - - -
def __repr__(self):
"""String representation of the current port settings and its state."""
return "%s<id=0x%x, open=%s>(port=%r, baudrate=%r, bytesize=%r, parity=%r, stopbits=%r, timeout=%r, xonxoff=%r, rtscts=%r, dsrdtr=%r)" % (
self.__class__.__name__,
id(self),
self.is_open,
self.portstr,
self.baudrate,
self.bytesize,
self.parity,
self.stopbits,
self.timeout,
self.xonxoff,
self.rtscts,
self.dsrdtr,
)
# - - - - - - - - - - - - - - - - - - - - - - - -
# compatibility with io library
def readable(self):
return True
def writable(self):
return True
def seekable(self):
return False
def readinto(self, b):
data = self.read(len(b))
n = len(data)
try:
b[:n] = data
except TypeError as err:
import array
if not isinstance(b, array.array):
raise err
b[:n] = array.array('b', data)
return n
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
# - - - - - - - - - - - - - - - - - - - - - - - -
def send_break(self, duration=0.25):
"""\
Send break condition. Timed, returns to idle state after given
duration.
"""
if not self.is_open:
raise portNotOpenError
self.break_condition = True
time.sleep(duration)
self.break_condition = False
# - - - - - - - - - - - - - - - - - - - - - - - -
# backwards compatibility / deprecated functions
def flushInput(self):
self.reset_input_buffer()
def flushOutput(self):
self.reset_output_buffer()
def inWaiting(self):
return self.in_waiting
def sendBreak(self, duration=0.25):
self.send_break(duration)
def setRTS(self, value=1):
self.rts = value
def setDTR(self, value=1):
self.dtr = value
def getCTS(self):
return self.cts
def getDSR(self):
return self.dsr
def getRI(self):
return self.ri
def getCD(self):
return self.cd
@property
def writeTimeout(self):
return self.write_timeout
@writeTimeout.setter
def writeTimeout(self, timeout):
self.write_timeout = timeout
@property
def interCharTimeout(self):
return self.inter_byte_timeout
@interCharTimeout.setter
def interCharTimeout(self, interCharTimeout):
self.inter_byte_timeout = interCharTimeout
def getSettingsDict(self):
return self.get_settings()
def applySettingsDict(self, d):
self.apply_settings(d)
def isOpen(self):
return self.is_open
# - - - - - - - - - - - - - - - - - - - - - - - -
# additional functionality
def read_all(self):
"""\
Read all bytes currently available in the buffer of the OS.
"""
return self.read(self.in_waiting)
def read_until(self, terminator=LF, size=None):
"""\
Read until a termination sequence is found ('\n' by default), the size
is exceeded or until timeout occurs.
"""
lenterm = len(terminator)
line = bytearray()
while True:
c = self.read(1)
if c:
line += c
if line[-lenterm:] == terminator:
break
if size is not None and len(line) >= size:
break
else:
break
return bytes(line)
def iread_until(self, *args, **kwargs):
"""\
Read lines, implemented as generator. It will raise StopIteration on
timeout (empty read).
"""
while True:
line = self.read_until(*args, **kwargs)
if not line:
break
yield line
# - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
import sys
s = SerialBase()
sys.stdout.write('port name: %s\n' % s.name)
sys.stdout.write('baud rates: %s\n' % s.BAUDRATES)
sys.stdout.write('byte sizes: %s\n' % s.BYTESIZES)
sys.stdout.write('parities: %s\n' % s.PARITIES)
sys.stdout.write('stop bits: %s\n' % s.STOPBITS)
sys.stdout.write('%s\n' % s)
| 31.398724
| 146
| 0.563773
|
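read_until() above appends one byte at a time until the tail of the buffer equals the terminator; the core check can be exercised with an in-memory byte stream instead of a serial port:

import io

stream = io.BytesIO(b"HELLO\r\nWORLD")  # stands in for the serial port
terminator = b"\r\n"
lenterm = len(terminator)

line = bytearray()
while True:
    c = stream.read(1)
    if not c:  # empty read == timeout in the real implementation
        break
    line += c
    if line[-lenterm:] == terminator:
        break
print(bytes(line))  # b'HELLO\r\n'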
8f8bd0f955838bb7e1873f5fed5daf807a2f8f70
| 2,543
|
py
|
Python
|
python/lib/python2.7/site-packages/python_stdnum-1.6-py2.7.egg/stdnum/pl/nip.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | 1
|
2019-12-19T01:53:13.000Z
|
2019-12-19T01:53:13.000Z
|
python/lib/python2.7/site-packages/python_stdnum-1.6-py2.7.egg/stdnum/pl/nip.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
python/lib/python2.7/site-packages/python_stdnum-1.6-py2.7.egg/stdnum/pl/nip.py
|
gtfarng/Odoo_migrade
|
9cc28fae4c379e407645248a29d22139925eafe7
|
[
"Apache-2.0"
] | null | null | null |
# nip.py - functions for handling Polish VAT numbers
#
# Copyright (C) 2012-2015 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""NIP (Numer Identyfikacji Podatkowej, Polish VAT number).
The NIP (Numer Identyfikacji Podatkowej) number consists of 10 digits with
a straightforward weighted checksum.
>>> validate('PL 8567346215')
'8567346215'
>>> validate('PL 8567346216') # invalid check digits
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('PL 8567346215')
'856-734-62-15'
"""
from stdnum.exceptions import *
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
number = clean(number, ' -').upper().strip()
if number.startswith('PL'):
number = number[2:]
return number
def checksum(number):
"""Calculate the checksum."""
weights = (6, 5, 7, 2, 3, 4, 5, 6, 7, -1)
return sum(w * int(n) for w, n in zip(weights, number)) % 11
def validate(number):
"""Checks to see if the number provided is a valid VAT number. This
checks the length, formatting and check digit."""
number = compact(number)
if not number.isdigit():
raise InvalidFormat()
if len(number) != 10:
raise InvalidLength()
if checksum(number) != 0:
raise InvalidChecksum()
return number
def is_valid(number):
"""Checks to see if the number provided is a valid VAT number. This
checks the length, formatting and check digit."""
try:
return bool(validate(number))
except ValidationError:
return False
def format(number):
"""Reformat the passed number to the standard format."""
number = compact(number)
return '-'.join((number[0:3], number[3:6], number[6:8], number[8:]))
| 31.7875
| 73
| 0.698781
|
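A worked example of the checksum above for the valid number 8567346215: the weighted sum is 198 and 198 % 11 == 0, so validation passes.

weights = (6, 5, 7, 2, 3, 4, 5, 6, 7, -1)
number = "8567346215"
total = sum(w * int(n) for w, n in zip(weights, number))
print(total, total % 11)  # 198 0 -> checksum() returns 0, so validate() accepts it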
a0436e0ce116e40953d444c851b22df53267b5f6
| 210
|
py
|
Python
|
simple_machine_learning_models/models/random_forest/__init__.py
|
lnblanke/OCR
|
b235faa85fedd9f764f71ea592e8693a2a7ac42a
|
[
"MIT"
] | null | null | null |
simple_machine_learning_models/models/random_forest/__init__.py
|
lnblanke/OCR
|
b235faa85fedd9f764f71ea592e8693a2a7ac42a
|
[
"MIT"
] | null | null | null |
simple_machine_learning_models/models/random_forest/__init__.py
|
lnblanke/OCR
|
b235faa85fedd9f764f71ea592e8693a2a7ac42a
|
[
"MIT"
] | null | null | null |
# @Time: 9/23/2021
# @Author: lnblanke
# @Email: fjh314.84@gmail.com
# @File: __init__.py.py
from .RandomForestClassifier import RandomForestClassifier
from .RandomForestRegressor import RandomForestRegressor
| 26.25
| 58
| 0.8
|
e9353e547b36992b00c9465a6e587f0baf0b01cf
| 10,042
|
py
|
Python
|
rasotools/plot/_helpers.py
|
MBlaschek/rasotools
|
a8b954518a1e39b554f850aac0f5bd8fa1f23dc6
|
[
"MIT"
] | 1
|
2019-10-06T22:26:43.000Z
|
2019-10-06T22:26:43.000Z
|
rasotools/plot/_helpers.py
|
MBlaschek/rasotools
|
a8b954518a1e39b554f850aac0f5bd8fa1f23dc6
|
[
"MIT"
] | null | null | null |
rasotools/plot/_helpers.py
|
MBlaschek/rasotools
|
a8b954518a1e39b554f850aac0f5bd8fa1f23dc6
|
[
"MIT"
] | 1
|
2020-04-19T13:47:52.000Z
|
2020-04-19T13:47:52.000Z
|
# -*- coding: utf-8 -*-
def plot_levels(imin, m=None, p=0, n=13):
"""plotlevels with logspace
"""
import numpy as np
if not isinstance(imin, (int, float)):
raise ValueError("Require a int or float")
if m is not None and not isinstance(m, (int, float)):
raise ValueError("Require a int or float")
if np.log(np.abs(imin)) < 2 and p == 0:
p += 1
# Centered around 0
if m is None:
        # n // 2 keeps the sample count an integer (np.logspace rejects a float count)
        values = (-1 * np.round(imin * np.logspace(0, 2, n // 2) / 100., p)).tolist() + [0] + np.round(
            imin * np.logspace(0, 2, n // 2) / 100., p).tolist()
else:
if imin > m:
tmp = imin
imin = m
m = tmp
if imin == 0:
# only Positive
values = np.round(m * np.logspace(0, 2, n) / 100., p).tolist()
elif imin < 0 and m < 0:
# only negative
values = np.round(imin * np.logspace(0, 2, n) / 100., p).tolist()
else:
            # positive and negative
            values = np.round(imin * np.logspace(0, 2, n // 2) / 100., p).tolist() + np.round(
                m * np.logspace(0, 2, n // 2) / 100., p).tolist()
return np.unique(np.sort(np.asarray(values))).tolist()
def plot_arange(imin, m=None, p=0, n=7):
import numpy as np
if not isinstance(imin, (int, float)):
raise ValueError("Require a int or float")
if m is not None and not isinstance(m, (int, float)):
raise ValueError("Require a int or float")
if np.log(np.abs(imin)) < 2 and p == 0:
p += 1
if m is None:
values = np.linspace(-1 * imin, imin, n)
else:
values = np.linspace(imin, m, n)
values = np.round(values, p)
return np.unique(np.sort(np.asarray(values))).tolist()
def get_info(x):
return x.attrs.get('standard_name', x.name if x.name is not None else 'var') + ' [' + x.attrs.get('units', '1') + ']'
def set_labels(known, **kwargs):
for ikey, ival in kwargs.items():
known.update({ikey: known.get(ikey, ival)})
def line(dates, values, title='', ylabel='', xlabel='', xerr=None, yerr=None, filled=False, minmax=False, ax=None, **kwargs):
"""
Args:
dates (ndarray): datetime
values (ndarray): values
title (str): title
ylabel (str): y-axis label
xlabel (str): x-axis label
xerr (ndarray): x error
yerr (ndarray): y error
filled (bool): fill between error lines
ax (axes): matplotlib axis
**kwargs: optional keyword arguments for plotting
Returns:
axes : matplotlib axis
"""
import matplotlib.pyplot as plt
if ax is None:
f, ax = plt.subplots(figsize=kwargs.get('figsize', None)) # 1D SNHT PLOT
if xerr is None and yerr is None:
ax.plot(dates, values,
ls=kwargs.get('ls', '-'),
lw=kwargs.get('lw', 1),
label=kwargs.get('label', None),
marker=kwargs.get('marker', None),
alpha=kwargs.get('alpha', 1),
color=kwargs.get('color', None),
zorder=kwargs.get('zorder', 1)) # Line Plot
elif filled:
ax.plot(dates, values,
ls=kwargs.get('ls', '-'),
lw=kwargs.get('lw', 1),
label=kwargs.get('label', None),
marker=kwargs.get('marker', None),
# alpha=kwargs.get('alpha', 1),
color=kwargs.get('color', None)) # Line Plot
low, high = lowhigh(dates, values, xerr=xerr, yerr=yerr, minmax=minmax)
if xerr is None:
ax.fill_between(dates, low, high,
alpha=kwargs.get('alpha', 0.5),
color=ax.get_lines()[-1].get_color(),
hatch=kwargs.get('hatch', None),
zorder=-1)
else:
ax.fill_betweenx(values, low, high,
alpha=kwargs.get('alpha', 0.5),
color=ax.get_lines()[-1].get_color(),
hatch=kwargs.get('hatch', None),
zorder=-1)
else:
ax.errorbar(dates, values, xerr=xerr, yerr=yerr,
ls=kwargs.get('ls', '-'),
lw=kwargs.get('lw', 1),
label=kwargs.get('label', None),
marker=kwargs.get('marker', None),
alpha=kwargs.get('alpha', 1),
color=kwargs.get('color', None),
zorder=kwargs.get('zorder', 1)) # Line Plot
ax.grid('gray', ls='--')
ax.set_title(title)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
return ax
def lowhigh(dates, values, xerr=None, yerr=None, minmax=False):
import numpy as np
if xerr is None:
if hasattr(yerr, '__iter__') and len(np.shape(yerr)) == 2:
if minmax:
low = yerr[0]
high = yerr[1]
else:
low = values - yerr[0]
high = values + yerr[1]
else:
low = values - yerr
high = values + yerr
else:
if hasattr(xerr, '__iter__') and len(np.shape(xerr)) == 2:
if minmax:
low = xerr[0]
high = xerr[1]
else:
low = dates - xerr[0]
high = dates + xerr[1]
else:
low = dates - xerr
high = dates + xerr
return low, high
def contour(ax, dates, plevs, test, logy=False, colorlevels=None, yticklabels=None, legend=True,
title='', xlabel='', ylabel='', clabel='', **kwargs):
import numpy as np
import matplotlib.pyplot as plt
if ax is None:
f, ax = plt.subplots(figsize=kwargs.get('figsize', None)) # 1D SNHT PLOT
if kwargs.get('use_pcolormesh', False):
from matplotlib.colors import BoundaryNorm
cmap = plt.get_cmap(kwargs.pop('cmap', 'RdYlBu_r'))
norm = BoundaryNorm(colorlevels, ncolors=cmap.N, clip=True)
cs = ax.pcolormesh(dates, plevs, test.T, cmap=cmap, norm=kwargs.pop('norm', norm),
vmin=kwargs.pop('vmin', None),
vmax=kwargs.pop('vmax', None))
else:
cs = ax.contourf(dates, plevs, test.T, levels=colorlevels,
cmap=kwargs.pop('cmap', 'RdYlBu_r'),
extend=kwargs.get('extend', 'neither'),
vmin=kwargs.pop('vmin', None),
vmax=kwargs.pop('vmax', None),
norm=kwargs.pop('norm', None)
) # hatches=kwargs.pop('hatches', [])
if logy:
ax.set_yscale('log')
# xlim auto range
tmp = np.isfinite(test).sum(-1)
tmp = np.where(tmp > 0)[0]
ax.set_xlim(dates[np.min(tmp)], dates[np.max(tmp)])
ax.set_yticks(plevs)
if yticklabels is not None:
yticklabels = np.asarray(yticklabels) # can not calc on list
ax.set_yticks(yticklabels)
ax.set_yticklabels(np.int_(yticklabels / kwargs.get('levfactor', 100)))
else:
ax.set_yticks(plevs[::2])
ax.set_yticklabels(np.int_(plevs[::2] / kwargs.get('levfactor', 100)))
ax.set_title(title)
ax.set_ylabel(ylabel)
ax.set_xlabel(xlabel)
if np.diff(ax.get_ylim()) > 0:
ax.invert_yaxis()
ax.grid('gray', ls='--')
if legend:
cbar = plt.colorbar(cs, ax=ax, orientation=kwargs.get('orientation', "vertical"),
fraction=kwargs.get('fraction', 0.01),
aspect=kwargs.get('aspect', 50),
extend=kwargs.get('extend', 'neither'),
shrink=kwargs.get('shrink', 0.8),
ticks=kwargs.get('legend_ticks', None))
if kwargs.get('legend_ticklabels', None) is not None:
cbar.set_ticklabels(kwargs.get('legend_ticklabels', None))
cbar.set_label(clabel)
else:
return ax, cs
return ax
def cost(lon, lat, values):
""" Estimate Cost between Points
Parameters
----------
lon array/list Longitudes
lat array/list Latitudes
values array/list Values
Returns
-------
float Cost
"""
import numpy as np
n = lon.shape[0]
cost = np.zeros((n))
for i in range(n):
# Distance of all points * difference of values
#
cost[i] = np.nansum((distance(lon[i], lat[i], lat, lon) * (values[i] - values)) ** 2)
return cost # np.nansum(cost)/np.sum(np.isfinite(values))
def distance(ilon, ilat, lats, lons):
""" Calculate Distance between one point and others
Parameters
----------
ilon
ilat
lats
lons
Returns
-------
array Distances
"""
import numpy as np
ix = np.cos(ilat * np.pi / 180.) * np.cos(ilon * np.pi / 180.)
iy = np.cos(ilat * np.pi / 180.) * np.sin(ilon * np.pi / 180.)
iz = np.sin(ilat * np.pi / 180.)
x = np.cos(lats * np.pi / 180.) * np.cos(lons * np.pi / 180.)
y = np.cos(lats * np.pi / 180.) * np.sin(lons * np.pi / 180.)
z = np.sin(lats * np.pi / 180.)
dists = ix * x + iy * y + iz * z
return np.arccos(dists * 0.999999)
def stats(data, dim='time'):
from ..met.time import statistics
med = data.median().values
std = data.std().values
rmse = statistics(data, f='rmse', dim=dim).median().values
return "R:{:.2f} M:{:.2f} S:{:.2f}".format(rmse, med, std)
def discrete_colormap(values, cmap='jet'):
import matplotlib as mpl
import matplotlib.pyplot as plt
cmap = plt.get_cmap(cmap) # define the colormap
# extract all colors from the .jet map
cmaplist = [cmap(i) for i in range(cmap.N)]
# create the new map
cmap = mpl.colors.LinearSegmentedColormap.from_list(
'Custom cmap', cmaplist, cmap.N)
# define the bins and normalize
norm = mpl.colors.BoundaryNorm(values, cmap.N)
return cmap, norm
| 33.251656
| 125
| 0.526787
|
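plot_levels() above scales a log-spaced percentage ladder by the extreme value; a standalone sketch of the symmetric branch (assuming imin=50, p=0, n=13):

import numpy as np

imin, p, n = 50.0, 0, 13
half = np.round(imin * np.logspace(0, 2, n // 2) / 100.0, p)  # 1%..100% of imin
levels = np.unique(np.concatenate([-half, [0.0], half]))
print(levels)  # symmetric log-spaced levels from -50 to 50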
0cb133b5feca695976170cc9568c4433744d3395
| 1,098
|
py
|
Python
|
Processor/Components/memory_hierarchy/cache/cache_set.py
|
Atelier-Developers/atelier-artemisia-processor
|
39e44c4bfccd27ef39c2f5e56c0c5202f86f7f74
|
[
"MIT"
] | 3
|
2020-08-01T15:26:38.000Z
|
2020-08-30T16:37:24.000Z
|
Processor/Components/memory_hierarchy/cache/cache_set.py
|
Atelier-Developers/atelier-artemisia-processor
|
39e44c4bfccd27ef39c2f5e56c0c5202f86f7f74
|
[
"MIT"
] | null | null | null |
Processor/Components/memory_hierarchy/cache/cache_set.py
|
Atelier-Developers/atelier-artemisia-processor
|
39e44c4bfccd27ef39c2f5e56c0c5202f86f7f74
|
[
"MIT"
] | null | null | null |
from Components.memory_hierarchy.cache.cache_block import CacheBlock
class CacheSet:
def __init__(self, clock, inputs, name="Cache_Set"):
self.clock = clock # Maybe two clocks?
self.inputs = inputs
self.name = name
self.output = None
self.blocks = None
self.build()
def build(self):
self.blocks = [CacheBlock(self.clock[i], self.inputs) for i in range(2)]
self.output = self.blocks
def logic(self, depend=None):
if depend is None:
depend = []
if self in depend:
return self.output
depend.append(self)
for block in self.output:
block.logic(depend)
return self.output
def set_clock(self, clocks):
self.clock = clocks
for i, block in enumerate(self.blocks):
block.set_clock(self.clock[i])
def set_input(self, inputs):
self.inputs = inputs
def get_output(self):
return [block.get_output() for block in self.output]
def __repr__(self):
return f"{self.name} : {self.output}"
| 27.45
| 80
| 0.599271
|
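CacheSet above is a 2-way structure that fans logic and clock calls out to its two blocks; a stripped-down sketch of that fan-out (Block is a trivial stand-in for CacheBlock):

class Block:
    """Trivial stand-in for CacheBlock."""

    def __init__(self, clock, inputs):
        self.clock, self.inputs = clock, inputs

    def logic(self):
        return (self.clock, self.inputs)

class TwoWaySet:
    """Two blocks per set, one clock per block, as in CacheSet above."""

    def __init__(self, clocks, inputs):
        self.blocks = [Block(clocks[i], inputs) for i in range(2)]

    def logic(self):
        return [b.logic() for b in self.blocks]

print(TwoWaySet([0, 1], "addr").logic())  # [(0, 'addr'), (1, 'addr')]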
3c731b6e01b7e24a63377ab05632b3b58371a047
| 7,977
|
py
|
Python
|
web/dynitag/dynitag/admin/views.py
|
f0k/dynitag
|
2dcbe838e32f9fe59b31252ff9b3f7537e32bd47
|
[
"MIT"
] | null | null | null |
web/dynitag/dynitag/admin/views.py
|
f0k/dynitag
|
2dcbe838e32f9fe59b31252ff9b3f7537e32bd47
|
[
"MIT"
] | null | null | null |
web/dynitag/dynitag/admin/views.py
|
f0k/dynitag
|
2dcbe838e32f9fe59b31252ff9b3f7537e32bd47
|
[
"MIT"
] | null | null | null |
import os
import uuid
import re
from flask import redirect, request, jsonify, flash, url_for, Markup, Response
from wtforms import Form
from wtforms.validators import ValidationError
import flask_admin
from sqlalchemy import exc, and_
from flask_admin.contrib.sqla import ModelView
from flask_login import login_required, current_user
from werkzeug.utils import secure_filename
from werkzeug.datastructures import FileStorage
from dynitag import app, db, login_manager
from dynitag.admin.forms import GetAnnotationsForm
from dynitag.models import Project, Audio, AnnotationTag, TagType, FeedbackType, VisualizationType, User, Annotation
from dynitag.admin.fields import CustomAdminConverter
class MyHomeView(flask_admin.AdminIndexView):
@flask_admin.expose('/')
def index(self):
getannotations_form = GetAnnotationsForm()
return self.render('admin/index.html',
getannotations_form=getannotations_form)
def is_accessible(self):
return current_user.is_authenticated and current_user.role=="admin"
def inaccessible_callback(self, name, **kwargs):
flash('Only admins can access Admin.', 'error')
# redirect to login page if user doesn't have access
return redirect(url_for(login_manager.login_view, next=request.url))
class AdminModelView(ModelView):
model_form_converter = CustomAdminConverter
column_labels = dict(name='Name', visualizationtype='Visualization Type',
feedbacktype='Feedback Type', allowRegions='Allow Regions',
n_annotations_per_file='Number of annotations per file',
audios_filename='Audio list filename',
annotations_filename='Annotations filename')
def is_accessible(self):
return current_user.is_authenticated and current_user.role=="admin"
def inaccessible_callback(self, name, **kwargs):
# redirect to login page if user doesn't have access
return redirect(url_for(login_manager.login_view, next=request.url))
def random_name(obj, file_data):
return str(uuid.uuid4()) + ".csv"
def validate_annotationtags(form, field):
data = field.data
if data and isinstance(data, FileStorage):
is_empty = True
for line in data:
is_empty = False
if not re.match(r'^[a-zA-Z0-9_\s\?]+,[a-zA-Z0-9_\s]+$', line.decode().strip()):
raise ValidationError('Wrong annotation file format.')
data.stream.seek(0) # needed to parse it later
if is_empty:
raise ValidationError('Annotation file is empty.')
def validate_audios(form, field):
data = field.data
if data and isinstance(data, FileStorage):
is_empty = True
for line in data:
is_empty = False
if not re.match(r'.+(wav|mp3)$', line.decode().strip()):
raise ValidationError('Wrong audio list file format.')
data.stream.seek(0) # needed to parse it later
if is_empty:
raise ValidationError('Audio list file is empty.')
class ProjectAdminView(AdminModelView):
form_excluded_columns = ['feedbacktype', 'audios', 'annotations']
def scaffold_form(self):
form_class = super(ProjectAdminView, self).scaffold_form()
form_class.audios_filename = flask_admin.form.FileUploadField(
base_path=os.path.join(app.config['UPLOAD_DIR'], 'audios'),
allow_overwrite=False,
namegen=random_name,
validators=[validate_audios])
form_class.annotations_filename = flask_admin.form.FileUploadField(
base_path=os.path.join(app.config['UPLOAD_DIR'], 'annotations'),
allow_overwrite=False,
namegen=random_name,
validators=[validate_annotationtags])
return form_class
def on_model_change(self, form, project, is_created):
audios_file = form.audios_filename.data
annotations_file = form.annotations_filename.data
if audios_file and isinstance(audios_file, FileStorage):
if not is_created:
# delete audio-project relationships
project.audios = []
self.session.add(project)
audios_file.stream.seek(0) # I need it here. Why ?
for line in audios_file:
rel_path = line.decode().strip()
audio = Audio.query.filter(Audio.rel_path==rel_path).first()
if not audio:
audio = Audio()
audio.rel_path = line.decode().strip()
project.audios.append(audio)
audios_file.stream.seek(0) # needed to save it all
if not is_created:
# delete orphan audios
self.session.query(Audio).\
filter(~Audio.projects.any()).\
delete(synchronize_session=False)
if annotations_file and isinstance(annotations_file, FileStorage):
if not is_created:
# delete annotationtag-project relationships
project.annotationtags = []
self.session.add(project)
annotations_file.stream.seek(0) # I need it here. Why ?
for line in annotations_file:
tagtype_name, anntag_name = line.decode().split(',')
tagtype = TagType.query.filter(TagType.name==tagtype_name).first()
if not tagtype:
tagtype = TagType()
tagtype.name = tagtype_name
self.session.add(tagtype)
self.session.commit()
anntag = AnnotationTag.query.filter(AnnotationTag.name==anntag_name).filter(AnnotationTag.tagtype_id==tagtype.id).first()
if not anntag:
anntag = AnnotationTag()
anntag.name = anntag_name
anntag.tagtype_id = tagtype.id
project.annotationtags.append(anntag)
annotations_file.stream.seek(0) # needed to save it all
if not is_created:
# delete orphan annotation tags
self.session.query(AnnotationTag).\
filter(~AnnotationTag.projects.any()).\
delete(synchronize_session=False)
self.session.commit()
@app.route('/get_annotations', methods=['GET'])
@login_required
def get_annotations():
form = GetAnnotationsForm(request.args)
project_id = form.project.data
project = Project.query.get(project_id)
audios = []
for audio in project.audios:
annotations = []
for ann in Annotation.query.filter(and_(Annotation.project_id==project_id, Annotation.audio_id==audio.id)):
annotations.append({
"id": ann.id,
"annotationtag_id": ann.annotationtag_id,
"user_id": ann.user_id
})
if project.allowRegions:
annotations[-1]["start_time"] = ann.start_time
annotations[-1]["end_time"] = ann.end_time
audios.append({
"id": audio.id,
"rel_path": audio.rel_path,
"annotations": annotations
})
users = User.query.join(Annotation).filter(Annotation.project_id==project_id).all()
annotationtags = AnnotationTag.query.filter(AnnotationTag.projects.any(Project.id==project_id)).all()
data = {
"users": [{"id": u.id, "name": u.username, "email": u.email} for u in users],
"annotation_tags": [{"id": a.id, "name": a.name, "tagtype_name": a.tagtype.name} for a in annotationtags],
"project": project.name,
"audio_root_url": project.audio_root_url,
"audios": audios
}
response = jsonify(data)
response.headers['Content-Disposition'] = 'attachment;filename={}_annotations.json'.format(re.sub(r'[^a-zA-Z0-9_]','_', project.name))
return response
| 38.912195
| 138
| 0.629435
|
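validate_annotationtags() above accepts lines of the form tagtype,tagname; the regex is easy to test on its own (the sample lines are made up):

import re

pattern = r'^[a-zA-Z0-9_\s\?]+,[a-zA-Z0-9_\s]+$'  # same regex as above
for line in ["species,dog", "noise level?,high", "bad;line"]:
    print(line, "->", bool(re.match(pattern, line)))
# species,dog -> True / noise level?,high -> True / bad;line -> False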
3ab12894aa04e105a4a5c36ab3b5919602fede16
| 2,393
|
py
|
Python
|
src/tinuous/state.py
|
yarikoptic/tinuous
|
23bcccce77a0c118cd07f06ad1cc6ee1a4edb58e
|
[
"MIT"
] | null | null | null |
src/tinuous/state.py
|
yarikoptic/tinuous
|
23bcccce77a0c118cd07f06ad1cc6ee1a4edb58e
|
[
"MIT"
] | null | null | null |
src/tinuous/state.py
|
yarikoptic/tinuous
|
23bcccce77a0c118cd07f06ad1cc6ee1a4edb58e
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from pathlib import Path
from typing import Optional, Union
from pydantic import BaseModel
from .util import log
STATE_FILE = ".tinuous.state.json"
OLD_STATE_FILE = ".dlstate.json"
class State(BaseModel):
github: Optional[datetime] = None
travis: Optional[datetime] = None
appveyor: Optional[datetime] = None
class StateFile(BaseModel):
default_since: datetime
path: Path
state: State
migrating: bool = False
modified: bool = False
@classmethod
def from_file(
cls, default_since: datetime, path: Union[str, Path, None] = None
) -> "StateFile":
migrating = False
p: Path
if path is None:
cwd = Path.cwd()
if (cwd / STATE_FILE).exists():
p = cwd / STATE_FILE
elif (cwd / OLD_STATE_FILE).exists():
log.debug("Statefile with old name found; will rename on first write")
p = cwd / OLD_STATE_FILE
migrating = True
else:
p = cwd / STATE_FILE
else:
p = Path(path)
try:
s = p.read_text()
except FileNotFoundError:
state = State()
else:
if s.strip() == "":
state = State()
else:
state = State.parse_raw(s)
return cls(
default_since=default_since, path=p, state=state, migrating=migrating
)
def get_since(self, ciname: str) -> datetime:
if (t := getattr(self.state, ciname)) is not None:
assert isinstance(t, datetime)
return max(t, self.default_since)
else:
return self.default_since
def set_since(self, ciname: str, since: datetime) -> None:
if getattr(self.state, ciname) == since:
return
setattr(self.state, ciname, since)
log.debug("%s timestamp floor updated to %s", ciname, since)
if self.migrating:
log.debug("Renaming old statefile %s to %s", OLD_STATE_FILE, STATE_FILE)
newpath = self.path.with_name(STATE_FILE)
newpath.write_text(self.state.json())
self.path.unlink(missing_ok=True)
self.path = newpath
self.migrating = False
else:
self.path.write_text(self.state.json())
self.modified = True
| 30.291139
| 86
| 0.572085
|
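State above is a plain pydantic model, so persistence is just a JSON round-trip; a minimal sketch using the pydantic v1 API (.json()/.parse_raw()) that the file relies on:

from datetime import datetime
from typing import Optional
from pydantic import BaseModel

class State(BaseModel):
    github: Optional[datetime] = None

s = State(github=datetime(2021, 5, 1, 12, 0))
raw = s.json()                 # what StateFile writes to disk
print(raw)                     # {"github": "2021-05-01T12:00:00"}
print(State.parse_raw(raw))    # what StateFile.from_file() reads back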
49453d7eef4a7e8308d7b3e30c992d5c3f8faac5
| 25,758
|
py
|
Python
|
networkbrowser/src/AutoMount.py
|
wedebe/enigma2-plugins
|
58e1897866ad65294283970e96e5f2841c3cb6e2
|
[
"OLDAP-2.3"
] | null | null | null |
networkbrowser/src/AutoMount.py
|
wedebe/enigma2-plugins
|
58e1897866ad65294283970e96e5f2841c3cb6e2
|
[
"OLDAP-2.3"
] | null | null | null |
networkbrowser/src/AutoMount.py
|
wedebe/enigma2-plugins
|
58e1897866ad65294283970e96e5f2841c3cb6e2
|
[
"OLDAP-2.3"
] | null | null | null |
# -*- coding: utf-8 -*-
# for localized messages
#from __init__ import _
import os
from enigma import eTimer
from Components.Console import Console
from Components.Harddisk import harddiskmanager #global harddiskmanager
from xml.etree.cElementTree import parse as cet_parse
from shutil import rmtree
XML_FSTAB = "/etc/enigma2/automounts.xml"
def rm_rf(d): # only for removing the ipkg stuff from /media/hdd subdirs
try:
for path in (os.path.join(d, f) for f in os.listdir(d)):
if os.path.isdir(path):
rm_rf(path)
else:
os.unlink(path)
os.rmdir(d)
except Exception, ex:
print "AutoMount failed to remove", d, "Error:", ex
class AutoMount():
"""Manages Mounts declared in a XML-Document."""
def __init__(self):
self.automounts = {}
self.restartConsole = Console()
self.MountConsole = Console()
self.removeConsole = Console()
self.activeMountsCounter = 0
# Initialize Timer
self.callback = None
self.timer = eTimer()
self.timer.callback.append(self.mountTimeout)
self.getAutoMountPoints()
def getAutoMountPoints(self, callback=None, restart=False):
# Initialize mounts to empty list
automounts = []
self.automounts = {}
self.activeMountsCounter = 0
if not os.path.exists(XML_FSTAB):
return
file = open(XML_FSTAB, 'r')
tree = cet_parse(file).getroot()
file.close()
def getValue(definitions, default):
			# Return the text of the last definition, or the default if none are present
			Len = len(definitions)
return Len > 0 and definitions[Len - 1].text or default
mountusing = 0 # 0=old_enigma2, 1 =fstab, 2=enigma2
# Config is stored in "mountmanager" element
# Read out NFS Mounts
for autofs in tree.findall("autofs"):
mountusing = 1
for nfs in autofs.findall("nfs"):
for mount in nfs.findall("mount"):
data = {'isMounted': False, 'mountusing': False, 'active': False, 'ip': False, 'sharename': False, 'sharedir': False, 'username': False, 'password': False, 'mounttype': False, 'options': False, 'hdd_replacement': False}
try:
data['mountusing'] = 'autofs'.encode("UTF-8")
data['mounttype'] = 'nfs'.encode("UTF-8")
data['active'] = getValue(mount.findall("active"), False).encode("UTF-8")
if data["active"] == 'True' or data["active"] == True:
self.activeMountsCounter += 1
data['hdd_replacement'] = getValue(mount.findall("hdd_replacement"), "False").encode("UTF-8")
data['ip'] = getValue(mount.findall("ip"), "192.168.0.0").encode("UTF-8")
data['sharedir'] = getValue(mount.findall("sharedir"), "/media/hdd/").encode("UTF-8")
data['sharename'] = getValue(mount.findall("sharename"), "MEDIA").encode("UTF-8")
data['options'] = getValue(mount.findall("options"), "rw,nolock,tcp,utf8").encode("UTF-8")
self.automounts[data['sharename']] = data
except Exception, e:
print "[MountManager] Error reading Mounts:", e
for cifs in autofs.findall("cifs"):
for mount in cifs.findall("mount"):
data = {'isMounted': False, 'mountusing': False, 'active': False, 'ip': False, 'sharename': False, 'sharedir': False, 'username': False, 'password': False, 'mounttype': False, 'options': False, 'hdd_replacement': False}
try:
data['mountusing'] = 'autofs'.encode("UTF-8")
data['mounttype'] = 'cifs'.encode("UTF-8")
data['active'] = getValue(mount.findall("active"), False).encode("UTF-8")
if data["active"] == 'True' or data["active"] == True:
self.activeMountsCounter += 1
data['hdd_replacement'] = getValue(mount.findall("hdd_replacement"), "False").encode("UTF-8")
data['ip'] = getValue(mount.findall("ip"), "192.168.0.0").encode("UTF-8")
data['sharedir'] = getValue(mount.findall("sharedir"), "/media/hdd/").encode("UTF-8")
data['sharename'] = getValue(mount.findall("sharename"), "MEDIA").encode("UTF-8")
data['options'] = getValue(mount.findall("options"), "rw,utf8").encode("UTF-8")
data['username'] = getValue(mount.findall("username"), "guest").encode("UTF-8")
data['password'] = getValue(mount.findall("password"), "").encode("UTF-8")
self.automounts[data['sharename']] = data
except Exception, e:
print "[MountManager] Error reading Mounts:", e
for fstab in tree.findall("fstab"):
mountusing = 2
for nfs in fstab.findall("nfs"):
for mount in nfs.findall("mount"):
data = {'isMounted': False, 'mountusing': False, 'active': False, 'ip': False, 'sharename': False, 'sharedir': False, 'username': False, 'password': False, 'mounttype': False, 'options': False, 'hdd_replacement': False}
try:
data['mountusing'] = 'fstab'.encode("UTF-8")
data['mounttype'] = 'nfs'.encode("UTF-8")
data['active'] = getValue(mount.findall("active"), False).encode("UTF-8")
if data["active"] == 'True' or data["active"] == True:
self.activeMountsCounter += 1
data['hdd_replacement'] = getValue(mount.findall("hdd_replacement"), "False").encode("UTF-8")
data['ip'] = getValue(mount.findall("ip"), "192.168.0.0").encode("UTF-8")
data['sharedir'] = getValue(mount.findall("sharedir"), "/media/hdd/").encode("UTF-8")
data['sharename'] = getValue(mount.findall("sharename"), "MEDIA").encode("UTF-8")
data['options'] = getValue(mount.findall("options"), "rw,nolock,tcp,utf8").encode("UTF-8")
self.automounts[data['sharename']] = data
except Exception, e:
print "[MountManager] Error reading Mounts:", e
for cifs in fstab.findall("cifs"):
for mount in cifs.findall("mount"):
data = {'isMounted': False, 'mountusing': False, 'active': False, 'ip': False, 'sharename': False, 'sharedir': False, 'username': False, 'password': False, 'mounttype': False, 'options': False, 'hdd_replacement': False}
try:
data['mountusing'] = 'fstab'.encode("UTF-8")
data['mounttype'] = 'cifs'.encode("UTF-8")
data['active'] = getValue(mount.findall("active"), False).encode("UTF-8")
if data["active"] == 'True' or data["active"] == True:
self.activeMountsCounter += 1
data['hdd_replacement'] = getValue(mount.findall("hdd_replacement"), "False").encode("UTF-8")
data['ip'] = getValue(mount.findall("ip"), "192.168.0.0").encode("UTF-8")
data['sharedir'] = getValue(mount.findall("sharedir"), "/media/hdd/").encode("UTF-8")
data['sharename'] = getValue(mount.findall("sharename"), "MEDIA").encode("UTF-8")
data['options'] = getValue(mount.findall("options"), "rw,utf8").encode("UTF-8")
data['username'] = getValue(mount.findall("username"), "guest").encode("UTF-8")
data['password'] = getValue(mount.findall("password"), "").encode("UTF-8")
self.automounts[data['sharename']] = data
except Exception, e:
print "[MountManager] Error reading Mounts:", e
for enigma2 in tree.findall("enigma2"):
mountusing = 3
for nfs in enigma2.findall("nfs"):
for mount in nfs.findall("mount"):
data = {'isMounted': False, 'mountusing': False, 'active': False, 'ip': False, 'sharename': False, 'sharedir': False, 'username': False, 'password': False, 'mounttype': False, 'options': False, 'hdd_replacement': False}
try:
data['mountusing'] = 'enigma2'.encode("UTF-8")
data['mounttype'] = 'nfs'.encode("UTF-8")
data['active'] = getValue(mount.findall("active"), False).encode("UTF-8")
if data["active"] == 'True' or data["active"] == True:
self.activeMountsCounter += 1
data['hdd_replacement'] = getValue(mount.findall("hdd_replacement"), "False").encode("UTF-8")
data['ip'] = getValue(mount.findall("ip"), "192.168.0.0").encode("UTF-8")
data['sharedir'] = getValue(mount.findall("sharedir"), "/exports/").encode("UTF-8")
data['sharename'] = getValue(mount.findall("sharename"), "MEDIA").encode("UTF-8")
data['options'] = getValue(mount.findall("options"), "rw,nolock,tcp,utf8").encode("UTF-8")
self.automounts[data['sharename']] = data
except Exception, e:
print "[MountManager] Error reading Mounts:", e
# Read out CIFS Mounts
for cifs in enigma2.findall("cifs"):
for mount in cifs.findall("mount"):
data = {'isMounted': False, 'mountusing': False, 'active': False, 'ip': False, 'sharename': False, 'sharedir': False, 'username': False, 'password': False, 'mounttype': False, 'options': False, 'hdd_replacement': False}
try:
data['mountusing'] = 'enigma2'.encode("UTF-8")
data['mounttype'] = 'cifs'.encode("UTF-8")
data['active'] = getValue(mount.findall("active"), False).encode("UTF-8")
if data["active"] == 'True' or data["active"] == True:
self.activeMountsCounter += 1
data['hdd_replacement'] = getValue(mount.findall("hdd_replacement"), "False").encode("UTF-8")
data['ip'] = getValue(mount.findall("ip"), "192.168.0.0").encode("UTF-8")
data['sharedir'] = getValue(mount.findall("sharedir"), "/exports/").encode("UTF-8")
data['sharename'] = getValue(mount.findall("sharename"), "MEDIA").encode("UTF-8")
data['options'] = getValue(mount.findall("options"), "rw,utf8").encode("UTF-8")
data['username'] = getValue(mount.findall("username"), "guest").encode("UTF-8")
data['password'] = getValue(mount.findall("password"), "").encode("UTF-8")
self.automounts[data['sharename']] = data
except Exception, e:
print "[MountManager] Error reading Mounts:", e
if mountusing == 0:
for nfs in tree.findall("nfs"):
for mount in nfs.findall("mount"):
data = {'isMounted': False, 'mountusing': False, 'active': False, 'ip': False, 'sharename': False, 'sharedir': False, 'username': False, 'password': False, 'mounttype': False, 'options': False, 'hdd_replacement': False}
try:
data['mountusing'] = 'old_enigma2'.encode("UTF-8")
data['mounttype'] = 'nfs'.encode("UTF-8")
data['active'] = getValue(mount.findall("active"), False).encode("UTF-8")
if data["active"] == 'True' or data["active"] == True:
self.activeMountsCounter += 1
data['hdd_replacement'] = getValue(mount.findall("hdd_replacement"), "False").encode("UTF-8")
data['ip'] = getValue(mount.findall("ip"), "192.168.0.0").encode("UTF-8")
data['sharedir'] = getValue(mount.findall("sharedir"), "/exports/").encode("UTF-8")
data['sharename'] = getValue(mount.findall("sharename"), "MEDIA").encode("UTF-8")
data['options'] = getValue(mount.findall("options"), "rw,nolock,tcp,utf8").encode("UTF-8")
self.automounts[data['sharename']] = data
except Exception, e:
print "[MountManager] Error reading Mounts:", e
for cifs in tree.findall("cifs"):
for mount in cifs.findall("mount"):
data = {'isMounted': False, 'mountusing': False, 'active': False, 'ip': False, 'sharename': False, 'sharedir': False, 'username': False, 'password': False, 'mounttype': False, 'options': False, 'hdd_replacement': False}
try:
data['mountusing'] = 'old_enigma2'.encode("UTF-8")
data['mounttype'] = 'cifs'.encode("UTF-8")
data['active'] = getValue(mount.findall("active"), False).encode("UTF-8")
if data["active"] == 'True' or data["active"] == True:
self.activeMountsCounter += 1
data['hdd_replacement'] = getValue(mount.findall("hdd_replacement"), "False").encode("UTF-8")
data['ip'] = getValue(mount.findall("ip"), "192.168.0.0").encode("UTF-8")
data['sharedir'] = getValue(mount.findall("sharedir"), "/exports/").encode("UTF-8")
data['sharename'] = getValue(mount.findall("sharename"), "MEDIA").encode("UTF-8")
data['options'] = getValue(mount.findall("options"), "rw,utf8").encode("UTF-8")
data['username'] = getValue(mount.findall("username"), "guest").encode("UTF-8")
data['password'] = getValue(mount.findall("password"), "").encode("UTF-8")
self.automounts[data['sharename']] = data
except Exception, e:
print "[MountManager] Error reading Mounts:", e
self.checkList = self.automounts.keys()
if not self.checkList:
# print "[NetworkBrowser] self.automounts without mounts",self.automounts
if callback is not None:
callback(True)
else:
self.CheckMountPoint(self.checkList.pop(), callback, restart)
def sanitizeOptions(self, origOptions, cifs=False, fstab=False, autofs=False):
options = origOptions.strip()
options = options.replace('utf8', 'iocharset=utf8')
if fstab:
if not options:
options = 'rw'
if not cifs:
options += ',nfsvers=3,rsize=8192,wsize=8192,proto=tcp'
else:
if not cifs:
options += ',nfsvers=3'
if 'rsize' not in options:
options += ',rsize=8192'
if 'wsize' not in options:
options += ',wsize=8192'
if 'tcp' not in options and 'udp' not in options:
options += ',proto=tcp'
options = options + ',timeo=14,soft'
elif autofs:
if not options:
options = 'rw'
if not cifs:
options += ',nfsvers=3,rsize=8192,wsize=8192'
else:
if not cifs:
options += ',nfsvers=3'
if 'rsize' not in options:
options += ',rsize=8192'
if 'wsize' not in options:
options += ',wsize=8192'
if 'tcp' not in options and 'udp' not in options:
options += ',proto=tcp'
options = options + ',timeo=14,soft'
else:
if not options:
options = 'rw,rsize=8192,wsize=8192'
if not cifs:
options += ',proto=tcp'
else:
if not cifs:
if 'rsize' not in options:
options += ',rsize=8192'
if 'wsize' not in options:
options += ',wsize=8192'
if 'tcp' not in options and 'udp' not in options:
options += ',proto=tcp'
return options
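    # Example (for illustration only): sanitizeOptions('rw,utf8') for a plain
    # NFS enigma2 mount returns
    # 'rw,iocharset=utf8,rsize=8192,wsize=8192,proto=tcp', filling in the
    # missing buffer sizes and transport protocol.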
def CheckMountPoint(self, item, callback, restart):
data = self.automounts[item]
if not self.MountConsole:
self.MountConsole = Console()
command = []
mountcommand = None
unmountcommand = []
if data['mountusing'] == 'autofs':
path = os.path.join('/media/autofs', data['sharename'])
elif data['hdd_replacement'] == 'True' or data['hdd_replacement'] is True:
path = os.path.join('/media/hdd')
else:
path = os.path.join('/media/net', data['sharename'])
if data['mountusing'] == 'autofs' and restart:
unmountcommand.append("/etc/init.d/autofs stop")
if os.path.ismount(path) and 'autofs' not in path:
unmountcommand.append('umount -fl ' + path)
if self.activeMountsCounter != 0:
if data['active'] == 'True' or data['active'] is True:
if data['mountusing'] == 'autofs' and restart:
mountcommand = "/etc/init.d/autofs start"
elif data['mountusing'] == 'fstab':
if data['mounttype'] == 'nfs':
tmpcmd = 'mount ' + data['ip'] + ':/' + data['sharedir']
elif data['mounttype'] == 'cifs':
tmpcmd = 'mount //' + data['ip'] + '/' + data['sharedir']
mountcommand = tmpcmd.encode("UTF-8")
elif data['mountusing'] == 'enigma2' or data['mountusing'] == 'old_enigma2':
tmpsharedir = data['sharedir'].replace(" ", "\\ ")
if tmpsharedir[-1:] == "$":
tmpdir = tmpsharedir.replace("$", "\\$")
tmpsharedir = tmpdir
if data['mounttype'] == 'nfs':
if not os.path.ismount(path):
tmpcmd = 'mount -t nfs -o ' + self.sanitizeOptions(data['options']) + ' ' + data['ip'] + ':/' + tmpsharedir + ' ' + path
mountcommand = tmpcmd.encode("UTF-8")
elif data['mounttype'] == 'cifs':
if not os.path.ismount(path):
tmpusername = data['username'].replace(" ", "\\ ")
tmpcmd = 'mount -t cifs -o ' + self.sanitizeOptions(data['options'], cifs=True) + ',noatime,noserverino,username=' + tmpusername + ',password=' + data['password'] + ' //' + data['ip'] + '/' + tmpsharedir + ' ' + path
mountcommand = tmpcmd.encode("UTF-8")
        if len(unmountcommand) > 0 or mountcommand is not None:
            command.extend(unmountcommand)
            if not os.path.exists(path) and data['mountusing'] != 'autofs':
                command.append('mkdir -p ' + path)
            # short pause so any umount/mkdir above can settle before mounting
            command.append('sleep 2')
            if mountcommand is not None:
                command.append(mountcommand)
print 'command', command
self.MountConsole.eBatch(command, self.CheckMountPointFinished, [data, callback, restart], debug=True)
else:
self.CheckMountPointFinished([data, callback, restart])
def CheckMountPointFinished(self, extra_args):
# print "[NetworkBrowser] CheckMountPointFinished"
(data, callback, restart) = extra_args
hdd_dir = '/media/hdd'
sharepath = os.path.join('/media/net', data['sharename'])
if data['mountusing'] == 'autofs':
sharepath = os.path.join('/media/autofs', data['sharename'])
path = os.path.join('/media/autofs', data['sharename'])
elif data['hdd_replacement'] == 'True' or data['hdd_replacement'] is True:
path = os.path.join('/media/hdd')
else:
path = os.path.join('/media/net', data['sharename'])
if os.path.exists(path):
if data['mountusing'] == 'autofs':
if data['sharename'] in self.automounts:
self.automounts[data['sharename']]['isMounted'] = True
desc = data['sharename']
harddiskmanager.addMountedPartition(sharepath, desc)
if data['hdd_replacement'] == 'True' or data['hdd_replacement'] is True:
if os.path.islink(hdd_dir):
if os.readlink(hdd_dir) != path:
os.unlink(hdd_dir)
os.symlink(path, hdd_dir)
elif not os.path.exists(hdd_dir):
os.symlink(path, hdd_dir)
elif os.path.ismount(path):
if data['sharename'] in self.automounts:
self.automounts[data['sharename']]['isMounted'] = True
desc = data['sharename']
harddiskmanager.addMountedPartition(path, desc)
else:
if data['sharename'] in self.automounts:
self.automounts[data['sharename']]['isMounted'] = False
if os.path.exists(path):
if not os.path.ismount(path):
try:
rmtree(path)
harddiskmanager.removeMountedPartition(path)
except Exception, ex:
print "Failed to remove", path, "Error:", ex
if self.checkList:
# Go to next item in list...
self.CheckMountPoint(self.checkList.pop(), callback, restart)
if self.MountConsole:
if len(self.MountConsole.appContainers) == 0:
if callback is not None:
self.callback = callback
self.timer.startLongTimer(1)
def mountTimeout(self):
self.timer.stop()
if self.MountConsole:
if len(self.MountConsole.appContainers) == 0:
if self.callback is not None:
self.callback(True)
elif self.removeConsole:
if len(self.removeConsole.appContainers) == 0:
if self.callback is not None:
self.callback(True)
def getMountsList(self):
return self.automounts
def getMountsAttribute(self, mountpoint, attribute):
if mountpoint in self.automounts:
if attribute in self.automounts[mountpoint]:
return self.automounts[mountpoint][attribute]
return None
def setMountsAttribute(self, mountpoint, attribute, value):
if mountpoint in self.automounts:
self.automounts[mountpoint][attribute] = value
def removeEntryFromFile(self, entry, filename, separator=None):
if os.path.exists(filename):
f = open(filename)
tmpfile = open(filename + '.tmp', 'w')
tmpfile.writelines([line for line in f.readlines() if entry not in line.split(separator)])
tmpfile.close()
f.close()
os.rename(filename + '.tmp', filename)
def escape(self, data):
return data.replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"').replace("'", ''')
def generateMountXML(self, sharedata):
res = []
mounttype = self.escape(sharedata['mounttype'])
mountusing = self.escape(sharedata['mountusing'])
if mountusing != 'old_enigma2':
res.append('<' + mountusing + '>\n')
res.append(' <' + mounttype + '>\n')
res.append(' <mount>\n')
res.append(' <active>' + self.escape(str(sharedata['active'])) + '</active>\n')
res.append(' <hdd_replacement>' + self.escape(str(sharedata['hdd_replacement'])) + '</hdd_replacement>\n')
res.append(' <ip>' + self.escape(sharedata['ip']) + '</ip>\n')
res.append(' <sharename>' + self.escape(sharedata['sharename']) + '</sharename>\n')
res.append(' <sharedir>' + self.escape(sharedata['sharedir']) + '</sharedir>\n')
res.append(' <options>' + self.escape(sharedata['options']) + '</options>\n')
if mounttype == 'cifs':
res.append(" <username>" + self.escape(sharedata['username']) + "</username>\n")
res.append(" <password>" + self.escape(sharedata['password']) + "</password>\n")
res.append(' </mount>\n')
res.append(' </' + mounttype + '>\n')
if mountusing != 'old_enigma2':
res.append('</' + mountusing + '>\n')
return res
def writeMountsConfig(self):
# Generate List in RAM
list = ['<?xml version="1.0" ?>\n<mountmanager>\n']
for sharename, sharedata in self.automounts.items():
mounttype = sharedata['mounttype']
mountusing = sharedata['mountusing']
if sharedata['hdd_replacement'] == 'True' or sharedata['hdd_replacement'] is True: #hdd replacement hack
path = os.path.join('/media/hdd')
sharepath = os.path.join('/media/net', sharedata['sharename'])
else:
path = os.path.join('/media/net', sharedata['sharename'])
sharepath = ""
sharetemp = None
if mounttype == 'nfs':
sharetemp = sharedata['ip'] + ':/' + sharedata['sharedir']
self.removeEntryFromFile(sharetemp + '\n', '/etc/auto.network', ' ')
self.removeEntryFromFile(sharetemp, '/etc/fstab')
elif mounttype == 'cifs':
sharetemp = '//' + sharedata['ip'] + '/' + sharedata['sharedir']
self.removeEntryFromFile(":" + sharetemp + '\n', '/etc/auto.network', ' ')
self.removeEntryFromFile(sharetemp, '/etc/fstab')
list += self.generateMountXML(sharedata)
if mountusing == 'autofs':
if sharedata['active'] == True or sharedata['active'] == 'True':
out = open('/etc/auto.network', 'a')
if mounttype == 'nfs':
line = sharedata['sharename'] + ' -fstype=' + mounttype + ',' + self.sanitizeOptions(sharedata['options'], autofs=True) + ' ' + sharedata['ip'] + ':/' + sharedata['sharedir'] + '\n'
elif sharedata['mounttype'] == 'cifs':
                    tmpusername = sharedata['username'].replace(" ", "\\ ")
                    tmppassword = sharedata['password'].replace(" ", "\\ ")
tmpaddress = sharedata['ip']
line = sharedata['sharename'] + ' -fstype=' + mounttype + ',user=' + tmpusername + ',pass=' + tmppassword + ',' + self.sanitizeOptions(sharedata['options'], cifs=True, autofs=True) + ' ://' + tmpaddress + '/' + sharedata['sharedir'] + '\n'
out.write(line)
out.close()
elif mountusing == 'fstab':
if sharedata['active'] == True or sharedata['active'] == 'True':
out = open('/etc/fstab', 'a')
if sharedata['mounttype'] == 'nfs':
line = sharedata['ip'] + ':/' + sharedata['sharedir'] + '\t' + path + '\tnfs\t_netdev,' + self.sanitizeOptions(sharedata['options'], fstab=True) + '\t0 0\n'
elif sharedata['mounttype'] == 'cifs':
line = '//' + sharedata['ip'] + '/' + sharedata['sharedir'] + '\t' + path + '\tcifs\tuser=' + sharedata['username'] + ',pass=' + sharedata['password'] + ',_netdev,' + self.sanitizeOptions(sharedata['options'], cifs=True, fstab=True) + '\t0 0\n'
out.write(line)
out.close()
# Close Mountmanager Tag
list.append('</mountmanager>\n')
# Try Saving to Flash
try:
f = open(XML_FSTAB, "w")
f.writelines(list)
f.close()
# print "[NetworkBrowser] Saving Mounts List:"
except Exception, e:
print "[NetworkBrowser] Error Saving Mounts List:", e
def stopMountConsole(self):
if self.MountConsole is not None:
self.MountConsole = None
def removeMount(self, mountpoint, callback=None):
# print "[NetworkBrowser] removing mount: ",mountpoint
self.newautomounts = {}
for sharename, sharedata in self.automounts.items():
sharepath = os.path.join('/media/net', sharedata['sharename'])
if sharedata['mountusing'] == 'autofs':
sharepath = os.path.join('/media/autofs', sharedata['sharename'])
path = os.path.join('/media/autofs', sharedata['sharename'])
if sharedata['hdd_replacement'] == 'True' or sharedata['hdd_replacement'] is True:
if os.path.islink('/media/hdd'):
if os.readlink('/media/hdd') == path:
os.unlink('/media/hdd')
elif sharedata['hdd_replacement'] == 'True' or sharedata['hdd_replacement'] is True:
path = os.path.join('/media/hdd')
else:
path = os.path.join('/media/net', sharedata['sharename'])
            if sharename != mountpoint.strip():
                self.newautomounts[sharename] = sharedata
            sharetemp = None
            if sharedata['mounttype'] == 'nfs':
                sharetemp = sharedata['ip'] + ':/' + sharedata['sharedir']
            elif sharedata['mounttype'] == 'cifs':
                sharetemp = '://' + sharedata['ip'] + '/' + sharedata['sharedir']
if sharetemp:
self.removeEntryFromFile(sharetemp + '\n', '/etc/auto.network', ' ')
self.removeEntryFromFile(sharetemp, '/etc/fstab')
self.automounts.clear()
self.automounts = self.newautomounts
if not self.removeConsole:
self.removeConsole = Console()
        command = []
if sharedata['mountusing'] == 'autofs':
command.append("/etc/init.d/autofs stop")
command.append("sleep 2")
command.append("/etc/init.d/autofs start")
else:
command.append('umount -fl ' + path)
# print "[NetworkBrowser] UMOUNT-CMD--->",umountcmd
self.removeConsole.eBatch(command, self.removeMountPointFinished, [path, callback], debug=True)
def removeMountPointFinished(self, extra_args):
(path, callback) = extra_args
if os.path.exists(path):
if not os.path.ismount(path):
try:
os.rmdir(path)
harddiskmanager.removeMountedPartition(path)
except Exception, ex:
print "Failed to remove", path, "Error:", ex
if self.removeConsole:
if len(self.removeConsole.appContainers) == 0:
if callback is not None:
self.callback = callback
self.timer.startLongTimer(1)
iAutoMount = AutoMount()
| 45.914439
| 250
| 0.643451
|
0ad631cdf6597275c367b066b7c97fe3721e1d65
| 224
|
py
|
Python
|
pylogger/formats/__init__.py
|
agSant01/pylogger
|
99a5d08b0f486c43dc4936cd89474e21a86f377a
|
[
"MIT"
] | null | null | null |
pylogger/formats/__init__.py
|
agSant01/pylogger
|
99a5d08b0f486c43dc4936cd89474e21a86f377a
|
[
"MIT"
] | null | null | null |
pylogger/formats/__init__.py
|
agSant01/pylogger
|
99a5d08b0f486c43dc4936cd89474e21a86f377a
|
[
"MIT"
] | null | null | null |
from .timestamp import Timestamp
from .caller import ClassCaller, FunctionCaller, FileCaller, FileLine
from .format import Format
__all__ = ['Timestamp', 'ClassCaller', 'FunctionCaller', 'FileCaller', 'FileLine', 'Format']
| 37.333333
| 92
| 0.776786
|
c9eea8f4fad1041f4c5e29b482e2298a10152677
| 934
|
py
|
Python
|
Back-End/Python/Basics/Part -2 - Iteration & Generators/02 - Iterables & Iterators/14_test_iter.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 25
|
2021-04-28T02:51:26.000Z
|
2022-03-24T13:58:04.000Z
|
Back-End/Python/Basics/Part -2 - Iteration & Generators/02 - Iterables & Iterators/14_test_iter.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 1
|
2022-03-03T23:33:41.000Z
|
2022-03-03T23:35:41.000Z
|
Back-End/Python/Basics/Part -2 - Iteration & Generators/02 - Iterables & Iterators/14_test_iter.py
|
ASHISHKUMAR2411/Programming-CookBook
|
9c60655d64d21985ccb4196360858d98344701f9
|
[
"MIT"
] | 15
|
2021-05-30T01:35:20.000Z
|
2022-03-25T12:38:25.000Z
|
class SimpleIter:
def __init__(self):
pass
def __iter__(self):
return 'Nope'
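        # Note: defining __iter__ is not enough -- iter(s) still raises
        # TypeError here, because this __iter__ returns a string, not an iterator.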
s = SimpleIter()
print('__iter__' in dir(s))  # => True
def is_iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
obj = 100
if is_iterable(obj):
for i in obj:
print(i)
else:
print('Error: obj is not iterable')
obj = 100
if is_iterable(obj):
for i in obj:
print(i)
else:
print('Error: obj is not iterable')
print('Taking some action as a consequence of this error')
obj = 100
try:
for i in obj:
print(i)
except TypeError:
print('Error: obj is not iterable')
print('Taking some action as a consequence of this error')
# True
# Error: obj is not iterable
# Error: obj is not iterable
# Taking some action as a consequence of this error
# Error: obj is not iterable
# Taking some action as a consequence of this error
| 19.061224
| 62
| 0.638116
|
a9bcbb02778d6b6a69a02c099de595a109e09a64
| 8,380
|
py
|
Python
|
configs/common/MemConfig.py
|
liyupeng/NVPage
|
1a3caee8bdd6533239e076cd464b9f2c85ba7d21
|
[
"BSD-3-Clause"
] | 7
|
2015-01-12T11:37:19.000Z
|
2021-09-23T18:31:48.000Z
|
configs/common/MemConfig.py
|
liyupeng/NVPage
|
1a3caee8bdd6533239e076cd464b9f2c85ba7d21
|
[
"BSD-3-Clause"
] | null | null | null |
configs/common/MemConfig.py
|
liyupeng/NVPage
|
1a3caee8bdd6533239e076cd464b9f2c85ba7d21
|
[
"BSD-3-Clause"
] | 2
|
2018-11-15T03:42:47.000Z
|
2019-03-06T06:53:15.000Z
|
# Copyright (c) 2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
# Andreas Hansson
import m5.objects
import inspect
import math
import sys
from textwrap import TextWrapper
from m5.util import *
from SysPaths import pram_image
# Dictionary of mapping names of real memory controller models to
# classes.
_mem_classes = {}
# Memory aliases. We make sure they exist before we add them to the
# final list. A target may be specified as a tuple, in which case the
# first available memory controller model in the tuple will be used.
_mem_aliases_all = [
("simple_mem", "SimpleMemory"),
("ddr3_1600_x64", "DDR3_1600_x64"),
("lpddr2_s4_1066_x32", "LPDDR2_S4_1066_x32"),
("lpddr3_1600_x32", "LPDDR3_1600_x32"),
("wio_200_x128", "WideIO_200_x128"),
("dramsim2", "DRAMSim2"),
("pram", "PRAMCtrl")
]
# Filtered list of aliases. Only aliases for existing memory
# controllers exist in this list.
_mem_aliases = {}
def is_mem_class(cls):
"""Determine if a class is a memory controller that can be instantiated"""
# We can't use the normal inspect.isclass because the ParamFactory
# and ProxyFactory classes have a tendency to confuse it.
try:
return issubclass(cls, m5.objects.AbstractMemory) and \
not cls.abstract
except TypeError:
return False
def get(name):
"""Get a memory class from a user provided class name or alias."""
real_name = _mem_aliases.get(name, name)
try:
mem_class = _mem_classes[real_name]
return mem_class
except KeyError:
print "%s is not a valid memory controller." % (name,)
sys.exit(1)
def print_mem_list():
"""Print a list of available memory classes including their aliases."""
print "Available memory classes:"
doc_wrapper = TextWrapper(initial_indent="\t\t", subsequent_indent="\t\t")
for name, cls in _mem_classes.items():
print "\t%s" % name
# Try to extract the class documentation from the class help
# string.
doc = inspect.getdoc(cls)
if doc:
for line in doc_wrapper.wrap(doc):
print line
if _mem_aliases:
print "\nMemory aliases:"
for alias, target in _mem_aliases.items():
print "\t%s => %s" % (alias, target)
def mem_names():
"""Return a list of valid memory names."""
return _mem_classes.keys() + _mem_aliases.keys()
# Add all memory controllers in the object hierarchy.
for name, cls in inspect.getmembers(m5.objects, is_mem_class):
_mem_classes[name] = cls
for alias, target in _mem_aliases_all:
if isinstance(target, tuple):
# Some aliases contain a list of memory controller models
# sorted in priority order. Use the first target that's
# available.
for t in target:
if t in _mem_classes:
_mem_aliases[alias] = t
break
elif target in _mem_classes:
# Normal alias
_mem_aliases[alias] = target
def config_mem(options, system):
"""
Create the memory controllers based on the options and attach them.
If requested, we make a multi-channel configuration of the
selected memory controller class by creating multiple instances of
the specific class. The individual controllers have their
parameters set such that the address range is interleaved between
them.
"""
    nbr_mem_ctrls = options.mem_channels
    intlv_bits = int(math.log(nbr_mem_ctrls, 2))
    if 2 ** intlv_bits != nbr_mem_ctrls:
        fatal("Number of memory channels must be a power of 2")
cls = get(options.mem_type)
nvcls = get("pram")
# print "nvcls memory type is %s." % nvcls
mem_ctrls = []
# The default behaviour is to interleave on cache line granularity
cache_line_bit = int(math.log(system.cache_line_size.value, 2)) - 1
intlv_low_bit = cache_line_bit
#
cur_mem_size = 0
mem_size = convert.toMemorySize(options.mem_size)
# print "mem_size is %d." % mem_size
# For every range (most systems will only have one), create an
# array of controllers and set their parameters to match their
# address mapping in the case of a DRAM
for r in system.mem_ranges:
cur_mem_size += r.size()
# print "cur_mem_size is %d." % cur_mem_size
for i in xrange(nbr_mem_ctrls):
# Create an instance so we can figure out the address
# mapping and row-buffer size
if cur_mem_size <= mem_size: # dram controller
ctrl = cls()
else: # pram controller
ctrl = nvcls()
ctrl.image_file = pram_image(options.pram_image)
# print "pram image name is %s." % ctrl.image_file
# Only do this for DRAMs
if issubclass(cls, m5.objects.DRAMCtrl):
# Inform each controller how many channels to account
# for
ctrl.channels = nbr_mem_ctrls
# If the channel bits are appearing after the column
# bits, we need to add the appropriate number of bits
# for the row buffer size
if ctrl.addr_mapping.value == 'RoRaBaChCo':
# This computation only really needs to happen
# once, but as we rely on having an instance we
# end up having to repeat it for each and every
# one
rowbuffer_size = ctrl.device_rowbuffer_size.value * \
ctrl.devices_per_rank.value
intlv_low_bit = int(math.log(rowbuffer_size, 2)) - 1
# We got all we need to configure the appropriate address
# range
ctrl.range = m5.objects.AddrRange(r.start, size = r.size(),
intlvHighBit = \
intlv_low_bit + intlv_bits,
intlvBits = intlv_bits,
intlvMatch = i)
mem_ctrls.append(ctrl)
system.mem_ctrls = mem_ctrls
# Connect the controllers to the membus
for i in xrange(len(system.mem_ctrls)):
system.mem_ctrls[i].port = system.membus.master
| 39.528302
| 78
| 0.664797
|
316f55f1cd3e8179efa0050ca9bca0b11b6f8acb
| 380
|
py
|
Python
|
tools/perf/benchmarks/memory_pressure.py
|
shaochangbin/chromium-crosswalk
|
634d34e4cf82b4f7400357c53ec12efaffe94add
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2019-01-16T03:57:28.000Z
|
2021-01-23T15:29:45.000Z
|
tools/perf/benchmarks/memory_pressure.py
|
shaochangbin/chromium-crosswalk
|
634d34e4cf82b4f7400357c53ec12efaffe94add
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
tools/perf/benchmarks/memory_pressure.py
|
shaochangbin/chromium-crosswalk
|
634d34e4cf82b4f7400357c53ec12efaffe94add
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1
|
2017-03-15T13:21:38.000Z
|
2017-03-15T13:21:38.000Z
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import test
from measurements import memory_pressure
class MemoryPressure(test.Test):
test = memory_pressure.MemoryPressure
page_set = 'page_sets/typical_25.json'
options = {'pageset_repeat': 6}
| 31.666667
| 72
| 0.784211
|
20f8feb7268b4ad287c1e4a159d773297a837425
| 446
|
py
|
Python
|
pyai/metrics.py
|
bpesquet/pyai
|
09f6e9989c9c3d3619b45a0aab2bd363141dfe58
|
[
"MIT"
] | null | null | null |
pyai/metrics.py
|
bpesquet/pyai
|
09f6e9989c9c3d3619b45a0aab2bd363141dfe58
|
[
"MIT"
] | null | null | null |
pyai/metrics.py
|
bpesquet/pyai
|
09f6e9989c9c3d3619b45a0aab2bd363141dfe58
|
[
"MIT"
] | null | null | null |
"""
Metrics for distances and losses
"""
import numpy as np
def euclidean_distance(a, b):
"""
Euclidean distance: https://en.wikipedia.org/wiki/Euclidean_distance
"""
squared_diff = (a - b) ** 2
sum_squared_diff = np.sum(squared_diff)
return np.sqrt(sum_squared_diff)
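# For instance, euclidean_distance(np.array([0, 0]), np.array([3, 4]))
# returns 5.0 (the classic 3-4-5 right triangle).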
def mean_squared_error(y_true, y_pred):
"""Compute the Mean Squared Error between two vectors."""
return np.square(y_true - y_pred).mean()
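# For instance, mean_squared_error(np.array([1.0, 2.0]), np.array([1.0, 4.0]))
# returns 2.0 (squared errors 0 and 4, averaged).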
| 20.272727
| 72
| 0.686099
|
5db19092d846203da3b338833d9e6abf8f994fc7
| 406
|
py
|
Python
|
plugins/keepkey/cmdline.py
|
theubiquitous/electrum-wallet
|
c2b30d270b724887267b5cd8814bcad04f567cf2
|
[
"MIT"
] | 4
|
2018-08-16T16:02:21.000Z
|
2021-02-14T06:19:10.000Z
|
plugins/keepkey/cmdline.py
|
theubiquitous/electrum-wallet
|
c2b30d270b724887267b5cd8814bcad04f567cf2
|
[
"MIT"
] | 6
|
2018-05-25T01:46:32.000Z
|
2021-11-15T17:47:51.000Z
|
plugins/keepkey/cmdline.py
|
theubiquitous/electrum-wallet
|
c2b30d270b724887267b5cd8814bcad04f567cf2
|
[
"MIT"
] | 4
|
2018-05-22T09:04:09.000Z
|
2021-04-21T22:24:05.000Z
|
from electrum_lcc.plugins import hook
from .keepkey import KeepKeyPlugin
from ..hw_wallet import CmdLineHandler
class Plugin(KeepKeyPlugin):
handler = CmdLineHandler()
@hook
def init_keystore(self, keystore):
if not isinstance(keystore, self.keystore_class):
return
keystore.handler = self.handler
def create_handler(self, window):
return self.handler
| 27.066667
| 57
| 0.714286
|
4849326d5f2864496a94bf5145f71dee5cdb0069
| 379
|
py
|
Python
|
sphinxcontrib/needs/services/base.py
|
David-Le-Nir/sphinxcontrib-needs
|
fe809445505fa1e9bf5963eab1d6283dad405e92
|
[
"MIT"
] | 1
|
2022-03-24T08:55:28.000Z
|
2022-03-24T08:55:28.000Z
|
sphinxcontrib/needs/services/base.py
|
David-Le-Nir/sphinxcontrib-needs
|
fe809445505fa1e9bf5963eab1d6283dad405e92
|
[
"MIT"
] | null | null | null |
sphinxcontrib/needs/services/base.py
|
David-Le-Nir/sphinxcontrib-needs
|
fe809445505fa1e9bf5963eab1d6283dad405e92
|
[
"MIT"
] | null | null | null |
from sphinxcontrib.needs.logging import get_logger
class BaseService:
def __init__(self, *args, **kwargs):
self.log = get_logger(__name__)
def request(self, *args, **kwargs):
raise NotImplementedError("Must be implemented by the service!")
def debug(self, *args, **kwargs):
raise NotImplementedError("Must be implemented by the service!")
| 29.153846
| 72
| 0.693931
|
b801a047bde1849af47a8728d0e8ff8893c7240d
| 556
|
py
|
Python
|
code/src/run_prediction_day1_to_day2.py
|
hiroyasuando/CH_traffic
|
b653e9ee02f6b1a88097eaae736d51cad3f6c77b
|
[
"MIT"
] | null | null | null |
code/src/run_prediction_day1_to_day2.py
|
hiroyasuando/CH_traffic
|
b653e9ee02f6b1a88097eaae736d51cad3f6c77b
|
[
"MIT"
] | null | null | null |
code/src/run_prediction_day1_to_day2.py
|
hiroyasuando/CH_traffic
|
b653e9ee02f6b1a88097eaae736d51cad3f6c77b
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import load_data
from utils import *
############
target = '05e'
rlist = ['05e']  # remove_list (defined here, but calc_W below is called with remove_list=None)
delay = 7
interval = 17
p = 0.0
depth = 2
depth_ar = 6
############
## load data
# df_sum1 : data on day 1
# df_sum2 : data on day2
df_sum1, df_sum2 = load_data.load_data(interval=interval,p=p)
# Learn W_out on day 1
W1 = calc_W(df_sum1, target=target, remove_list=None, delay=delay,depth=depth)
# prediction from W of day 1 to day 2
print('cross 14->18 NRMSE : ')
predict_by_W(df_sum2, W1, target, delay,depth,plot=True)
| 19.172414
| 78
| 0.688849
|
b8350e7f0e6f14a476745dfb1acb3f64762bf493
| 4,813
|
py
|
Python
|
docs/source/scripts/api/solutions/exercise2_question8.py
|
EBI-Metagenomics/mgnify-ebi-2020
|
ff33f148e660a2ecfe464310a8ea92d252ef1814
|
[
"Apache-2.0"
] | 1
|
2021-06-01T13:59:07.000Z
|
2021-06-01T13:59:07.000Z
|
docs/source/scripts/api/solutions/exercise2_question8.py
|
EBI-Metagenomics/mgnify-ebi-2020
|
ff33f148e660a2ecfe464310a8ea92d252ef1814
|
[
"Apache-2.0"
] | null | null | null |
docs/source/scripts/api/solutions/exercise2_question8.py
|
EBI-Metagenomics/mgnify-ebi-2020
|
ff33f148e660a2ecfe464310a8ea92d252ef1814
|
[
"Apache-2.0"
] | 1
|
2021-11-15T04:17:41.000Z
|
2021-11-15T04:17:41.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import csv
from jsonapi_client import Session, Filter
from plotnine import *
import pandas
API_BASE = "https://www.ebi.ac.uk/metagenomics/api/v1"
TAX_RANK = "phylum"
# MGYS00002474 (DRP001073) Metabolically active microbial communities
# in marine sediment under high-CO2 and low-pH extremes
study_accession = "MGYS00002474"
# MGYS00002421 (ERP009568) Prokaryotic microbiota associated to the digestive
# cavity of the jellyfish Cotylorhiza tuberculata
# study_accession = "MGYS00002421"
# MGYS00002371 (DRP000490) Porcupine Seabight 16S Ribosomal RNA
# study_accession = "MGYS00002371"
# MGYS00002441 EMG produced TPA metagenomics assembly of
# the doi: 10.3389/fmicb.2016.00579
# study_accession = "MGYS00002441"
# MGYS00002394 (SRP051741) Subgingival plaque and peri-implant biofilm
# study_accession = "MGYS00002394"
# MGYS00001211 (SRP076746) Human gut metagenome Metagenome
# study_accession = "MGYS00001211"
# MGYS00000601 (ERP013908) Assessment of Bacterial DNA Extraction Procedures for Metagenomic Sequencing Evaluated
# on Different Environmental Matrices.
# study_accession = "MGYS00000601"
# MGYS00002115
# The study includes fungal genetic diversity assessment by ITS-1 next generation sequencing (NGS) analyses
# study_accession = "MGYS00002115"
resource = "studies/" + study_accession + "/analyses"
rows = []
with Session(API_BASE) as session:
analyses = session.get(resource).resources
analyses_accs = [a.accession for a in analyses]
for analysis_accession in analyses_accs:
tax_annotations = session.get(
"/".join(["analyses", analysis_accession, "taxonomy", "ssu"])
).resources
for t in tax_annotations:
if t.hierarchy.get(TAX_RANK):
rows.append(
{
"analysis": analysis_accession,
"study": study_accession,
TAX_RANK: t.hierarchy.get(TAX_RANK),
"count": t.count,
"rel_abundance": 0, # this will be filled afterwards
},
)
data_frame = pandas.DataFrame(rows)
# let's aggregate by Phyla
data_frame = data_frame.groupby(["analysis", TAX_RANK])["count"].sum().reset_index()
# let's get the relative abundance of each phyla
for analysis, frame in data_frame.groupby("analysis"):
data_frame.loc[data_frame["analysis"] == analysis, "rel_abundance"] = (
frame["count"] / frame["count"].sum() * 100
)
# let's save a copy in csv
data_frame.to_csv(study_accession + "_" + TAX_RANK + ".csv")
# let's aggregate the abundances to reduce the noise, let's keep the top 20
# and move the small ones to the Other category
top20 = sorted(
list(
data_frame.groupby([TAX_RANK])["rel_abundance"]
.agg("sum")
.nlargest(20)
.index
)
)
for analysis, frame in data_frame.groupby("analysis"):
top_rows = data_frame.loc[
(data_frame["analysis"] == analysis) & (data_frame[TAX_RANK].isin(top20)),
"rel_abundance",
]
# The Other aggregated row
data_frame = data_frame.append(
{
"analysis": analysis,
"study": study_accession,
"rel_abundance": 100 - top_rows.sum(),
TAX_RANK: "Other",
"count": 0,
},
ignore_index=True,
)
# keep only top20 or Other
data_frame = data_frame.drop(
data_frame[
(~data_frame[TAX_RANK].isin(top20)) & (data_frame[TAX_RANK] != "Other")
].index
)
top20.insert(0, "Other")
data_frame[TAX_RANK] = pandas.Categorical(data_frame[TAX_RANK], top20)
data_frame = data_frame.sort_values(TAX_RANK)
gb = geom_bar(stat="identity", colour="darkgrey", size=0.3, width=0.6, alpha=0.7)
gg = (
ggplot(
data_frame,
aes(
x=data_frame["analysis"],
y=data_frame["rel_abundance"],
fill=TAX_RANK,
),
)
+ gb
+ ggtitle(study_accession)
+ ylab("Relative abundance (%)")
+ theme(panel_grid_major=element_blank(), panel_grid_minor=element_blank())
+ scale_fill_hue()
+ theme(axis_text_x=element_text(angle=90))
+ theme(axis_title_y=element_text(size=10))
+ theme(axis_text_y=element_text(size=10))
+ theme(axis_title_x=element_blank())
+ theme(axis_text_x=element_text(size=10))
)
ggsave(
filename=study_accession + "_" + TAX_RANK + "_plot.png",
plot=gg,
device="png",
dpi=600,
)
| 31.457516
| 113
| 0.61978
|
2d70ef23dad0fb3518ba96114bfa425974262ca8
| 1,429
|
py
|
Python
|
partition.py
|
kirito25/integer-partition
|
73aa0a2683aec2585d2171302c3e913a804719bc
|
[
"MIT"
] | null | null | null |
partition.py
|
kirito25/integer-partition
|
73aa0a2683aec2585d2171302c3e913a804719bc
|
[
"MIT"
] | null | null | null |
partition.py
|
kirito25/integer-partition
|
73aa0a2683aec2585d2171302c3e913a804719bc
|
[
"MIT"
] | null | null | null |
import sys
"""
Generates all the partitions of number n.
If doall is true it will also generate the
partitions for n-1,...,0
"""
def partitions(n, doall=False):
# base case of recursion: zero
if n == 0:
yield [0]
return
# recurively build partition
for p in partitions(n - 1, doall):
if doall:
yield p
yield [1] + p
if p and (len(p) < 2 or p[1] > p[0]):
yield [p[0] + 1] + p[1:]
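# For instance, list(partitions(3)) yields
# [[1, 1, 1, 0], [1, 1, 1], [1, 2], [3]]; the trailing 0 comes from the
# base case and is harmless when summing below.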
"""
Takes an iterable of consisting of lists of number
and calculate the product and keeps the maximum using
the sum of the numbers as a key
"""
def product(numbers):
num_map = {}
for i in numbers:
key = sum(i)
x = 1
for j in i:
x *= j
        try:
            if x > num_map[key][0][1]:
                num_map[key] = [(i, x)]
            if x == num_map[key][0][1] and (i, x) not in num_map[key]:
                num_map[key] += [(i, x)]
        except KeyError:
            num_map[key] = [(i, x)]
for key in num_map.keys():
for val in num_map[key]:
i, x = val
print("Number %s is the sum of %s whose product is %s" % (key, i, x))
print('')
def main():
doall = False
if len(sys.argv) > 2 and sys.argv[2] == 'doall':
doall = True
result = partitions(int(sys.argv[1]), doall)
product(result)
if __name__ == '__main__':
main()
| 19.053333
| 81
| 0.511547
|
643032abe0052bba6515567b51f8597e38797bcb
| 2,109
|
py
|
Python
|
sos/sos/doctype/salary_payout/salary_payout.py
|
tushar2488/sos
|
d97e57453f98394a0957a5a00675b44b3e3c37df
|
[
"MIT"
] | null | null | null |
sos/sos/doctype/salary_payout/salary_payout.py
|
tushar2488/sos
|
d97e57453f98394a0957a5a00675b44b3e3c37df
|
[
"MIT"
] | null | null | null |
sos/sos/doctype/salary_payout/salary_payout.py
|
tushar2488/sos
|
d97e57453f98394a0957a5a00675b44b3e3c37df
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Youtility Technologies Pvt. Ltd and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe import _
from frappe.model.document import Document
from frappe.utils import add_days, cint, cstr, flt, getdate, rounded, date_diff, money_in_words
class SalaryPayout(Document):
def validate(self):
company_currency = erpnext.get_company_currency(self.company)
self.total_in_words = money_in_words(self.total, company_currency)
def before_submit(self):
#self.get_employees_salary_details()
pass
def get_employees_salary_details(self):
self.set('empdata', [])
ss_list = self.get_ss_list()
#print("::::::::::::: Salary Slip List::::::::::::: %s" % ss_list)
if not ss_list:
frappe.throw(_("No salary slips created for the mentioned criteria"))
        self.total = 0
        sp_list = self.validate_payout_already_done()
print("########### sp_list ####### %s ###" % sp_list)
for d in ss_list:
if d.employee not in sp_list:
self.total += d.net_pay
self.append('empdata', {'employee':d.employee,
'salary_slip_name': d.name,
'payroll_entry': d.payroll_entry,
'net_pay': d.net_pay,
'rounded_total': d.rounded_total
})
def get_ss_list(self):
""" Returns list of salary slips based on selected criteria """
ss_list = frappe.db.sql("""
SELECT * FROM `tabSalary Slip` t1
WHERE t1.docstatus = 1
and t1.start_date >= '%s'
and t1.end_date <= '%s'
and t1.bank_name = '%s'
ORDER BY t1.bank_account_no DESC """% (self.start_date, self.end_date, self.bank_name), as_dict=True)
return ss_list
def validate_payout_already_done(self):
""" Returns list of salary slips based on selected criteria """
        sp = []
sp_list = frappe.db.sql("""
SELECT employee FROM `tabSalary Payout Detail` s1
WHERE s1.docstatus = 1
and s1.start_date >= '%s'
and s1.end_date <= '%s'
and s1.bank_name = '%s' """% (self.start_date, self.end_date, self.bank_name), as_dict=True)
if sp_list:
            sp = [x.employee for x in sp_list]
return sp
| 34.016129
| 104
| 0.69559
|
211a5a7c0c3cd5944d505deb1a024d3c2ff1c717
| 863
|
py
|
Python
|
src/adventofcode2021/solutions/day01.py
|
RoelAdriaans/adventofcode2021
|
d9c01ca9b855ca7e15c7d334c715d322601b8e19
|
[
"MIT"
] | null | null | null |
src/adventofcode2021/solutions/day01.py
|
RoelAdriaans/adventofcode2021
|
d9c01ca9b855ca7e15c7d334c715d322601b8e19
|
[
"MIT"
] | 26
|
2021-11-30T21:39:56.000Z
|
2022-03-31T04:31:32.000Z
|
src/adventofcode2021/solutions/day01.py
|
RoelAdriaans/adventofcode2021
|
d9c01ca9b855ca7e15c7d334c715d322601b8e19
|
[
"MIT"
] | null | null | null |
from adventofcode2021.utils.abstract import FileReaderSolution
class Day01:
pass
class Day01PartA(Day01, FileReaderSolution):
def solve(self, input_data: str) -> int:
num_changes = 0
depths = [int(n) for n in input_data.split("\n") if n]
last_number = depths[0]
for depth in depths:
if depth > last_number:
num_changes += 1
last_number = depth
return num_changes
class Day01PartB(Day01, FileReaderSolution):
def solve(self, input_data: str) -> int:
depths = [int(n) for n in input_data.split("\n") if n]
total_difference = 0
for idx in range(len(depths)):
a = depths[idx : idx + 3]
b = depths[idx + 1 : idx + 4]
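            # sum(b) - sum(a) reduces to depths[idx + 3] - depths[idx],
            # since the two windows share their middle elements.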
if sum(a) < sum(b):
total_difference += 1
return total_difference
| 26.151515
| 62
| 0.578216
|
e03aca64fce9a69296cd837c5bf4e3829e5e5ba8
| 6,160
|
py
|
Python
|
examples/pytorch/train_pytorch_mnist.py
|
ainoam/clearml-serving
|
4c55b123435829da615b3190ddb29e60072aef8d
|
[
"Apache-2.0"
] | null | null | null |
examples/pytorch/train_pytorch_mnist.py
|
ainoam/clearml-serving
|
4c55b123435829da615b3190ddb29e60072aef8d
|
[
"Apache-2.0"
] | null | null | null |
examples/pytorch/train_pytorch_mnist.py
|
ainoam/clearml-serving
|
4c55b123435829da615b3190ddb29e60072aef8d
|
[
"Apache-2.0"
] | null | null | null |
# ClearML - Example of pytorch with tensorboard>=v1.14
#
from __future__ import print_function
import argparse
import os
from tempfile import gettempdir
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
from torch.utils.tensorboard import SummaryWriter
from clearml import Task, OutputModel
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
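        # 1x28x28 input -> conv1+pool -> 10x12x12 -> conv2+pool -> 20x4x4 = 320 features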
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def train(model, epoch, train_loader, args, optimizer, writer):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data.item()))
niter = epoch*len(train_loader)+batch_idx
writer.add_scalar('Train/Loss', loss.data.item(), niter)
def test(model, test_loader, args, optimizer, writer):
model.eval()
test_loss = 0
correct = 0
for niter, (data, target) in enumerate(test_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').data.item() # sum up batch loss
pred = output.data.max(1)[1] # get the index of the max log-probability
pred = pred.eq(target.data).cpu().sum()
writer.add_scalar('Test/Loss', pred, niter)
correct += pred
if niter % 100 == 0:
writer.add_image('test', data[0, :, :, :], niter)
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
# Connecting ClearML with the current process,
# from here on everything is logged automatically
task = Task.init(project_name='serving examples', task_name='train pytorch model', output_uri=True) # noqa: F841
writer = SummaryWriter('runs')
writer.add_text('TEXT', 'This is some text', 0)
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
kwargs = {'num_workers': 4, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(datasets.MNIST('./data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(datasets.MNIST('./data', train=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = Net()
if args.cuda:
model.cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
for epoch in range(1, args.epochs + 1):
train(model, epoch, train_loader, args, optimizer, writer)
# store in a way we can easily load into triton without having to have the model class
torch.jit.script(model).save('serving_model.pt')
OutputModel().update_weights('serving_model.pt')
test(model, test_loader, args, optimizer, writer)
if __name__ == "__main__":
main()
| 43.076923
| 117
| 0.5875
|
53320bd8ba173308f1c1b5daade3c36264526a54
| 1,773
|
py
|
Python
|
auv/auv_marine.py
|
PanosMallioris/Autonomous-Marine-Exploration-Water-Simulation-ROS-GAZEBO
|
cde420f50f49471614775198dcb142311c79725b
|
[
"Apache-2.0"
] | 4
|
2019-03-06T21:09:47.000Z
|
2019-10-17T20:26:43.000Z
|
auv/auv_marine.py
|
PanosMallioris/Autonomous-Marine-Exploration-Water-Simulation-ROS-GAZEBO
|
cde420f50f49471614775198dcb142311c79725b
|
[
"Apache-2.0"
] | 1
|
2019-02-25T16:03:11.000Z
|
2019-03-06T21:05:57.000Z
|
auv/auv_marine.py
|
PanosMallioris/Autonomous-Marine-Exploration-Water-Simulation-ROS-GAZEBO
|
cde420f50f49471614775198dcb142311c79725b
|
[
"Apache-2.0"
] | 1
|
2021-04-20T15:09:22.000Z
|
2021-04-20T15:09:22.000Z
|
import rospy
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
import time
def scan_callback(msg):
global g_range_ahead
global l_range
global r_range
g_range_ahead = min(msg.ranges)
r_range = msg.ranges[0]
l_range = msg.ranges[189]
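    # indices 0 and 189 are read as the right/left extremes of the scan
    # (assumed from the variable names; this scanner appears to report 190 beams)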
print "min range ahead : %0.1f" % g_range_ahead
print "range left : %0.1f" % l_range
print "range right : %0.1f" % r_range
print len(msg.ranges)
g_range_ahead = 1  # anything to start
l_range = 1  # sensible defaults until the first LaserScan arrives
r_range = 1
scan_sub = rospy.Subscriber('/scan', LaserScan, scan_callback)
cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
rospy.init_node('wander')
#state_change_time = rospy.Time.now()
driving_forward = True
rate = rospy.Rate(10)  # execution rate of the controller loop
while not rospy.is_shutdown():
twist = Twist()
if (l_range >= 1 and r_range >=1 and g_range_ahead >=1) :#driving_forward:
# BEGIN FORWARD
        twist.linear.x = 1  # or 1, depending on whether you want to do mapping or not
cmd_vel_pub.publish(twist)
else :
if r_range < 15:
twist.angular.z = 1.5 #turn left
twist.linear.x=0.25
cmd_vel_pub.publish(twist)
time.sleep(0.1)
twist.angular.z = 0
#driving_forward = False
#state_change_time = rospy.Time.now() + rospy.Duration(1)
# END FORWARD
elif r_range > 15 :
twist.angular.z = -1.5 #turn right
cmd_vel_pub.publish(twist)
time.sleep(0.1) # we're not driving_forward
twist.angular.z=0
else :
twist.linear.x= 0.2
cmd_vel_pub.publish(twist)
time.sleep(0.5)
twist.linear.x= 0.0
twist.angular.z = 0.0
#cmd_vel_pub.publish(twist)
    rate.sleep()  # ROS computes the remaining time to sleep between loop iterations
# END ALL
| 29.065574
| 78
| 0.658206
|
087aefdccb5e00290c38dfa2f708d634cb3252ce
| 35,885
|
py
|
Python
|
rx3/__init__.py
|
samiur/RxPY
|
ea6b3554ab06cfc70e28b532c0a54b910b6ee470
|
[
"MIT"
] | null | null | null |
rx3/__init__.py
|
samiur/RxPY
|
ea6b3554ab06cfc70e28b532c0a54b910b6ee470
|
[
"MIT"
] | null | null | null |
rx3/__init__.py
|
samiur/RxPY
|
ea6b3554ab06cfc70e28b532c0a54b910b6ee470
|
[
"MIT"
] | null | null | null |
# pylint: disable=too-many-lines,redefined-outer-name,redefined-builtin
from asyncio.futures import Future as _Future
from typing import Iterable, Callable, Any, Optional, Union, Mapping
from .core import Observable, pipe, typing
from .internal.utils import alias
# Please make sure the version here remains the same as in project.cfg
__version__ = '1.0.0'
def amb(*sources: Observable) -> Observable:
"""Propagates the observable sequence that emits first.
.. marble::
:alt: amb
---8--6--9-----------|
--1--2--3---5--------|
----------10-20-30---|
[ amb() ]
--1--2--3---5--------|
Example:
>>> winner = rx.amb(xs, ys, zs)
Args:
sources: Sequence of observables to monitor for first emission.
Returns:
An observable sequence that surfaces any of the given sequences,
whichever emitted the first element.
"""
from .core.observable.amb import _amb
return _amb(*sources)
def case(mapper: Callable[[], Any],
sources: Mapping,
default_source: Optional[Union[Observable, _Future]] = None
) -> Observable:
"""Uses mapper to determine which source in sources to use.
.. marble::
:alt: case
--1---------------|
a--1--2--3--4--|
b--10-20-30---|
[case(mapper, { 1: a, 2: b })]
---1--2--3--4--|
Examples:
>>> res = rx.case(mapper, { '1': obs1, '2': obs2 })
>>> res = rx.case(mapper, { '1': obs1, '2': obs2 }, obs0)
Args:
mapper: The function which extracts the value for to test in a
case statement.
sources: An object which has keys which correspond to the case
statement labels.
default_source: [Optional] The observable sequence or Future that will
be run if the sources are not matched. If this is not provided,
it defaults to :func:`empty`.
Returns:
An observable sequence which is determined by a case statement.
"""
from .core.observable.case import _case
return _case(mapper, sources, default_source)
def catch(*sources: Observable) -> Observable:
"""Continues observable sequences which are terminated with an
exception by switching over to the next observable sequence.
.. marble::
:alt: catch
---1---2---3-*
a-7-8-|
[ catch(a) ]
---1---2---3---7-8-|
Examples:
>>> res = rx.catch(xs, ys, zs)
Args:
sources: Sequence of observables.
Returns:
An observable sequence containing elements from consecutive observables
from the sequence of sources until one of them terminates successfully.
"""
from .core.observable.catch import _catch_with_iterable
return _catch_with_iterable(sources)
def catch_with_iterable(sources: Iterable[Observable]) -> Observable:
"""Continues observable sequences that are terminated with an
exception by switching over to the next observable sequence.
.. marble::
:alt: catch
---1---2---3-*
a-7-8-|
[ catch(a) ]
---1---2---3---7-8-|
Examples:
        >>> res = rx.catch_with_iterable([xs, ys, zs])
        >>> res = rx.catch_with_iterable(src for src in [xs, ys, zs])
Args:
sources: An Iterable of observables; thus, a generator can also
be used here.
Returns:
An observable sequence containing elements from consecutive observables
from the sequence of sources until one of them terminates successfully.
"""
from .core.observable.catch import _catch_with_iterable
return _catch_with_iterable(sources)
def create(subscribe: typing.Subscription) -> Observable:
"""Creates an observable sequence object from the specified
subscription function.
.. marble::
:alt: create
[ create(a) ]
---1---2---3---4---|
Args:
subscribe: Subscription function.
Returns:
An observable sequence that can be subscribed to via the given
subscription function.
"""
return Observable(subscribe)
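# A minimal sketch of a subscription function (assuming the package is
# importable as ``rx`` and that ``rx.disposable.Disposable`` is
# available, as in stock RxPY): push values, signal completion, and
# return a disposable that cancels any pending work.
#
# >>> from rx.disposable import Disposable
# >>> def subscribe(observer, scheduler=None):
# ...     observer.on_next(1)
# ...     observer.on_next(2)
# ...     observer.on_completed()
# ...     return Disposable()
# >>> rx.create(subscribe).subscribe(print)
# 1
# 2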
def combine_latest(*sources: Observable) -> Observable:
"""Merges the specified observable sequences into one observable
sequence by creating a tuple whenever any of the observable
sequences emits an element.
.. marble::
:alt: combine_latest
---a-----b--c------|
--1---2--------3---|
[ combine_latest() ]
---a1-a2-b2-c2-c3--|
Examples:
>>> obs = rx.combine_latest(obs1, obs2, obs3)
Args:
sources: Sequence of observables.
Returns:
An observable sequence containing the result of combining elements from
each source in given sequence.
"""
from .core.observable.combinelatest import _combine_latest
return _combine_latest(*sources)
def concat(*sources: Observable) -> Observable:
"""Concatenates all of the specified observable sequences.
.. marble::
:alt: concat
---1--2--3--|
--6--8--|
[ concat() ]
---1--2--3----6--8-|
Examples:
>>> res = rx.concat(xs, ys, zs)
Args:
sources: Sequence of observables.
Returns:
An observable sequence that contains the elements of each source in
the given sequence, in sequential order.
"""
from .core.observable.concat import _concat_with_iterable
return _concat_with_iterable(sources)
def concat_with_iterable(sources: Iterable[Observable]) -> Observable:
"""Concatenates all of the specified observable sequences.
.. marble::
:alt: concat
---1--2--3--|
--6--8--|
[ concat() ]
---1--2--3----6--8-|
Examples:
>>> res = rx.concat_with_iterable([xs, ys, zs])
        >>> res = rx.concat_with_iterable(src for src in [xs, ys, zs])
Args:
sources: An Iterable of observables; thus, a generator can also
be used here.
Returns:
An observable sequence that contains the elements of each given
sequence, in sequential order.
"""
from .core.observable.concat import _concat_with_iterable
return _concat_with_iterable(sources)
def defer(factory: Callable[[typing.Scheduler], Union[Observable, _Future]]
) -> Observable:
"""Returns an observable sequence that invokes the specified
factory function whenever a new observer subscribes.
.. marble::
:alt: defer
[ defer(1,2,3) ]
---1--2--3--|
---1--2--3--|
Example:
>>> res = rx.defer(lambda: of(1, 2, 3))
Args:
factory: Observable factory function to invoke for each observer
which invokes :func:`subscribe() <rx.Observable.subscribe>` on
the resulting sequence.
Returns:
An observable sequence whose observers trigger an invocation
of the given factory function.
"""
from .core.observable.defer import _defer
return _defer(factory)
def empty(scheduler: Optional[typing.Scheduler] = None) -> Observable:
"""Returns an empty observable sequence.
.. marble::
:alt: empty
[ empty() ]
--|
Example:
>>> obs = rx.empty()
Args:
scheduler: [Optional] Scheduler instance to send the termination call
on. By default, this will use an instance of
:class:`ImmediateScheduler <rx.scheduler.ImmediateScheduler>`.
Returns:
An observable sequence with no elements.
"""
from .core.observable.empty import _empty
return _empty(scheduler)
def for_in(values: Iterable[Any], mapper: typing.Mapper) -> Observable:
"""Concatenates the observable sequences obtained by running the
specified result mapper for each element in the specified values.
.. marble::
:alt: for_in
a--1--2-|
b--10--20-|
[for_in((a, b), lambda i: i+1)]
---2--3--11--21-|
Note:
This is just a wrapper for
:func:`rx.concat(map(mapper, values)) <rx.concat>`
Args:
values: An Iterable of values to turn into an observable
source.
mapper: A function to apply to each item in the values list to turn
it into an observable sequence; this should return instances of
:class:`rx.Observable`.
Returns:
An observable sequence from the concatenated observable
sequences.
"""
return concat_with_iterable(map(mapper, values))
def from_callable(supplier: Callable[[], Any],
scheduler: Optional[typing.Scheduler] = None
) -> Observable:
"""Returns an observable sequence that contains a single element generated
by the given supplier, using the specified scheduler to send out observer
messages.
.. marble::
:alt: from_callable
[ from_callable() ]
--1--|
Examples:
>>> res = rx.from_callable(lambda: calculate_value())
>>> res = rx.from_callable(lambda: 1 / 0) # emits an error
Args:
supplier: Function which is invoked to obtain the single element.
scheduler: [Optional] Scheduler instance to schedule the values on.
If not specified, the default is to use an instance of
:class:`CurrentThreadScheduler <rx.scheduler.CurrentThreadScheduler>`.
Returns:
An observable sequence containing the single element obtained by
invoking the given supplier function.
"""
from .core.observable.returnvalue import _from_callable
return _from_callable(supplier, scheduler)
def from_callback(func: Callable,
mapper: Optional[typing.Mapper] = None
) -> Callable[[], Observable]:
"""Converts a callback function to an observable sequence.
Args:
func: Function with a callback as the last argument to
convert to an Observable sequence.
mapper: [Optional] A mapper which takes the arguments
from the callback to produce a single item to yield on
next.
Returns:
        A function that, when executed with the required arguments minus
the callback, produces an Observable sequence with a single
value of the arguments to the callback as a list.
"""
from .core.observable.fromcallback import _from_callback
return _from_callback(func, mapper)
def from_future(future: _Future) -> Observable:
"""Converts a Future to an Observable sequence
.. marble::
:alt: from_future
[ from_future() ]
------1|
Args:
future: A Python 3 compatible future.
https://docs.python.org/3/library/asyncio-task.html#future
http://www.tornadoweb.org/en/stable/concurrent.html#tornado.concurrent.Future
Returns:
An observable sequence which wraps the existing future success
and failure.
"""
from .core.observable.fromfuture import _from_future
return _from_future(future)
def from_iterable(iterable: Iterable, scheduler: Optional[typing.Scheduler] = None) -> Observable:
"""Converts an iterable to an observable sequence.
.. marble::
:alt: from_iterable
[ from_iterable(1,2,3) ]
---1--2--3--|
Example:
>>> rx.from_iterable([1,2,3])
Args:
iterable: An Iterable to change into an observable sequence.
scheduler: [Optional] Scheduler instance to schedule the values on.
If not specified, the default is to use an instance of
:class:`CurrentThreadScheduler <rx.scheduler.CurrentThreadScheduler>`.
Returns:
The observable sequence whose elements are pulled from the
given iterable sequence.
"""
from .core.observable.fromiterable import from_iterable as from_iterable_
return from_iterable_(iterable, scheduler)
from_ = alias('from_', 'Alias for :func:`rx.from_iterable`.', from_iterable)
from_list = alias('from_list', 'Alias for :func:`rx.from_iterable`.', from_iterable)
def from_marbles(string: str,
timespan: typing.RelativeTime = 0.1,
scheduler: Optional[typing.Scheduler] = None,
lookup: Optional[Mapping] = None,
error: Optional[Exception] = None
) -> Observable:
"""Convert a marble diagram string to a cold observable sequence, using
an optional scheduler to enumerate the events.
.. marble::
:alt: from_marbles
[ from_marbles(-1-2-3-) ]
-1-2-3-|
Each character in the string will advance time by timespan
(except for space). Characters that are not special (see the table below)
will be interpreted as a value to be emitted. Numbers will be cast
to int or float.
Special characters:
+------------+--------------------------------------------------------+
| :code:`-` | advance time by timespan |
+------------+--------------------------------------------------------+
| :code:`#` | on_error() |
+------------+--------------------------------------------------------+
| :code:`|` | on_completed() |
+------------+--------------------------------------------------------+
| :code:`(` | open a group of marbles sharing the same timestamp |
+------------+--------------------------------------------------------+
| :code:`)` | close a group of marbles |
+------------+--------------------------------------------------------+
| :code:`,` | separate elements in a group |
+------------+--------------------------------------------------------+
| <space> | used to align multiple diagrams, does not advance time |
+------------+--------------------------------------------------------+
In a group of elements, the position of the initial :code:`(` determines the
timestamp at which grouped elements will be emitted.
E.g. :code:`--(12,3,4)--` will emit 12, 3, 4 at 2 * timespan and then
advance virtual time by 8 * timespan.
Examples:
>>> from_marbles('--1--(2,3)-4--|')
>>> from_marbles('a--b--c-', lookup={'a': 1, 'b': 2, 'c': 3})
>>> from_marbles('a--b---#', error=ValueError('foo'))
Args:
string: String with marble diagram
timespan: [Optional] Duration of each character in seconds.
If not specified, defaults to :code:`0.1`.
        scheduler: [Optional] Scheduler to run the input sequence
            on. If not specified, defaults to the subscribe scheduler
            if defined, else to an instance of
            :class:`NewThreadScheduler <rx.scheduler.NewThreadScheduler>`.
lookup: [Optional] A dict used to convert an element into a specified
value. If not specified, defaults to :code:`{}`.
error: [Optional] Exception that will be use in place of the :code:`#`
symbol. If not specified, defaults to :code:`Exception('error')`.
Returns:
The observable sequence whose elements are pulled from the
given marble diagram string.
"""
from .core.observable.marbles import from_marbles as _from_marbles
return _from_marbles(string, timespan, lookup=lookup, error=error, scheduler=scheduler)
cold = alias('cold', 'Alias for :func:`rx.from_marbles`.', from_marbles)
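# A short worked reading of the diagram syntax above, with the default
# timespan of 0.1s: in '-1-(2,3)-#', 1 is emitted at 0.1s, 2 and 3 are
# emitted together at 0.3s, and the stream then errors.
#
# >>> rx.from_marbles('-1-(2,3)-#', error=ValueError('boom')) \
# ...     .subscribe(on_next=print, on_error=print)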
def generate_with_relative_time(initial_state: Any,
condition: typing.Predicate,
iterate: typing.Mapper,
time_mapper: Callable[[Any], typing.RelativeTime]
) -> Observable:
"""Generates an observable sequence by iterating a state from an
initial state until the condition fails.
.. marble::
:alt: generate_with_relative_time
[generate_with_relative_time()]
-1-2-3-4-|
Example:
>>> res = rx.generate_with_relative_time(0, lambda x: True, lambda x: x + 1, lambda x: 0.5)
Args:
initial_state: Initial state.
condition: Condition to terminate generation (upon returning
:code:`False`).
iterate: Iteration step function.
time_mapper: Time mapper function to control the speed of
values being produced each iteration, returning relative times, i.e.
either a :class:`float` denoting seconds, or an instance of
:class:`timedelta`.
Returns:
The generated sequence.
"""
from .core.observable.generatewithrelativetime import _generate_with_relative_time
return _generate_with_relative_time(initial_state, condition, iterate, time_mapper)
def generate(initial_state: Any,
condition: typing.Predicate,
iterate: typing.Mapper
) -> Observable:
"""Generates an observable sequence by running a state-driven loop
producing the sequence's elements.
.. marble::
:alt: generate
[ generate() ]
-1-2-3-4-|
Example:
>>> res = rx.generate(0, lambda x: x < 10, lambda x: x + 1)
Args:
initial_state: Initial state.
condition: Condition to terminate generation (upon returning
:code:`False`).
iterate: Iteration step function.
Returns:
The generated sequence.
"""
from .core.observable.generate import _generate
return _generate(initial_state, condition, iterate)
def hot(string: str,
        timespan: typing.RelativeTime = 0.1,
        duetime: typing.AbsoluteOrRelativeTime = 0.0,
scheduler: Optional[typing.Scheduler] = None,
lookup: Optional[Mapping] = None,
error: Optional[Exception] = None
) -> Observable:
"""Convert a marble diagram string to a hot observable sequence, using
an optional scheduler to enumerate the events.
.. marble::
:alt: hot
[ from_marbles(-1-2-3-) ]
-1-2-3-|
-2-3-|
Each character in the string will advance time by timespan
(except for space). Characters that are not special (see the table below)
will be interpreted as a value to be emitted. Numbers will be cast
to int or float.
Special characters:
+------------+--------------------------------------------------------+
| :code:`-` | advance time by timespan |
+------------+--------------------------------------------------------+
| :code:`#` | on_error() |
+------------+--------------------------------------------------------+
| :code:`|` | on_completed() |
+------------+--------------------------------------------------------+
| :code:`(` | open a group of elements sharing the same timestamp |
+------------+--------------------------------------------------------+
| :code:`)` | close a group of elements |
+------------+--------------------------------------------------------+
| :code:`,` | separate elements in a group |
+------------+--------------------------------------------------------+
| <space> | used to align multiple diagrams, does not advance time |
+------------+--------------------------------------------------------+
In a group of elements, the position of the initial :code:`(` determines the
timestamp at which grouped elements will be emitted.
E.g. :code:`--(12,3,4)--` will emit 12, 3, 4 at 2 * timespan and then
advance virtual time by 8 * timespan.
Examples:
>>> hot("--1--(2,3)-4--|")
>>> hot("a--b--c-", lookup={'a': 1, 'b': 2, 'c': 3})
>>> hot("a--b---#", error=ValueError("foo"))
Args:
string: String with marble diagram
timespan: [Optional] Duration of each character in seconds.
If not specified, defaults to :code:`0.1`.
duetime: [Optional] Absolute datetime or timedelta from now that
determines when to start the emission of elements.
        scheduler: [Optional] Scheduler to run the input sequence
on. If not specified, defaults to an instance of
:class:`NewThreadScheduler <rx.scheduler.NewThreadScheduler>`.
lookup: [Optional] A dict used to convert an element into a specified
value. If not specified, defaults to :code:`{}`.
error: [Optional] Exception that will be use in place of the :code:`#`
symbol. If not specified, defaults to :code:`Exception('error')`.
Returns:
The observable sequence whose elements are pulled from the
given marble diagram string.
"""
from .core.observable.marbles import hot as _hot
return _hot(string, timespan, duetime, lookup=lookup, error=error, scheduler=scheduler)
def if_then(condition: Callable[[], bool],
then_source: Union[Observable, _Future],
else_source: Union[None, Observable, _Future] = None
) -> Observable:
"""Determines whether an observable collection contains values.
.. marble::
:alt: if_then
---1--2--3--|
--6--8--|
[ if_then() ]
---1--2--3--|
Examples:
>>> res = rx.if_then(condition, obs1)
>>> res = rx.if_then(condition, obs1, obs2)
Args:
condition: The condition which determines if the then_source or
else_source will be run.
then_source: The observable sequence or :class:`Future` that
will be run if the condition function returns :code:`True`.
else_source: [Optional] The observable sequence or :class:`Future`
that will be run if the condition function returns :code:`False`.
If this is not provided, it defaults to :func:`empty() <rx.empty>`.
Returns:
An observable sequence which is either the then_source or
else_source.
"""
from .core.observable.ifthen import _if_then
return _if_then(condition, then_source, else_source)
def interval(period: typing.RelativeTime, scheduler: Optional[typing.Scheduler] = None) -> Observable:
"""Returns an observable sequence that produces a value after each period.
.. marble::
:alt: interval
[ interval() ]
---1---2---3---4--->
Example:
>>> res = rx.interval(1.0)
Args:
period: Period for producing the values in the resulting sequence
(specified as a :class:`float` denoting seconds or an instance of
:class:`timedelta`).
scheduler: Scheduler to run the interval on. If not specified, an
instance of :class:`TimeoutScheduler <rx.scheduler.TimeoutScheduler>`
is used.
Returns:
An observable sequence that produces a value after each period.
"""
from .core.observable.interval import _interval
return _interval(period, scheduler)
def merge(*sources: Observable) -> Observable:
"""Merges all the observable sequences into a single observable sequence.
.. marble::
:alt: merge
---1---2---3---4-|
-a---b---c---d--|
[ merge() ]
-a-1-b-2-c-3-d-4-|
Example:
>>> res = rx.merge(obs1, obs2, obs3)
Args:
sources: Sequence of observables.
Returns:
The observable sequence that merges the elements of the
observable sequences.
"""
from .core.observable.merge import _merge
return _merge(*sources)
def never() -> Observable:
"""Returns a non-terminating observable sequence, which can be used
to denote an infinite duration (e.g. when using reactive joins).
.. marble::
:alt: never
[ never() ]
-->
Returns:
An observable sequence whose observers will never get called.
"""
from .core.observable.never import _never
return _never()
def of(*args: Any) -> Observable:
"""This method creates a new observable sequence whose elements are taken
from the arguments.
.. marble::
:alt: of
[ of(1,2,3) ]
---1--2--3--|
Note:
This is just a wrapper for
:func:`rx.from_iterable(args) <rx.from_iterable>`
Example:
>>> res = rx.of(1,2,3)
Args:
        args: The variable number of elements to emit from the observable.
Returns:
The observable sequence whose elements are pulled from the
given arguments
"""
return from_iterable(args)
def on_error_resume_next(*sources: Union[Observable, _Future]) -> Observable:
"""Continues an observable sequence that is terminated normally or
by an exception with the next observable sequence.
.. marble::
:alt: on_error_resume_next
--1--2--*
a--3--4--*
b--6-|
[on_error_resume_next(a,b)]
--1--2----3--4----6-|
Examples:
>>> res = rx.on_error_resume_next(xs, ys, zs)
Args:
sources: Sequence of sources, each of which is expected to be an
instance of either :class:`Observable` or :class:`Future`.
Returns:
An observable sequence that concatenates the source sequences,
even if a sequence terminates with an exception.
"""
from .core.observable.onerrorresumenext import _on_error_resume_next
return _on_error_resume_next(*sources)
def range(start: int,
stop: Optional[int] = None,
step: Optional[int] = None,
scheduler: Optional[typing.Scheduler] = None
) -> Observable:
"""Generates an observable sequence of integral numbers within a
specified range, using the specified scheduler to send out observer
messages.
.. marble::
:alt: range
[ range(4) ]
--0--1--2--3--|
Examples:
>>> res = rx.range(10)
>>> res = rx.range(0, 10)
>>> res = rx.range(0, 10, 1)
Args:
start: The value of the first integer in the sequence.
        stop: [Optional] The exclusive upper bound of the sequence. If
            omitted, ``start`` is taken as the upper bound and the
            sequence starts at 0, mirroring the built-in ``range``.
        step: [Optional] The difference between consecutive values.
            Defaults to 1.
scheduler: [Optional] The scheduler to schedule the values on. If not
specified, the default is to use an instance of
:class:`CurrentThreadScheduler <rx.scheduler.CurrentThreadScheduler>`.
Returns:
An observable sequence that contains a range of sequential
integral numbers.
"""
from .core.observable.range import _range
return _range(start, stop, step, scheduler)
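# A small usage sketch; the arguments mirror the built-in range():
#
# >>> rx.range(0, 10, 2).subscribe(print)   # emits 0, 2, 4, 6, 8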
def return_value(value: Any, scheduler: Optional[typing.Scheduler] = None) -> Observable:
"""Returns an observable sequence that contains a single element,
using the specified scheduler to send out observer messages.
There is an alias called 'just'.
.. marble::
:alt: return_value
[ return_value(4) ]
-4-|
Examples:
>>> res = rx.return_value(42)
>>> res = rx.return_value(42, timeout_scheduler)
Args:
value: Single element in the resulting observable sequence.
Returns:
An observable sequence containing the single specified element.
"""
from .core.observable.returnvalue import _return_value
return _return_value(value, scheduler)
just = alias('just', 'Alias for :func:`rx.return_value`.', return_value)
def repeat_value(value: Any = None, repeat_count: Optional[int] = None) -> Observable:
"""Generates an observable sequence that repeats the given element
the specified number of times.
.. marble::
:alt: repeat_value
[ repeat_value(4) ]
-4-4-4-4->
Examples:
>>> res = rx.repeat_value(42)
>>> res = rx.repeat_value(42, 4)
Args:
value: Element to repeat.
repeat_count: [Optional] Number of times to repeat the element.
If not specified, repeats indefinitely.
Returns:
An observable sequence that repeats the given element the
specified number of times.
"""
from .core.observable.repeat import _repeat_value
return _repeat_value(value, repeat_count)
def start(func: Callable, scheduler: Optional[typing.Scheduler] = None) -> Observable:
"""Invokes the specified function asynchronously on the specified
scheduler, surfacing the result through an observable sequence.
.. marble::
:alt: start
[ start(lambda i: return 4) ]
-4-|
-4-|
Note:
The function is called immediately, not during the subscription
of the resulting sequence. Multiple subscriptions to the
resulting sequence can observe the function's result.
Example:
>>> res = rx.start(lambda: pprint('hello'))
>>> res = rx.start(lambda: pprint('hello'), rx.Scheduler.timeout)
Args:
func: Function to run asynchronously.
scheduler: [Optional] Scheduler to run the function on. If
not specified, defaults to an instance of
:class:`TimeoutScheduler <rx.scheduler.TimeoutScheduler>`.
Returns:
An observable sequence exposing the function's result value,
or an exception.
"""
from .core.observable.start import _start
return _start(func, scheduler)
def start_async(function_async: Callable[[], _Future]) -> Observable:
"""Invokes the asynchronous function, surfacing the result through
an observable sequence.
.. marble::
:alt: start_async
[ start_async() ]
------1|
Args:
function_async: Asynchronous function which returns a :class:`Future`
to run.
Returns:
An observable sequence exposing the function's result value,
or an exception.
"""
from .core.observable.startasync import _start_async
return _start_async(function_async)
def throw(exception: Exception, scheduler: Optional[typing.Scheduler] = None) -> Observable:
"""Returns an observable sequence that terminates with an exception,
using the specified scheduler to send out the single OnError message.
.. marble::
:alt: throw
[ throw() ]
-*
Example:
>>> res = rx.throw(Exception('Error'))
Args:
exception: An object used for the sequence's termination.
scheduler: [Optional] Scheduler to schedule the error notification on.
If not specified, the default is to use an instance of
:class:`ImmediateScheduler <rx.scheduler.ImmediateScheduler>`.
Returns:
The observable sequence that terminates exceptionally with the
specified exception object.
"""
from .core.observable.throw import _throw
return _throw(exception, scheduler)
def timer(duetime: typing.AbsoluteOrRelativeTime, period: Optional[typing.RelativeTime] = None,
scheduler: Optional[typing.Scheduler] = None) -> Observable:
"""Returns an observable sequence that produces a value after
duetime has elapsed and then after each period.
.. marble::
:alt: timer
[ timer(2) ]
--0-|
Examples:
>>> res = rx.timer(datetime(...))
>>> res = rx.timer(datetime(...), 0.1)
>>> res = rx.timer(5.0)
>>> res = rx.timer(5.0, 1.0)
Args:
duetime: Absolute (specified as a datetime object) or relative time
(specified as a float denoting seconds or an instance of timedelta)
at which to produce the first value.
period: [Optional] Period to produce subsequent values (specified as a
float denoting seconds or an instance of timedelta).
If not specified, the resulting timer is not recurring.
scheduler: [Optional] Scheduler to run the timer on. If not specified,
the default is to use an instance of
:class:`TimeoutScheduler <rx.scheduler.TimeoutScheduler>`.
Returns:
An observable sequence that produces a value after due time has
elapsed and then each period.
"""
from .core.observable.timer import _timer
return _timer(duetime, period, scheduler)
def to_async(func: Callable, scheduler: Optional[typing.Scheduler] = None) -> Callable:
"""Converts the function into an asynchronous function. Each
invocation of the resulting asynchronous function causes an
invocation of the original synchronous function on the specified
scheduler.
.. marble::
:alt: to_async
[ to_async()() ]
------1|
Examples:
>>> res = rx.to_async(lambda x, y: x + y)(4, 3)
>>> res = rx.to_async(lambda x, y: x + y, Scheduler.timeout)(4, 3)
>>> res = rx.to_async(lambda x: log.debug(x), Scheduler.timeout)('hello')
Args:
func: Function to convert to an asynchronous function.
scheduler: [Optional] Scheduler to run the function on. If not
specified, defaults to an instance of
:class:`TimeoutScheduler <rx.scheduler.TimeoutScheduler>`.
Returns:
Asynchronous function.
"""
from .core.observable.toasync import _to_async
return _to_async(func, scheduler)
def using(resource_factory: Callable[[], typing.Disposable],
observable_factory: Callable[[typing.Disposable], Observable]
) -> Observable:
"""Constructs an observable sequence that depends on a resource
object, whose lifetime is tied to the resulting observable
sequence's lifetime.
Example:
        >>> res = rx.using(lambda: AsyncSubject(), lambda s: s)
Args:
resource_factory: Factory function to obtain a resource object.
observable_factory: Factory function to obtain an observable
sequence that depends on the obtained resource.
Returns:
An observable sequence whose lifetime controls the lifetime
of the dependent resource object.
"""
from .core.observable.using import _using
return _using(resource_factory, observable_factory)
def with_latest_from(*sources: Observable) -> Observable:
"""Merges the specified observable sequences into one observable
sequence by creating a :class:`tuple` only when the first
observable sequence produces an element.
.. marble::
:alt: with_latest_from
---1---2---3----4-|
--a-----b----c-d----|
[with_latest_from() ]
---1,a-2,a-3,b--4,d-|
Examples:
>>> obs = rx.with_latest_from(obs1)
        >>> obs = rx.with_latest_from(obs1, obs2, obs3)
Args:
sources: Sequence of observables.
Returns:
An observable sequence containing the result of combining
elements of the sources into a :class:`tuple`.
"""
from .core.observable.withlatestfrom import _with_latest_from
return _with_latest_from(*sources)
def zip(*args: Observable) -> Observable:
"""Merges the specified observable sequences into one observable
sequence by creating a :class:`tuple` whenever all of the
observable sequences have produced an element at a corresponding
index.
.. marble::
:alt: zip
--1--2---3-----4---|
-a----b----c-d-----|
[ zip() ]
--1,a-2,b--3,c-4,d-|
Example:
>>> res = rx.zip(obs1, obs2)
Args:
args: Observable sources to zip.
Returns:
An observable sequence containing the result of combining
elements of the sources as a :class:`tuple`.
"""
from .core.observable.zip import _zip
return _zip(*args)
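# A short sketch of the index-wise pairing described above; the shorter
# source bounds the output:
#
# >>> rx.zip(rx.of(1, 2, 3), rx.of('a', 'b')).subscribe(print)
# (1, 'a')
# (2, 'b')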
| 32.29973
| 102
| 0.594287
|
92b9dd4f6cbfff489b9f7f7a7b016de3350251b8
| 825
|
py
|
Python
|
tracker.py
|
sasthabhoot/score-tracker
|
914351be2ccbad36cfcf8dae7290e39200fccf34
|
[
"MIT"
] | null | null | null |
tracker.py
|
sasthabhoot/score-tracker
|
914351be2ccbad36cfcf8dae7290e39200fccf34
|
[
"MIT"
] | null | null | null |
tracker.py
|
sasthabhoot/score-tracker
|
914351be2ccbad36cfcf8dae7290e39200fccf34
|
[
"MIT"
] | null | null | null |
import bs4 as bs
import urllib.request
import time
import matplotlib.pyplot as plt
crickbuzz = input('Copy and paste the cricbuzz score page url here: ')
print("")
max_limit = str(input('Limit the over (eg. 16.2): '))
print("")
temp = 'start'
ovs = []
runs = []
wicks = []
over = 0
while over != max_limit:
sauce = urllib.request.urlopen(crickbuzz)
soup = bs.BeautifulSoup(sauce,'lxml')
rawscore = soup.find_all('span', class_='cb-font-20')
score = rawscore[0].text
if score != temp:
print(score)
split_score = score.split()
over = split_score[2].replace("(","")
ovs.append(over)
run = split_score[1].split('/')
runs.append(int(run[0]))
wicks.append(run[1])
temp = score
time.sleep(15)
plt.plot(ovs, runs)
plt.xlabel('Overs')
plt.ylabel('Runs')
plt.title('Progress of run with overs')
plt.show()
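# A minimal sketch of the parsing this loop relies on, assuming the
# Cricbuzz score text is shaped like 'IND 120/3 (15.2 Ovs)':
#
# >>> parts = 'IND 120/3 (15.2 Ovs)'.split()
# >>> parts[2].replace('(', '')   # over count -> '15.2'
# >>> parts[1].split('/')         # runs, wickets -> ['120', '3']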
| 20.625
| 70
| 0.673939
|
d9f44fd953051fbc3da3a2107fa97d8915002f3d
| 437
|
py
|
Python
|
CURSOEMVIDEO/ex019.py
|
souzasnp/PrimeiroProjeto
|
e88bde45df14d9c584b24caabb5186cb98141291
|
[
"MIT"
] | null | null | null |
CURSOEMVIDEO/ex019.py
|
souzasnp/PrimeiroProjeto
|
e88bde45df14d9c584b24caabb5186cb98141291
|
[
"MIT"
] | null | null | null |
CURSOEMVIDEO/ex019.py
|
souzasnp/PrimeiroProjeto
|
e88bde45df14d9c584b24caabb5186cb98141291
|
[
"MIT"
] | null | null | null |
# A teacher wants to randomly pick one of his four students to erase the board. Write a program that helps him by reading their names and printing the name of the chosen one.
from random import choice
aluno1 = input('First student: ')
aluno2 = input('Second student: ')
aluno3 = input('Third student: ')
aluno4 = input('Fourth student: ')
resul = [aluno1, aluno2, aluno3, aluno4]
escolhido = choice(resul)
print('Chosen student: {}'.format(escolhido))
| 43.7
| 159
| 0.745995
|
bc098a8159e2fdb906c93175f85750ad43f153d5
| 4,663
|
py
|
Python
|
git_upstream/rebase_editor.py
|
emonty/git-upstream
|
f1e2cc53371d98f2bdcf7d105a1bbb0750bfcdc8
|
[
"Apache-2.0"
] | null | null | null |
git_upstream/rebase_editor.py
|
emonty/git-upstream
|
f1e2cc53371d98f2bdcf7d105a1bbb0750bfcdc8
|
[
"Apache-2.0"
] | null | null | null |
git_upstream/rebase_editor.py
|
emonty/git-upstream
|
f1e2cc53371d98f2bdcf7d105a1bbb0750bfcdc8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2012, 2013, 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Command line editor for modifying the git rebase instructions file through
use of the interactive mode. Will in turn launch an editor if the user
wishes to use the interactive mode of git-rebase.
The script will replace all occurrences of 'pick' or any other instruction
entry with a list of instructions read from the given input file.
Avoid use of stdin for passing such information as many editors have problems
if exec'ed and stdin is a pipe.
"""
from argparse import ArgumentParser
import fileinput
import os
import sys
def rebase_replace_insn(path, istream):
"""
Function replaces the current instructions listed in the rebase
instructions (insn) file with those read from the given istream.
"""
echo_out = False
for line in fileinput.input(path, inplace=True):
stripped = line.strip()
# first blank line indicates end of rebase instructions
if not stripped:
if not echo_out:
while True:
replacement = istream.readline().strip()
if not replacement:
break
if not replacement.startswith("#"):
print replacement
print ""
echo_out = True
continue
if echo_out:
print stripped
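# Illustration (file contents assumed) of what rebase_replace_insn does
# to a git-rebase todo file: the original instruction lines before the
# first blank line are dropped, the non-comment lines from istream are
# written in their place, and everything after the blank line (git's
# help comments) is echoed through unchanged.
#
#   before:                       after:
#     pick 1111111 commit A         edit 1111111 commit A
#     pick 2222222 commit B         squash 2222222 commit B
#     <blank>                       <blank>
#     # Rebase aaaaaaa..bbbbbbb     # Rebase aaaaaaa..bbbbbbb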
def main():
parser = ArgumentParser(
description=__doc__.strip(),
)
parser.add_argument('-v', '--verbose', action='store_true', default=False,
help='Enable verbose mode')
parser.add_argument('-i', '--interactive', action='store_true',
help='Enable interactive mode, where the user can edit'
' the list of commits before being applied')
parser.add_argument('ifile', metavar='<new-list>',
help='File containing the new list of instructions to '
'be placed into the rebase instructions file.')
parser.add_argument('extra_args', metavar='<args>', nargs='?', default=[],
help='Additional arguments to be passed to the '
'subsequent editor')
parser.add_argument('ofile', metavar='<todo-list>',
                        help='Filename containing the list of instructions to '
'be edited.')
args = parser.parse_args()
VERBOSE = args.verbose
# don't attempt to use stdin to pass the information between the parent
# process through 'git-rebase' and this script, as many editors will
# have problems if stdin is a pipe.
if VERBOSE:
print "rebase-editor: Replacing contents of rebase instructions file"
rebase_replace_insn(args.ofile, open(args.ifile, 'r'))
# if interactive mode, attempt to exec the editor defined by the user
# for use with git
if not args.interactive:
if VERBOSE:
print "rebase-editor: Interactive mode not enabled"
sys.exit(0)
# calling code should only override one of the two editor variables,
# starting with the one with the highest precedence
editor = None
env = os.environ
for var in ['GIT_SEQUENCE_EDITOR', 'GIT_EDITOR']:
editor = env.get('GIT_UPSTREAM_' + var, None)
if editor:
del env['GIT_UPSTREAM_' + var]
env[var] = editor
break
if editor:
editor_args = [editor]
editor_args.extend(args.extra_args)
editor_args.append(args.ofile)
sys.stdin.flush()
sys.stdout.flush()
sys.stderr.flush()
#os.dup2(sys.stdin.fileno(), 0)
#os.dup2(sys.stdout.fileno(), 1)
#os.dup2(sys.stderr.fileno(), 2)
os.execvpe(editor, editor_args, env=env)
sys.stderr.write("rebase-editor: No git EDITOR variables defined in "
"environment to call as requested by the "
"--interactive option.\n")
sys.exit(2)
if __name__ == '__main__':
    main()
| 36.716535
| 79
| 0.628994
|
319e2748e10ff3e8abcf2c31191a620dd863a28a
| 25,649
|
py
|
Python
|
windows/benji.py
|
shubhpatel108/B.E.N.J.I.
|
c545bf9f53fc0f77b86d65260912153c5ab7c726
|
[
"MIT"
] | null | null | null |
windows/benji.py
|
shubhpatel108/B.E.N.J.I.
|
c545bf9f53fc0f77b86d65260912153c5ab7c726
|
[
"MIT"
] | null | null | null |
windows/benji.py
|
shubhpatel108/B.E.N.J.I.
|
c545bf9f53fc0f77b86d65260912153c5ab7c726
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import tkinter as tk
from tkinter import ttk
import wx
import regex
import os
import wikipedia
import time
import webbrowser
import youtube_dl
#import winshell
import json
import requests
import ctypes
import random
import urllib
import ssl
from bs4 import BeautifulSoup
import win32com.client as wicl
from urllib.request import urlopen
import speech_recognition as sr
import requests
from pptx import Presentation
from xlsxwriter import Workbook
import subprocess
import sys
import pyttsx3
from pytube import YouTube
requests.packages.urllib3.disable_warnings()
try:
_create_unverified_https_context=ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context=_create_unverified_https_context
headers = {'user-agent': 'Chrome/53.0.2785.143'}  # a dict; the triple-quoted form built a one-element set
#speak=wicl.Dispatch("SAPI.SpVoice")
speak = pyttsx3.init()
def events(frame,put):
identity_keywords = ["who are you", "who r u", "what is your name"]
youtube_keywords = ("play ", "stream ", "queue ")
launch_keywords = ["open ", "launch "]
search_keywords = ["search "]
wikipedia_keywords = ["wikipedia ", "wiki "]
location_keywords = ["locate","spot"]
check_keywords = ["what","when","was","how","has","had","should","would","can","could","cool","good"] #could or cool or good
download_music=("download ","download music ")
search_pc= ("find ","lookfor ")
close_keywords=("close ","over ","stop ","exit ")
link = put.split()
#Add note
if put.startswith("note") or put.startswith("not") or put.startswith("node"):
try:
check = link[1]
username = os.getlogin()
filename = "Notes.txt"
f1 = open(r'''C:\Users\{0}\Desktop\{1}'''.format(username,filename),'a')
link = '+'.join(link[1:])
text = link.replace('+',' ')
text = text[0].capitalize() + text[1:]
if check in check_keywords:
text += "?"
else:
text += "."
f1.write(text)
f1.write("\n")
f1.close()
speak.say("Note added successfully!")
speak.runAndWait()
except:
print("Could not add the specified note!")
#Screen Recorder
elif link[0] == "recorder":
try:
if len(link) < 2:
video = '"UScreenCapture"'
audio = '"Microphone (Realtek High Definition Audio)"'
elif len(link) < 3:
video = link[1]
video = video.replace('_',' ')
video = '"' + video + '"'
audio = '"Microphone (Realtek High Definition Audio)"'
else:
video = link[1]
video = video.replace('_',' ')
video = '"' + video + '"'
audio = link[2]
audio = audio.replace('_',' ')
audio = '"' + audio + '"'
username = os.getlogin()
speak.say("Recording started!")
speak.runAndWait()
os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
subprocess.call(r'''ffmpeg -rtbufsize 1500M -f dshow -i video={0}:audio={1} -vcodec mpeg4 -vtag xvid -qscale:v 0 -crf 0 -acodec libmp3lame -ab 320k -ac 1 -ar 44100 video.avi'''.format(video,audio),shell=True) #video = UScreenCapture , audio = Microphone (Realtek High Definition Audio)
except:
print("Unable to start requested service!")
#Voice Recorder
elif link[0] == "audio" and link[1] == "recorder":
try:
if len(link) < 3:
audio = '"Microphone (Realtek High Definition Audio)"'
else:
audio = link[2]
audio = audio.replace('_',' ')
audio = '"' + audio + '"'
username = os.getlogin()
speak.say("Recording started!")
speak.runAndWait()
os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
subprocess.call(r'''ffmpeg -rtbufsize 1500M -f dshow -i audio={0} -acodec libmp3lame -ab 320k -ac 1 -ar 44100 audio.mp3'''.format(audio),shell=True)
except:
print("Unable to start requested service!")
#Video Recorder
elif link[0] == "video" and link[1] == "recorder":
try:
if len(link) < 3:
video = '"UScreenCapture"'
else:
video = link[2]
video = video.replace('_',' ')
video = '"' + video + '"'
username = os.getlogin()
speak.say("Recording started!")
speak.runAndWait()
os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
subprocess.call(r'''ffmpeg -rtbufsize 1500M -f dshow -i video={0} -vcodec mpeg4 -vtag xvid -qscale:v 0 -crf 0 video.avi'''.format(video),shell=True)
except:
print("Unable to start requested service!")
#Merge audio and video
elif link[0] == "merge":
try:
username = os.getlogin()
os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
video = link[1]
audio = link[2]
output = link[3]
subprocess.call(r'''ffmpeg -i {} -i {} -c:v copy -c:a copy {}'''.format(video,audio,output),shell=True)
except:
print("Unable to process requested service!")
#Convert video
elif link[0] == "convert":
try:
username = os.getlogin()
os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
if link[1] == "na":
form_in = link[2]
video1 = link[3]
form_out = link[4]
video2 = link[5]
if (form_in == "avi" or form_in == "webm" or form_in == "mp4" or form_in == "mkv") and (form_out == "mp4" or form_out == "mkv"):
subprocess.call(r'''ffmpeg -i {} -c:v libx264 -an {}'''.format(video1,video2), shell = True)
elif (form_in == "avi" or form_in == "mp4" or form_in == "mkv") and form_out == "webm":
subprocess.call(r'''ffmpeg -i {} -c:v libvpx-vp9 -b:v 2M -an {}'''.format(video1,video2),shell=True)
else:
form_in = link[1]
video1 = link[2]
form_out = link[3]
video2 = link[4]
if (form_in == "avi" or form_in == "webm" or form_in == "mp4" or form_in == "mkv") and (form_out == "mp4" or form_out == "mkv"):
subprocess.call(r'''ffmpeg -i {} -c:v libx264 -acodec aac {}'''.format(video1,video2), shell = True)
elif (form_in == "avi" or form_in == "mp4" or form_in == "mkv") and form_out == "webm":
subprocess.call(r'''ffmpeg -i {} -c:v libvpx-vp9 -b:v 2M -cpu-used -5 -deadline realtime -c:a libvorbis {}'''.format(video1,video2), shell = True)
elif (form_in == "mp4" or form_in == "mkv" or form_in == "webm") and form_out == "avi":
subprocess.call(r'''ffmpeg -i {} -c:v mpeg4 -vtag xvid -qscale:v 0 -acodec libmp3lame {}'''.format(video1,video2), shell = True)
elif (form_in == "avi" or form_in == "webm" or form_in == "mp4" or form_in == "mkv" or form_in == "mp3" or form_in == "m4a") and (form_out == "m4a" or form_out == "mp3"):
subprocess.call(r'''ffmpeg -i {} {}'''.format(video1,video2), shell = True)
except:
print("Unable to process requested service!")
#Closing Benji
elif put.startswith(close_keywords):
os._exit(0)
#Images to video
elif put.startswith("images to video "):
try:
framerate = link[3]
username = os.getlogin()
os.chdir(r'''C:\Users\{}\Desktop\Images'''.format(username))
subprocess.call(r'''ffmpeg -framerate 1/{} -i img%03d.jpg -vcodec mpeg4 -vtag xvid -qscale:v 0 -crf 0 output.avi'''.format(framerate),shell=True)
speak.say("Video created!")
speak.runAndWait()
except:
print("Unable to create video file!")
elif put.startswith(search_pc):
try:
name=link[1]
rex=regex.compile(name)
filepath=link[2]
realpath=filepath
for root,dirs,files in os.walk(os.path.normpath(filepath)):
for f in files:
result = rex.search(f)
if result:
realpath=os.path.join(root, f)
print (realpath+"\n")
os.startfile(realpath)
except:
print("Error")
put = put.lower()
put = put.strip()
link = put.split()
# elif put.startswith(search_pc):
# process=subprocess.Popen("dir /b/s "+link[1],shell=True,stdout=subprocess.PIPE)
# while True:
# output = process.stdout.readline()
# if output == '' and process.poll() is not None:
# break
# if output:
# print (output.strip()+"\n")
# outp=output
# try:
# os.startfile(outp)
# except:
# speak.say("Sorry,couldn't open")
#Play song on youtube
if put.startswith(youtube_keywords):
try:
link = '+'.join(link[1:])
# print(link)
say = link.replace('+', ' ')
url = 'https://www.youtube.com/results?search_query='+link
# webbrowser.open('https://www.youtube.com'+link)
fhand=urllib.request.urlopen(url).read()
soup = BeautifulSoup(fhand, "html.parser")
songs = soup.findAll('div', {'class': 'yt-lockup-video'})
hit = songs[0].find('a')['href']
# print(hit)
speak.say("playing "+say)
speak.runAndWait()
webbrowser.open('https://www.youtube.com'+hit)
except:
            print("Sorry Ethan. Looks like it's not working!")
#Download video
if put.startswith("download video "):
try:
link = '+'.join(link[2:])
say = link.replace('+', ' ')
url = 'https://www.youtube.com/results?search_query='+link
fhand=urllib.request.urlopen(url).read()
soup = BeautifulSoup(fhand, "html.parser")
songs = soup.findAll('div', {'class': 'yt-lockup-video'})
hit = songs[0].find('a')['href']
speak.say("downloading video "+say)
speak.runAndWait()
username = os.getlogin()
os.chdir(r'''C:\Users\{}\Desktop'''.format(username))
YouTube('https://www.youtube.com' + hit).streams.first().download()
speak.say("download complete!")
speak.runAndWait()
except:
            print("Sorry Ethan. Looks like it's not working!")
#Download music
elif put.startswith(download_music):
try:
link = '+'.join(link[1:])
# print(link)
say = link.replace('+', ' ')
url = 'https://www.youtube.com/results?search_query='+link
# webbrowser.open('https://www.youtube.com'+link)
fhand=urllib.request.urlopen(url).read()
soup = BeautifulSoup(fhand, "html.parser")
songs = soup.findAll('div', {'class': 'yt-lockup-video'})
hit = songs[0].find('a')['href']
# print(hit)
speak.say("downloading "+say)
speak.runAndWait()
ydl_opts = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
'quiet': True,
'restrictfilenames': True,
'outtmpl': 'C:\\Users\\'+os.environ['USERNAME']+'\\Desktop\\%(title)s.%(ext)s'
}
ydl = youtube_dl.YoutubeDL(ydl_opts)
ydl.download(['https://www.youtube.com'+hit])
speak.say("download completed Check your desktop for the song")
speak.runAndWait()
except:
print("Unable to download requested music!")
#Location
elif any(word in put for word in location_keywords):
try:
link='+'.join(link[1:])
say=link.replace('+',' ')
speak.say("locating "+ say)
speak.runAndWait()
webbrowser.open('https://www.google.nl/maps/place/'+link)
except:
print('The place seems to be sequestered.')
#Who are you?
elif any(word in put for word in identity_keywords):
try:
speak.say("I am BENJI, a digital assistant declassified for civilian use. Previously I was used by the Impossible Missions Force")
speak.runAndWait()
except:
print('Error. Try reading the ReadMe to know about me!')
#Open a webpage
elif any(word in put for word in launch_keywords):
try:
link = '+'.join(link[1:])
speak.say("opening "+link)
speak.runAndWait()
webbrowser.open('http://www.'+ link)
except:
            print('Sorry Ethan, unable to access it. Cannot hack either - IMF protocol!')
#Google search
elif any(word in put for word in search_keywords):
try:
link='+'.join(link[1:])
say=link.replace('+',' ')
speak.say("searching google for "+say)
speak.runAndWait()
webbrowser.open('https://www.google.com/search?q='+link)
except:
print('Nope, this is not working.')
#Google Images
elif put.startswith("images of "):
try:
link='+'.join(link[2:])
say=link.replace('+',' ')
speak.say("searching images of " + say)
speak.runAndWait()
webbrowser.open('https://www.google.co.in/search?q=' + link + '&source=lnms&tbm=isch')
except:
print('Could not search for images!')
#Gmail
elif put.startswith("gmail"):
try:
speak.say("Opening Gmail!")
speak.runAndWait()
webbrowser.open('https://www.google.com/gmail')
except:
print("Could not open Gmail!")
#Google Cloud Print
elif put.startswith("google cloud print"):
try:
speak.say("Opening google cloud print!")
speak.runAndWait()
webbrowser.open('https://www.google.com/cloudprint')
except:
print("Could not open Google Cloud Print!")
#Google Others
elif put.startswith("google "):
try:
say = link[1]
speak.say("Opening google " + say)
speak.runAndWait()
webbrowser.open('https://'+ say +'.google.com')
except:
print("Could not open Google " + say.capitalize() + "!")
#Blogger
elif put.startswith("blogger"):
try:
speak.say("Opening blogger!")
speak.runAndWait()
webbrowser.open('https://www.blogger.com')
except:
print("Could not open Blogger!")
#Wikipedia
elif any(word in put for word in wikipedia_keywords):
try:
link = '+'.join(link[1:])
say = link.replace('+', ' ')
wikisearch = wikipedia.page(say)
speak.say("Opening wikipedia page for" + say)
speak.runAndWait()
webbrowser.open(wikisearch.url)
except:
            print('Wikipedia either could not find the article or your Third-world connection is unstable')
#Podcast
elif put.startswith("podcast"):
try:
speak.say("Opening podcast!")
speak.runAndWait()
webbrowser.open('https://castbox.fm/home')
except:
print("Could not open podcast!")
#Lock the device
elif put.startswith('secure ') or put.startswith('lock '):
try:
speak.say("locking the device")
speak.runAndWait()
ctypes.windll.user32.LockWorkStation()
except :
print('Cannot lock device')
#News of various press agencies
elif put.startswith('news '):
try:
say = '+'.join(link[1:])
say = say.replace('+','-')
if link[1] == "al" and link[2] == "jazeera":
say += "-english"
elif link[1] == "bbc":
say += "-news"
elif link[1] == "espn" and link[2] == "cric":
say += "-info"
url = ('https://newsapi.org/v1/articles?source=' + say + '&sortBy=latest&apiKey=571863193daf421082a8666fe4b666f3')
newsresponce = requests.get(url)
newsjson = newsresponce.json()
speak.say('Our agents from ' + say + ' report this')
speak.runAndWait()
print(' ====='+ say.upper() +'===== \n')
i = 1
for item in newsjson['articles']:
print(str(i) + '. ' + item['title'] + '\n')
print(item['description'] + '\n')
i += 1
except:
print('Unable to retrieve data!')
#shutdown after specific time
elif put.startswith('shutdown after '):
try:
            # map a spoken "zero" to "0" before the digit check,
            # otherwise the isdigit() test rejects it
            if link[2] == "zero":
                link[2] = "0"
            if link[4] == "zero":
                link[4] = "0"
            if link[2].isdigit() and link[4].isdigit():
                hours = int(link[2])
                minutes = int(link[4])
time_seconds = 60 * minutes
time_seconds = time_seconds + hours * 3600
subprocess.call("shutdown /s /t {0}".format(str(time_seconds)), shell = True)
speak.say("Shutdown initialized!")
speak.runAndWait()
except:
print("Please shutdown manually!")
#shutdown now
elif put.startswith("shutdown now"):
try:
subprocess.call("shutdown /s /t 0", shell = True)
except:
print("Please shutdown manually!")
#abort shutdown
elif put.startswith("cancel shutdown"):
try:
subprocess.call("shutdown /a", shell = True)
speak.say("Shutdown cancelled!")
speak.runAndWait()
except:
print("Unable do cancel shutdown!")
#restart
elif put.startswith("restart now"):
try:
subprocess.call("shutdown /r /t 0", shell = True)
except:
print("Unable do restart device!")
#Folder
elif put.startswith('create ') and link[-1] == "folder":
try:
username = os.getlogin()
filename = '+'.join(link[1:-1])
filename = filename.replace('+','_').capitalize()
path = r'''C:\Users\{0}\Desktop\{1}'''.format(username,filename)
os.mkdir(path)
speak.say("Folder created!")
speak.runAndWait()
except:
print("Couldn't create specified folder!")
#create file
elif put.startswith('create ') and link[-1] == "document":
try:
username = os.getlogin()
filename = '+'.join(link[1:-2])
filename = filename.replace('+','_').capitalize()
if link[-2] == "text":
filename += ".txt"
f1 = open(r'''C:\Users\{0}\Desktop\{1}'''.format(username,filename),'a')
f1.close()
elif link[-2] == "word" or link[-2] == "world":
filename += ".docx"
f1 = open(r'''C:\Users\{0}\Desktop\{1}'''.format(username,filename),'a')
f1.close()
elif link[-2] == "powerpoint" or link[-2] =="presentation":
filename += ".pptx"
prs = Presentation()
title_slide_layout = prs.slide_layouts[0]
slide = prs.slides.add_slide(title_slide_layout)
os.chdir(r'''C:\Users\{0}\Desktop'''.format(username))
prs.save(filename)
elif link[-2] == "excel" or link[-2] == "Excel":
filename += ".xlsx"
wb = Workbook(filename)
ws = wb.add_worksheet()
os.chdir(r'''C:\Users\{0}\Desktop'''.format(username))
wb.close()
elif link[-2] == "visio" or link[-2] == "vizio":
filename += ".vsdx"
f1 = open(r'''C:\Users\{0}\Desktop\{1}'''.format(username,filename),'a')
f1.close()
elif link[-2] == "rich" or link[-2] == "reach":
filename += ".rtf"
f1 = open(r'''C:\Users\{0}\Desktop\{1}'''.format(username,filename),'a')
f1.close()
speak.say("Created" + filename)
speak.runAndWait()
except:
print("Unable to create a file.")
#Calculator
elif put.startswith('calculator'):
try:
subprocess.call('calc',shell=True)
except:
print("Unable to open calculator!")
#Exit/Quit
elif put.startswith('exit') or put.startswith('quit'):
sys.exit()
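# A minimal sketch (command text assumed) of how a new branch is wired
# into the events() dispatcher above: match a prefix of the typed or
# spoken string, act, then give voice feedback.
#
#     elif put.startswith('hello'):
#         speak.say('Hello Agent!')
#         speak.runAndWait()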
#A stdout class to redirect output to tkinter window
class StdRedirector(object):
def __init__(self, text_window):
self.text_window = text_window
def write(self, output):
self.text_window.insert(tk.END, output)
# Creating the graphical user interface
class MyFrame(tk.Frame):
def __init__(self,*args,**kwargs):
self.textBox = tk.Text(root,
height=1,width=30,
font=("Times", 16),
bg="#666", fg="#0f0",
spacing1=6, spacing3=6,
insertbackground="#0f0"
)
self.textBox.insert("1.0", "$>")
self.textBox.grid(row=1,column=1, padx=10, pady=10)
root.bind('<Return>', self.OnEnter)
#root.bind('<Destroy>', self.onClose)
self.textBox.focus_set()
speak.say('''Hi Agent! BENJI at your service''')
speak.runAndWait()
self.photo1 = tk.PhotoImage(file="mic_icon.png")
self.btn = ttk.Button(root,command=self.OnClicked,
image=self.photo1, style="C.TButton")
self.btn.grid(row=1,column=2, padx=10, pady=20)
'''
self.output_window = tk.Toplevel()
output_text_window = tk.Text(self.output_window)
self.stddirec = StdRedirector(output_text_window)
sys.stdout = self.stddirec
output_text_window.pack()
self.output_window.withdraw()
'''
def OnEnter(self,event):
put=self.textBox.get("1.2","end-1c")
self.displayText(put)
self.textBox.insert('1.2',put)
self.textBox.delete('1.2',tk.END)
events(self, put)
if put=='':
self.displayText('Reenter')
def OnClicked(self):
r = sr.Recognizer()
with sr.Microphone() as source:
speak.say('Hey I am Listening ')
speak.runAndWait()
audio = r.listen(source)
try:
put=r.recognize_google(audio)
self.displayText(put)
self.textBox.insert('1.2',put)
self.textBox.delete('1.2',tk.END)
events(self,put)
except sr.UnknownValueError:
self.displayText("Could not understand audio")
except sr.RequestError as e:
self.displayText("Could not request results; {0}".format(e))
def displayText(self, text):
try :
if not self.output_window.winfo_viewable() :
self.output_window.update()
self.output_window.deiconify()
except :
self.createOutputWindow()
print(text)
def createOutputWindow(self):
self.output_window = tk.Toplevel()
output_text_window = tk.Text(self.output_window)
self.stddirec = StdRedirector(output_text_window)
sys.stdout = self.stddirec
output_text_window.pack()
#Trigger the GUI. Light the fuse!
if __name__=="__main__":
root = tk.Tk()
view = MyFrame(root)
style = ttk.Style()
style.configure('C.TButton',
background='#555',
highlightthickness='0'
)
style.map("C.TButton",
background=[('pressed', '!disabled', '#333'), ('active', '#666')]
)
# root.geometry('{}x{}'.format(400, 100))
# view.pack(side="top",fill="both",expand=False)
root.iconphoto(True, tk.PhotoImage(file=os.path.join(sys.path[0],'benji_final.gif')))
root.title('B.E.N.J.I.')
root.configure(background="#444")
root.resizable(0,0)
root.mainloop()
| 41.104167
| 301
| 0.510195
|
6fbf7166b1a40132a383ae6c0331819453191689
| 29,917
|
py
|
Python
|
xception_tf/keras_xception.py
|
rickyHong/Light-Head-RCNN-enhanced-Xdetector
|
1b19e15709635e007494648c4fb519b703a29d84
|
[
"Apache-2.0"
] | 116
|
2018-03-12T19:38:06.000Z
|
2021-02-23T16:15:34.000Z
|
xception_tf/keras_xception.py
|
rickyHong/Light-Head-RCNN-enhanced-Xdetector
|
1b19e15709635e007494648c4fb519b703a29d84
|
[
"Apache-2.0"
] | 11
|
2018-04-25T15:46:21.000Z
|
2019-03-15T11:25:48.000Z
|
xception_tf/keras_xception.py
|
rickyHong/Light-Head-RCNN-enhanced-Xdetector
|
1b19e15709635e007494648c4fb519b703a29d84
|
[
"Apache-2.0"
] | 37
|
2018-03-31T03:38:42.000Z
|
2020-09-18T09:04:32.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Xception V1 model for Keras.
On ImageNet, this model gets to a top-1 validation accuracy of 0.790
and a top-5 validation accuracy of 0.945.
Do note that the input image format for this model is different than for
the VGG16 and ResNet models (299x299 instead of 224x224),
and that the input preprocessing function
is also different (same as Inception V3).
Also do note that this model is only available for the TensorFlow backend,
due to its reliance on `SeparableConvolution` layers.
# Reference
- [Xception: Deep Learning with Depthwise Separable
Convolutions](https://arxiv.org/abs/1610.02357)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras import layers
from keras.preprocessing import image
from tensorflow.python.keras._impl.keras.applications import imagenet_utils
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import decode_predictions # pylint: disable=unused-import
from tensorflow.python.keras._impl.keras.engine.topology import get_source_inputs
from tensorflow.python.keras._impl.keras.layers import Activation
from tensorflow.python.keras._impl.keras.layers import BatchNormalization
from tensorflow.python.keras._impl.keras.layers import Conv2D
from tensorflow.python.keras._impl.keras.layers import Dense
from tensorflow.python.keras._impl.keras.layers import GlobalAveragePooling2D
from tensorflow.python.keras._impl.keras.layers import GlobalMaxPooling2D
from tensorflow.python.keras._impl.keras.layers import Input
from tensorflow.python.keras._impl.keras.layers import MaxPooling2D
from tensorflow.python.keras._impl.keras.layers import SeparableConv2D
from tensorflow.python.keras._impl.keras.models import Model
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
from tensorflow.python.platform import tf_logging as logging
TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.4/xception_weights_tf_dim_ordering_tf_kernels_notop.h5'
def Xception(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the Xception architecture.
Optionally loads weights pre-trained
on ImageNet. This model is available for TensorFlow only,
and can only be used with inputs following the TensorFlow
data format `(width, height, channels)`.
You should set `image_data_format="channels_last"` in your Keras config
located at ~/.keras/keras.json.
Note that the default input image size for this model is 299x299.
Arguments:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
      has to be `(299, 299, 3)`).
It should have exactly 3 input channels,
and width and height should be no smaller than 71.
E.g. `(150, 150, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
if K.image_data_format() != 'channels_last':
logging.warning(
'The Xception model is only available for the '
'input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height). '
'You should set `image_data_format="channels_last"` in your Keras '
'config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=299,
min_size=71,
data_format=K.image_data_format(),
require_flatten=False,
weights=weights)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
img_input = Input(tensor=input_tensor, shape=input_shape)
x = Conv2D(
32, (3, 3), strides=(2, 2), use_bias=False,
name='block1_conv1')(img_input)
x = BatchNormalization(name='block1_conv1_bn')(x)
x = Activation('relu', name='block1_conv1_act')(x)
x = Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
x = BatchNormalization(name='block1_conv2_bn')(x)
x = Activation('relu', name='block1_conv2_act')(x)
residual = Conv2D(
128, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(
128, (3, 3), padding='same', use_bias=False, name='block2_sepconv1')(x)
x = BatchNormalization(name='block2_sepconv1_bn')(x)
x = Activation('relu', name='block2_sepconv2_act')(x)
x = SeparableConv2D(
128, (3, 3), padding='same', use_bias=False, name='block2_sepconv2')(x)
x = BatchNormalization(name='block2_sepconv2_bn')(x)
x = MaxPooling2D(
(3, 3), strides=(2, 2), padding='same', name='block2_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(
256, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block3_sepconv1_act')(x)
x = SeparableConv2D(
256, (3, 3), padding='same', use_bias=False, name='block3_sepconv1')(x)
x = BatchNormalization(name='block3_sepconv1_bn')(x)
x = Activation('relu', name='block3_sepconv2_act')(x)
x = SeparableConv2D(
256, (3, 3), padding='same', use_bias=False, name='block3_sepconv2')(x)
x = BatchNormalization(name='block3_sepconv2_bn')(x)
x = MaxPooling2D(
(3, 3), strides=(2, 2), padding='same', name='block3_pool')(x)
x = layers.add([x, residual])
residual = Conv2D(
728, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block4_sepconv1_act')(x)
x = SeparableConv2D(
728, (3, 3), padding='same', use_bias=False, name='block4_sepconv1')(x)
x = BatchNormalization(name='block4_sepconv1_bn')(x)
x = Activation('relu', name='block4_sepconv2_act')(x)
x = SeparableConv2D(
728, (3, 3), padding='same', use_bias=False, name='block4_sepconv2')(x)
x = BatchNormalization(name='block4_sepconv2_bn')(x)
x = MaxPooling2D(
(3, 3), strides=(2, 2), padding='same', name='block4_pool')(x)
x = layers.add([x, residual])
for i in range(8):
residual = x
prefix = 'block' + str(i + 5)
x = Activation('relu', name=prefix + '_sepconv1_act')(x)
x = SeparableConv2D(
728, (3, 3), padding='same', use_bias=False,
name=prefix + '_sepconv1')(x)
x = BatchNormalization(name=prefix + '_sepconv1_bn')(x)
x = Activation('relu', name=prefix + '_sepconv2_act')(x)
x = SeparableConv2D(
728, (3, 3), padding='same', use_bias=False,
name=prefix + '_sepconv2')(x)
x = BatchNormalization(name=prefix + '_sepconv2_bn')(x)
x = Activation('relu', name=prefix + '_sepconv3_act')(x)
x = SeparableConv2D(
728, (3, 3), padding='same', use_bias=False,
name=prefix + '_sepconv3')(x)
x = BatchNormalization(name=prefix + '_sepconv3_bn')(x)
x = layers.add([x, residual])
residual = Conv2D(
1024, (1, 1), strides=(2, 2), padding='same', use_bias=False)(x)
residual = BatchNormalization()(residual)
x = Activation('relu', name='block13_sepconv1_act')(x)
x = SeparableConv2D(
728, (3, 3), padding='same', use_bias=False, name='block13_sepconv1')(x)
x = BatchNormalization(name='block13_sepconv1_bn')(x)
x = Activation('relu', name='block13_sepconv2_act')(x)
x = SeparableConv2D(
1024, (3, 3), padding='same', use_bias=False, name='block13_sepconv2')(x)
x = BatchNormalization(name='block13_sepconv2_bn')(x)
x = MaxPooling2D(
(3, 3), strides=(2, 2), padding='same', name='block13_pool')(x)
x = layers.add([x, residual])
x = SeparableConv2D(
1536, (3, 3), padding='same', use_bias=False, name='block14_sepconv1')(x)
x = BatchNormalization(name='block14_sepconv1_bn')(x)
x = Activation('relu', name='block14_sepconv1_act')(x)
x = SeparableConv2D(
2048, (3, 3), padding='same', use_bias=False, name='block14_sepconv2')(x)
x = BatchNormalization(name='block14_sepconv2_bn')(x)
x = Activation('relu', name='block14_sepconv2_act')(x)
if include_top:
x = GlobalAveragePooling2D(name='avg_pool')(x)
x = Dense(classes, activation='softmax', name='predictions')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='xception')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file(
'xception_weights_tf_dim_ordering_tf_kernels.h5',
TF_WEIGHTS_PATH,
cache_subdir='models',
file_hash='0a58e3b7378bc2990ea3b43d5981f1f6')
else:
weights_path = get_file(
'xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
TF_WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
file_hash='b0042744bf5b25fce3cb969f33bebb97')
model.load_weights(weights_path)
elif weights is not None:
model.load_weights(weights)
if old_data_format:
K.set_image_data_format(old_data_format)
return model
def preprocess_input(x):
"""Preprocesses a numpy array encoding a batch of images.
Arguments:
x: a 4D numpy array consists of RGB values within [0, 255].
Returns:
Preprocessed array.
"""
return imagenet_utils.preprocess_input(x, mode='tf')
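# Hedged illustration (not part of the original file): mode='tf' preprocessing
# rescales [0, 255] inputs to [-1, 1] via x / 127.5 - 1.0, so for example:
#   >>> preprocess_input(np.array([[[[0., 127.5, 255.]]]]))
#   array([[[[-1., 0., 1.]]]])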
# if __name__ == '__main__':
# # image size = 299 * 299
# model = Xception(include_top=True, weights='./imagenet_xception.h5')
# img_path = 'images/000010.jpg'
# img = image.load_img(img_path, target_size=(299, 299))
# x = image.img_to_array(img)
# x = np.expand_dims(x, axis=0)
# x = preprocess_input(x)
# print('Input image shape:', x.shape)
# preds = model.predict(x)
# print(np.argmax(preds))
# print('Predicted:', decode_predictions(preds, 1))
if __name__ == '__main__':
# image size = 299 * 299
model = Xception(include_top=True, weights='./imagenet_xception.h5')
preds = model.predict(np.ones((1,299,299,3)) * 0.5)
print(preds)
print(np.argmax(preds))
#print('Predicted:', decode_predictions(preds, 1))
# img_path = 'elephant.jpg'
# img = image.load_img(img_path, target_size=(299, 299))
# x = image.img_to_array(img)
# x = np.expand_dims(x, axis=0)
# x = preprocess_input(x)
# print('Input image shape:', x.shape)
# output: a (1, 1000) softmax probability vector over the ImageNet classes
# (full numeric dump omitted); the recorded argmax for the constant 0.5
# input above was 473.
| 48.724756
| 151
| 0.720092
|
8e237d4ef1446b296d6d2a3e45f27a6a89105949
| 5,320
|
py
|
Python
|
docusign_esign/models/expirations.py
|
pivotal-energy-solutions/docusign-python-client
|
f3edd0b82e57999bc8848a63a0477712714ee437
|
[
"MIT"
] | null | null | null |
docusign_esign/models/expirations.py
|
pivotal-energy-solutions/docusign-python-client
|
f3edd0b82e57999bc8848a63a0477712714ee437
|
[
"MIT"
] | null | null | null |
docusign_esign/models/expirations.py
|
pivotal-energy-solutions/docusign-python-client
|
f3edd0b82e57999bc8848a63a0477712714ee437
|
[
"MIT"
] | 1
|
2021-04-26T20:52:45.000Z
|
2021-04-26T20:52:45.000Z
|
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Expirations(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, expire_after=None, expire_enabled=None, expire_warn=None):
"""
Expirations - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'expire_after': 'str',
'expire_enabled': 'str',
'expire_warn': 'str'
}
self.attribute_map = {
'expire_after': 'expireAfter',
'expire_enabled': 'expireEnabled',
'expire_warn': 'expireWarn'
}
self._expire_after = expire_after
self._expire_enabled = expire_enabled
self._expire_warn = expire_warn
@property
def expire_after(self):
"""
Gets the expire_after of this Expirations.
An integer that sets the number of days the envelope is active.
:return: The expire_after of this Expirations.
:rtype: str
"""
return self._expire_after
@expire_after.setter
def expire_after(self, expire_after):
"""
Sets the expire_after of this Expirations.
An integer that sets the number of days the envelope is active.
:param expire_after: The expire_after of this Expirations.
:type: str
"""
self._expire_after = expire_after
@property
def expire_enabled(self):
"""
Gets the expire_enabled of this Expirations.
When set to **true**, the envelope expires (is no longer available for signing) in the set number of days. If false, the account default setting is used. If the account does not have an expiration setting, the DocuSign default value of 120 days is used.
:return: The expire_enabled of this Expirations.
:rtype: str
"""
return self._expire_enabled
@expire_enabled.setter
def expire_enabled(self, expire_enabled):
"""
Sets the expire_enabled of this Expirations.
When set to **true**, the envelope expires (is no longer available for signing) in the set number of days. If false, the account default setting is used. If the account does not have an expiration setting, the DocuSign default value of 120 days is used.
:param expire_enabled: The expire_enabled of this Expirations.
:type: str
"""
self._expire_enabled = expire_enabled
@property
def expire_warn(self):
"""
Gets the expire_warn of this Expirations.
An integer that sets the number of days before envelope expiration that an expiration warning email is sent to the recipient. If set to 0 (zero), no warning email is sent.
:return: The expire_warn of this Expirations.
:rtype: str
"""
return self._expire_warn
@expire_warn.setter
def expire_warn(self, expire_warn):
"""
Sets the expire_warn of this Expirations.
An integer that sets the number of days before envelope expiration that an expiration warning email is sent to the recipient. If set to 0 (zero), no warning email is sent.
:param expire_warn: The expire_warn of this Expirations.
:type: str
"""
self._expire_warn = expire_warn
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
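# A minimal usage sketch (not part of the generated file). Field values are
# illustrative strings, since the swagger model types every attribute as 'str':
#   exp = Expirations(expire_after='120', expire_enabled='true', expire_warn='5')
#   exp.to_dict()  # {'expire_after': '120', 'expire_enabled': 'true', 'expire_warn': '5'}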
| 31.856287
| 261
| 0.60094
|
0a5cbf9816675b49ba96e087441cd1862f1979b6
| 10,897
|
py
|
Python
|
src/lib/models/networks/resnet_fpn_dcn_align.py
|
Hzx66666/FairMOT_PS
|
c0b2ef18cd712ebd2512ce73672667a72a9d4f04
|
[
"MIT"
] | null | null | null |
src/lib/models/networks/resnet_fpn_dcn_align.py
|
Hzx66666/FairMOT_PS
|
c0b2ef18cd712ebd2512ce73672667a72a9d4f04
|
[
"MIT"
] | null | null | null |
src/lib/models/networks/resnet_fpn_dcn_align.py
|
Hzx66666/FairMOT_PS
|
c0b2ef18cd712ebd2512ce73672667a72a9d4f04
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Dequan Wang and Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import torch
import torch.nn as nn
from dcn_v2 import DCN
import torch.utils.model_zoo as model_zoo
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion,
momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
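# Worked example (added note): fill_up_weights initializes a transposed-conv
# kernel with bilinear-interpolation weights. For a 4x4 kernel, f = 2 and
# c = 0.75, giving the 1-D profile [0.25, 0.75, 0.75, 0.25]; each 4x4 slice
# is the outer product of that profile with itself, copied to every channel.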
def fill_fc_weights(layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
# torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
# torch.nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class PoseResNet(nn.Module):
def __init__(self, block, layers, heads, head_conv):
self.inplanes = 64
self.heads = heads
self.deconv_with_bias = False
super(PoseResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# used for deconv layers
self.deconv_layer1 = self._make_deconv_layer(256, 4)
self.deconv_layer2 = self._make_deconv_layer(128, 4)
self.deconv_layer3 = self._make_deconv_layer(64, 4)
self.smooth_layer1 = DeformConv(256, 256)
self.smooth_layer2 = DeformConv(128, 128)
self.smooth_layer3 = DeformConv(64, 64)
self.project_layer1 = DeformConv(256 * block.expansion, 256)
self.project_layer2 = DeformConv(128 * block.expansion, 128)
self.project_layer3 = DeformConv(64 * block.expansion, 64)
for head in self.heads:
classes = self.heads[head]
if head_conv > 0 and head != 'id':
fc = nn.Sequential(
nn.Conv2d(64, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, classes,
kernel_size=1, stride=1,
padding=0, bias=True))
if 'hm' in head:
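                    # focal-loss prior (CenterNet convention): -2.19 == -log((1 - pi) / pi) for pi ~= 0.1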
fc[-1].bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
else:
fc = nn.Conv2d(64, classes,
kernel_size=1, stride=1,
padding=0, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
self.__setattr__(head, fc)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
    def _get_deconv_cfg(self, deconv_kernel):
        if deconv_kernel == 4:
            padding = 1
            output_padding = 0
        elif deconv_kernel == 3:
            padding = 1
            output_padding = 1
        elif deconv_kernel == 2:
            padding = 0
            output_padding = 0
        else:
            raise ValueError('unsupported deconv kernel size: {}'.format(deconv_kernel))
        return deconv_kernel, padding, output_padding
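    # Added note: with kernel=4, stride=2, padding=1, output_padding=0 the
    # transposed conv exactly doubles spatial size: H_out = (H - 1)*2 - 2*1 + 4 = 2*H.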
def _make_deconv_layer(self, num_filters, num_kernels):
layers = []
kernel, padding, output_padding = \
self._get_deconv_cfg(num_kernels)
planes = num_filters
# fc = DCN(self.inplanes, planes,
# kernel_size=(3, 3), stride=1,
# padding=1, dilation=1, deformable_groups=1)
# fc = nn.Conv2d(self.inplanes, planes,
# kernel_size=3, stride=1,
# padding=1, dilation=1, bias=False)
# fill_fc_weights(fc)
up = nn.ConvTranspose2d(
in_channels=self.inplanes,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=self.deconv_with_bias)
fill_up_weights(up)
# layers.append(fc)
#layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
# layers.append(nn.ReLU(inplace=True))
layers.append(up)
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
self.inplanes = planes
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
c1 = self.layer1(x)
c2 = self.layer2(c1)
c3 = self.layer3(c2)
c4 = self.layer4(c3)
p4 = c4
p_3 = self.deconv_layer1(p4) + self.project_layer1(c3)
p3 = self.smooth_layer1(p_3)
p_2 = self.deconv_layer2(p_3) + self.project_layer2(c2)
p2 = self.smooth_layer2(p_2)
p_1 = self.deconv_layer3(p_2) + self.project_layer3(c1)
p1 = self.smooth_layer3(p_1)
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(p1)
return [ret]
    def init_weights(self, num_layers):
        url = model_urls['resnet{}'.format(num_layers)]
        pretrained_state_dict = model_zoo.load_url(url)
        print('=> loading pretrained model {}'.format(url))
        self.load_state_dict(pretrained_state_dict, strict=False)
        print('=> init deconv weights from normal distribution')
class DeformConv(nn.Module):
def __init__(self, chi, cho):
super(DeformConv, self).__init__()
'''
self.actf = nn.Sequential(
nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)
)
'''
self.conv = DCN(chi, cho, kernel_size=(3, 3), stride=1,
padding=1, dilation=1, deformable_groups=1)
'''
for name, m in self.actf.named_modules():
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
'''
def forward(self, x):
x = self.conv(x)
#x = self.actf(x)
return x
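# Added note: with the BatchNorm+ReLU block ("actf") commented out above,
# DeformConv here reduces to a bare 3x3 DCNv2 convolution, used for the FPN
# lateral (project) and post-fusion (smooth) layers in PoseResNet.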
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
34: (BasicBlock, [3, 4, 6, 3]),
50: (Bottleneck, [3, 4, 6, 3]),
101: (Bottleneck, [3, 4, 23, 3]),
152: (Bottleneck, [3, 8, 36, 3])}
def get_pose_net(num_layers, heads, head_conv=256):
block_class, layers = resnet_spec[num_layers]
model = PoseResNet(block_class, layers, heads, head_conv=head_conv)
model.init_weights(num_layers)
return model
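# A hedged usage sketch (not part of the original file; the head dict and
# input size are illustrative, and the DCNv2 extension must be built):
#   model = get_pose_net(34, heads={'hm': 1, 'wh': 4, 'id': 128}, head_conv=256)
#   out = model(torch.randn(1, 3, 608, 1088))[0]
#   out['hm'].shape  # torch.Size([1, 1, 152, 272]) -- stride-4 feature map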
| 33.632716
| 80
| 0.566211
|
175453c38f3860281429655c02e4ba6500627666
| 24,718
|
py
|
Python
|
models/swin_transformer.py
|
alisure-fork/Transformer-SSL
|
962b367197cb1ea0a5242d22832b455612d958b0
|
[
"MIT"
] | 426
|
2021-05-10T19:24:30.000Z
|
2022-03-27T10:19:07.000Z
|
models/swin_transformer.py
|
alisure-fork/Transformer-SSL
|
962b367197cb1ea0a5242d22832b455612d958b0
|
[
"MIT"
] | 12
|
2021-05-13T10:47:23.000Z
|
2022-02-21T10:19:09.000Z
|
models/swin_transformer.py
|
alisure-fork/Transformer-SSL
|
962b367197cb1ea0a5242d22832b455612d958b0
|
[
"MIT"
] | 52
|
2021-05-11T05:14:29.000Z
|
2022-03-27T10:18:45.000Z
|
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# Modified by Zhenda Xie
# --------------------------------------------------------
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
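# A shape sketch (illustrative sizes, not part of the original file):
# partitioning a (2, 56, 56, 96) map with window_size=7 yields
# (2 * 8 * 8, 7, 7, 96) = (128, 7, 7, 96) windows, and window_reverse
# inverts it exactly:
#   x = torch.randn(2, 56, 56, 96)
#   w = window_partition(x, 7)                       # (128, 7, 7, 96)
#   assert torch.equal(window_reverse(w, 7, 56, 56), x)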
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both shifted and non-shifted windows.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def extra_repr(self) -> str:
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
def flops(self, N):
# calculate flops for 1 window with token length of N
flops = 0
# qkv = self.qkv(x)
flops += N * self.dim * 3 * self.dim
# attn = (q @ k.transpose(-2, -1))
flops += self.num_heads * N * (self.dim // self.num_heads) * N
# x = (attn @ v)
flops += self.num_heads * N * N * (self.dim // self.num_heads)
# x = self.proj(x)
flops += N * self.dim * self.dim
return flops
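# Worked FLOPs example (added note) for one 7x7 window (N=49) with dim=96,
# num_heads=3: qkv 49*96*3*96 = 1,354,752; q @ k^T and attn @ v each
# 3*49*32*49 = 230,496; proj 49*96*96 = 451,584; total 2,267,328.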
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        norm_before_mlp (str, optional): Normalization applied before the MLP, 'ln' or 'bn'. Default: 'ln'
    """
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm, norm_before_mlp='ln'):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
self.norm_before_mlp = norm_before_mlp
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
if self.norm_before_mlp == 'ln':
self.norm2 = nn.LayerNorm(dim)
elif self.norm_before_mlp == 'bn':
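            # caveat (added note): this lambda builds a fresh, untrained BatchNorm1d on every call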
self.norm2 = lambda x: nn.BatchNorm1d(dim)(x.transpose(1, 2)).transpose(1, 2)
else:
raise NotImplementedError
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
def flops(self):
flops = 0
H, W = self.input_resolution
# norm1
flops += self.dim * H * W
# W-MSA/SW-MSA
nW = H * W / self.window_size / self.window_size
flops += nW * self.attn.flops(self.window_size * self.window_size)
# mlp
flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio
# norm2
flops += self.dim * H * W
return flops
class PatchMerging(nn.Module):
r""" Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self) -> str:
return f"input_resolution={self.input_resolution}, dim={self.dim}"
def flops(self):
H, W = self.input_resolution
flops = H * W * self.dim
flops += (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim
return flops
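# Shape sketch (illustrative sizes): PatchMerging((56, 56), 96) maps
# (B, 3136, 96) -> (B, 784, 192), halving each spatial side and doubling channels.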
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
norm_before_mlp='ln'):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer, norm_before_mlp=norm_before_mlp)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x = blk(x)
if self.downsample is not None:
x = self.downsample(x)
return x
def extra_repr(self) -> str:
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
def flops(self):
flops = 0
for blk in self.blocks:
flops += blk.flops()
if self.downsample is not None:
flops += self.downsample.flops()
return flops
class PatchEmbed(nn.Module):
r""" Image to Patch Embedding
Args:
img_size (int): Image size. Default: 224.
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, img_size=224, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
self.img_size = img_size
self.patch_size = patch_size
self.patches_resolution = patches_resolution
self.num_patches = patches_resolution[0] * patches_resolution[1]
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2) # B Ph*Pw C
if self.norm is not None:
x = self.norm(x)
return x
def flops(self):
Ho, Wo = self.patches_resolution
flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
if self.norm is not None:
flops += Ho * Wo * self.embed_dim
return flops
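# Shape sketch (illustrative sizes): PatchEmbed(img_size=224, patch_size=4,
# in_chans=3, embed_dim=96) maps (B, 3, 224, 224) -> (B, 3136, 96),
# since 224 / 4 = 56 patches per side and 56 * 56 = 3136.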
class SwinTransformer(nn.Module):
r""" Swin Transformer
    A PyTorch impl of: `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
        norm_before_mlp (str): Normalization applied before the MLP in each block, 'ln' or 'bn'. Default: 'ln'
    """
def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1000,
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24],
window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
use_checkpoint=False, norm_before_mlp='ln', **kwargs):
super().__init__()
self.num_classes = num_classes
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.num_features = int(embed_dim * 2 ** (self.num_layers - 1))
self.mlp_ratio = mlp_ratio
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.patches_resolution
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
norm_layer=norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint,
norm_before_mlp=norm_before_mlp)
self.layers.append(layer)
self.norm = norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def forward_features(self, x):
x = self.patch_embed(x)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for layer in self.layers:
x = layer(x)
x = self.norm(x) # B L C
x = self.avgpool(x.transpose(1, 2)) # B C 1
x = torch.flatten(x, 1)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
def flops(self):
flops = 0
flops += self.patch_embed.flops()
for i, layer in enumerate(self.layers):
flops += layer.flops()
flops += self.num_features * self.patches_resolution[0] * self.patches_resolution[1] // (2 ** self.num_layers)
flops += self.num_features * self.num_classes
return flops
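
# Usage sketch (an illustrative addition, not part of the original module): builds
# the default Swin-T configuration from the signature above and runs one forward
# pass. Assumes `torch` and the BasicLayer/PatchMerging classes defined earlier in
# this file are available.
def _demo_swin_tiny():  # hypothetical helper, for illustration only
    model = SwinTransformer(img_size=224, patch_size=4, num_classes=1000,
                            embed_dim=96, depths=[2, 2, 6, 2],
                            num_heads=[3, 6, 12, 24], window_size=7)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 1000)
    print(logits.shape, '%.2f GFLOPs' % (model.flops() / 1e9))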
avg_line_length: 41.542857 | max_line_length: 119 | alphanum_fraction: 0.595801

hexsha: a17908b1a4c6f5426b97165ae2fdaa07408c4328 | size: 6000 | ext: py | lang: Python
max_stars_repo_path: pycorrector/seq2seq/infer.py | max_stars_repo_name: ParikhKadam/pycorrector | max_stars_repo_head_hexsha: 771115538256126711888a48f8b359757500d345 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 3153 | max_stars_repo_stars_event_min_datetime: 2018-04-11T10:36:24.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-31T10:06:31.000Z
max_issues_repo_path: pycorrector/seq2seq/infer.py | max_issues_repo_name: ParikhKadam/pycorrector | max_issues_repo_head_hexsha: 771115538256126711888a48f8b359757500d345 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 251 | max_issues_repo_issues_event_min_datetime: 2018-04-12T08:44:07.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-30T08:50:32.000Z
max_forks_repo_path: pycorrector/seq2seq/infer.py | max_forks_repo_name: ParikhKadam/pycorrector | max_forks_repo_head_hexsha: 771115538256126711888a48f8b359757500d345 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 784 | max_forks_repo_forks_event_min_datetime: 2018-04-19T06:48:39.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-31T07:10:18.000Z
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description: Inference for seq2seq text error-correction models.
"""
import os
import sys
import numpy as np
import torch
sys.path.append('../..')
from pycorrector.seq2seq import config
from pycorrector.seq2seq.data_reader import SOS_TOKEN, EOS_TOKEN
from pycorrector.seq2seq.data_reader import load_word_dict
from pycorrector.seq2seq.seq2seq import Seq2Seq
from pycorrector.seq2seq.convseq2seq import ConvSeq2Seq
from pycorrector.seq2seq.data_reader import PAD_TOKEN
from pycorrector.seq2seq.seq2seq_model import Seq2SeqModel
from pycorrector.utils.logger import logger
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Inference(object):
def __init__(self, arch, model_dir, src_vocab_path=None, trg_vocab_path=None,
embed_size=50, hidden_size=50, dropout=0.5, max_length=128):
logger.debug("device: {}".format(device))
if arch in ['seq2seq', 'convseq2seq']:
self.src_2_ids = load_word_dict(src_vocab_path)
self.trg_2_ids = load_word_dict(trg_vocab_path)
self.id_2_trgs = {v: k for k, v in self.trg_2_ids.items()}
if arch == 'seq2seq':
logger.debug('use seq2seq model.')
self.model = Seq2Seq(encoder_vocab_size=len(self.src_2_ids),
decoder_vocab_size=len(self.trg_2_ids),
embed_size=embed_size,
enc_hidden_size=hidden_size,
dec_hidden_size=hidden_size,
dropout=dropout).to(device)
model_path = os.path.join(model_dir, 'seq2seq.pth')
self.model.load_state_dict(torch.load(model_path))
self.model.eval()
else:
logger.debug('use convseq2seq model.')
trg_pad_idx = self.trg_2_ids[PAD_TOKEN]
self.model = ConvSeq2Seq(encoder_vocab_size=len(self.src_2_ids),
decoder_vocab_size=len(self.trg_2_ids),
embed_size=embed_size,
enc_hidden_size=hidden_size,
dec_hidden_size=hidden_size,
dropout=dropout,
trg_pad_idx=trg_pad_idx,
device=device,
max_length=max_length).to(device)
model_path = os.path.join(model_dir, 'convseq2seq.pth')
self.model.load_state_dict(torch.load(model_path))
self.model.eval()
elif arch == 'bertseq2seq':
# Bert Seq2seq model
logger.debug('use bert seq2seq model.')
            use_cuda = torch.cuda.is_available()
# encoder_type=None, encoder_name=None, decoder_name=None
self.model = Seq2SeqModel("bert", "{}/encoder".format(model_dir),
"{}/decoder".format(model_dir), use_cuda=use_cuda)
else:
logger.error('error arch: {}'.format(arch))
raise ValueError("Model arch choose error. Must use one of seq2seq model.")
self.arch = arch
self.max_length = max_length
def predict(self, sentence_list):
result = []
if self.arch in ['seq2seq', 'convseq2seq']:
for query in sentence_list:
out = []
tokens = [token.lower() for token in query]
tokens = [SOS_TOKEN] + tokens + [EOS_TOKEN]
src_ids = [self.src_2_ids[i] for i in tokens if i in self.src_2_ids]
sos_idx = self.trg_2_ids[SOS_TOKEN]
if self.arch == 'seq2seq':
src_tensor = torch.from_numpy(np.array(src_ids).reshape(1, -1)).long().to(device)
src_tensor_len = torch.from_numpy(np.array([len(src_ids)])).long().to(device)
sos_tensor = torch.Tensor([[self.trg_2_ids[SOS_TOKEN]]]).long().to(device)
translation, attn = self.model.translate(src_tensor, src_tensor_len, sos_tensor, self.max_length)
translation = [self.id_2_trgs[i] for i in translation.data.cpu().numpy().reshape(-1) if
i in self.id_2_trgs]
else:
src_tensor = torch.from_numpy(np.array(src_ids).reshape(1, -1)).long().to(device)
translation, attn = self.model.translate(src_tensor, sos_idx)
translation = [self.id_2_trgs[i] for i in translation if i in self.id_2_trgs]
for word in translation:
if word != EOS_TOKEN:
out.append(word)
else:
break
result.append(''.join(out))
elif self.arch == 'bertseq2seq':
corrected_sents = self.model.predict(sentence_list)
result = [i.replace(' ', '') for i in corrected_sents]
else:
            raise ValueError('error arch: {}'.format(self.arch))
return result
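
# Usage sketch (an illustrative addition; the paths below are hypothetical
# placeholders, not project defaults -- the __main__ block underneath shows the
# equivalent driven by `config`):
def _demo_inference():  # hypothetical helper, for illustration only
    m = Inference('seq2seq', './output/model_dir',
                  src_vocab_path='./output/vocab_source.txt',
                  trg_vocab_path='./output/vocab_target.txt')
    print(m.predict(['老是较书。']))  # returns a list of corrected sentences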
if __name__ == "__main__":
m = Inference(config.arch,
config.model_dir,
config.src_vocab_path,
config.trg_vocab_path,
embed_size=config.embed_size,
hidden_size=config.hidden_size,
dropout=config.dropout,
max_length=config.max_length
)
inputs = [
'老是较书。',
'感谢等五分以后,碰到一位很棒的奴生跟我可聊。',
'遇到一位很棒的奴生跟我聊天。',
'遇到一位很美的女生跟我疗天。',
'他们只能有两个选择:接受降新或自动离职。',
'王天华开心得一直说话。'
]
outputs = m.predict(inputs)
for a, b in zip(inputs, outputs):
print('input :', a)
print('predict:', b)
print()
# result:
# input:由我起开始做。
# output:我开始做。
# input:没有解决这个问题,
# output:没有解决的问题,
# input:由我起开始做。
avg_line_length: 43.478261 | max_line_length: 117 | alphanum_fraction: 0.5515

hexsha: 2f1e934b8e60286169e18fd9d8a48e332ee21f62 | size: 3858 | ext: py | lang: Python
max_stars_repo_path: socib_cms/cmsutils/migrations/0001_initial.py | max_stars_repo_name: socib/django-socib-cms | max_stars_repo_head_hexsha: e5f5f137cfa4e156956c9757a7c6979e6bee51ce | max_stars_repo_licenses: ["MIT"] | max_stars_count: 3 | max_stars_repo_stars_event_min_datetime: 2016-11-25T13:35:26.000Z | max_stars_repo_stars_event_max_datetime: 2018-04-27T04:25:39.000Z
max_issues_repo_path: socib_cms/cmsutils/migrations/0001_initial.py | max_issues_repo_name: socib/django-socib-cms | max_issues_repo_head_hexsha: e5f5f137cfa4e156956c9757a7c6979e6bee51ce | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: socib_cms/cmsutils/migrations/0001_initial.py | max_forks_repo_name: socib/django-socib-cms | max_forks_repo_head_hexsha: e5f5f137cfa4e156956c9757a7c6979e6bee51ce | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
import filer.fields.image
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('filer', '0006_auto_20160623_1627'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='GenericLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('object_id', models.PositiveIntegerField()),
('order', models.IntegerField(default=100, verbose_name='order')),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
],
options={
'ordering': ('order',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='LinkSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('code', models.CharField(max_length=20, verbose_name='code')),
('description', models.CharField(max_length=200, null=True, verbose_name='description', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='URLLink',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('url', models.URLField(verbose_name='URL')),
('title', models.CharField(max_length=200, verbose_name='title')),
('title_en', models.CharField(max_length=200, null=True, verbose_name='title')),
('title_ca', models.CharField(max_length=200, null=True, verbose_name='title')),
('title_es', models.CharField(max_length=200, null=True, verbose_name='title')),
('description', models.CharField(max_length=900, null=True, verbose_name='description', blank=True)),
('description_en', models.CharField(max_length=900, null=True, verbose_name='description', blank=True)),
('description_ca', models.CharField(max_length=900, null=True, verbose_name='description', blank=True)),
('description_es', models.CharField(max_length=900, null=True, verbose_name='description', blank=True)),
('css_class', models.CharField(max_length=50, null=True, verbose_name='CSS class', blank=True)),
('created_on', models.DateTimeField(auto_now_add=True, verbose_name='date added')),
('updated_on', models.DateTimeField(auto_now=True, verbose_name='date modified')),
('created_by', models.ForeignKey(related_name='created-urllink', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='created by')),
('picture', filer.fields.image.FilerImageField(on_delete=django.db.models.deletion.SET_NULL, verbose_name='picture', blank=True, to='filer.Image', null=True)),
('updated_by', models.ForeignKey(related_name='updated-urllink', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='update by')),
],
options={
'verbose_name': 'URL link',
'verbose_name_plural': 'URL links',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='genericlink',
name='linkset',
field=models.ForeignKey(to='cmsutils.LinkSet'),
preserve_default=True,
),
]
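
# Usage sketch (an illustrative addition, not part of the original migration
# file): applying this initial migration programmatically via Django's public
# management API. The usual CLI equivalent is `python manage.py migrate cmsutils`.
def _demo_apply_migration():  # hypothetical helper, for illustration only
    from django.core.management import call_command
    call_command('migrate', 'cmsutils')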
avg_line_length: 50.763158 | max_line_length: 177 | alphanum_fraction: 0.606013

hexsha: 2fd784b4dd56a2be3031c8c4999f3cb2fbeed481 | size: 11777 | ext: py | lang: Python
max_stars_repo_path: model/faster_rcnn.py | max_stars_repo_name: txytju/Faster-RCNN-LocNet | max_stars_repo_head_hexsha: 74a2dc4d70f1236d0aec71914af8fa922f428e4f | max_stars_repo_licenses: ["MIT"] | max_stars_count: 29 | max_stars_repo_stars_event_min_datetime: 2018-07-02T08:03:02.000Z | max_stars_repo_stars_event_max_datetime: 2022-02-27T14:06:44.000Z
max_issues_repo_path: model/faster_rcnn.py | max_issues_repo_name: txytju/Faster-RCNN-LocNet | max_issues_repo_head_hexsha: 74a2dc4d70f1236d0aec71914af8fa922f428e4f | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2018-08-01T12:53:15.000Z | max_issues_repo_issues_event_max_datetime: 2018-08-01T12:53:15.000Z
max_forks_repo_path: model/faster_rcnn.py | max_forks_repo_name: txytju/Faster-RCNN-LocNet | max_forks_repo_head_hexsha: 74a2dc4d70f1236d0aec71914af8fa922f428e4f | max_forks_repo_licenses: ["MIT"] | max_forks_count: 8 | max_forks_repo_forks_event_min_datetime: 2018-07-03T08:29:22.000Z | max_forks_repo_forks_event_max_datetime: 2022-02-27T14:06:45.000Z
from __future__ import division
import torch as t
import numpy as np
import cupy as cp
from utils import array_tool as at
from model.utils.bbox_tools import loc2bbox, p2bbox
from model.utils.nms import non_maximum_suppression
from torch import nn
from data.dataset import preprocess
from torch.nn import functional as F
from utils.config import opt
class FasterRCNN(nn.Module):
"""Base class for Faster R-CNN.
This is a base class for Faster R-CNN links supporting object detection
API [#]_. The following three stages constitute Faster R-CNN.
1. **Feature extraction**: Images are taken and their \
feature maps are calculated.
2. **Region Proposal Networks**: Given the feature maps calculated in \
        the previous stage, produce a set of RoIs around objects.
3. **Localization and Classification Heads**: Using feature maps that \
belong to the proposed RoIs, classify the categories of the objects \
in the RoIs and improve localizations.
Each stage is carried out by one of the callable
    :class:`torch.nn.Module` objects :obj:`extractor`, :obj:`rpn` and :obj:`head`.
There are two functions :meth:`predict` and :meth:`__call__` to conduct
object detection.
:meth:`predict` takes images and returns bounding boxes that are converted
to image coordinates. This will be useful for a scenario when
Faster R-CNN is treated as a black box function, for instance.
    :meth:`__call__` is provided for a scenario when intermediate outputs
    are needed, for instance, for training and debugging.
    Links that support object detection API have method :meth:`predict` with
the same interface. Please refer to :meth:`predict` for
further details.
.. [#] Shaoqing Ren, Kaiming He, Ross Girshick, Jian Sun. \
Faster R-CNN: Towards Real-Time Object Detection with \
Region Proposal Networks. NIPS 2015.
Args:
extractor (nn.Module): A module that takes a BCHW image
array and returns feature maps.
rpn (nn.Module): A module that has the same interface as
:class:`model.region_proposal_network.RegionProposalNetwork`.
Please refer to the documentation found there.
head (nn.Module): A module that takes
a BCHW variable, RoIs and batch indices for RoIs. This returns class
            dependent localization parameters and class scores.
loc_normalize_mean (tuple of four floats): Mean values of
localization estimates.
        loc_normalize_std (tuple of four floats): Standard deviation
of localization estimates.
"""
def __init__(self, extractor, rpn, head,
loc_normalize_mean = (0., 0., 0., 0.),
loc_normalize_std = (0.1, 0.1, 0.2, 0.2)
):
super(FasterRCNN, self).__init__()
self.extractor = extractor
self.rpn = rpn
self.head = head
# mean and std
self.loc_normalize_mean = loc_normalize_mean
self.loc_normalize_std = loc_normalize_std
self.use_preset('evaluate')
@property
def n_class(self):
# Total number of classes including the background.
return self.head.n_class
def forward(self, x, scale=1.):
"""Forward Faster R-CNN.
        Scaling parameter :obj:`scale` is used by RPN to determine the
threshold to select small objects, which are going to be
rejected irrespective of their confidence scores.
Here are notations used.
        * :math:`N` is the batch size
* :math:`R'` is the total number of RoIs produced across batches. \
Given :math:`R_i` proposed RoIs from the :math:`i` th image, \
:math:`R' = \\sum _{i=1} ^ N R_i`.
* :math:`L` is the number of classes excluding the background.
Classes are ordered by the background, the first class, ..., and
the :math:`L` th class.
Args:
x (autograd.Variable): 4D image variable.
scale (float): Amount of scaling applied to the raw image
during preprocessing.
Returns:
Variable, Variable, array, array:
Returns tuple of four values listed below.
* **roi_cls_locs**: Offsets and scalings for the proposed RoIs. \
Its shape is :math:`(R', (L + 1) \\times 4)`.
* **roi_scores**: Class predictions for the proposed RoIs. \
Its shape is :math:`(R', L + 1)`.
* **rois**: RoIs proposed by RPN. Its shape is \
:math:`(R', 4)`.
* **roi_indices**: Batch indices of RoIs. Its shape is \
:math:`(R',)`.
"""
img_size = x.shape[2:]
h = self.extractor(x)
rpn_locs, rpn_scores, rois, search_regions, roi_indices, anchor = self.rpn(h, img_size, scale)
(px, py), roi_scores = self.head(h, rois, search_regions, roi_indices)
return (px, py), roi_scores, rois, search_regions, roi_indices
def use_preset(self, preset):
"""Use the given preset during prediction.
This method changes values of :obj:`self.nms_thresh` and
:obj:`self.score_thresh`. These values are a threshold value
used for non maximum suppression and a threshold value
to discard low confidence proposals in :meth:`predict`,
respectively.
If the attributes need to be changed to something
other than the values provided in the presets, please modify
them by directly accessing the public attributes.
Args:
            preset ({'visualize', 'evaluate'}): A string to determine the
preset to use.
"""
if preset == 'visualize':
self.nms_thresh = 0.3
self.score_thresh = 0.7
elif preset == 'evaluate':
self.nms_thresh = 0.3
self.score_thresh = 0.05
else:
raise ValueError('preset must be visualize or evaluate')
def _suppress(self, raw_cls_bbox, raw_prob):
bbox = list()
label = list()
score = list()
# skip cls_id = 0 because it is the background class
for l in range(1, self.n_class):
cls_bbox_l = raw_cls_bbox
prob_l = raw_prob[:, l]
mask = prob_l > self.score_thresh
cls_bbox_l = cls_bbox_l[mask]
prob_l = prob_l[mask]
keep = non_maximum_suppression(
cp.array(cls_bbox_l), self.nms_thresh, prob_l)
keep = cp.asnumpy(keep)
bbox.append(cls_bbox_l[keep])
# The labels are in [0, self.n_class - 2].
label.append((l - 1) * np.ones((len(keep),)))
score.append(prob_l[keep])
bbox = np.concatenate(bbox, axis=0).astype(np.float32)
label = np.concatenate(label, axis=0).astype(np.int32)
score = np.concatenate(score, axis=0).astype(np.float32)
return bbox, label, score
def predict(self, imgs, sizes=None, visualize=False, prob_thre=0.7):
"""Detect objects from images.
This method predicts objects for each image.
Args:
imgs (iterable of numpy.ndarray): Arrays holding images.
All images are in CHW and RGB format
and the range of their value is :math:`[0, 255]`.
Returns:
tuple of lists:
This method returns a tuple of three lists,
:obj:`(bboxes, labels, scores)`.
* **bboxes**: A list of float arrays of shape :math:`(R, 4)`, \
            where :math:`R` is the number of bounding boxes in an image. \
            Each bounding box is organized by \
:math:`(y_{min}, x_{min}, y_{max}, x_{max})` \
in the second axis.
* **labels** : A list of integer arrays of shape :math:`(R,)`. \
Each value indicates the class of the bounding box. \
Values are in range :math:`[0, L - 1]`, where :math:`L` is the \
number of the foreground classes.
* **scores** : A list of float arrays of shape :math:`(R,)`. \
Each value indicates how confident the prediction is.
"""
self.eval()
        # sizes is recomputed from the raw images when visualize is True
if visualize:
self.use_preset('visualize')
prepared_imgs = list()
sizes = list()
for img in imgs:
size = img.shape[1:] # reshaped image size
img = preprocess(at.tonumpy(img))
prepared_imgs.append(img)
sizes.append(size)
else:
prepared_imgs = imgs
bboxes = list()
labels = list()
scores = list()
for img, size in zip(prepared_imgs, sizes):
            img = t.autograd.Variable(at.totensor(img).float()[None], volatile=True)
            # coerce a possibly-tensor size entry to a plain int before computing
            # the scale (elements of img.shape are already plain ints)
            if t.is_tensor(size[1]):
                size[1] = int(size[1])
            scale = img.shape[3] / size[1]
(px, py), roi_scores, rois, search_regions, _ = self(img, scale=scale)
# We are assuming that batch size is 1.
roi_score = roi_scores.data
px = px.data
py = py.data
roi = at.totensor(rois) / scale
search_regions = at.totensor(search_regions) / scale
# Convert to numpy array
px = at.tonumpy(px)
py = at.tonumpy(py)
search_regions = at.tonumpy(search_regions)
# Convert predictions to bounding boxes in image coordinates.
# Bounding boxes are scaled to the scale of the input images.
# use px, py and search_regions to generate boxes
cls_bbox = p2bbox(px, py, search_regions, threshold=prob_thre)
cls_bbox = at.totensor(cls_bbox)
# clip bounding box
cls_bbox[:, 0::2] = (cls_bbox[:, 0::2]).clamp(min=0, max=size[0])
cls_bbox[:, 1::2] = (cls_bbox[:, 1::2]).clamp(min=0, max=size[1])
prob = at.tonumpy(F.softmax(at.tovariable(roi_score), dim=1))
raw_cls_bbox = at.tonumpy(cls_bbox)
raw_prob = at.tonumpy(prob)
# print("raw_cls_bbox shape : ", raw_cls_bbox.shape)
# print("raw_prob : ", raw_prob)
bbox, label, score = self._suppress(raw_cls_bbox, raw_prob)
bboxes.append(bbox)
labels.append(label)
scores.append(score)
self.use_preset('evaluate')
self.train()
return bboxes, labels, scores
def get_optimizer(self):
"""
return optimizer, It could be overwriten if you want to specify
special optimizer
"""
lr = opt.lr
params = []
# different learning rate for different parameters
for key, value in dict(self.named_parameters()).items():
if value.requires_grad:
if 'bias' in key:
params += [{'params': [value], 'lr': lr * 2, 'weight_decay': 0}]
else:
params += [{'params': [value], 'lr': lr, 'weight_decay': opt.weight_decay}]
if opt.use_adam:
self.optimizer = t.optim.Adam(params)
else:
self.optimizer = t.optim.SGD(params, momentum=0.9)
return self.optimizer
def scale_lr(self, decay=0.1):
for param_group in self.optimizer.param_groups:
param_group['lr'] *= decay
return self.optimizer
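
# Usage sketch (an illustrative addition, not part of the original module): the
# black-box detection path described in the class docstring. The `extractor`,
# `rpn` and `head` modules must be built elsewhere (e.g. by a VGG16-based
# subclass); `img` is a CHW, RGB, [0, 255]-range array as predict() documents.
def _demo_detect(faster_rcnn, img):  # hypothetical helper, for illustration only
    bboxes, labels, scores = faster_rcnn.predict([img], visualize=True)
    return bboxes[0], labels[0], scores[0]  # arrays for the single input image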
avg_line_length: 37.15142 | max_line_length: 102 | alphanum_fraction: 0.58818