Dataset schema (29 columns; ⌀ marks nullable columns):
hexsha: string (length 40)
size: int64 (3 to 1.03M)
ext: string (10 classes)
lang: string (1 class)
max_stars_repo_path: string (length 3 to 972)
max_stars_repo_name: string (length 6 to 130)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k) ⌀
max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
max_issues_repo_path: string (length 3 to 972)
max_issues_repo_name: string (length 6 to 130)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 116k) ⌀
max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
max_forks_repo_path: string (length 3 to 972)
max_forks_repo_name: string (length 6 to 130)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k) ⌀
max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
content: string (length 3 to 1.03M)
avg_line_length: float64 (1.13 to 941k)
max_line_length: int64 (2 to 941k)
alphanum_fraction: float64 (0 to 1)
hexsha: 85720a88db5024722bcc1458b131d3934654bee2 | size: 13,346 | ext: py | lang: Python
max_stars_repo: tftest.py | spookiej/terraform-python-testing-helper | bd74b0b4be6e3e25a10ceff475adf4dc727b2e9d | ["Apache-2.0"] | max_stars_count: null | stars_event: null to null
max_issues_repo: tftest.py | spookiej/terraform-python-testing-helper | bd74b0b4be6e3e25a10ceff475adf4dc727b2e9d | ["Apache-2.0"] | max_issues_count: null | issues_event: null to null
max_forks_repo: tftest.py | spookiej/terraform-python-testing-helper | bd74b0b4be6e3e25a10ceff475adf4dc727b2e9d | ["Apache-2.0"] | max_forks_count: null | forks_event: null to null
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple Python wrapper for Terraform test fixtures.
See documentation in the TerraformTest class for usage. Terraform wrapping
inspired by https://github.com/beelit94/python-terraform
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
import itertools
import json
import logging
import os
import shutil
import subprocess
import tempfile
import weakref
__version__ = '1.4.1'
_LOGGER = logging.getLogger('tftest')
TerraformCommandOutput = collections.namedtuple(
'TerraformCommandOutput', 'retcode out err')
TerraformStateResource = collections.namedtuple(
'TerraformStateResource', 'key provider type attributes depends_on raw')
class TerraformTestError(Exception):
pass
def parse_args(init_vars=None, tf_vars=None, **kw):
"""Convert method arguments for use in Terraform commands.
Args:
init_vars: dict of key/values converted to -backend-config='k=v' form, or
string argument converted to -backend-config=arg
tf_vars: dict of key/values converted to -var k=v form.
**kw: converted to the appropriate Terraform flag.
Returns:
A list of command arguments for use with subprocess.
"""
cmd_args = []
if kw.get('auto_approve'):
cmd_args.append('-auto-approve')
if kw.get('backend') is False:
cmd_args.append('-backend=false')
if kw.get('color') is False:
cmd_args.append('-no-color')
if kw.get('force_copy'):
cmd_args.append('-force-copy')
if kw.get('input') is False:
cmd_args.append('-input=false')
if kw.get('json_format') is True:
cmd_args.append('-json')
if kw.get('lock') is False:
cmd_args.append('-lock=false')
if kw.get('plugin_dir'):
cmd_args += ['-plugin-dir', kw['plugin_dir']]
if kw.get('refresh') is False:
cmd_args.append('-refresh=false')
if isinstance(init_vars, dict):
cmd_args += ['-backend-config=\'{}={}\''.format(k, v)
for k, v in init_vars.items()]
elif isinstance(init_vars, str):
cmd_args += ['-backend-config', '{}'.format(init_vars)]
if tf_vars:
cmd_args += list(itertools.chain.from_iterable(
("-var", "{}={}".format(k, v)) for k, v in tf_vars.items()
))
return cmd_args
class TerraformJSONBase(object):
"Base class for JSON wrappers."
def __init__(self, raw):
self._raw = raw
def __bytes__(self):
return bytes(self._raw)
def __len__(self):
return len(self._raw)
def __str__(self):
return str(self._raw)
class TerraformValueDict(TerraformJSONBase):
"Minimal wrapper to directly expose outputs or variables."
def __init__(self, raw):
super(TerraformValueDict, self).__init__(raw)
# only matters for outputs
self.sensitive = tuple(k for k, v in raw.items() if v.get('sensitive'))
def __getattr__(self, name):
return getattr(self._raw, name)
def __getitem__(self, name):
return self._raw[name].get('value')
def __contains__(self, name):
return name in self._raw
def __iter__(self):
return iter(self._raw)
class TerraformPlanModule(TerraformJSONBase):
"Minimal wrapper for parsed plan output modules."
def __init__(self, raw):
super(TerraformPlanModule, self).__init__(raw)
prefix = raw.get('address', '')
self._strip = 0 if not prefix else len(prefix) + 1
self._modules = self._resources = None
@property
def child_modules(self):
if self._modules is None:
self._modules = dict((mod['address'][self._strip:], TerraformPlanModule(
mod)) for mod in self._raw.get('child_modules'))
return self._modules
@property
def resources(self):
if self._resources is None:
self._resources = dict((res['address'][self._strip:], res)
for res in self._raw.get('resources', []))
return self._resources
def __getitem__(self, name):
return self._raw[name]
def __contains__(self, name):
return name in self._raw
class TerraformPlanOutput(TerraformJSONBase):
"Minimal wrapper for Terraform plan JSON output."
def __init__(self, raw):
super(TerraformPlanOutput, self).__init__(raw)
planned_values = raw.get('planned_values', {})
self.root_module = TerraformPlanModule(
planned_values.get('root_module', {}))
self.outputs = TerraformValueDict(planned_values.get('outputs', {}))
self.resource_changes = dict((v['address'], v)
for v in self._raw['resource_changes'])
self.variables = TerraformValueDict(raw['variables'])
@property
def resources(self):
return self.root_module.resources
@property
def modules(self):
return self.root_module.child_modules
def __getattr__(self, name):
return self._raw[name]
class TerraformState(TerraformJSONBase):
"Minimal wrapper for Terraform state JSON format."
def __init__(self, raw):
super(TerraformState, self).__init__(raw)
self.outputs = TerraformValueDict(raw.get('outputs', {}))
self._resources = None
@property
def resources(self):
if not self._resources:
resources = {}
for res in self._raw['resources']:
name = '%s.%s.%s' % (
res.get('module'), res.get('type'), res.get('name'))
resources[name] = res
self._resources = resources
return self._resources
def __getattr__(self, name):
return self._raw[name]
class TerraformTest(object):
"""Helper class for use in testing Terraform modules.
This helper class can be used to set up fixtures in Terraform tests, so that
the usual Terraform commands (init, plan, apply, output, destroy) can be run
on a module. Configuration is done at instantiation first, by passing in the
  Terraform root module path, and then in the setup method through files that
will be temporarily linked in the module, and Terraform variables.
The standard way of using this is by calling setup to configure the module
through temporarily linked Terraform files and variables, run one or more
Terraform commands, then check command output, state, or created resources
from individual tests.
  The local .terraform directory (including local state) and any linked files
are removed when the instance is garbage collected. Destroy needs to be
called explicitly using destroy().
Args:
tfdir: the Terraform module directory to test, either an absolute path, or
relative to basedir.
basedir: optional base directory to use for relative paths, defaults to the
directory above the one this module lives in.
terraform: path to the Terraform command.
"""
def __init__(self, tfdir, basedir=None, terraform='terraform'):
"""Set Terraform folder to operate on, and optional base directory."""
self._basedir = basedir or os.getcwd()
self.terraform = terraform
self.tfdir = self._abspath(tfdir)
@classmethod
def _cleanup(cls, tfdir, filenames, deep=True):
"""Remove linked files and .terraform folder at instance deletion."""
_LOGGER.debug('cleaning up %s %s', tfdir, filenames)
for filename in filenames:
path = os.path.join(tfdir, filename)
if os.path.islink(path):
os.unlink(path)
if not deep:
return
path = os.path.join(tfdir, '.terraform')
if os.path.isdir(path):
shutil.rmtree(path)
path = os.path.join(tfdir, 'terraform.tfstate')
if os.path.isfile(path):
os.unlink(path)
def _abspath(self, path):
"""Make relative path absolute from base dir."""
return path if path.startswith('/') else os.path.join(self._basedir, path)
def setup(self, extra_files=None, plugin_dir=None, init_vars=None,
backend=True, cleanup_on_exit=True):
"""Setup method to use in test fixtures.
This method prepares a new Terraform environment for testing the module
specified at init time, and returns init output.
Args:
extra_files: list of absolute or relative to base paths to be linked in
the root module folder
plugin_dir: path to a plugin directory to be used for Terraform init, eg
built with terraform-bundle
init_vars: Terraform backend configuration variables
backend: Terraform backend argument
cleanup_on_exit: remove .terraform and terraform.tfstate files on exit
Returns:
Terraform init output.
"""
# link extra files inside dir
filenames = []
for link_src in (extra_files or []):
link_src = self._abspath(link_src)
filename = os.path.basename(link_src)
if os.path.isfile(link_src):
link_dst = os.path.join(self.tfdir, filename)
try:
os.symlink(link_src, link_dst)
except FileExistsError as e: # pylint:disable=undefined-variable
_LOGGER.warning(e)
else:
_LOGGER.debug('linked %s', link_src)
filenames.append(filename)
else:
_LOGGER.warning('no such file {}'.format(link_src))
self._finalizer = weakref.finalize(
self, self._cleanup, self.tfdir, filenames, deep=cleanup_on_exit)
return self.init(plugin_dir=plugin_dir, init_vars=init_vars, backend=backend)
def init(self, input=False, color=False, force_copy=False, plugin_dir=None,
init_vars=None, backend=True):
"""Run Terraform init command."""
cmd_args = parse_args(input=input, color=color, backend=backend,
force_copy=force_copy, plugin_dir=plugin_dir,
init_vars=init_vars)
return self.execute_command('init', *cmd_args).out
def plan(self, input=False, color=False, refresh=True, tf_vars=None, output=False):
"Run Terraform plan command, optionally returning parsed plan output."
cmd_args = parse_args(input=input, color=color,
refresh=refresh, tf_vars=tf_vars)
if not output:
return self.execute_command('plan', *cmd_args).out
with tempfile.NamedTemporaryFile() as fp:
cmd_args.append('-out={}'.format(fp.name))
self.execute_command('plan', *cmd_args)
result = self.execute_command('show', '-no-color', '-json', fp.name)
try:
return TerraformPlanOutput(json.loads(result.out))
except json.JSONDecodeError as e:
raise TerraformTestError('Error decoding plan output: {}'.format(e))
def apply(self, input=False, color=False, auto_approve=True, tf_vars=None):
"""Run Terraform apply command."""
cmd_args = parse_args(input=input, color=color,
auto_approve=auto_approve, tf_vars=tf_vars)
return self.execute_command('apply', *cmd_args).out
def output(self, name=None, color=False, json_format=True):
"""Run Terraform output command."""
cmd_args = []
if name:
cmd_args.append(name)
cmd_args += parse_args(color=color, json_format=json_format)
output = self.execute_command('output', *cmd_args).out
_LOGGER.debug('output %s', output)
if json_format:
try:
output = TerraformValueDict(json.loads(output))
except json.JSONDecodeError as e:
_LOGGER.warning('error decoding output: {}'.format(e))
return output
def destroy(self, color=False, auto_approve=True, tf_vars=None):
"""Run Terraform destroy command."""
cmd_args = parse_args(
color=color, auto_approve=auto_approve, tf_vars=tf_vars)
return self.execute_command('destroy', *cmd_args).out
def refresh(self, color=False, lock=False, tf_vars=None):
"""Run Terraform refresh command."""
cmd_args = parse_args(
color=color, lock=lock, tf_vars=tf_vars)
return self.execute_command('refresh', *cmd_args).out
def state_pull(self):
"""Pull state."""
state = self.execute_command('state', 'pull')
try:
state = TerraformState(json.loads(state.out))
except json.JSONDecodeError as e:
_LOGGER.warning('error decoding state: {}'.format(e))
return state
def execute_command(self, cmd, *cmd_args):
"""Run arbitrary Terraform command."""
_LOGGER.debug([cmd, cmd_args])
cmdline = [self.terraform, cmd]
cmdline += cmd_args
try:
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, cwd=self.tfdir, env=os.environ.copy())
except FileNotFoundError as e:
raise TerraformTestError('Terraform executable not found: %s' % e)
out, err = p.communicate()
out = out.decode('utf-8', errors='ignore')
err = err.decode('utf-8', errors='ignore')
retcode = p.returncode
if retcode == 1:
message = 'Error running command {command}: {retcode} {out} {err}'.format(
command=cmd, retcode=retcode, out=out, err=err)
_LOGGER.critical(message)
raise TerraformTestError(message)
return TerraformCommandOutput(retcode, out, err)
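# --- Hedged usage sketch (not in the original file) ---
# The TerraformTest docstring above describes the setup / plan / output flow;
# this pytest-style fixture is a minimal illustration of it (in a separate test
# module one would `import tftest` and use `tftest.TerraformTest`). The module
# path 'fixtures/sample-module' and the variable/output name 'name' are
# hypothetical placeholders, not taken from the source.
import pytest

@pytest.fixture
def plan():
  tf = TerraformTest('fixtures/sample-module', basedir='/tmp/tf-tests')
  tf.setup()
  return tf.plan(output=True, tf_vars={'name': 'example'})

def test_name_output(plan):
  # plan.outputs wraps the planned output values (a TerraformValueDict)
  assert 'name' in plan.outputs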
avg_line_length: 34.485788 | max_line_length: 89 | alphanum_fraction: 0.690019
hexsha: beb8d7c72c286a206fbc406be755d27ed7d67af5 | size: 2,954 | ext: py | lang: Python
max_stars_repo: hood/models.py | tonyishangu/Hoodwatch | b627d1facc46edbd22edb2c22cf864e3310618fc | ["MIT"] | max_stars_count: null | stars_event: null to null
max_issues_repo: hood/models.py | tonyishangu/Hoodwatch | b627d1facc46edbd22edb2c22cf864e3310618fc | ["MIT"] | max_issues_count: null | issues_event: null to null
max_forks_repo: hood/models.py | tonyishangu/Hoodwatch | b627d1facc46edbd22edb2c22cf864e3310618fc | ["MIT"] | max_forks_count: null | forks_event: null to null
from django.db import models
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
from datetime import datetime
from cloudinary.models import CloudinaryField
class NeighbourHood(models.Model):
name = models.CharField(max_length=50, null=True, blank=True)
location = models.CharField(max_length=60, null=True, blank=True)
admin = models.ForeignKey("Profile", on_delete=models.CASCADE, related_name='hood', null=True, blank=True)
hood_logo = CloudinaryField('images')
description = models.TextField(null=True, blank=True)
health_tell = models.IntegerField(null=True, blank=True)
police_number = models.IntegerField(null=True, blank=True)
def __str__(self):
return f'{self.name} hood'
def create_neighborhood(self):
self.save()
def delete_neighborhood(self):
self.delete()
@classmethod
def find_neighborhood(cls, neighborhood_id):
return cls.objects.filter(id=neighborhood_id)
class Profile(models.Model):
user = models.OneToOneField(
User, on_delete=models.CASCADE, related_name='profile', null=True, blank=True)
name = models.CharField(max_length=80, blank=True)
bio = models.TextField(max_length=254, blank=True)
profile_picture = CloudinaryField('images')
location = models.CharField(max_length=50, blank=True, null=True)
neighbourhood = models.ForeignKey(
NeighbourHood, on_delete=models.SET_NULL, null=True, related_name='members', blank=True)
def __str__(self):
return f'{self.user.username} profile'
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
class Business(models.Model):
name = models.CharField(max_length=120)
email = models.EmailField(max_length=254)
description = models.TextField(blank=True)
neighbourhood = models.ForeignKey(
NeighbourHood, on_delete=models.CASCADE, related_name='business')
user = models.ForeignKey(
Profile, on_delete=models.CASCADE, related_name='owner')
def __str__(self):
return f'{self.name} Business'
def create_business(self):
self.save()
def delete_business(self):
self.delete()
@classmethod
def search_business(cls, name):
return cls.objects.filter(name__icontains=name).all()
class Post(models.Model):
title = models.CharField(max_length=120, null=True)
post = models.TextField()
date = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(
Profile, on_delete=models.CASCADE, related_name='post_owner')
hood = models.ForeignKey(
NeighbourHood, on_delete=models.CASCADE, related_name='hood_post')
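# --- Hedged usage sketch (not in the original file) ---
# Illustrates, with made-up values, how the post_save receivers above attach a
# Profile to every new User, and how Business.search_business filters by name.
# Requires a configured Django project; 'jane', 'Kibera' and 'Mama Mboga' are
# hypothetical.
def demo_models():
    user = User.objects.create_user(username='jane', password='pass1234')
    profile = user.profile  # created automatically by create_user_profile
    hood = NeighbourHood.objects.create(
        name='Kibera', location='Nairobi', admin=profile)
    Business.objects.create(name='Mama Mboga', email='shop@example.com',
                            neighbourhood=hood, user=profile)
    return Business.search_business('mboga')  # case-insensitive name match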
avg_line_length: 34.348837 | max_line_length: 110 | alphanum_fraction: 0.71564
hexsha: e1742a333d7c6e5ca5f491d44b446f23b10a1f18 | size: 6,166 | ext: py | lang: Python
max_stars_repo: probability/discrete/prob_utils.py | vahndi/probability | 6ddf88e6f3d947c96b879e426030f60eb5cb2d59 | ["MIT"] | max_stars_count: 2 | stars_event: 2020-02-21T00:47:03.000Z to 2020-09-22T19:00:48.000Z
max_issues_repo: probability/discrete/prob_utils.py | vahndi/probability | 6ddf88e6f3d947c96b879e426030f60eb5cb2d59 | ["MIT"] | max_issues_count: 52 | issues_event: 2020-01-16T16:05:08.000Z to 2022-02-24T15:10:10.000Z
max_forks_repo: probability/discrete/prob_utils.py | vahndi/probability | 6ddf88e6f3d947c96b879e426030f60eb5cb2d59 | ["MIT"] | max_forks_count: null | forks_event: null to null
from typing import Any, Tuple, List, Hashable
from pandas import Series, DataFrame
def _filter_distribution(
distribution: DataFrame,
distribution_name: Hashable,
name_comparator: str, value: Any
) -> Tuple[DataFrame, str]:
"""
Filter probability distribution data using the variable name, comparator
code and value.
    :param distribution: The probability distribution data to filter.
    :param distribution_name: Name of the column holding the probability values.
:param name_comparator: Amalgamation of variable name and filtering
comparator in the form '{name}__{comparator}'.
:param value: Value to filter to.
:return: Filtered Data, Variable Name
"""
var_names = [col for col in distribution if col != distribution_name]
def match_var(code: str) -> bool:
return name_comparator in [f'{var_name}__{code}'
for var_name in var_names]
if name_comparator in var_names:
return distribution.loc[
distribution[name_comparator] == value
], name_comparator
elif match_var('eq'):
return distribution.loc[
distribution[name_comparator[: -4]] == value
], name_comparator[: -4]
elif match_var('ne'):
return distribution.loc[
distribution[name_comparator[: -4]] != value
], name_comparator[: -4]
elif match_var('lt'):
return distribution.loc[
distribution[name_comparator[: -4]] < value
], name_comparator[: -4]
elif match_var('gt'):
return distribution.loc[
distribution[name_comparator[: -4]] > value
], name_comparator[: -4]
elif match_var('le'):
return distribution.loc[
distribution[name_comparator[: -4]] <= value
], name_comparator[: -4]
elif match_var('ge'):
return distribution.loc[
distribution[name_comparator[: -4]] >= value
], name_comparator[: -4]
elif match_var('in'):
return distribution.loc[
distribution[name_comparator[: -4]].isin(value)
], name_comparator[: -4]
elif match_var('not_in'):
return distribution.loc[
~distribution[name_comparator[: -8]].isin(value)
], name_comparator[: -8]
def p(distribution: Series, **joint_vars_vals) -> float:
"""
Calculate the probability of ALL of the values of the joint values given.
:param distribution: Distribution data to calculate probability from.
:param joint_vars_vals: Names and values of variables to find probability of
e.g. `C=1`, `D__le=1`.
"""
dist_name = distribution.name
data = distribution.copy().reset_index()
for joint_var, joint_val in joint_vars_vals.items():
# filter individual probabilities to specified values e.g. P(A,B,C,D=d1)
data, _ = _filter_distribution(
data, dist_name, joint_var, joint_val
)
# calculate probability
return data[dist_name].sum()
def p_or(distribution: Series, **joint_vars_vals) -> float:
"""
Calculate the probability of ANY of the joint values given.
:param distribution: Distribution data to calculate probability from.
:param joint_vars_vals: Names and values of variables to find probability of
e.g. `C=1`, `D__le=1`.
"""
dist_name = distribution.name
data = distribution.copy().reset_index()
or_ix = set()
for joint_var, joint_val in joint_vars_vals.items():
# filter individual probabilities to specified values e.g. P(A,B,C,D=d1)
filtered, _ = _filter_distribution(
data, dist_name, joint_var, joint_val
)
or_ix.update(filtered.index)
# calculate probability
return data.loc[or_ix, dist_name].sum()
def given(distribution: Series, **givens) -> Series:
"""
Condition the distribution on given and/or not-given values of the
variables.
:param distribution: The probability distribution to condition
e.g. P(A,B,C,D).
:param givens: Names and values of variables to condition on a given value
e.g. D=1.
    :return: Conditioned distribution, filtered to only the given values of
        the conditioning variables.
Contains a single probability distribution summing to 1.
"""
dist_name = distribution.name
col_names = distribution.index.names
joint_names = ([
n for n in col_names
if n not in givens.keys() # not a given variable name w/o comparator
and not set(
[f'{n}__{code}' for code in _match_codes]
).intersection(givens.keys()) # not a given variable name w/ comparator
])
var_names = joint_names.copy()
data = distribution.copy().reset_index()
for given_var, given_val in givens.items():
# filter individual probabilities to given values e.g. P(A,B,C,D=d1)
data, var_name = _filter_distribution(
data, dist_name, given_var, given_val
)
var_names.append(var_name)
# normalize each individual remaining probability P(Ai,Bj,Ck,d1)
# to the sum of remaining probabilities P(A,B,C,d1)
data[dist_name] = data[dist_name] / data[dist_name].sum()
return data.set_index([
var_name for var_name in var_names
if var_name not in givens.keys()
])[dist_name]
_match_codes: List[str] = ['eq', 'ne', 'lt', 'gt', 'le', 'ge', 'in', 'not_in']
def valid_name_comparator(name_comparator: str, var_names: List[str]) -> bool:
"""
Return whether the given name is a valid conditioning filter name for any of
the variables in var_names.
:param name_comparator: Amalgamation of variable name and filtering
comparator in the form '{name}__{comparator}'.
:param var_names: List of valid variables names to look for in
`name_comparator`.
"""
for var_name in var_names:
if name_comparator == var_name:
return True
for var_name in var_names:
for code in _match_codes:
if name_comparator == var_name + '__' + code:
return True
return False
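# --- Hedged worked example (not in the original file) ---
# A tiny joint distribution P(A, B) as a named Series with a MultiIndex,
# exercising p(), p_or() and given() and the '__' comparator codes described
# in the docstrings above. The probabilities are made up.
if __name__ == '__main__':
    from pandas import MultiIndex
    index = MultiIndex.from_tuples(
        [(0, 0), (0, 1), (1, 0), (1, 1)], names=['A', 'B'])
    joint = Series([0.1, 0.2, 0.3, 0.4], index=index, name='p')
    print(p(joint, A=1))          # P(A=1) = 0.3 + 0.4 = 0.7
    print(p(joint, A=1, B=0))     # P(A=1, B=0) = 0.3
    print(p(joint, A__ne=1))      # P(A != 1) = 0.1 + 0.2 = 0.3
    print(p_or(joint, A=1, B=1))  # P(A=1 or B=1) = 0.2 + 0.3 + 0.4 = 0.9
    print(given(joint, A=1))      # P(B | A=1): {B=0: 3/7, B=1: 4/7}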
avg_line_length: 37.369697 | max_line_length: 80 | alphanum_fraction: 0.632663
hexsha: be26fa82e84568a82164ed32e64e1b532d744971 | size: 2,620 | ext: py | lang: Python
max_stars_repo: Old/liaoxuefeng/urllib_first.py | exchris/Pythonlearn | 174f38a86cf1c85d6fc099005aab3568e7549cd0 | ["MIT"] | max_stars_count: null | stars_event: null to null
max_issues_repo: Old/liaoxuefeng/urllib_first.py | exchris/Pythonlearn | 174f38a86cf1c85d6fc099005aab3568e7549cd0 | ["MIT"] | max_issues_count: 1 | issues_event: 2018-11-27T09:58:54.000Z to 2018-11-27T09:58:54.000Z
max_forks_repo: Old/liaoxuefeng/urllib_first.py | exchris/pythonlearn | 174f38a86cf1c85d6fc099005aab3568e7549cd0 | ["MIT"] | max_forks_count: null | forks_event: null to null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/7/26 0026 3:49 PM
# @Author : Exchris Tsai
# @Site :
# @File : urllib_first.py
# @Software: PyCharm
__author__ = 'Exchris Tsai'
from urllib import request, parse
from collections import deque
import re
"""
url = r"http://www.ifitshow.com"
data1 = request.urlopen(url).read()
a = request.urlopen(url)
type(a)
# <class 'http.client.HTTPResponse'>
a.geturl()
# 'http://www.baidu.com/s?word=Jecvay'
a.info()
# <http.client.HTTPMessage object at 0x03272250>
a.getcode()
# 200
"""
"""
# print(data.decode('utf-8'))
# Python 简单处理URL,抓取百度上面搜索关键词为Jecvay Notes的网页
data = {}
data['word'] = 'Jecvay Notes'
# 将data转换为'word=Jecvay+Notes',
url_values = parse.urlencode(data)
urls = "http://www.baidu.com/s?"
full_url = urls + url_values
# http://www.baidu.com/s?word=Jecvay+Notes
data = request.urlopen(full_url).read()
data = data.decode('utf-8')
# print(data)
"""
"""
# Python 的队列
queue = deque(["Eric", "John", "Michael"])
queue.append(["Terry"]) # Terry入队
queue.append(["Graham"]) # Graham入队
# 输出: "Eric"
# print(queue.popleft()) # 队首元素出队
# 输出 'John'
# print(queue.popleft()) # 队首元素出队
# deque(['Michael', ['Terry'], ['Graham']])
# print(queue) # 队列中剩下的元素
basket = {'apple','orange','apple','pear','orange','banana'}
# print(basket) # 这里演示的是去重功能
# {'orange','banana','pear','apple'}
# print('orange' in basket) # 快速判断元素是否在集合内
# True
# print('crabgrass' in basket)
# False
"""
"""
>>> a = set('abracadabra')
>>> b = set('alacazam')
>>> a
{'a', 'r', 'b', 'c', 'd'}
>>> a - b # 集合a中包含元素
{'r', 'd', 'b'}
>>> a | b # 集合a或b中包含的所有元素
{'a', 'c', 'r', 'd', 'b', 'm', 'z', 'l'}
>>> a & b # 集合a和b中都包含了的元素
{'a', 'c'}
>>> a ^ b # 不同时包含于a和b的元素
{'r', 'd', 'b', 'm', 'z', 'l'}
"""
queqe = deque()
visited = set()
url = 'http://news.dbanotes.net'  # entry page; can be swapped for another site
queqe.append(url)
cnt = 0
while queqe:
    url = queqe.popleft()  # dequeue the front element
    visited |= {url}  # mark as visited
    print('Fetched: ' + str(cnt) + '   Now fetching <--- ' + url)
    cnt += 1
    # skip pages that fail to open or time out, and non-HTML responses
    try:
        urlop = request.urlopen(url, timeout=2)
    except Exception:
        continue
    content_type = urlop.getheader('Content-Type')
    if not content_type or 'html' not in content_type:
        continue
    # use try...except so decoding errors do not abort the program
    try:
        data = urlop.read().decode('utf-8')
    except Exception:
        continue
    # extract all links on the page with a regex, skip the ones already
    # visited, and add the rest to the crawl queue
    linkre = re.compile('href=\"(.+?)\"')
    for x in linkre.findall(data):
        if 'http' in x and x not in visited:
            queqe.append(x)
            print('Added to queue ---> ' + x)
avg_line_length: 21.129032 | max_line_length: 60 | alphanum_fraction: 0.559542
hexsha: 0068cd4aff51171aece1ff4d29c8d0ea63e4a9fa | size: 809 | ext: py | lang: Python
max_stars_repo: sphinx/source/docs/user_guide/examples/interaction_callbacks_js_on_change.py | IuryPiva/bokeh | 25ecf0e460f057a179ae6cdfbc99cdac60e916b2 | ["BSD-3-Clause"] | max_stars_count: 1 | stars_event: 2021-04-09T02:57:29.000Z to 2021-04-09T02:57:29.000Z
max_issues_repo: sphinx/source/docs/user_guide/examples/interaction_callbacks_js_on_change.py | IuryPiva/bokeh | 25ecf0e460f057a179ae6cdfbc99cdac60e916b2 | ["BSD-3-Clause"] | max_issues_count: 1 | issues_event: 2021-03-01T14:04:56.000Z to 2021-03-01T14:04:56.000Z
max_forks_repo: sphinx/source/docs/user_guide/examples/interaction_callbacks_js_on_change.py | IuryPiva/bokeh | 25ecf0e460f057a179ae6cdfbc99cdac60e916b2 | ["BSD-3-Clause"] | max_forks_count: null | forks_event: null to null
from bokeh.layouts import column
from bokeh.models import ColumnDataSource, CustomJS, Slider
from bokeh.plotting import Figure, output_file, show
output_file("js_on_change.html")
x = [x*0.005 for x in range(0, 200)]
y = x
source = ColumnDataSource(data=dict(x=x, y=y))
plot = Figure(plot_width=400, plot_height=400)
plot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)
callback = CustomJS(args=dict(source=source), code="""
const data = source.data;
const f = cb_obj.value
const x = data['x']
const y = data['y']
for (let i = 0; i < x.length; i++) {
y[i] = Math.pow(x[i], f)
}
source.change.emit();
""")
slider = Slider(start=0.1, end=4, value=1, step=.1, title="power")
slider.js_on_change('value', callback)
layout = column(slider, plot)
show(layout)
avg_line_length: 25.28125 | max_line_length: 66 | alphanum_fraction: 0.666255
hexsha: 73f90f6b9ddff70582a3ec86ce334dd582e9acdd | size: 2,367 | ext: py | lang: Python
max_stars_repo: hidt/networks/blocks/specnorm.py | constantine7cd/StyleTF | e1fa28735e3c3102a2a857c121a6d8a7d76ae4da | ["MIT"] | max_stars_count: 638 | stars_event: 2020-03-14T20:57:24.000Z to 2022-03-31T06:19:14.000Z
max_issues_repo: hidt/networks/blocks/specnorm.py | constantine7cd/StyleTF | e1fa28735e3c3102a2a857c121a6d8a7d76ae4da | ["MIT"] | max_issues_count: 13 | issues_event: 2020-04-02T12:29:10.000Z to 2021-12-27T09:27:22.000Z
max_forks_repo: hidt/networks/blocks/specnorm.py | constantine7cd/StyleTF | e1fa28735e3c3102a2a857c121a6d8a7d76ae4da | ["MIT"] | max_forks_count: 83 | forks_event: 2020-03-20T21:52:52.000Z to 2022-03-01T02:57:25.000Z
__all__ = ['SpectralNorm']
import torch
from torch import nn
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
class SpectralNorm(nn.Module):
"""
Based on the paper "Spectral Normalization for Generative Adversarial Networks" by Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida
and the Pytorch implementation https://github.com/christiancosgrove/pytorch-spectral-normalization-gan
"""
def __init__(self, module, name='weight', power_iterations=1):
super().__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
if not self._made_params():
self._make_params()
def _update_u_v(self):
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height, -1).data), u.data))
u.data = l2normalize(torch.mv(w.view(height, -1).data, v.data))
# sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, w / sigma.expand_as(w))
def _made_params(self):
try:
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
return True
except AttributeError:
return False
def _make_params(self):
w = getattr(self.module, self.name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = nn.Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = nn.Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
u.data = l2normalize(u.data)
v.data = l2normalize(v.data)
w_bar = nn.Parameter(w.data)
del self.module._parameters[self.name]
self.module.register_parameter(self.name + "_u", u)
self.module.register_parameter(self.name + "_v", v)
self.module.register_parameter(self.name + "_bar", w_bar)
def forward(self, *args):
self._update_u_v()
return self.module.forward(*args)
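# --- Hedged usage sketch (not in the original file) ---
# Wrapping a layer in SpectralNorm replaces its weight parameter with
# weight_bar plus the power-iteration vectors weight_u / weight_v, and
# recomputes the normalized weight on every forward pass. The layer sizes
# below are arbitrary.
if __name__ == '__main__':
    layer = SpectralNorm(nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1))
    x = torch.randn(8, 3, 32, 32)
    out = layer(x)    # runs _update_u_v(), then the wrapped Conv2d forward
    print(out.shape)  # torch.Size([8, 64, 16, 16])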
avg_line_length: 34.304348 | max_line_length: 150 | alphanum_fraction: 0.617237
hexsha: 26392e2d03a11ff08f11960113bf2c657b1d283c | size: 1,656 | ext: py | lang: Python
max_stars_repo: tests/statistical/bayesian/run-bayesian-3.py | JonathanLehner/korali | 90f97d8e2fed2311f988f39cfe014f23ba7dd6cf | ["MIT"] | max_stars_count: 43 | stars_event: 2018-07-26T07:20:42.000Z to 2022-03-02T10:23:12.000Z
max_issues_repo: tests/statistical/bayesian/run-bayesian-3.py | JonathanLehner/korali | 90f97d8e2fed2311f988f39cfe014f23ba7dd6cf | ["MIT"] | max_issues_count: 212 | issues_event: 2018-09-21T10:44:07.000Z to 2022-03-22T14:33:05.000Z
max_forks_repo: tests/statistical/bayesian/run-bayesian-3.py | JonathanLehner/korali | 90f97d8e2fed2311f988f39cfe014f23ba7dd6cf | ["MIT"] | max_forks_count: 16 | forks_event: 2018-07-25T15:00:36.000Z to 2022-03-22T14:19:46.000Z
#!/usr/bin/env python3
# In this example, we demonstrate how Korali samples the posterior distribution
# in a bayesian problem where the likelihood is calculated by providing
# reference data points and their objective values.
# Importing the computational model
import sys
sys.path.append('./_model')
from model import *
# Creating new experiment
import korali
e = korali.Experiment()
# Setting up the reference likelihood for the Bayesian Problem
e["Problem"]["Type"] = "Bayesian/Reference"
e["Problem"]["Likelihood Model"] = "Positive StudentT"
e["Problem"]["Reference Data"] = getReferenceData()
e["Problem"]["Computational Model"] = lambda sampleData: model(sampleData, getReferencePoints())
# Configuring TMCMC parameters
e["Solver"]["Type"] = "Sampler/TMCMC"
e["Solver"]["Population Size"] = 500
e["Solver"]["Target Coefficient Of Variation"] = 0.8
e["Solver"]["Covariance Scaling"] = 0.04
# Configuring the problem's random distributions
e["Distributions"][0]["Name"] = "Uniform 0"
e["Distributions"][0]["Type"] = "Univariate/Uniform"
e["Distributions"][0]["Minimum"] = 0.0
e["Distributions"][0]["Maximum"] = +5.0
# Configuring the problem's variables and their prior distributions
e["Variables"][0]["Name"] = "a"
e["Variables"][0]["Prior Distribution"] = "Uniform 0"
e["Variables"][1]["Name"] = "b"
e["Variables"][1]["Prior Distribution"] = "Uniform 0"
e["Variables"][2]["Name"] = "[Sigma]"
e["Variables"][2]["Prior Distribution"] = "Uniform 0"
# Configuring output settings
e["File Output"]["Enabled"] = False
# Starting Korali's Engine and running experiment
e["Console Output"]["Verbosity"] = "Detailed"
k = korali.Engine()
k.run(e)
avg_line_length: 32.470588 | max_line_length: 96 | alphanum_fraction: 0.713768
hexsha: b5ae092ab63cdc3f84ecb87301c51135f4ac0452 | size: 29,032 | ext: py | lang: Python
max_stars_repo: places/migrations/0054_auto__add_field_tag_order.py | evrenesat/ganihomes | eece2d8d957989b176cc5a36d723f676862f8d17 | ["BSD-2-Clause"] | max_stars_count: 24 | stars_event: 2016-08-06T18:10:54.000Z to 2022-03-04T11:47:39.000Z
max_issues_repo: places/migrations/0054_auto__add_field_tag_order.py | evrenesat/ganihomes | eece2d8d957989b176cc5a36d723f676862f8d17 | ["BSD-2-Clause"] | max_issues_count: 1 | issues_event: 2017-03-28T02:36:50.000Z to 2017-03-28T07:18:57.000Z
max_forks_repo: places/migrations/0054_auto__add_field_tag_order.py | evrenesat/ganihomes | eece2d8d957989b176cc5a36d723f676862f8d17 | ["BSD-2-Clause"] | max_forks_count: 13 | forks_event: 2017-03-28T02:35:32.000Z to 2022-02-21T23:36:15.000Z
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Tag.order'
db.add_column('places_tag', 'order', self.gf('django.db.models.fields.SmallIntegerField')(default=0), keep_default=False)
def backwards(self, orm):
# Deleting field 'Tag.order'
db.delete_column('places_tag', 'order')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'places.booking': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'Booking'},
'end': ('django.db.models.fields.DateField', [], {}),
'guest': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'guest'", 'to': "orm['auth.User']"}),
'guest_payment': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'host'", 'to': "orm['auth.User']"}),
'host_earning': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_type': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'place': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Place']"}),
'reservation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.ReservedDates']"}),
'start': ('django.db.models.fields.DateField', [], {}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'places.currency': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'Currency'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'code_position': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'factor': ('django.db.models.fields.DecimalField', [], {'default': "'0'", 'max_digits': '12', 'decimal_places': '4'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'main': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'places.description': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'Description'},
'auto': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'place': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'descriptions'", 'to': "orm['places.Place']"}),
'text': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'places.friendship': {
'Meta': {'object_name': 'Friendship'},
'confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Profile']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'places.geolocation': {
'Meta': {'object_name': 'GeoLocation'},
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.GeoLocation']"}),
'type': ('django.db.models.fields.SmallIntegerField', [], {})
},
'places.message': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'Message'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'receiver': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'received_messages'", 'to': "orm['auth.User']"}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sent_messages'", 'to': "orm['auth.User']"}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'text': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'places.paymentselection': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'PaymentSelection'},
'acc_owner': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bank_city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'bank_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'bank_postcode': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'bank_street': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'bic': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'iban': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '2', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'places.photo': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'Photo'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.SmallIntegerField', [], {'default': '60'}),
'place': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Place']", 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'places.place': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'Place'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'bathrooms': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'bed_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'bedroom': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'cancellation': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'capacity': ('django.db.models.fields.SmallIntegerField', [], {'default': '2'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'clean_rating': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'cleaning_fee': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'}),
'comfort_rating': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Currency']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'district': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'emergency_phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'extra_limit': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'extra_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'favorite_counter': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'location_rating': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'lon': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'manual': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'max_stay': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'min_stay': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'monthly_discount': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'neighborhood': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'overall_rating': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'placement': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['places.GeoLocation']", 'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
'prices': ('django.db.models.fields.TextField', [], {'default': "''"}),
'primary_photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'reserved_dates': ('django.db.models.fields.TextField', [], {'default': "''"}),
'rules': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'size_type': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'space': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'street_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'summary': ('django.db.models.fields.TextField', [], {'default': "''"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['places.Tag']", 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'value_money_rating': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'weekend_price': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '6', 'decimal_places': '2'}),
'weekly_discount': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'places.placereview': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'PlaceReview'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'clean_rating': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'comfort_rating': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location_rating': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'overall_rating': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'place': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Place']"}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'text': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'value_money_rating': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'writer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'places.profile': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'Profile'},
'about_me': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'brithdate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'cell': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Currency']"}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'favorites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['places.Place']", 'null': 'True', 'blank': 'True'}),
'friends': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'friend_profiles'", 'symmetrical': 'False', 'through': "orm['places.Friendship']", 'to': "orm['auth.User']"}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'default': "'tr_TR'", 'max_length': '5'}),
'lastlogin': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'private_name': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'raw_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'website_url': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'places.promotioncode': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'PromotionCode'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'expiry_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percentage': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '6', 'decimal_places': '2'}),
'puser': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'used_promotions'", 'to': "orm['auth.User']"}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_promotions'", 'to': "orm['auth.User']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {}),
'used': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'places.reserveddates': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'ReservedDates'},
'end': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'place': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Place']"}),
'start': ('django.db.models.fields.DateField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'places.sessionalprice': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'SessionalPrice'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'end': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'place': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.Place']"}),
'price': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'start': ('django.db.models.fields.DateField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'weekend_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '2', 'blank': 'True'})
},
'places.tag': {
'Meta': {'ordering': "['order']", 'object_name': 'Tag'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.TagCategory']"}),
'help': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'order': ('django.db.models.fields.SmallIntegerField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'places.tagcategory': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'TagCategory'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'places.tagtranslation': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'TagTranslation'},
'help': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lang': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tags'", 'to': "orm['places.Tag']"}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'translation': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'places.transaction': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'Transaction'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '8', 'decimal_places': '2'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'reciver_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'sender_type': ('django.db.models.fields.SmallIntegerField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'places.userreview': {
'Meta': {'ordering': "['timestamp']", 'object_name': 'UserReview'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'person'", 'to': "orm['auth.User']"}),
'status': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'text': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'writer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writer'", 'to': "orm['auth.User']"})
}
}
complete_apps = ['places']
| 85.640118
| 207
| 0.553183
|
004d4774bb916da8f9c38522559dbca8b9de7586
| 972
|
py
|
Python
|
yardstick/network_services/collector/publisher.py
|
mythwm/yardstick
|
ea13581f450c9c44f6f73d383e6a192697a95cc1
|
[
"Apache-2.0"
] | 28
|
2017-02-07T07:46:42.000Z
|
2021-06-30T08:11:06.000Z
|
yardstick/network_services/collector/publisher.py
|
mythwm/yardstick
|
ea13581f450c9c44f6f73d383e6a192697a95cc1
|
[
"Apache-2.0"
] | 6
|
2018-01-18T08:00:54.000Z
|
2019-04-11T04:51:41.000Z
|
yardstick/network_services/collector/publisher.py
|
mythwm/yardstick
|
ea13581f450c9c44f6f73d383e6a192697a95cc1
|
[
"Apache-2.0"
] | 46
|
2016-12-13T10:05:47.000Z
|
2021-02-18T07:33:06.000Z
|
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements stub for publishing results in yardstick format."""
class Publisher(object):
"""Class that handles publishing test results in yardstick format."""
def __init__(self):
super(Publisher, self).__init__()
def start(self):
"""Nothing to do, yet"""
pass
def stop(self):
"""Nothing to do, yet"""
pass
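# Illustrative use of the stub (hypothetical caller, not part of the source):
#   publisher = Publisher()
#   publisher.start()
#   ... run a test and collect results ...
#   publisher.stop()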
| 32.4
| 77
| 0.704733
|
229477646eeb31b41268ff2203860bd60ee88da8
| 972
|
py
|
Python
|
utils/common.py
|
Sult/daf
|
a4da9e8c96f70577e2490c05e82bdf7d0de1a563
|
[
"MIT"
] | null | null | null |
utils/common.py
|
Sult/daf
|
a4da9e8c96f70577e2490c05e82bdf7d0de1a563
|
[
"MIT"
] | null | null | null |
utils/common.py
|
Sult/daf
|
a4da9e8c96f70577e2490c05e82bdf7d0de1a563
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
from django.conf import settings
from django.utils.timezone import utc
#converts a timestamp to an aware datetime object
def convert_timestamp(timestamp):
try:
temp = datetime.fromtimestamp(timestamp).replace(tzinfo=utc)
return temp
except TypeError:
return None
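#illustrative calls (values are placeholders): convert_timestamp(1400000000) returns an
#aware datetime for that instant; a non-numeric argument such as convert_timestamp("x") returns None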
#yields (value, is_last) pairs so the caller can tell when the last iteration of a for loop is reached
def lookahead(iterable):
    it = iter(iterable)
    last = next(it)  # next() works on both Python 2 and 3; it.next() is Python 2 only
for val in it:
yield last, False
last = val
yield last, True
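#illustrative usage (hypothetical values): flag the final element while iterating
#   for value, is_last in lookahead(["a", "b", "c"]):
#       suffix = "" if is_last else ","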
##### ICONS #####
#Get the size name of an icon
def icon_size_name(size):
for s in settings.IMAGE_SIZES:
if s[1] == size:
return s[0]
#get the datetime of the last EVE server restart (daily at 12:00)
def last_server_restart():
now = datetime.now().replace(tzinfo=utc)
if now.hour < 12:
now = now - timedelta(days=1)
restart = now.replace(hour=12, minute=0, second=0, microsecond=0)
return restart
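#illustrative behaviour (times are examples): called at 09:30 the function returns 12:00 of the
#previous day; called at 15:00 it returns 12:00 of the current day (both as UTC-aware datetimes)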
| 23.707317
| 69
| 0.667695
|
1d699ec92618e0f59bfcda95478a324ac838c340
| 22,896
|
py
|
Python
|
src/Algorithms/VertexDynamic.py
|
Antonio-Cruciani/dynamic-random-graph-generator
|
630710aadb8e096ccecfcbcac79f170a97637f77
|
[
"CNRI-Python"
] | 3
|
2020-04-06T10:24:06.000Z
|
2021-09-26T07:31:03.000Z
|
src/Algorithms/VertexDynamic.py
|
Antonio-Cruciani/dynamic-random-graph-generator
|
630710aadb8e096ccecfcbcac79f170a97637f77
|
[
"CNRI-Python"
] | 2
|
2021-06-18T08:13:26.000Z
|
2021-06-18T08:13:36.000Z
|
src/Algorithms/VertexDynamic.py
|
Antonio-Cruciani/dynamic-random-graph-generator
|
630710aadb8e096ccecfcbcac79f170a97637f77
|
[
"CNRI-Python"
] | null | null | null |
import networkx as nx
from src.Graphs.Objects.MultipleEdge import DynamicGraph
from src.FileOperations.WriteOnFile import create_file, create_folder, write_on_file_contents
from src.StastModules.Snapshot import get_snapshot_dynamic,get_snapshot_dynamicND
from src.StastModules.SpectralAnalysis import get_spectral_gap_transition_matrix,spectral_gap_sparse
import time
import math as mt
import logging
import os
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
class VertexDynamicOutput:
def __init__(self):
self.stats = []
# self.flood_infos = []
def add_stats(self, new_stats):
self.stats.append(new_stats)
# def add_flood_infos(self,new_flood_infos):
# self.flood_info.append(new_flood_infos)
def get_stats(self):
return (self.stats)
# def get_flood_infos(self):
# return(self.flood_infos)
class VertexDynamic:
def __init__(self, d, c, inrate, outrate, outpath, flooding=True, regular_convergence=0.9, regular_decay=0.5
, model="Multiple", simNumber=30, maxIter=100, onlySpectral=False, Offline=False, GPU=False):
"""
:param d: int, minimum degree that each node must have
:param c: float, tolerance constant, c*d is the maximum degree that each node can have
:param inrate: float, intensity parameter of the Poisson Process
:param outrate: float, node-falling probability
:param outpath: str, output path for the results
:param flooding: bool, if True, simulates the flooding process
        :param regular_convergence: float, threshold for the (d, c*d)-regularity convergence. NOTE: the algorithm computes the best achievable percentage anyway.
        :param regular_decay: float, amount subtracted from the convergence threshold after each reset
        :param model: str, if "Multiple" each node samples more than one node at each round. NOTE: leave it as "Multiple"
        :param simNumber: int, number of experiments to perform
        :param maxIter: int, maximum number of steps for the simulations
        :param onlySpectral: bool, if True, save only the spectral properties of the graph
        :param Offline: bool, if True, simulate the model and save its adjacency list at each time step without computing any statistics
        :param GPU: bool, if True, use the GPU instead of the CPU for solving the eigen-problem
"""
self.d_list = d
self.c_list = c
self.inrate_list = inrate
self.outrate_list = outrate
self.flooding = flooding
self.decay = regular_decay
self.cdPercentage = regular_convergence
self.model = model
self.outPath = outpath
self.simNumber = simNumber
self.spectrum = onlySpectral
self.MC = Offline
self.max_iter = maxIter
self.GPU = GPU
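    # Illustrative instantiation (all values and the output path below are placeholders, not from the source):
    #   vd = VertexDynamic(d=[4], c=[1.5], inrate=[20.0], outrate=[0.1],
    #                      outpath="./results/", flooding=True, simNumber=5, maxIter=100)
    #   vd.run()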
def run(self):
logging.info("----------------------------------------------------------------")
logging.info("Starting simulation")
sim_start = time.time()
for inrate in self.inrate_list:
for outrate in self.outrate_list:
logging.info("----------------------------------------------------------------")
logging.info("Inrate: %r Outrate: %r Flooding: %r" % (inrate, outrate,self.flooding ))
outpath = create_folder(self.outPath,
"VertexDynamic_in_" + str(inrate) + "_out_" + str(outrate) + "_f_" + str(
self.flooding))
path = outpath
outpath = outpath + "results"
vertexDynamicStats = VertexDynamicOutput()
for d in self.d_list:
logging.info("Inrate: %r Outrate: %r Flooding %r d: %d" % (inrate,outrate,self.flooding,d))
#print("Inrate: ", inrate, " Outrate: ", outrate, " Flooding: ", self.flooding, "d: ",d)
for c in self.c_list:
logging.info("Inrate: %r Outrate: %r Flooding %r d: %d c: %r " % (inrate,outrate,self.flooding,d,c))
#print("Inrate: ", inrate, " Outrate: ", outrate, " Flooding: ", self.flooding, " d: ",d," c: ",c)
for sim in range(0, self.simNumber):
logging.info("Simulation %d" % (sim))
start_time = time.time()
if(self.spectrum):
stats = self.VertexDynamicGeneratorSpectrum(d, c, inrate, outrate, sim,path = path )
elif(self.MC):
stats = self.VertexDynamicGeneratorMCConfigurations( d, c, inrate, outrate, sim, path=path)
else:
stats = self.VertexDynamicGenerator(d, c, inrate, outrate, sim)
vertexDynamicStats.add_stats(stats)
logging.info("Elapsed time %r" % (time.time() - start_time))
logging.info("----------------------------------------------------------------")
self.write_info_dic_as_csv(outpath, vertexDynamicStats)
logging.info("Ending simulation")
logging.info("Total elapsed time %r" % (time.time() - sim_start))
def VertexDynamicGenerator(self, d, c, inrate, outrate, sim):
def check_convergence_dynamic():
if (G.get_converged() == False):
# Getting the number of the vertices with less than d neighbours
# Number of the nodes with a degree >=d and <= cd
semireg = 0
# Number of nodes with a degree <d
underreg = 0
# Number of nodes with a degree >cd
overreg = 0
nodi = list(G.get_G().nodes())
for u in nodi:
if (G.get_G().degree(u) < G.get_d()):
underreg += 1
elif (G.get_G().degree(u) > G.get_tolerance()):
overreg += 1
else:
semireg += 1
G.increment_time_conv()
# if (semireg >= len(nodi) * (self.cdPercentage - (G.get_reset_number() * self.decay))):
percentages = [i for i in range(0,101)]
G.set_semiregular_percentage(percentages[-1])
if (semireg >= len(nodi) * G.get_semiregular_percentage()):
G.set_converged(True)
else:
a = 0
b = 100
while(a<=b):
m = ((b+a) / 2)
G.set_semiregular_percentage(m)
if(semireg >= len(nodi) * G.get_semiregular_percentage()):
a = m + 1
else:
b = m - 1
logging.info("Structural convergence at %r "%(G.get_semiregular_percentage() * 100))
G.set_converged(True)
flood_dictionary = {}
if (G.get_converged()):
if (G.flooding.get_initiator() == -1):
G.set_flooding()
G.flooding.set_stop_time(mt.floor(mt.log(G.get_target_n(),2)))
G.flooding.set_initiator()
G.flooding.update_flooding(G)
else:
# Updating Flooding
if (G.flooding.get_t_flood() == 1):
logging.info("Flooding protocol STARTED %r"%(G.flooding.get_started()))
if (G.flooding.get_started() == True):
G.flooding.update_flooding(G)
if (not G.flooding.check_flooding_status()):
G.set_converged(True)
if (G.flooding.get_number_of_restart() == 0):
logging.info("All the informed nodes left the network")
logging.info("Flooding Protocol status: Failed")
logging.info("----------------------------------------------------------------")
G.flooding.set_converged(False)
G.flooding.set_failed(True)
if (G.flooding.get_converged()):
logging.info("AL NODES IN THE NETWORK ARE INFORMED")
logging.info("Number of informed nodes %d" % (G.flooding.get_informed_nodes()))
logging.info("Number of uninformed nodes %d " %(G.flooding.get_uninformed_nodes()))
logging.info("Percentage of informed nodes %r" % (G.flooding.get_percentage()))
logging.info("Informed Ratio: %r"%(G.flooding.get_last_ratio()))
logging.info("Flooding Protocol status: Correctly Terminated")
logging.info("Flooding time: %d" %(G.flooding.get_t_flood()))
logging.info("----------------------------------------------------------------")
threshold = G.get_target_n()
if (G.flooding.get_t_flood() > threshold):
logging.info("Iterations > threshold")
logging.info("The Flooding protocol is too slow, stopping the simulation")
logging.info("Number of informed nodes %d " % (G.flooding.get_informed_nodes()))
logging.info("Number of uninformed nodes %d " %(G.flooding.get_uninformed_nodes()))
logging.info("Percentage of informed nodes %r" % (G.flooding.get_percentage()))
logging.info("Informed Ratio: %r"%(G.flooding.get_last_ratio()))
logging.info("Flooding Protocol status: Failed")
logging.info("Number of executed steps: %d Step threshold: %d" % (
G.flooding.get_t_flood(), threshold))
logging.info("----------------------------------------------------------------")
G.set_converged(True)
G.flooding.set_converged(False)
G.flooding.set_failed(True)
flood_dictionary['informed_nodes'] = G.flooding.get_informed_nodes()
flood_dictionary['uninformed_nodes'] = G.flooding.get_uninformed_nodes()
flood_dictionary['percentage_informed'] = G.flooding.get_percentage()
flood_dictionary['t_flood'] = G.flooding.get_t_flood()
flood_dictionary['process_status'] = G.get_converged()
flood_dictionary['flood_status'] = G.flooding.get_converged()
flood_dictionary['initiator'] = G.flooding.get_initiator()
else:
flood_dictionary['informed_nodes'] = 0
flood_dictionary['uninformed_nodes'] = len(G.get_list_of_nodes())
flood_dictionary['percentage_informed'] = 0
flood_dictionary['t_flood'] = 0
flood_dictionary['process_status'] = G.get_converged()
flood_dictionary['flood_status'] = G.flooding.get_converged()
flood_dictionary['initiator'] = G.flooding.get_initiator()
return (flood_dictionary)
t = 0
final_stats = []
achieved = False
repeat = True
sim = {
"simulation": sim
}
if (d <= 0 or c < 0):
logging.error("Input parameters must be: d>0 c>1")
return (-1)
G = DynamicGraph(0, d, c, inrate, outrate, 0, self.model)
while (repeat):
G.disconnect_from_network()
G.connect_to_network()
G.add_phase_vd()
G.del_phase_vd()
if (not achieved):
if (G.get_target_density()):
logging.info("The Graph contains the desired number of nodes")
achieved = True
stats = get_snapshot_dynamic(G, G.get_d(), G.get_c(), t)
flood_info = check_convergence_dynamic()
conv_perc = {"conv_percentage": (G.get_semiregular_percentage())}
final_stats.append({**sim, **conv_perc, **stats, **flood_info})
else:
stats = get_snapshot_dynamic(G, G.get_d(), G.get_c(), t)
flood_info = check_convergence_dynamic()
conv_perc = {"conv_percentage": (G.get_semiregular_percentage())}
final_stats.append({**sim, **conv_perc, **stats, **flood_info})
t += 1
if (G.flooding.get_converged() and (not (G.flooding.get_failed()))):
repeat = False
if ((self.cdPercentage - (G.get_reset_number() * self.decay)) <= -1):
logging.info("The graph does not converge")
repeat = False
if (G.flooding.get_failed()):
repeat = False
logging.info("Flooding protocol: FAILED")
return (final_stats)
def VertexDynamicGeneratorMCConfigurations(self, d, c, inrate, outrate, sim,path=""):
def check_convergence_dynamic():
if (G.get_converged() == False):
# Getting the number of the vertices with less than d neighbours
# Number of the nodes with a degree >=d and <= cd
semireg = 0
# Number of nodes with a degree <d
underreg = 0
# Number of nodes with a degree >cd
overreg = 0
nodi = list(G.get_G().nodes())
for u in nodi:
if (G.get_G().degree(u) < G.get_d()):
underreg += 1
elif (G.get_G().degree(u) > G.get_tolerance()):
overreg += 1
else:
semireg += 1
G.increment_time_conv()
# if (semireg >= len(nodi) * (self.cdPercentage - (G.get_reset_number() * self.decay))):
percentages = [i for i in range(0,101)]
G.set_semiregular_percentage(percentages[-1])
if (semireg >= len(nodi) * G.get_semiregular_percentage()):
G.set_converged(True)
logging.info("Structural convergence at %r "%(G.get_semiregular_percentage() * 100))
else:
a = 0
b = 100
while(a<=b):
m = ((b+a) / 2)
G.set_semiregular_percentage(m)
if(semireg >= len(nodi) * G.get_semiregular_percentage()):
a = m + 1
else:
b = m - 1
logging.info("Structural convergence at %r "%(G.get_semiregular_percentage() * 100))
G.set_converged(True)
try:
# Create sim Directory
os.mkdir(path + "/" + str(sim))
logging.info("Directory %r sim Created " % (path))
except FileExistsError:
logging.error("Directory %r sim already exists" % (path))
try:
# Create sim Directory
os.mkdir(path + "/" + str(sim) + "/before")
logging.info("Directory %r sim/before Created " % (path))
except FileExistsError:
logging.error("Directory %r sim/before already exists" % (path))
try:
# Create sim Directory
os.mkdir(path + "/" + str(sim) + "/after")
logging.info("Directory %r sim/after Created " % (path))
except FileExistsError:
logging.error("Directory %r sim/after already exists" % (path))
t = 0
achieved = False
repeat = True
if (d <= 0 or c < 0):
logging.error("Input parameters must be: d>0 c>1")
return (-1)
G = DynamicGraph(0, d, c, inrate, outrate, 0, self.model)
c = 0
stats = {"d": G.get_d(), "c": G.get_c(), "n": G.get_target_n(),"lambda":G.get_inrate(),"beta":G.get_outrate()}
while (repeat):
G.disconnect_from_network()
# Saving graph
nx.write_adjlist(G.get_G(), path=path + str(sim) + "/before/" + str(t) + ".adjlist")
G.connect_to_network()
G.add_phase_vd()
G.del_phase_vd()
# Saving graph
nx.write_adjlist(G.get_G(), path=path + str(sim) + "/after/" + str(t) + ".adjlist")
if (not achieved):
if (G.get_target_density()):
logging.info("The Graph contains the desired number of nodes")
achieved = True
check_convergence_dynamic()
else:
check_convergence_dynamic()
t += 1
if(G.get_converged()):
if(c == self.max_iter):
repeat = False
logging.info("Graph converged and 100 more steps simulated")
else:
c+=1
stats3 = [stats, stats]
return (stats3)
def VertexDynamicGeneratorSpectrum(self, d, c, inrate, outrate, sim,path=""):
def check_convergence_dynamic():
if (G.get_converged() == False):
# Getting the number of the vertices with less than d neighbours
# Number of the nodes with a degree >=d and <= cd
semireg = 0
# Number of nodes with a degree <d
underreg = 0
# Number of nodes with a degree >cd
overreg = 0
nodi = list(G.get_G().nodes())
for u in nodi:
if (G.get_G().degree(u) < G.get_d()):
underreg += 1
elif (G.get_G().degree(u) > G.get_tolerance()):
overreg += 1
else:
semireg += 1
G.increment_time_conv()
percentages = [i for i in range(0,101)]
G.set_semiregular_percentage(percentages[-1])
if (semireg >= len(nodi) * G.get_semiregular_percentage()):
G.set_converged(True)
logging.info("Structural convergence at %r "%(G.get_semiregular_percentage() * 100))
else:
a = 0
b = 100
while(a<=b):
m = ((b+a) / 2)
G.set_semiregular_percentage(m)
if(semireg >= len(nodi) * G.get_semiregular_percentage()):
a = m + 1
else:
b = m - 1
logging.info("Structural convergence at %r "%(G.get_semiregular_percentage() * 100))
G.set_converged(True)
t = 0
final_stats = []
achieved = False
repeat = True
sim = {
"simulation": sim
}
if (d <= 0 or c < 0):
logging.error("Input parameters must be: d>0 c>1")
return (-1)
G = DynamicGraph(0, d, c, inrate, outrate, 0, self.model)
c = 0
while (repeat):
G.disconnect_from_network()
if (achieved):
if(self.GPU):
IsinvertibleBefore, spectralGapBefore, lambdaNGapBefore = get_spectral_gap_transition_matrix(G.get_G())
else:
spectralGapBefore = spectral_gap_sparse(G.get_G())
spectralGapBefore = {"SpectralGapBefore": spectralGapBefore}
else:
if (self.GPU):
IsinvertibleBefore, spectralGapBefore, lambdaNGapBefore = get_spectral_gap_transition_matrix(
G.get_G())
else:
spectralGapBefore = spectral_gap_sparse(G.get_G())
spectralGapBefore = {"SpectralGapBefore": spectralGapBefore}
G.connect_to_network()
G.add_phase_vd()
G.del_phase_vd()
if (not achieved):
if (G.get_target_density()):
logging.info("The Graph contains the desired number of nodes")
achieved = True
stats = get_snapshot_dynamicND(G, G.get_d(), G.get_c(), t)
check_convergence_dynamic()
conv_perc = {"conv_percentage": (G.get_semiregular_percentage())}
if (self.GPU):
IsinvertibleAfter, spectralGapAfter, lambdaNGapAfter = get_spectral_gap_transition_matrix(
G.get_G())
else:
spectralGapAfter = spectral_gap_sparse(G.get_G())
spectralGapsAfter = {"SpectralGapAfter":spectralGapAfter}
#spectralGapBefore = {'SpectralGapBefore':0}
final_stats.append({**sim, **conv_perc, **stats,**spectralGapBefore,**spectralGapsAfter})
else:
stats = get_snapshot_dynamicND(G, G.get_d(), G.get_c(), t)
check_convergence_dynamic()
conv_perc = {"conv_percentage": (G.get_semiregular_percentage())}
if (self.GPU):
IsinvertibleAfter, spectralGapAfter, lambdaNGapAfter = get_spectral_gap_transition_matrix(
G.get_G())
else:
spectralGapAfter = spectral_gap_sparse(G.get_G())
spectralGapsAfter = {"SpectralGapAfter": spectralGapAfter}
final_stats.append({**sim, **conv_perc, **stats,**spectralGapBefore,**spectralGapsAfter})
t += 1
if(G.get_converged()):
if(c == self.max_iter):
repeat = False
logging.info("Graph converged and 100 more steps simulated")
else:
c+=1
'''
i = 0
for g in graph_before:
nx.write_adjlist(g, path=path + str(sim['simulation']) + "/before/" + str(i) + ".edgelist"
)
i+=1
i = 0
for g in graph_after:
nx.write_adjlist(g, path=path + str(sim['simulation']) + "/after/" + str(i) + ".edgelist"
)
i+=1
'''
return (final_stats)
def write_info_dic_as_csv(self, outPath, results):
create_file(outPath, list(results.get_stats()[0][0].keys()))
for i in results.get_stats():
write_on_file_contents(outPath, i)
| 42.243542
| 151
| 0.505503
|
1f2606a6edf48b7d02fa1b44bd5d2b2ce7d84284
| 2,107
|
py
|
Python
|
cogs/inactive/ttt.py
|
MiningMark48/Tidal-Bot
|
8db6ecb220fd35930ffe1df5653af7a1ca03c8e9
|
[
"MIT"
] | 6
|
2020-08-09T15:43:07.000Z
|
2022-03-11T15:12:21.000Z
|
cogs/inactive/ttt.py
|
MiningMark48/Tidal-Bot
|
8db6ecb220fd35930ffe1df5653af7a1ca03c8e9
|
[
"MIT"
] | 6
|
2020-10-29T02:32:40.000Z
|
2022-01-13T03:12:45.000Z
|
cogs/inactive/ttt.py
|
MiningMark48/Tidal-Bot
|
8db6ecb220fd35930ffe1df5653af7a1ca03c8e9
|
[
"MIT"
] | 1
|
2021-06-09T08:06:31.000Z
|
2021-06-09T08:06:31.000Z
|
import random
import asyncio
import discord
from discord.ext import commands
from util.decorators import delete_original
class Fun(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="tictactoe", aliases=["ttt"])
@delete_original()
async def tic_tac_toe(self, ctx, opponent: discord.Member):
"""[WIP] Play Tic-Tac-Toe with a selected player!"""
emoji_join = "\N{RAISED HAND WITH FINGERS SPLAYED}"
current_game_board = ["1", "2", "3", "4", "5", "6", "7", "8", "9"]
def check_join(e, u):
return u.id == opponent.id
embed = discord.Embed(title="Tic-Tac-Toe", color=0xff00ff)
embed.description = f"{opponent.mention}, you have been invited to play Tic-Tac-Toe. \n\nClick the {emoji_join} reaction below to accept."
msg = await ctx.send(embed=embed)
await msg.add_reaction(emoji_join)
try:
wf_react, _ = await self.bot.wait_for('reaction_add', check=check_join, timeout=15)
wf_react = str(wf_react.emoji)
except asyncio.TimeoutError:
embed.description = "User did not accept the invite. \N{FROWNING FACE WITH OPEN MOUTH}"
await msg.edit(embed=embed)
await msg.clear_reactions()
return
if wf_react != emoji_join:
return
await msg.clear_reactions()
embed.description = "Starting game..."
await msg.edit(embed=embed)
await asyncio.sleep(1)
embed.description = f"```{self.ascii_board(current_game_board)}```"
await msg.edit(embed=embed)
def ascii_board(self, board):
blank_board = "" \
" {} | {} | {} \n" \
"----+---+----\n" \
" {} | {} | {} \n" \
"----+---+----\n" \
" {} | {} | {} \n" \
board = board[:9]
blank_board = blank_board.format(
board[0], board[1], board[2], board[3], board[4], board[5], board[6], board[7], board[8]
)
return blank_board
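    # Illustrative rendering of a fresh board (derived from the format string above):
    #    1 | 2 | 3
    #   ----+---+----
    #    4 | 5 | 6
    #   ----+---+----
    #    7 | 8 | 9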
def setup(bot):
bot.add_cog(Fun(bot))
| 30.1
| 146
| 0.565733
|
e0581fdc5d4a5ef3bdec7a3ab8d03481a3b4a90c
| 660
|
py
|
Python
|
setup.py
|
jaylett/tp_to_statsd
|
7ece3ec381cff5b5cfdf26a6daeaf860b07c2ec8
|
[
"MIT"
] | null | null | null |
setup.py
|
jaylett/tp_to_statsd
|
7ece3ec381cff5b5cfdf26a6daeaf860b07c2ec8
|
[
"MIT"
] | null | null | null |
setup.py
|
jaylett/tp_to_statsd
|
7ece3ec381cff5b5cfdf26a6daeaf860b07c2ec8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup  # setuptools (rather than distutils) is needed for install_requires to take effect
setup(
name='tp_to_statsd',
version='0.1.2',
description='Record TP figures using statsd.',
author='James Aylett',
author_email='james@tartarus.org',
license='MIT',
packages=['tp_to_statsd',],
scripts=[
'scripts/tp_to_statsd',
],
install_requires=[
'requests>=2.3.0',
'statsd>=3.0.0',
'python-slugify>=1.1.3',
],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
)
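# Illustrative install of this package from the project root (command is an assumption, not from the source):
#   pip install .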
| 23.571429
| 50
| 0.577273
|
0fa81604b71916a190dcd6570f624a2aa8e0f90a
| 2,227
|
py
|
Python
|
sdk/python/pulumi_azure_native/subscription/v20200901/outputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/subscription/v20200901/outputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/subscription/v20200901/outputs.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'PutAliasResponsePropertiesResponse',
]
@pulumi.output_type
class PutAliasResponsePropertiesResponse(dict):
"""
Put subscription creation result properties.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "subscriptionId":
suggest = "subscription_id"
elif key == "provisioningState":
suggest = "provisioning_state"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PutAliasResponsePropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PutAliasResponsePropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PutAliasResponsePropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
subscription_id: str,
provisioning_state: Optional[str] = None):
"""
Put subscription creation result properties.
:param str subscription_id: Newly created subscription Id.
:param str provisioning_state: The provisioning state of the resource.
"""
pulumi.set(__self__, "subscription_id", subscription_id)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="subscriptionId")
def subscription_id(self) -> str:
"""
Newly created subscription Id.
"""
return pulumi.get(self, "subscription_id")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
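    # Illustrative direct construction (normally instances are produced by the Pulumi engine;
    # the values below are placeholders):
    #   props = PutAliasResponsePropertiesResponse(
    #       subscription_id="00000000-0000-0000-0000-000000000000",
    #       provisioning_state="Succeeded")
    #   props.subscription_id  # -> "00000000-0000-0000-0000-000000000000"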
| 32.275362
| 154
| 0.659632
|
88d9f095743a9d95d784dc8bc3fa27767cac9b4f
| 3,725
|
py
|
Python
|
src/tests/api/v1/test_permissions.py
|
iYasha/studycobra
|
097a78e4a3679bfe6934983eb2be77ba76408d77
|
[
"MIT"
] | null | null | null |
src/tests/api/v1/test_permissions.py
|
iYasha/studycobra
|
097a78e4a3679bfe6934983eb2be77ba76408d77
|
[
"MIT"
] | null | null | null |
src/tests/api/v1/test_permissions.py
|
iYasha/studycobra
|
097a78e4a3679bfe6934983eb2be77ba76408d77
|
[
"MIT"
] | null | null | null |
from typing import Any
from typing import List
from typing import Tuple
import pytest
from fastapi.routing import APIRoute
from sso_auth.utils import is_ignored_path
from main import app as fs_app
from starlette import convertors
from starlette import status
from starlette.testclient import TestClient
from tests.utils import create_access_token
from tests.utils import create_expired_access_token
from tests.utils import get_fake_uuid
def generate_param_by_convertor(param: convertors.Convertor) -> Any:
if isinstance(param, convertors.UUIDConvertor):
return get_fake_uuid(constant=True)
if isinstance(param, convertors.StringConvertor):
return "some_string"
    raise NotImplementedError("Unknown convertor type")
def all_routes() -> List[Tuple[str, str]]:
routes: List[Tuple[str, str]] = []
route: APIRoute
for route in fs_app.routes:
if is_ignored_path(route.path):
continue
if route.param_convertors:
params = {
param_name: generate_param_by_convertor(convertor)
for param_name, convertor in route.param_convertors.items()
}
url = route.url_path_for(route.name, **params)
else:
url = route.url_path_for(route.name)
method = list(route.methods)[0]
routes.append((url, method))
return routes
@pytest.mark.usefixtures("prepare_sso_settings")
@pytest.mark.parametrize("url, method", all_routes())
def test_has_permissions(client: TestClient, url: str, method: str) -> None:
"""
    Check that every service endpoint is protected by permissions.
"""
token = create_access_token()
headers = {"Authorization": f"Bearer {token}"}
res = client.request(
method=method,
url=url,
headers=headers,
json={},
)
assert res.status_code == status.HTTP_403_FORBIDDEN, res.json()
assert res.json()["error_message"] == "no permission to perform this action"
@pytest.mark.usefixtures("prepare_sso_settings")
@pytest.mark.parametrize("url, method", all_routes())
def test_expired_token(client: TestClient, url: str, method: str) -> None:
"""
    Check that every service endpoint rejects an expired token.
"""
token = create_expired_access_token()
headers = {"Authorization": f"Bearer {token}"}
res = client.request(
method=method,
url=url,
headers=headers,
json={},
)
assert res.status_code == status.HTTP_401_UNAUTHORIZED, res.json()
assert res.json()["error_message"] == "expired token signature"
@pytest.mark.usefixtures("prepare_sso_settings")
@pytest.mark.parametrize("url, method", all_routes())
def test_invalid_token(client: TestClient, url: str, method: str) -> None:
"""
    Check that every service endpoint rejects an invalid token.
"""
token = "some_broken_token"
headers = {"Authorization": f"Bearer {token}"}
res = client.request(
method=method,
url=url,
headers=headers,
json={},
)
assert res.status_code == status.HTTP_401_UNAUTHORIZED, res.json()
assert res.json()["error_message"] == "invalid token"
@pytest.mark.usefixtures("prepare_sso_settings")
@pytest.mark.parametrize("url, method", all_routes())
def test_token_not_found(client: TestClient, url: str, method: str) -> None:
"""
    Check that every service endpoint rejects requests without a token.
"""
res = client.request(
method=method,
url=url,
headers={},
json={},
)
assert res.status_code == status.HTTP_401_UNAUTHORIZED, res.json()
assert res.json()["error_message"] == "token not found"
| 31.041667
| 80
| 0.679195
|
f18cc8c429f842f41a72b3a040f6f5f291eccae7
| 1,508
|
py
|
Python
|
imperative/python/test/integration/test_save_load.py
|
yang-shuohao/MegEngine
|
2e8742086563ea442c357b14560245c54e0aa0a3
|
[
"Apache-2.0"
] | 1
|
2020-12-11T04:08:25.000Z
|
2020-12-11T04:08:25.000Z
|
imperative/python/test/integration/test_save_load.py
|
yang-shuohao/MegEngine
|
2e8742086563ea442c357b14560245c54e0aa0a3
|
[
"Apache-2.0"
] | null | null | null |
imperative/python/test/integration/test_save_load.py
|
yang-shuohao/MegEngine
|
2e8742086563ea442c357b14560245c54e0aa0a3
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import megengine as mge
import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.core.tensor.raw_tensor import RawTensor
from megengine.module import Module
class Simple(Module):
def __init__(self):
super().__init__()
self.a = Parameter([1.23], dtype=np.float32)
def forward(self, x):
x = x * self.a
return x
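# Illustrative forward pass (input value is a placeholder): Simple()(tensor([2.0]))
# multiplies the input by the parameter a = 1.23, giving roughly [2.46].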
def test_save_load():
net = Simple()
optim = optimizer.SGD(net.parameters(), lr=1.0, momentum=0.9)
optim.clear_grad()
gm = ad.GradManager().attach(net.parameters())
data = tensor([2.34])
with gm:
loss = net(data)
gm.backward(loss)
optim.step()
model_name = "simple.pkl"
print("save to {}".format(model_name))
mge.save(
{
"name": "simple",
"state_dict": net.state_dict(),
"opt_state": optim.state_dict(),
},
model_name,
)
# Load param to cpu
checkpoint = mge.load(model_name, map_location="cpu0")
device_save = mge.get_default_device()
mge.set_default_device("cpu0")
net = Simple()
net.load_state_dict(checkpoint["state_dict"])
optim = optimizer.SGD(net.parameters(), lr=1.0, momentum=0.9)
optim.load_state_dict(checkpoint["opt_state"])
print("load done")
with gm:
loss = net([1.23])
gm.backward(loss)
optim.step()
# Restore device
mge.set_default_device(device_save)
| 23.2
| 65
| 0.628647
|
7ee66caf866e4604b604d6455b096f0c23c618cc
| 20,756
|
py
|
Python
|
src/engine/SCons/Variables/VariablesTests.py
|
SConsProject/scons-gh-convert-git
|
a749e0006af9a7f7d1f744eeaf8407b8226334e7
|
[
"MIT"
] | 1
|
2017-01-28T15:39:07.000Z
|
2017-01-28T15:39:07.000Z
|
src/engine/SCons/Variables/VariablesTests.py
|
SConsProject/scons-gh-convert-git
|
a749e0006af9a7f7d1f744eeaf8407b8226334e7
|
[
"MIT"
] | null | null | null |
src/engine/SCons/Variables/VariablesTests.py
|
SConsProject/scons-gh-convert-git
|
a749e0006af9a7f7d1f744eeaf8407b8226334e7
|
[
"MIT"
] | 1
|
2022-03-23T17:29:35.000Z
|
2022-03-23T17:29:35.000Z
|
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import sys
import unittest
import TestSCons
import TestUnit
import SCons.Variables
import SCons.Subst
import SCons.Warnings
class Environment(object):
def __init__(self):
self.dict = {}
def subst(self, x):
return SCons.Subst.scons_subst(x, self, gvars=self.dict)
def __setitem__(self, key, value):
self.dict[key] = value
def __getitem__(self, key):
return self.dict[key]
def __contains__(self, key):
return self.dict.__contains__(key)
def has_key(self, key):
return key in self.dict
def cmp(a, b):
"""
Define cmp because it's no longer available in python3
Works under python 2 as well
"""
return (a > b) - (a < b)
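# Illustrative results: cmp(1, 2) == -1, cmp(2, 2) == 0, cmp(3, 2) == 1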
def check(key, value, env):
assert int(value) == 6 * 9, "key %s = %s" % (key, repr(value))
# Check saved option file by executing and comparing against
# the expected dictionary
def checkSave(file, expected):
gdict = {}
ldict = {}
exec(open(file, 'r').read(), gdict, ldict)
assert expected == ldict, "%s\n...not equal to...\n%s" % (expected, ldict)
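# Illustrative check (hypothetical file contents): a saved options file containing the single
# line "OPT_VAL_2 = 'bar'" satisfies checkSave(file, {'OPT_VAL_2': 'bar'}).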
class VariablesTestCase(unittest.TestCase):
def test_keys(self):
"""Test the Variables.keys() method"""
opts = SCons.Variables.Variables()
opts.Add('VAR1')
opts.Add('VAR2',
'THE answer to THE question',
"42",
check,
lambda x: int(x) + 12)
keys = list(opts.keys())
assert keys == ['VAR1', 'VAR2'], keys
def test_Add(self):
"""Test adding to a Variables object"""
opts = SCons.Variables.Variables()
opts.Add('VAR')
opts.Add('ANSWER',
'THE answer to THE question',
"42",
check,
lambda x: int(x) + 12)
o = opts.options[0]
assert o.key == 'VAR'
assert o.help == ''
assert o.default is None
assert o.validator is None
assert o.converter is None
o = opts.options[1]
assert o.key == 'ANSWER'
assert o.help == 'THE answer to THE question'
assert o.default == "42"
o.validator(o.key, o.converter(o.default), {})
def test_it(var, opts=opts):
exc_caught = None
try:
opts.Add(var)
except SCons.Errors.UserError:
exc_caught = 1
assert exc_caught, "did not catch UserError for '%s'" % var
test_it('foo/bar')
test_it('foo-bar')
test_it('foo.bar')
def test_AddVariables(self):
"""Test adding a list of options to a Variables object"""
opts = SCons.Variables.Variables()
opts.AddVariables(('VAR2',),
('ANSWER2',
'THE answer to THE question',
"42",
check,
lambda x: int(x) + 12))
o = opts.options[0]
assert o.key == 'VAR2', o.key
assert o.help == '', o.help
assert o.default is None, o.default
assert o.validator is None, o.validator
assert o.converter is None, o.converter
o = opts.options[1]
assert o.key == 'ANSWER2', o.key
assert o.help == 'THE answer to THE question', o.help
assert o.default == "42", o.default
o.validator(o.key, o.converter(o.default), {})
def test_Update(self):
"""Test updating an Environment"""
# Test that a default value is validated correctly.
test = TestSCons.TestSCons()
file = test.workpath('custom.py')
opts = SCons.Variables.Variables(file)
opts.Add('ANSWER',
'THE answer to THE question',
"42",
check,
lambda x: int(x) + 12)
env = Environment()
opts.Update(env)
assert env['ANSWER'] == 54
env = Environment()
opts.Update(env, {})
assert env['ANSWER'] == 54
# Test that a bad value from the file is used and
# validation fails correctly.
test = TestSCons.TestSCons()
file = test.workpath('custom.py')
test.write('custom.py', 'ANSWER=54')
opts = SCons.Variables.Variables(file)
opts.Add('ANSWER',
'THE answer to THE question',
"42",
check,
lambda x: int(x) + 12)
env = Environment()
exc_caught = None
try:
opts.Update(env)
except AssertionError:
exc_caught = 1
assert exc_caught, "did not catch expected assertion"
env = Environment()
exc_caught = None
try:
opts.Update(env, {})
except AssertionError:
exc_caught = 1
assert exc_caught, "did not catch expected assertion"
# Test that a good value from the file is used and validated.
test = TestSCons.TestSCons()
file = test.workpath('custom.py')
test.write('custom.py', 'ANSWER=42')
opts = SCons.Variables.Variables(file)
opts.Add('ANSWER',
'THE answer to THE question',
"10",
check,
lambda x: int(x) + 12)
env = Environment()
opts.Update(env)
assert env['ANSWER'] == 54
env = Environment()
opts.Update(env, {})
assert env['ANSWER'] == 54
# Test that a bad value from an args dictionary passed to
# Update() is used and validation fails correctly.
test = TestSCons.TestSCons()
file = test.workpath('custom.py')
test.write('custom.py', 'ANSWER=10')
opts = SCons.Variables.Variables(file)
opts.Add('ANSWER',
'THE answer to THE question',
"12",
check,
lambda x: int(x) + 12)
env = Environment()
exc_caught = None
try:
opts.Update(env, {'ANSWER':'54'})
except AssertionError:
exc_caught = 1
assert exc_caught, "did not catch expected assertion"
# Test that a good value from an args dictionary
# passed to Update() is used and validated.
test = TestSCons.TestSCons()
file = test.workpath('custom.py')
test.write('custom.py', 'ANSWER=10')
opts = SCons.Variables.Variables(file)
opts.Add('ANSWER',
'THE answer to THE question',
"12",
check,
lambda x: int(x) + 12)
env = Environment()
opts.Update(env, {'ANSWER':'42'})
assert env['ANSWER'] == 54
# Test against a former bug. If we supply a converter,
# but no default, the value should *not* appear in the
# Environment if no value is specified in the options file
# or args.
test = TestSCons.TestSCons()
file = test.workpath('custom.py')
opts = SCons.Variables.Variables(file)
opts.Add('ANSWER',
help='THE answer to THE question',
converter=str)
env = Environment()
opts.Update(env, {})
assert 'ANSWER' not in env
# Test that a default value of None is all right.
test = TestSCons.TestSCons()
file = test.workpath('custom.py')
opts = SCons.Variables.Variables(file)
opts.Add('ANSWER',
"This is the answer",
None,
check)
env = Environment()
opts.Update(env, {})
assert 'ANSWER' not in env
def test_noaggregation(self):
"""Test that the 'files' and 'args' attributes of the Variables class
don't aggregate entries from one instance to another.
This used to be a bug in SCons version 2.4.1 and earlier.
"""
opts = SCons.Variables.Variables()
opts.files.append('custom.py')
opts.args['ANSWER'] = 54
nopts = SCons.Variables.Variables()
# Ensure that both attributes are initialized to
# an empty list and dict, respectively.
assert len(nopts.files) == 0
assert len(nopts.args) == 0
def test_args(self):
"""Test updating an Environment with arguments overridden"""
# Test that a bad (command-line) argument is used
# and the validation fails correctly.
test = TestSCons.TestSCons()
file = test.workpath('custom.py')
test.write('custom.py', 'ANSWER=42')
opts = SCons.Variables.Variables(file, {'ANSWER':54})
opts.Add('ANSWER',
'THE answer to THE question',
"42",
check,
lambda x: int(x) + 12)
env = Environment()
exc_caught = None
try:
opts.Update(env)
except AssertionError:
exc_caught = 1
assert exc_caught, "did not catch expected assertion"
# Test that a good (command-line) argument is used and validated.
test = TestSCons.TestSCons()
file = test.workpath('custom.py')
test.write('custom.py', 'ANSWER=54')
opts = SCons.Variables.Variables(file, {'ANSWER':42})
opts.Add('ANSWER',
'THE answer to THE question',
"54",
check,
lambda x: int(x) + 12)
env = Environment()
opts.Update(env)
assert env['ANSWER'] == 54
# Test that a (command-line) argument is overridden by a dictionary
# supplied to Update() and the dictionary value is validated correctly.
test = TestSCons.TestSCons()
file = test.workpath('custom.py')
test.write('custom.py', 'ANSWER=54')
opts = SCons.Variables.Variables(file, {'ANSWER':54})
opts.Add('ANSWER',
'THE answer to THE question',
"54",
check,
lambda x: int(x) + 12)
env = Environment()
opts.Update(env, {'ANSWER':42})
assert env['ANSWER'] == 54
def test_Save(self):
"""Testing saving Variables"""
test = TestSCons.TestSCons()
cache_file = test.workpath('cached.options')
opts = SCons.Variables.Variables()
def bool_converter(val):
if val in [1, 'y']: val = 1
if val in [0, 'n']: val = 0
return val
# test saving out empty file
opts.Add('OPT_VAL',
'An option to test',
21,
None,
None)
opts.Add('OPT_VAL_2',
default='foo')
opts.Add('OPT_VAL_3',
default=1)
opts.Add('OPT_BOOL_0',
default='n',
converter=bool_converter)
opts.Add('OPT_BOOL_1',
default='y',
converter=bool_converter)
opts.Add('OPT_BOOL_2',
default=0,
converter=bool_converter)
env = Environment()
opts.Update(env, {'OPT_VAL_3' : 2})
assert env['OPT_VAL'] == 21, env['OPT_VAL']
assert env['OPT_VAL_2'] == 'foo', env['OPT_VAL_2']
assert env['OPT_VAL_3'] == 2, env['OPT_VAL_3']
assert env['OPT_BOOL_0'] == 0, env['OPT_BOOL_0']
assert env['OPT_BOOL_1'] == 1, env['OPT_BOOL_1']
assert env['OPT_BOOL_2'] == '0', env['OPT_BOOL_2']
env['OPT_VAL_2'] = 'bar'
env['OPT_BOOL_0'] = 0
env['OPT_BOOL_1'] = 1
env['OPT_BOOL_2'] = 2
opts.Save(cache_file, env)
checkSave(cache_file, { 'OPT_VAL_2' : 'bar',
'OPT_VAL_3' : 2,
'OPT_BOOL_2' : 2})
# Test against some old bugs
class Foo(object):
def __init__(self, x):
self.x = x
def __str__(self):
return self.x
test = TestSCons.TestSCons()
cache_file = test.workpath('cached.options')
opts = SCons.Variables.Variables()
opts.Add('THIS_USED_TO_BREAK',
'An option to test',
"Default")
opts.Add('THIS_ALSO_BROKE',
'An option to test',
"Default2")
opts.Add('THIS_SHOULD_WORK',
'An option to test',
Foo('bar'))
env = Environment()
opts.Update(env, { 'THIS_USED_TO_BREAK' : "Single'Quotes'In'String",
'THIS_ALSO_BROKE' : "\\Escape\nSequences\t",
'THIS_SHOULD_WORK' : Foo('baz') })
opts.Save(cache_file, env)
checkSave(cache_file, { 'THIS_USED_TO_BREAK' : "Single'Quotes'In'String",
'THIS_ALSO_BROKE' : "\\Escape\nSequences\t",
'THIS_SHOULD_WORK' : 'baz' })
def test_GenerateHelpText(self):
"""Test generating the default format help text"""
opts = SCons.Variables.Variables()
opts.Add('ANSWER',
'THE answer to THE question',
"42",
check,
lambda x: int(x) + 12)
opts.Add('B',
'b - alpha test',
"42",
check,
lambda x: int(x) + 12)
opts.Add('A',
'a - alpha test',
"42",
check,
lambda x: int(x) + 12)
env = Environment()
opts.Update(env, {})
expect = """
ANSWER: THE answer to THE question
default: 42
actual: 54
B: b - alpha test
default: 42
actual: 54
A: a - alpha test
default: 42
actual: 54
"""
text = opts.GenerateHelpText(env)
assert text == expect, text
expectAlpha = """
A: a - alpha test
default: 42
actual: 54
ANSWER: THE answer to THE question
default: 42
actual: 54
B: b - alpha test
default: 42
actual: 54
"""
text = opts.GenerateHelpText(env, sort=cmp)
assert text == expectAlpha, text
def test_FormatVariableHelpText(self):
"""Test generating custom format help text"""
opts = SCons.Variables.Variables()
def my_format(env, opt, help, default, actual, aliases):
return '%s %s %s %s %s\n' % (opt, default, actual, help, aliases)
opts.FormatVariableHelpText = my_format
opts.Add('ANSWER',
'THE answer to THE question',
"42",
check,
lambda x: int(x) + 12)
opts.Add('B',
'b - alpha test',
"42",
check,
lambda x: int(x) + 12)
opts.Add('A',
'a - alpha test',
"42",
check,
lambda x: int(x) + 12)
env = Environment()
opts.Update(env, {})
expect = """\
ANSWER 42 54 THE answer to THE question ['ANSWER']
B 42 54 b - alpha test ['B']
A 42 54 a - alpha test ['A']
"""
text = opts.GenerateHelpText(env)
assert text == expect, text
expectAlpha = """\
A 42 54 a - alpha test ['A']
ANSWER 42 54 THE answer to THE question ['ANSWER']
B 42 54 b - alpha test ['B']
"""
text = opts.GenerateHelpText(env, sort=cmp)
assert text == expectAlpha, text
def test_Aliases(self):
"""Test option aliases"""
# test alias as a tuple
opts = SCons.Variables.Variables()
opts.AddVariables(
(('ANSWER', 'ANSWERALIAS'),
'THE answer to THE question',
"42"),
)
env = Environment()
opts.Update(env, {'ANSWER' : 'answer'})
assert 'ANSWER' in env
env = Environment()
opts.Update(env, {'ANSWERALIAS' : 'answer'})
assert 'ANSWER' in env and 'ANSWERALIAS' not in env
# test alias as a list
opts = SCons.Variables.Variables()
opts.AddVariables(
(['ANSWER', 'ANSWERALIAS'],
'THE answer to THE question',
"42"),
)
env = Environment()
opts.Update(env, {'ANSWER' : 'answer'})
assert 'ANSWER' in env
env = Environment()
opts.Update(env, {'ANSWERALIAS' : 'answer'})
assert 'ANSWER' in env and 'ANSWERALIAS' not in env
class UnknownVariablesTestCase(unittest.TestCase):
def test_unknown(self):
"""Test the UnknownVariables() method"""
opts = SCons.Variables.Variables()
opts.Add('ANSWER',
'THE answer to THE question',
"42")
args = {
'ANSWER' : 'answer',
'UNKNOWN' : 'unknown',
}
env = Environment()
opts.Update(env, args)
r = opts.UnknownVariables()
assert r == {'UNKNOWN' : 'unknown'}, r
assert env['ANSWER'] == 'answer', env['ANSWER']
def test_AddOptionUpdatesUnknown(self):
"""Test updating of the 'unknown' dict"""
opts = SCons.Variables.Variables()
opts.Add('A',
'A test variable',
"1")
args = {
'A' : 'a',
'ADDEDLATER' : 'notaddedyet',
}
env = Environment()
opts.Update(env,args)
r = opts.UnknownVariables()
assert r == {'ADDEDLATER' : 'notaddedyet'}, r
assert env['A'] == 'a', env['A']
opts.Add('ADDEDLATER',
'An option not present initially',
"1")
args = {
'A' : 'a',
'ADDEDLATER' : 'added',
}
opts.Update(env, args)
r = opts.UnknownVariables()
assert len(r) == 0, r
assert env['ADDEDLATER'] == 'added', env['ADDEDLATER']
def test_AddOptionWithAliasUpdatesUnknown(self):
"""Test updating of the 'unknown' dict (with aliases)"""
opts = SCons.Variables.Variables()
opts.Add('A',
'A test variable',
"1")
args = {
'A' : 'a',
'ADDEDLATERALIAS' : 'notaddedyet',
}
env = Environment()
opts.Update(env,args)
r = opts.UnknownVariables()
assert r == {'ADDEDLATERALIAS' : 'notaddedyet'}, r
assert env['A'] == 'a', env['A']
opts.AddVariables(
(('ADDEDLATER', 'ADDEDLATERALIAS'),
'An option not present initially',
"1"),
)
args['ADDEDLATERALIAS'] = 'added'
opts.Update(env, args)
r = opts.UnknownVariables()
assert len(r) == 0, r
assert env['ADDEDLATER'] == 'added', env['ADDEDLATER']
if __name__ == "__main__":
suite = unittest.TestSuite()
tclasses = [ VariablesTestCase,
UnknownVariablesTestCase ]
for tclass in tclasses:
names = unittest.getTestCaseNames(tclass, 'test_')
suite.addTests(list(map(tclass, names)))
TestUnit.run(suite)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 30.037627
| 81
| 0.518693
|
d9feb6df379b160975feaf592b73e2297241ce24
| 3,866
|
py
|
Python
|
extract-partitions.py
|
kenkit/lglaf
|
30b625f24d71190b82f78e02f29749b290cd8d46
|
[
"MIT"
] | 22
|
2017-09-19T23:49:36.000Z
|
2021-11-08T22:03:23.000Z
|
extract-partitions.py
|
kenkit/lglaf
|
30b625f24d71190b82f78e02f29749b290cd8d46
|
[
"MIT"
] | 7
|
2018-04-04T20:38:51.000Z
|
2021-11-08T01:40:19.000Z
|
extract-partitions.py
|
kenkit/lglaf
|
30b625f24d71190b82f78e02f29749b290cd8d46
|
[
"MIT"
] | 12
|
2017-11-10T23:01:20.000Z
|
2020-12-20T05:06:45.000Z
|
#!/usr/bin/env python
#
# Dump partitions to file.
#
# Copyright (C) 2015 Peter Wu <peter@lekensteyn.nl>
# Licensed under the MIT license <http://opensource.org/licenses/MIT>.
from contextlib import closing
import argparse, logging, os, struct
import lglaf, partitions
_logger = logging.getLogger("extract-partitions")
parser = argparse.ArgumentParser()
parser.add_argument("--cr", choices=['yes', 'no'], help="Do initial challenge response (KILO CENT/METR)")
parser.add_argument("-d", "--outdir", default=".",
help="Output directory for disk images.")
# Do not dump partitions larger than this size
# (userdata 11728 MiB, system 2064 MiB, cache 608 MiB, cust 256 MiB)
parser.add_argument("--max-size", metavar="kbytes", type=int, default=65535,
help="Maximum partition size to dump (in KiB) or 0 to dump all (default %(default)d)")
parser.add_argument("--debug", action='store_true', help="Enable debug messages")
parser.add_argument("--batch", action='store_true', help="Enable batch mode")
parser.add_argument("--skip-hello", action="store_true",
help="Immediately send commands, skip HELO message")
def dump_partitions(comm, disk_fd, outdir, max_size, batch):
diskinfo = partitions.get_partitions(comm, disk_fd)
for part in diskinfo.gpt.partitions:
part_offset = part.first_lba * partitions.BLOCK_SIZE
part_size = ((part.last_lba + 1) - part.first_lba) * partitions.BLOCK_SIZE
part_name = part.name
part_label = "/dev/mmcblk0p%i" % part.index
if max_size and part_size > max_size:
if batch:
print("#Ignoring large partition %s (%s) of size %dK" % (part_label, part_name, part_size / 1024))
else:
_logger.info("Ignoring large partition %s (%s) of size %dK",
part_label, part_name, part_size / 1024)
continue
out_path = os.path.join(outdir, "%s.bin" % part_name)
try:
current_size = os.path.getsize(out_path)
if current_size > part_size:
if batch:
print("#%s: unexpected size %dK, larger than %dK" % (out_path, current_size / 1024, part_size / 1024))
else:
_logger.warn("%s: unexpected size %dK, larger than %dK",
out_path, current_size / 1024, part_size / 1024)
continue
elif current_size == part_size:
if batch:
print("#Skipping already existing partition %s (%s)" % ( part_label, part_name))
else:
_logger.info("Skipping partition %s (%s), already found at %s",
part_label, part_name, out_path)
continue
except OSError: pass
if batch:
print("#%s (%s)" % (part_label, part_name))
else:
_logger.info("Dumping partition %s (%s) to %s (%d bytes)",
part_label, part_name, out_path, part_size)
partitions.dump_partition(comm, disk_fd, out_path, part_offset, part_size, batch)
def main():
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s %(name)s: %(levelname)s: %(message)s',
level=logging.DEBUG if args.debug else logging.INFO)
try: os.makedirs(args.outdir)
except OSError: pass
comm = lglaf.autodetect_device(args.cr)
with closing(comm):
lglaf.try_hello(comm)
_logger.debug("Using Protocol version: 0x%x" % comm.protocol_version)
with partitions.laf_open_disk(comm) as disk_fd:
_logger.debug("Opened fd %d for disk", disk_fd)
dump_partitions(comm, disk_fd, args.outdir, args.max_size * 1024, args.batch)
if args.batch:
print("#All finished")
else:
_logger.info("All finished!")
if __name__ == '__main__':
main()
| 42.021739
| 122
| 0.623642
|
dc2d46d817012c5aadc7b40ab481bf10a697f3c5
| 8,467
|
py
|
Python
|
docs/conf.py
|
JanFan/py-aho-corasick
|
6cb87b105b92a4e5fa4f9e7d11ad12c90ed39b7d
|
[
"MIT"
] | 23
|
2017-08-22T03:59:53.000Z
|
2020-09-20T07:49:05.000Z
|
docs/conf.py
|
Guangyi-Z/py-aho-corasick
|
6cb87b105b92a4e5fa4f9e7d11ad12c90ed39b7d
|
[
"MIT"
] | 2
|
2018-06-16T10:43:26.000Z
|
2018-07-15T21:35:29.000Z
|
docs/conf.py
|
JanFan/py-aho-corasick
|
6cb87b105b92a4e5fa4f9e7d11ad12c90ed39b7d
|
[
"MIT"
] | 9
|
2017-04-18T10:52:10.000Z
|
2020-04-29T08:54:26.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# py_aho_corasick documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import py_aho_corasick
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'py-aho-corasick'
copyright = u"2017, JanFan"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = py_aho_corasick.__version__
# The full version, including alpha/beta/rc tags.
release = py_aho_corasick.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'py_aho_corasickdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'py_aho_corasick.tex',
u'py-aho-corasick Documentation',
u'JanFan', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'py_aho_corasick',
u'py-aho-corasick Documentation',
[u'JanFan'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'py_aho_corasick',
u'py-aho-corasick Documentation',
u'JanFan',
'py_aho_corasick',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 30.677536
| 76
| 0.717255
|
f9f6ff4880f77c4fad69435f1c5d7a53ad66235b
| 1,949
|
py
|
Python
|
main.py
|
estuaryoss/estuary-discovery
|
9615a9d544670570f14f4c72ca20f57a0cd9bba4
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
estuaryoss/estuary-discovery
|
9615a9d544670570f14f4c72ca20f57a0cd9bba4
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
estuaryoss/estuary-discovery
|
9615a9d544670570f14f4c72ca20f57a0cd9bba4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import sys
from rest.api.constants.env_constants import EnvConstants
from rest.api.eureka_registrator import EurekaRegistrator
from rest.api.loghelpers.message_dumper import MessageDumper
from rest.api.routes import app, fluentd_service
from rest.environment.environment import EnvironmentSingleton
from rest.utils.env_startup import EnvStartupSingleton
if __name__ == "__main__":
cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None
port = EnvStartupSingleton.get_instance().get_config_env_vars().get(EnvConstants.PORT)
message_dumper = MessageDumper()
host = '0.0.0.0'
fluentd_tag = "startup"
if EnvStartupSingleton.get_instance().get_config_env_vars().get(EnvConstants.EUREKA_SERVER):
EurekaRegistrator(
EnvStartupSingleton.get_instance().get_config_env_vars().get(EnvConstants.EUREKA_SERVER)).register_app(
EnvStartupSingleton.get_instance().get_config_env_vars().get(EnvConstants.APP_IP),
EnvStartupSingleton.get_instance().get_config_env_vars().get(EnvConstants.PORT))
environ_dump = message_dumper.dump_message(EnvironmentSingleton.get_instance().get_env_and_virtual_env())
ip_port_dump = message_dumper.dump_message({"host": host, "port": port})
app.logger.debug({"msg": environ_dump})
app.logger.debug({"msg": ip_port_dump})
app.logger.debug({"msg": EnvStartupSingleton.get_instance().get_config_env_vars()})
fluentd_service.emit(tag=fluentd_tag, msg=environ_dump)
is_https = EnvStartupSingleton.get_instance().get_config_env_vars().get(EnvConstants.HTTPS_ENABLE)
https_cert_path = EnvStartupSingleton.get_instance().get_config_env_vars().get(EnvConstants.HTTPS_CERT)
https_prv_key_path = EnvStartupSingleton.get_instance().get_config_env_vars().get(EnvConstants.HTTPS_KEY)
app.run(host=host, port=port, ssl_context=(https_cert_path, https_prv_key_path) if is_https else None)
| 48.725
| 115
| 0.781426
|
c46812c3eb7419ce6b63007f9bd79f2bc7187aa8
| 4,625
|
py
|
Python
|
handlers/WechatKFZ/kfz_view.py
|
iamjing66/tornaodo_sdk
|
242a852d3231e5798aa357a17e5638c7dc0567b9
|
[
"MIT"
] | null | null | null |
handlers/WechatKFZ/kfz_view.py
|
iamjing66/tornaodo_sdk
|
242a852d3231e5798aa357a17e5638c7dc0567b9
|
[
"MIT"
] | null | null | null |
handlers/WechatKFZ/kfz_view.py
|
iamjing66/tornaodo_sdk
|
242a852d3231e5798aa357a17e5638c7dc0567b9
|
[
"MIT"
] | null | null | null |
import logging
import application
from handlers.WechatKFZ import kfz_authorize
from handlers.base import BaseHandler
from handlers.kbeServer.XREditor.Interface import interface_account
from methods.DBManager import DBManager
class WechatLoginCallBackRequest(BaseHandler):
def get(self):
print("self.get_argument = " , self.get_arguments)
code = self.get_argument("code",None)
state = self.get_argument("state",None)
logging.info("[wechatLogin] wechat callback - code = %s , state = %s " % (code,state))
if code and state:
json_torken = application.App.WechatLogin.get_auth_access_token(code)
#{'access_token': '52_tp1kOT1pOjtxGv5vgXeOSAgex8WNUNPfoGe3j8VJ0X7tEdTgqYyUoRwyrAvhXmAvlzJ56yqaXRt2yt-s0tbdpJyFrSRjsfx4IU9qi3w89Hs', 'expires_in': 7200,
# 'refresh_token': '52_hpYcSsUJanzgI1yQzN8LpFXt_9yexg0m6oCO4uSkVi5tslIWlfY-AxeKOIoVPyFckaxodkUGNnur2128Gqfxre8fYdAx679tpV_lQwkvl08',
# 'openid': 'oWlIt5unXOcY1NxIMZkButjOPbqs', 'scope': 'snsapi_login', 'unionid': 'o8eNzw__wuk6KgOWP43xEep4QtxQ'}
if json_torken:
access_token = json_torken["access_token"]
unionid = json_torken["unionid"]
openid = json_torken["openid"]
#o8eNzw__wuk6KgOWP43xEep4QtxQ
#o8eNzw__wuk6KgOWP43xEep4QtxQ
#openid = oWlIt5unXOcY1NxIMZkButjOPbqs
#{'openid': 'oWlIt5unXOcY1NxIMZkButjOPbqs', 'nickname': 'lyyym', 'sex': 0, 'language': '', 'city': '', 'province': '', 'country': '', 'headimgurl': 'https://thirdwx.qlogo.cn/mmopen/vi_32/Q0j4TwGTfTKOjmsCYpuqtJZ7LTnkYQfaoGic651kicHlTLiaLicfSxfJuLYAuJDib6n8f1oc5Cicj6NKSOdHHzjpCulw/132', 'privilege': [], 'unionid': 'o8eNzw__wuk6KgOWP43xEep4QtxQ'}
logging.info("[gettorken] openid = %s , unionid = %s"% (openid ,unionid))
DB = DBManager()
username = interface_account.JugeUserExist(DB,unionid)
if not username:
                    # Fetch the user info
json_userinfo = application.App.WechatLogin.get_WechatUserInfo(access_token,openid)
if json_userinfo:
nickname = json_userinfo["nickname"]
headimgurl = json_userinfo["headimgurl"]
sex = json_userinfo["sex"]
print("nickname = " , nickname)
print("headimgurl = ", headimgurl)
print("sex = ", sex)
                        # Register the account here
interface_account.InterfaceRegister(DB,unionid,'111111',nickname,headimgurl,True)
#logging.info("[userinfo]nickname = %s, , headimgurl = %s , sex = %s " % (nickname,headimgurl,str(sex)))
#application.App.Redis_Wechat.SaveCode(state,nickname,headimgurl,str(sex),unionid)
application.App.Redis_Wechat.SavUserName(state, unionid)
self.write("Wechat Login To Bind Phone")
else:
self.write("Wechat Login Error 1")
else:
self.write("Wechat Login Succ")
application.App.Redis_Wechat.SavUserName(state,unionid)
#{'openid': 'oWlIt5unXOcY1NxIMZkButjOPbqs', 'nickname': 'lyyym', 'sex': 0, 'language': '', 'city': '', 'province': '', 'country': '',
# 'headimgurl': 'https://thirdwx.qlogo.cn/mmopen/vi_32/Q0j4TwGTfTKOjmsCYpuqtJZ7LTnkYQfaoGic651kicHlTLiaLicfSxfJuLYAuJDib6n8f1oc5Cicj6NKSOdHHzjpCulw/132', 'privilege': [],
# 'unionid': 'o8eNzw__wuk6KgOWP43xEep4QtxQ'}
#logging.info("[userinfo]json_userinfo = %s" % json_userinfo)
DB.close()
else:
self.write("Wechat Login Error 2")
else:
self.write("Wechat Login Error 3")
#print("call back get code = " , code)
def post(self):
print("call back post")
class WechatLoginRequest(BaseHandler):
def get(self):
#json_data = interface_account.WechatLogin()
#self.write(json_data)
state = self.get_argument("state")
codeUrl = application.App.WechatLogin.get_code_url(state)
logging.info("[wechatLogin] codeUrl = %s , state = %s" % (codeUrl,state))
#codeUrl = https://open.weixin.qq.com/connect/qrconnect?appid=wx74b1fd3e0df1b73a&redirect_uri=http%3A%2F%2F29w1v17148.qicp.vip%2Fwechatkfz%2Fcodeget&response_type=code&scope=snsapi_login&state=18740487328 ,
# state = 18740487328
self.redirect(codeUrl)
| 49.731183
| 361
| 0.620324
|
6b5f71ca96ed30f383806df4f641ed6dfaaaed38
| 9,334
|
py
|
Python
|
datamodels/scripts/find_me_another.py
|
mwregan2/MiriTE
|
6b65939454db60bf10619d50fcb5769d23598b76
|
[
"CNRI-Python"
] | null | null | null |
datamodels/scripts/find_me_another.py
|
mwregan2/MiriTE
|
6b65939454db60bf10619d50fcb5769d23598b76
|
[
"CNRI-Python"
] | 24
|
2019-08-09T15:03:20.000Z
|
2022-03-04T10:04:48.000Z
|
datamodels/scripts/find_me_another.py
|
mwregan2/MiriTE
|
6b65939454db60bf10619d50fcb5769d23598b76
|
[
"CNRI-Python"
] | 4
|
2019-06-16T15:03:23.000Z
|
2020-12-02T19:51:52.000Z
|
#!/usr/bin/env python
#
# :History:
#
# 21 Jun 2016: Created.
# 20 Jan 2017: Replaced "clobber" parameter with "overwrite".
# 17 Oct 2018: Metadata wildcards now use 'N/A' instead of 'ANY'.
#
# @author: Steven Beard (UKATC)
#
"""
Script `find_me_another` takes a calibration data product, or a file
obtained from the JWST calibration reference system, and finds another
example from within the MIRI CDP repository.
A typical use of this script would be to replace a calibration file
extracted from the CRDS with a more recent version from the MIRI CDP
repository (before the new version has had a chance to be copied to the
CRDS).
Note that matching CDP files are extracted from the ftp repository and
are saved in the local CDP cache. A copy of this file is written to
the name specified in the outputfile parameter. A copy is written ONLY
if the input file is a recognised CDP and the file extracted from the
ftp repository has a different version number.
The following command arguments are defined by position::
inputfile[0]
The path+name of the file to be read as the example.
outputfile[1]
The path+name of the new file to be written.
Optional. If not given, defaults to <inputfile>_new.fits.
The command also takes the following options::
--datatype <type-string>
The name of the data type to be used to read the product.
If specified, this option overrides the TYPE keyword
contained in the input file.
--cdprelease
The CDP release from which the new CDP file is to be imported.
Defaults to the latest release.
--cdpversion
The CDP version from which the new CDP file is to be imported.
Defaults to the latest version.
--cdpsubversion
The CDP subversion from which the new CDP file is to be imported.
Defaults to the latest subversion.
--verbose or -v
Print the new model before saving it.
--overwrite or -o
Overwrite any existing FITS file.
"""
import optparse
import sys, time
import warnings
import astropy.io.fits as pyfits
from miri.datamodels.cdplib import get_cdp
def get_cdp_metadata( filename ):
"""
Helper function which extracts the CDP metadata from the FITS header
of a file.
"""
hdulist = None
try:
hdulist = pyfits.open( filename )
if hdulist is not None:
header = hdulist[0].header
header_keys = list(header.keys())
if 'REFTYPE' in header or 'REFTYPE' in header_keys:
# There is a new data type keyword in the header.
datatype = header['REFTYPE']
elif 'TYPE' in header or 'TYPE' in header_keys:
# There is an old data type keyword in the header.
datatype = header['TYPE']
else:
datatype = ''
if 'DETECTOR' in header or 'DETECTOR' in header_keys:
# There is a detector keyword in the header.
detector = header['DETECTOR']
else:
detector = 'N/A'
if 'READPATT' in header or 'READPATT' in header_keys:
# There is a detector keyword in the header.
readpatt = header['READPATT']
else:
readpatt = 'N/A'
if 'SUBARRAY' in header or 'SUBARRAY' in header_keys:
# There is a detector keyword in the header.
subarray = header['SUBARRAY']
else:
subarray = 'N/A'
if 'CHANNEL' in header or 'CHANNEL' in header_keys:
# There is a filter keyword in the header.
channel = header['CHANNEL']
else:
channel = 'N/A'
if 'BAND' in header or 'BAND' in header_keys:
# There is a filter keyword in the header.
band = header['BAND']
else:
band = 'N/A'
if 'FILTER' in header or 'FILTER' in header_keys:
# There is a filter keyword in the header.
mirifilter = header['FILTER']
else:
mirifilter = 'N/A'
if 'VERSION' in header or 'VERSION' in header_keys:
# There is a CDP version keyword in the header.
version = header['VERSION']
else:
version = ''
except Exception as e:
strg = "Failed to open FITS file, \'%s\'\n" % filename
strg += " %s: %s" % (e.__class__.__name__, str(e))
raise IOError(strg)
finally:
if hdulist is not None:
hdulist.close()
del hdulist
return (datatype, detector, readpatt, subarray, channel, band,
mirifilter, version)
if __name__ == "__main__":
# Parse arguments
help_text = __doc__
usage = "%prog [opt] inputfile [outputfile]\n"
usage += "Finds another (usually more recent) version of any "
usage += "MIRI calibration data product."
parser = optparse.OptionParser(usage)
parser.add_option("", "--datatype", dest="datatype", type="string",
default=None, help="Data type to use (overriding TYPE)"
)
parser.add_option("", "--cdprelease", dest="cdprelease", type="string",
default=None, help="CDP release to be searched"
)
parser.add_option("", "--cdpversion", dest="cdpversion", type="string",
default=None, help="CDP version to be searched"
)
parser.add_option("", "--cdpsubversion", dest="cdpsubversion", type="string",
default=None, help="CDP subversion to be searched"
)
parser.add_option("-v", "--verbose", dest="verb", action="store_true",
help="Verbose mode"
)
parser.add_option("-o", "--overwrite", dest="overwrite", action="store_true",
help="Overwrite the copy of the file if it already exists"
)
(options, args) = parser.parse_args()
try:
inputfile = args[0]
if len(args) > 1:
outputfile = args[1]
else:
outputfile = inputfile + "_new.fits"
except IndexError:
print(help_text)
time.sleep(1) # Ensure help text appears before error messages.
parser.error("Not enough arguments provided")
sys.exit(1)
verb = options.verb
overwrite = options.overwrite
if options.datatype:
# Use the data type specified
datatype = str(options.datatype)
print("Forcing the data model to be opened with type \'%s\'" % datatype)
else:
datatype = ''
if options.cdprelease:
cdprelease = options.cdprelease
else:
cdprelease = None
if options.cdpversion:
cdpversion = options.cdpversion
else:
cdpversion = None
if options.cdpsubversion:
cdpsubversion = options.cdpsubversion
else:
cdpsubversion = None
# Obtain metadata from the example data model.
(datatype, detector, readpatt, subarray, channel, band, \
mirifilter, version) = get_cdp_metadata( inputfile )
if datatype:
# Attempt to find an alternative version of this data model
strg = "Searching for a " + str(datatype) + " CDP"
if detector != 'ANY' and detector != 'N/A':
strg += ", DETECTOR=" + str(detector)
if readpatt != 'ANY' and readpatt != 'N/A':
strg += ", READPATT=" + str(readpatt)
if subarray != 'ANY' and subarray != 'N/A':
strg += ", SUBARRAY=" + str(subarray)
if (band != 'ANY' and band != 'N/A') or (channel != 'ANY' and channel != 'N/A'):
strg += ", BAND=" + str(band) + ", CHANNEL=" + str(channel)
if mirifilter != 'ANY' and mirifilter != 'N/A':
strg += ", FILTER=" + str(mirifilter)
strg += "..."
print(strg)
        # Pass along any CDP release/version restrictions given on the command line.
        newmodel = get_cdp(datatype, model='FM', detector=detector,
                           readpatt=readpatt, channel=channel,
                           band=band, mirifilter=mirifilter, subarray=subarray,
                           integration=None,
                           cdprelease=cdprelease, cdpversion=cdpversion,
                           cdpsubversion=cdpsubversion,
                           ftp_host=None, ftp_path=None, ftp_user='miri',
                           ftp_passwd='', local_path=None, cdp_env_name='CDP_DIR',
                           miri_env_name='MIRI_ENV', fail_message=True)
# If this has worked, check the new model is different from the original
# and, if so, save the new model to a new file.
if newmodel is not None:
if verb:
print(newmodel)
newversion = newmodel.meta.version
if newversion != version:
print("Saving new version of this CDP to %s." % outputfile)
newmodel.save( outputfile, overwrite=overwrite )
else:
strg = "New CDP has exactly the same version number (%s)." % \
newversion
strg += " Not saved."
print(strg)
else:
print("Matching CDP could not be found.")
del newmodel
else:
print("Input file does not look like a MIRI CDP. Data type unknown.")
| 36.748031
| 88
| 0.581208
|
e89e8240e779b79ac594feebb4ccb1ef9fb6908f
| 4,580
|
py
|
Python
|
Tools/convert2.py
|
tianqichongzhen/ProgramPrac
|
5e575f394179709a4964483308b91796c341e45f
|
[
"Apache-2.0"
] | 2
|
2019-01-12T13:54:52.000Z
|
2021-09-13T12:47:25.000Z
|
Tools/convert2.py
|
Johnwei386/Warehouse
|
5e575f394179709a4964483308b91796c341e45f
|
[
"Apache-2.0"
] | null | null | null |
Tools/convert2.py
|
Johnwei386/Warehouse
|
5e575f394179709a4964483308b91796c341e45f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# _*_ coding:utf8 _*_
# python2.7
import os
import argparse
# Unicode code points of common Chinese punctuation marks
puncs = set([u'\u3002', u'\uFF1F', u'\uFF01', u'\uFF0C', u'\u3001',
u'\uFF1B', u'\uFF1A', u'\u300C', u'\u300D', u'\u300E',
u'\u300F', u'\u2018', u'\u2019', u'\u201C', u'\u201D',
u'\uFF08', u'\uFF09', u'\u3014', u'\u3015', u'\u3010',
             u'\u3011', u'\u2014', u'\u2026', u'\u2013', u'\uFF0E',
u'\u300A', u'\u300B', u'\u3008', u'\u3009', u'"'])
def is_chinese(uchar):
"""判断一个unicode是否是汉字"""
if uchar >= u'\u4e00' and uchar<=u'\u9fa5':
return True
    elif uchar in puncs:  # Chinese punctuation also counts as a Chinese character
return True
else:
return False
def is_number(uchar):
"""判断一个unicode是否是数字"""
if uchar >= u'\u0030' and uchar<=u'\u0039':
return True
else:
return False
def is_alphabet(uchar):
"""判断一个unicode是否是英文字母"""
if (uchar >= u'\u0041' and uchar <= u'\u005a') or (uchar >= u'\u0061' and uchar <= u'\u007a'):
return True
else:
return False
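# Illustrative results for the classification helpers above (examples only):
#   is_chinese(u'\u4f60')  -> True   (CJK ideograph in U+4E00..U+9FA5)
#   is_chinese(u'\u3002')  -> True   (ideographic full stop, listed in puncs)
#   is_number(u'7')        -> True
#   is_alphabet(u'G')      -> True
#   is_alphabet(u'\u4f60') -> False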
def test_chartype_judge(rstr):
print(rstr)
ustr = rstr.decode('utf8')
line = list(ustr)
chinese = []
english = []
number = []
special = []
for word in line:
if is_chinese(word):
chinese.append(word)
elif is_alphabet(word):
english.append(word)
elif is_number(word):
number.append(word)
else:
special.append(word)
print(line)
print(' ')
    print('chinese:', chinese)
print('english:', english)
print('number:', number)
print('special:', special)
def discriminator(chars):
ret = []
non_chinese = []
for char in chars:
if not is_chinese(char):
non_chinese.append(char)
else:
if non_chinese:
ret.append(non_chinese)
non_chinese = []
ret.append(char)
if non_chinese:
ret.append(non_chinese)
return ret
def combine(part):
if isinstance(part, list):
ret = ''.join(part)
else:
ret = part
return ret
def all_char_not_ch(chars):
ret = True
for char in chars:
if is_chinese(char):
ret = False
break
return ret
def transpose(args):
    # Convert the word-segmentation dataset to .conll-format data
    source = args.sdata  # name of the source dataset
    target = args.tdata  # name of the target dataset to be written
assert source is not None
assert target is not None
    delimiter = args.delimiter  # delimiter between word segments
DIR = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.join(DIR, source)
target_path = os.path.join(DIR, target)
assert os.path.exists(data_path),"data path is not existed!"
sentences = []
with open(data_path, 'r') as f:
sentence = []
buff = []
for line in f:
words = line.decode('utf8')
#print(words)
#assert False
for char in words:
                if is_chinese(char):  # the current character is Chinese
                    if buff:  # buff is not empty
sentence.append(''.join(buff))
buff = []
sentence.append(char)
else:
                    # treat non-Chinese characters as a single unit
buff.append(char)
#print(sentence)
#assert False
sentences.append(sentence)
sentence = []
#print(''.join(sentences[0]))
#print(sentences[0])
#assert False
with open(target_path, 'w') as f:
for sen in sentences:
#print(sen)
line = ''
for char in sen:
line = line + ' ' + char.encode('utf8')
            line = line.strip()
f.write(line)
#print(line)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Convert datasets to .conll format')
parser.add_argument('-s', '--sdata',
type=str,
default=None,
help='location of source data corpus name')
parser.add_argument('-t', '--tdata',
type=str,
default=None,
help='being converted corpus name')
parser.add_argument('-d', '--delimiter',
type=str,
default=None,
help='set dataset delimiter default: one space')
args = parser.parse_args()
transpose(args)
#str1 = "绿箭侠123You have(*´ω`*) o(=•ω•=)m你好、?‘’;"
#test_chartype_judge(str1)
| 27.42515
| 98
| 0.513974
|
51d16229be1fe396fcb9dcf6e7107aebf92e7c84
| 4,156
|
py
|
Python
|
server/server/world/zone.py
|
dablum25/test-game
|
28a55beea42c593ef18419dd72abc660a12477d1
|
[
"MIT"
] | null | null | null |
server/server/world/zone.py
|
dablum25/test-game
|
28a55beea42c593ef18419dd72abc660a12477d1
|
[
"MIT"
] | null | null | null |
server/server/world/zone.py
|
dablum25/test-game
|
28a55beea42c593ef18419dd72abc660a12477d1
|
[
"MIT"
] | null | null | null |
import pytmx
import time
from astar import *
from monsterspawn import MonsterSpawn
from npcspawn import NpcSpawn
from warp import Warp
import ConfigParser
def load_zones(world):
config = ConfigParser.RawConfigParser()
config.read('data/zones.ini')
for name in config.sections():
title = config.get(name,'title')
source = config.get(name,'source')
north = config.get(name,'north')
south = config.get(name,'south')
east = config.get(name,'east')
west = config.get(name,'west')
borders = { 'north': north, 'south': south, 'east': east, 'west': west }
world.zones[name] = Zone(name, source, title, borders, world)
class Zone:
'''
Zone with astar pathfinding.
'''
def __init__(self, name, source, title, borders, world):
self.name = name
self.source = source
self.title = title
self.world = world
self.borders = borders
# Logic to load zone file
self.data = pytmx.TiledMap(source)
self.width = self.data.width
self.height = self.data.height
self.blocked = self.data.layers.index(self.data.get_layer_by_name('blocked'))
self.graph = GridWithWeights(self.width, self.height)
self.graph.walls = [ (x,self.height - y - 1) for x,y,gid in self.data.layers[self.blocked].tiles() ]
for o in self.data.objects:
if o.type == 'monster_spawn':
x = int(o.x/32)
y = self.height - int(o.y/32) - 1
w = int(o.width/32)
h = int(o.height/32)
max_spawn = int(o.properties['max_spawn'])
spawn_delay = float(o.properties['spawn_delay'])
monster_name = o.name
# Create monster spawn
MonsterSpawn(monster_name, x, y, w, h, self.name, max_spawn, spawn_delay, self.world)
if o.type == 'npc_spawn':
x = int(o.x/32)
y = self.height - int(o.y/32) - 1
w = int(o.width/32)
h = int(o.height/32)
max_spawn = int(o.properties['max_spawn'])
spawn_delay = float(o.properties['spawn_delay'])
npc_name = o.name
# Create npc spawn
NpcSpawn(npc_name, x, y, w, h, self.name, max_spawn, spawn_delay, self.world)
if o.type == 'warp':
x = int(o.x/32)
y = self.height - int(o.y/32) - 1
#w = int(o.width/32)
#h = int(o.height/32)
end_zone = o.properties['end_zone']
end_x = int(o.properties['end_x'])
end_y = int(o.properties['end_y'])
self.world.warps.append(Warp(self.name, x, y, end_zone, end_x, end_y))
print "Loaded ZONE",self.name
def heuristic(self, a,b):
(x1,y1) = a
(x2,y2) = b
return abs(x1 - x2) + abs(y1 - y2)
def get_path(self, start, goal):
frontier = PriorityQueue()
frontier.put(start, 0)
came_from = {}
cost_so_far = {}
came_from[start] = None
cost_so_far[start] = 0
if start == goal:
return []
if goal[0] > self.width:
return []
if goal[0] < 0:
return []
if goal[1] > self.height:
return []
if goal[1] < 0:
return []
if goal in self.graph.walls:
return []
while not frontier.empty():
current = frontier.get()
if current == goal:
break
for next in self.graph.neighbors(current):
new_cost = cost_so_far[current] + self.graph.cost(current, next)
if next not in cost_so_far or new_cost < cost_so_far[next]:
cost_so_far[next] = new_cost
priority = new_cost + self.heuristic(goal, next)
frontier.put(next, priority)
came_from[next] = current
path = [ current ]
while current != start:
current = came_from[current]
path.append(current)
path.reverse()
path.pop(0)
return path
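  # Note on get_path above (illustrative, hypothetical coordinates): a call such
  # as zone.get_path((1, 1), (4, 1)) returns a list of (x, y) steps ending at the
  # goal; the start tile is dropped by the final path.pop(0), and [] is returned
  # when the goal is blocked, off the map, or equal to the start.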
def open_at(self, x, y):
'''
Determine if x,y are open for movement.
'''
if x > self.width - 1:
return False
elif x < 0:
return False
elif y > self.height - 1:
return False
elif y < 0:
return False
elif self.data.get_tile_gid(x, self.height - y - 1, self.blocked) > 0:
return False
else:
return True
| 26.138365
| 105
| 0.587103
|
579d82619bcd958efef2ca6c3d62e2f8d84c0916
| 18,167
|
py
|
Python
|
pipelineutilities/pipelineutilities/search_files.py
|
ndlib/mellon-manifest-pipeline
|
aa90494e73fbc30ce701771ac653d28d533217db
|
[
"Apache-2.0"
] | 1
|
2021-06-27T15:16:13.000Z
|
2021-06-27T15:16:13.000Z
|
pipelineutilities/pipelineutilities/search_files.py
|
ndlib/marble-manifest-pipeline
|
abc036e4c81a8a5e938373a43153e2492a17cbf8
|
[
"Apache-2.0"
] | 8
|
2019-11-05T18:58:23.000Z
|
2021-09-03T14:54:42.000Z
|
pipelineutilities/pipelineutilities/search_files.py
|
ndlib/mellon-manifest-pipeline
|
aa90494e73fbc30ce701771ac653d28d533217db
|
[
"Apache-2.0"
] | null | null | null |
import boto3
import re
import os
from datetime import datetime, date, timedelta, timezone
from pathlib import Path
from urllib.parse import urlparse
# saved live path
# "libnd-smb-rbsc": ["digital/bookreader", "collections/ead_xml/images"]
bucket_to_url = {
# "libnd-smb-rbsc": 'https://rarebooks.library.nd.edu/',
# "rbsc-test-files": 'https://rarebooks.library.nd.edu/',
"libnd-smb-marble": "https://marbleb-multimedia.library.nd.edu/",
"mlk-multimedia-333680067100": 'https://mlk-multimedia.library.nd.edu/',
"steve-multimedia-333680067100": 'https://steve-multimedia.libraries.nd.edu/',
"sm-multimedia-333680067100": 'https://sm-multimedia.libraries.nd.edu/',
"marble-multimedia-333680067100": 'https://marble-multimedia.library.nd.edu/',
"marble-multimedia-test-333680067100": 'https://marble-multimedia-test.library.nd.edu/',
"marble-multimedia-230391840102": 'https://marble-multimedia.library.nd.edu/',
"marble-multimedia-test-230391840102": 'https://marble-multimedia-test.library.nd.edu/',
"marbleb-multimedia-230391840102": 'https://marble-multimedia.library.nd.edu/',
"marbleb-multimedia-test-230391840102": 'https://marble-multimedia-test.library.nd.edu/',
}
folders_to_crawl = [
# "digital",
# "collections/ead_xml/images",
# "audio",
# "video"
"public-access",
"Aleph",
"ArchivesSpace",
"Curate",
# "other"
]
# patterns we skip if the file matches these
skip_files = [
r"^.*[.]072[.]jpg$",
r"^.*[.]100[.]jpg$",
r"^[.]_.*$",
r"^_.*$",
]
# patterns we skip if the folder matches these
skip_folders = [
r"^.*resource.frk.*$",
r"^.*resourc[0-9].frk.*$",
]
# patterns that correspond to urls we can parse
valid_urls = [
r"http[s]?:[/]{2}rarebooks[.]library.*",
r"http[s]?:[/]{2}rarebooks[.]nd.*",
r"http[s]?:[/]{2}.*-multimedia[.]library.*",
r"http[s]?:[/]{2}.*-multimedia[.]nd.*",
]
regexps = {
# "ead_xml": [
# r"([a-zA-Z]{3}-[a-zA-Z]{2}_[0-9]{4}-[0-9]+)",
# r"([a-zA-Z]{3}_[0-9]{2,4}-[0-9]+)",
# r"(^[0-9]{4}-[0-9]{2})",
# ],
# "MARBLE-images": [
# r"([a-zA-Z]{3}_[0-9]{9})",
# r"([a-zA-Z]{3}-[a-zA-Z]{3}_[0-9]{4})",
# r"([a-zA-Z]{3}-[a-zA-Z]{3}_[0-9]{3}-[0-9]{3})",
# r"(^[a-zA-Z]{4}_[0-9]{4}-[0-9]{2})",
# ],
# "letters": [
# r"(^[0-9]{4}-[0-9]{2})",
# ],
# "colonial_american": [
# r"(^[0-9]{4}-[0-9]{2})",
# ],
# "diaries_journals": [
# r"(^[0-9]{4})",
# r"([a-zA-Z]{3}-[a-zA-Z]{2}_[0-9]{4})",
# ],
# "papers_personal": [
# r"(^[0-9]{4}-[0-9]{2})",
# ],
# "digital": [
# r"(^El_Duende)",
# r"(^Newberry-Case_[a-zA-Z]{2}_[0-9]{3})",
# r"([a-zA-Z]{3}-[a-zA-Z]{2}_[0-9]{4}-[0-9]+)",
# r"(^.*_(?:[0-9]{4}|[a-zA-Z][0-9]{1,3}))",
# r"(^[0-9]{4})",
# ],
# "audio": [
# r"/([^/]*)/[^/]*\.mp3", # Gets the directory the .mp3 is in
# r"/([^/]*)/[^/]*\.wav", # Gets the directory the .wav is in
# ],
# "video": [
# r"/([^/]*)/[^/]*\.mp4", # Gets the directory the .mp4 is in
# ],
"Aleph": [
r"([a-zA-Z]{3}_[0-9]{9})",
r"([a-zA-Z]{3}-[a-zA-Z]{3}_[0-9]{4})",
r"([a-zA-Z]{3}-[a-zA-Z]{3}_[0-9]{3}-[0-9]{3})",
r"(^[a-zA-Z]{4}_[0-9]{4}-[0-9]{2})",
],
"ArchivesSpace": [
r"(^[a-zA-Z0-9]+-[a-zA-Z0-9]+_[a-zA-Z0-9]+-[0-9]+)",
r"(^[a-zA-Z0-9]+-[a-zA-Z0-9]+_[a-zA-Z0-9]+)",
r"(^[a-zA-Z]{3}_[a-zA-Z0-9]+)",
r"(^[a-zA-Z]{4}_[0-9]{4}-[0-9]+)",
],
"Curate": [
r"/([^/]*)/([^/]*)/[^/]*\.tiff?",
],
"other": [
r"([a-zA-Z]{3}_[0-9]{9})",
r"([a-zA-Z]{3}-[a-zA-Z]{3}_[0-9]{4})",
r"([a-zA-Z]{3}-[a-zA-Z]{3}_[0-9]{3}-[0-9]{3})",
r"(^[a-zA-Z]{4}_[0-9]{4}-[0-9]{2})",
],
"public-access": [ # assumption is that format will be: public-access/media/parentId/filename we want to return media/parentId/filename, and will chose parentId as GroupId
r"/([^/]*)/([^/]*)/[^/]*\.mp4", # Gets the directory the file is in
r"/([^/]*)/([^/]*)/[^/]*\.mp3", # Gets the directory the file is in
r"/([^/]*)/([^/]*)/[^/]*\.wav", # Gets the directory the file is in
r"/([^/]*)/([^/]*)/[^/]*\.pdf", # Gets the directory the file is in
],
}
# Regexps for these folders should use the full path as input instead of just the filename
full_path_folders = [
# "audio",
# "video",
"public-access"
]
# urls in this list do not have a group note in the output of the parse_filename function
urls_without_a_group = [
r"^[a-zA-Z]+_[a-zA-Z][0-9]{2}.*$", # CodeLat_b04
]
folder_exposed_through_cdn = 'public-access'
def id_from_url(url):
if not url_can_be_harvested(url):
return False
url = urlparse(url)
file = os.path.basename(url.path)
if file_should_be_skipped(url.path):
return False
test_expressions = []
use_full_path = False
for key in regexps:
if key in url.path:
test_expressions = regexps[key]
if key in full_path_folders:
use_full_path = True
break
for exp in test_expressions:
test = re.search(exp, url.path if use_full_path else file)
if test:
if use_full_path:
return test.group(2)
return test.group(1)
return False
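# Illustrative behaviour of id_from_url above (hypothetical URL): for
#   https://marble-multimedia.library.nd.edu/public-access/media/abc-123/video.mp4
# the "public-access" expressions are matched against the full path and the
# function returns the parent directory name, "abc-123", which callers use as
# the group id.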
def get_matching_s3_objects(bucket, prefix="", suffix=""):
"""
Generate objects in an S3 bucket.
:param bucket: Name of the S3 bucket.
:param prefix: Only fetch objects whose key starts with
this prefix (optional).
:param suffix: Only fetch objects whose keys end with
this suffix (optional).
"""
s3 = boto3.client("s3")
paginator = s3.get_paginator("list_objects_v2")
kwargs = {'Bucket': bucket}
# We can pass the prefix directly to the S3 API. If the user has passed
# a tuple or list of prefixes, we go through them one by one.
if isinstance(prefix, str):
prefixes = (prefix, )
else:
prefixes = prefix
for key_prefix in prefixes:
kwargs["Prefix"] = key_prefix
for page in paginator.paginate(**kwargs):
try:
contents = page["Contents"]
except KeyError:
return
for obj in contents:
key = obj["Key"]
if key.endswith(suffix):
yield obj
def url_can_be_harvested(url):
for exp in valid_urls:
if re.match(exp, url):
return True
return False
def file_should_be_skipped(file_path):
file_name = os.path.basename(file_path)
folder_name = os.path.dirname(file_path)
for exp in skip_files:
if re.match(exp, file_name):
return True
for exp in skip_folders:
if re.match(exp, folder_name):
return True
return False
def make_label(url, id):
label = url.replace(id, "")
label = label.replace(".jpg", "")
label = label.replace(".tif", "")
label = label.replace(".tiff", "")
label = label.replace(".mp3", "")
label = label.replace(".mp4", "")
label = label.replace(".wav", "")
label = label.replace("-", " ")
label = label.replace("_", " ")
label = label.replace(".", " ")
label = re.sub(' +', ' ', label)
return label.strip()
def _convert_dict_to_camel_case(obj: dict) -> dict:
keys_to_remove = []
for k, v in dict(obj).items():
if re.match("^[A-Z]{1}.*", k):
obj[k[0].lower() + k[1:]] = v
keys_to_remove.append(k)
for k in keys_to_remove:
del obj[k]
return obj
def get_url_from_bucket_plus_key(bucket: str, key: str) -> str:
""" Added to simplify creating url from key, especially in testing environment """
if bucket in bucket_to_url:
return bucket_to_url[bucket] + key
else:
return bucket_to_url["marbleb-multimedia-230391840102"] + key
def crawl_available_files(config: dict, bucket: str):
order_field = {}
print("crawling image files in this bucket: ", bucket)
for directory in folders_to_crawl:
objects = get_matching_s3_objects(bucket, directory)
for obj in objects:
key = obj.get('Key')
if is_tracked_file(key):
url = get_url_from_bucket_plus_key(bucket, key)
id = id_from_url(url)
if id:
obj = _convert_dict_to_camel_case(obj)
if 'eTag' in obj:
obj['eTag'] = obj['eTag'].replace('"', '') # strip duplicated quotes: {'ETag': '"8b50cfed39b7d8bcb4bd652446fe8adf"'} # noqa: E501
if not order_field.get(id, False):
order_field[id] = {
"fileId": id,
"sourceType": "S3",
"source": bucket,
"lastModified": False,
"directory": os.path.dirname(key),
"files": [],
}
last_modified_iso = obj['lastModified'].isoformat()
obj['lastModified'] = obj['lastModified'].isoformat()
if not order_field[id]["lastModified"] or last_modified_iso > order_field[id]["lastModified"]:
order_field[id]["lastModified"] = last_modified_iso
augement_file_record(obj, id, url, config, bucket)
order_field[id]['files'].append(obj)
return order_field
def list_updated_files(config: dict, bucket: str, minutes_to_test: int):
print("crawling image files in this bucket: ", bucket)
time_threshold_for_processing = determine_time_threshold_for_processing(minutes_to_test).isoformat()
for directory in folders_to_crawl:
files = get_matching_s3_objects(bucket, directory)
for file in files:
if is_tracked_file(file.get('Key')):
url = get_url_from_bucket_plus_key(bucket, file.get('Key'))
id = id_from_url(url)
file = _convert_dict_to_camel_case(file)
if id and file['lastModified'].isoformat() >= time_threshold_for_processing:
augement_file_record(file, id, url, config, bucket)
yield file
def list_all_files(config: dict, bucket: str):
print("crawling image files in this bucket: ", bucket)
for directory in folders_to_crawl:
objects = get_matching_s3_objects(bucket, directory)
for obj in objects:
if is_tracked_file(obj.get('Key')):
url = get_url_from_bucket_plus_key(bucket, obj.get('Key'))
id = key_to_id(obj.get('Key'))
augement_file_record(obj, id, url, config, bucket)
yield obj
def list_all_directories(config: dict, bucket: str):
order_field = {}
print("crawling image files in this bucket: ", bucket)
for directory in folders_to_crawl:
objects = get_matching_s3_objects(bucket, directory)
for obj in objects:
if is_tracked_file(obj.get('Key')):
key = obj.get('Key')
url = get_url_from_bucket_plus_key(bucket, key)
if is_directory(key):
directory = key
else:
directory = os.path.dirname(key)
directory_id = key_to_id(directory)
id = id_from_url(url)
if id:
id = key_to_id(id)
if not order_field.get(directory_id, False):
order_field[directory_id] = {
"id": directory_id,
"path": directory,
"objects": {},
}
if not order_field[directory_id]['objects'].get(id, False):
order_field[directory_id]['objects'][id] = {
"id": id,
"path": directory,
"label": id.replace(directory_id, "").ltrim("-").replace("-", " "),
"directory_id": directory,
"Source": "RBSC" if bucket == config['rbsc-image-bucket'] else "Multimedia",
"LastModified": False,
"files": [],
}
                    last_modified_iso = obj['LastModified'].isoformat()
                    obj['LastModified'] = last_modified_iso
                    if not order_field[directory_id]['objects'][id]["LastModified"] or last_modified_iso > order_field[directory_id]['objects'][id]["LastModified"]:
                        order_field[directory_id]['objects'][id]["LastModified"] = last_modified_iso
                    # augement_file_record expects camelCase S3 keys ('key', 'lastModified')
                    obj = _convert_dict_to_camel_case(obj)
                    augement_file_record(obj, id, url, config, bucket)
order_field[directory_id]['objects'][id]['files'].append(obj)
return order_field
def key_to_id(key):
return key.lstrip("/").replace("/", "-")
def is_directory(file):
return file and re.match(".*[/]$", file) and not re.match("^[.]", file)
def augement_file_record(obj: dict, id: str, url: str, config: dict, bucket: str) -> dict:
''' Note: This was changed 7/2/2021 to rename *.pdf to *.tif so the Marble image processor will deal with pdfs correctly. '''
obj['fileId'] = id
obj['label'] = make_label(url, id)
obj['sourceType'] = 'S3'
obj['source'] = bucket
obj['path'] = obj['key'].replace('public-access/', '')
obj["sourceBucketName"] = bucket
obj["sourceFilePath"] = obj.get('key')
in_cdn_folder_flag = folder_exposed_through_cdn in obj.get('key')
file_extension = os.path.splitext(obj.get('key'))[1].lower()
if bucket == config.get('marble-content-bucket') and in_cdn_folder_flag and is_media_file(config.get('media-file-extensions', []), obj.get('key')):
# references to this file will only be through the CDN, not through S3
obj['filePath'] = obj.get('key').replace('public-access/', '')
obj['sourceType'] = 'Uri'
obj['mediaGroupId'] = id
obj['mediaServer'] = config['media-server-base-url']
obj['mediaResourceId'] = obj.get('filePath').replace('/', '%2F')
obj['sourceUri'] = os.path.join(obj.get('mediaServer'), obj.get('mediaResourceId'))
obj['typeOfData'] = 'Multimedia bucket'
elif (not in_cdn_folder_flag) and (file_extension in config['image-file-extensions'] or file_extension == '.pdf'):
file_path_without_extension = os.path.splitext(obj.get('key'))[0].replace('public-access/', '')
obj['filePath'] = obj.get('key')
if file_extension in ('.jpg', '.tif'):
obj['filePath'] = file_path_without_extension + '.tif'
if file_extension == '.pdf':
update_pdf_fields(obj)
file_extension = '.pdf'
obj['objectFileGroupId'] = id
obj['imageGroupId'] = id
obj['mediaServer'] = config['image-server-base-url']
file_path_no_extension = os.path.join(Path(obj.get('filePath')).parent, Path(obj.get('filePath')).stem)
obj['mediaResourceId'] = file_path_no_extension.replace('/', '%2F')
else:
obj = {}
if obj and file_extension and 'mimeType' not in obj:
obj['mimeType'] = _get_mime_type_given_file_extension(file_extension)
return obj
def determine_time_threshold_for_processing(time_in_min):
""" Creates the datetime object that is used to test all the files against """
time_threshold_for_processing = datetime.utcnow() - timedelta(minutes=time_in_min)
# since this is utc already but there is no timezone add it in so
# the data can be compared to the timze zone aware date in file
return time_threshold_for_processing.replace(tzinfo=timezone.utc)
def is_tracked_file(file):
if file_should_be_skipped(file):
return False
return re.match(r"^.*[.]((jpe?g)|(tiff?)|(pdf)|(wav)|(mp[34]))$", file, re.IGNORECASE)
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
raise TypeError("Type %s not serializable" % type(obj))
def _get_mime_type_given_file_extension(file_extension: str) -> str:
if not file_extension:
return ""
if file_extension in ['.tif', '.tiff']:
return 'image/tiff'
elif file_extension in ['.pdf']:
return 'application/pdf'
elif file_extension in ['.mp3']:
return 'audio/mpeg'
elif file_extension in ['.mp4']:
return 'video/mp4'
elif file_extension in ['.wav']:
return 'audio/wav'
return ''
def is_media_file(media_file_extensions: list, file_name: str) -> bool:
""" If the file extension is in a list of media_file_extensions, then return True (this is a media file), else return False """
file_extension = Path(file_name).suffix
if file_extension in media_file_extensions:
return True
return False
def update_pdf_fields(standard_json: dict):
fields = ['id', 'filePath', 'description', 'title', 'path']
for field in fields:
if field in standard_json:
standard_json[field] = standard_json.get(field).replace('.pdf', '.tif')
standard_json['mimeType'] = 'image/tiff' # correct the mimeType to reflect tiff
# python -c 'from search_files import *; test()'
def test():
from pipeline_config import setup_pipeline_config
event = {"local": True}
config = setup_pipeline_config(event)
# change to the prod bucket
# config['rbsc-image-bucket'] = "libnd-smb-rbsc"
# config['multimedia-bucket'] = "marble-multimedia-230391840102"
config['marble-content-bucket'] = "libnd-smb-marble"
# data = list_updated_files(config, config['marble-content-bucket'], 1000000)
data = crawl_available_files(config, config['marble-content-bucket'])
for id, value in data.items():
print("results =", id)
# print(value)
return
| 36.70101
| 177
| 0.568283
|
29ee7530c3e344b09045241e0581a9b522494b8c
| 40
|
py
|
Python
|
example.py
|
velerofinance/velero-bot-sdk
|
6952141c1a445825de2fa421a15220d955bc6c87
|
[
"MIT"
] | null | null | null |
example.py
|
velerofinance/velero-bot-sdk
|
6952141c1a445825de2fa421a15220d955bc6c87
|
[
"MIT"
] | null | null | null |
example.py
|
velerofinance/velero-bot-sdk
|
6952141c1a445825de2fa421a15220d955bc6c87
|
[
"MIT"
] | null | null | null |
if __name__ == '__main__':
print(1)
| 13.333333
| 26
| 0.6
|
028cec6a4bca1bfad7d56c2dd5f9b9e8933a0849
| 2,751
|
py
|
Python
|
wagtail/asv_tests/unit/test_view_restrictions.py
|
Arvinwijsman/wagtail
|
83bc6da05d954d6c99eebc53a6667ebf7e8670bf
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/asv_tests/unit/test_view_restrictions.py
|
Arvinwijsman/wagtail
|
83bc6da05d954d6c99eebc53a6667ebf7e8670bf
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/asv_tests/unit/test_view_restrictions.py
|
Arvinwijsman/wagtail
|
83bc6da05d954d6c99eebc53a6667ebf7e8670bf
|
[
"BSD-3-Clause"
] | null | null | null |
from uuid import uuid4
from datetime import datetime, timezone
from unittest import mock
from unittest.mock import patch, MagicMock
from django import forms
from django.contrib.auth.models import Group
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.core.exceptions import ValidationError
from wagtail.core.models import BaseViewRestriction, Page, PageViewRestriction
from wagtail.asv_tests.unit.base_test import BaseTest
from wagtail.admin.forms.view_restrictions import BaseViewRestrictionForm
from wagtail.admin.forms.pages import WagtailAdminPageForm
class TestViewRestrictions(BaseTest):
def setUp(self):
super().setUp()
# Arrange
with mock.patch.object(BaseViewRestrictionForm, '__init__', return_value=None):
self.form = BaseViewRestrictionForm() # pylint: disable=no-value-for-parameter
self.form.fields = MagicMock()
self.form.fields['groups'].widget = MagicMock()
self.form.fields['groups'].queryset = MagicMock()
self.valid_data_password = {
'password': 'unh4ackable',
'restriction_type': BaseViewRestriction.PASSWORD,
}
self.valid_data_groups = {
'restriction_type': BaseViewRestriction.GROUPS,
'groups': 'group1'
}
def test_password_field_valid(self):
'''Tests the happy flow of password form validation '''
# Arrange
self.form.cleaned_data = self.valid_data_password
expected = 'unh4ackable'
# Act
result = self.form.clean_password()
# Assert
assert result == expected
def test_password_field_invalid(self):
'''Tests exception thrown for invalid input. '''
# Arrange
self.form.cleaned_data = self.valid_data_password
self.form.cleaned_data.pop('password')
# Act
with self.assertRaises(ValidationError) as context:
self.form.clean_password()
# Assert
        assert "This field is required." in str(context.exception)
def test_group_field_valid(self):
'''Tests the happy flow of group form validation '''
# Arrange
self.form.cleaned_data = self.valid_data_groups
expected = 'group1'
# Act
result = self.form.clean_groups()
# Assert
assert result == expected
def test_group_field_invalid(self):
'''Tests exception thrown for invalid input. '''
# Arrange
self.form.cleaned_data = self.valid_data_groups
self.form.cleaned_data.pop('groups')
# Act and assert
with self.assertRaises(ValidationError) as context:
self.form.clean_groups()
| 32.364706
| 91
| 0.672119
|
ae26b0a396a178d5cb89afa9a43d312b7d660a80
| 1,107
|
py
|
Python
|
Malaria Detection using ConvNets/app.py
|
drdataSpp/Spp-End2End-Deep-Learning-Projects
|
84ab4ab58761effa4fe13e51297f584ec03a885d
|
[
"Apache-2.0"
] | null | null | null |
Malaria Detection using ConvNets/app.py
|
drdataSpp/Spp-End2End-Deep-Learning-Projects
|
84ab4ab58761effa4fe13e51297f584ec03a885d
|
[
"Apache-2.0"
] | null | null | null |
Malaria Detection using ConvNets/app.py
|
drdataSpp/Spp-End2End-Deep-Learning-Projects
|
84ab4ab58761effa4fe13e51297f584ec03a885d
|
[
"Apache-2.0"
] | null | null | null |
from flask import Flask, request, render_template
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
app = Flask(__name__)
MODEL_PATH ='Malaria-Model-2.h5'
model = load_model(MODEL_PATH)
@app.route('/', methods=['GET'])
def home():
return render_template('index.html')
@app.route('/', methods=['POST'])
def predict():
imagefile = request.files['imagefile']
image_path = "./images/" + imagefile.filename
imagefile.save(image_path)
image = load_img(image_path, target_size=(150, 150))
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
preds = model.predict(image)
if preds[0][0] == 1:
classification = "You are not infected by Malaria 😇"
else:
classification = "You are infected by Malaria. Please consult a doctor."
return render_template('index.html', prediction = classification)
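# Illustrative request against a locally running instance (port 3000 as
# configured below; the image file name is a placeholder):
#   curl -F "imagefile=@cell_sample.png" http://localhost:3000/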
if __name__ == '__main__':
app.run(debug=True, port=3000)
| 29.131579
| 82
| 0.68654
|
ecfcc0fbcc07e9bde4aeb2fe9ffaa6022d86b109
| 58,598
|
py
|
Python
|
stips/instruments/instrument.py
|
york-stsci/STScI-STIPS-1
|
16c439fcea22b2768d462b3e375257f0f050c26a
|
[
"BSD-3-Clause"
] | null | null | null |
stips/instruments/instrument.py
|
york-stsci/STScI-STIPS-1
|
16c439fcea22b2768d462b3e375257f0f050c26a
|
[
"BSD-3-Clause"
] | null | null | null |
stips/instruments/instrument.py
|
york-stsci/STScI-STIPS-1
|
16c439fcea22b2768d462b3e375257f0f050c26a
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import,division
__filetype__ = "base"
#External Modules
import glob, logging, os, shutil, sys, types, uuid
import numpy as np
import synphot as syn
import stsynphot as stsyn
from astropy import units as u
from astropy.io import fits as pyfits
from astropy.table import Table, Column, MaskedColumn
from functools import wraps
from pandeia.engine.custom_exceptions import DataConfigurationError
#Local Modules
from ..stellar_module import StarGenerator
from ..astro_image import AstroImage
from ..utilities import GetStipsData
from ..utilities import GetStipsDataDir
from ..utilities import internet
from ..utilities import OffsetPosition
from ..utilities import read_metadata
from ..utilities import read_table
from ..utilities import SelectParameter
from ..utilities import StipsDataTable
if sys.version_info[0] >= 3:
import builtins
from io import StringIO
else:
import __builtin__
from cStringIO import StringIO
class Instrument(object):
"""
    The Instrument class is a virtual base class which is implemented by a variety of
    actual JWST, HST, and Roman instruments. The Instrument class contains:
detectors : array of detectors, each an AstroImage, and each with its own RA/DEC
filter : string, what filter of the instrument is being observed
out_path : place to put temporary files and the like
"""
def __init__(self, **kwargs):
"""
Instrument. The __init__ function creates a (potentially) empty instrument.
"""
self.COMPFILES = sorted(glob.glob(os.path.join(os.environ["PYSYN_CDBS"],"mtab","*tmc.fits")))
self.GRAPHFILES = sorted(glob.glob(os.path.join(os.environ["PYSYN_CDBS"],"mtab","*tmg.fits")))
self.THERMFILES = sorted(glob.glob(os.path.join(os.environ["PYSYN_CDBS"],"mtab","*tmt.fits")))
if 'logger' in kwargs:
self.logger = kwargs['logger']
else:
self.logger = logging.getLogger('__stips__')
log_level = SelectParameter('log_level', kwargs)
self.logger.setLevel(getattr(logging, log_level))
if not len(self.logger.handlers):
stream_handler = logging.StreamHandler(sys.stderr)
format = '%(asctime)s %(levelname)s: %(message)s'
stream_handler.setFormatter(logging.Formatter(format))
self.logger.addHandler(stream_handler)
self.out_path = SelectParameter('out_path', kwargs)
self.prefix = kwargs.get('prefix', '')
self.cat_type = SelectParameter('cat_type', kwargs)
self.flatfile = GetStipsData(os.path.join("residual_files", self.FLATFILE))
self.darkfile = GetStipsData(os.path.join("residual_files", self.DARKFILE))
self.oversample = SelectParameter('oversample', kwargs)
self.psf_grid_size = SelectParameter('psf_grid_size', kwargs)
self.seed = SelectParameter('seed', kwargs)
self.imgbase = kwargs.get('imgbase', '')
self.ra = kwargs.get('ra', 0.)
self.dec = kwargs.get('dec', 0.)
self.pa = kwargs.get('pa', 0.)
self.distortion = SelectParameter('distortion', kwargs)
self.exptime = kwargs.get('exptime', 1.)
self.small_subarray = kwargs.get('small_subarray', False)
self.filter = None
self.detectors = None
self.psf_commands = kwargs.get('psf_commands', None)
self.instrument = kwargs.get('instrument', "")
self.background_value = SelectParameter('background', kwargs)
self.background_location = SelectParameter('jbt_location', kwargs)
self.custom_background = kwargs.get('custom_background', 0.)
self.CENTRAL_OFFSET = (0., 0., 0.)
self.convolve_size = SelectParameter('convolve_size', kwargs)
self.memmap = SelectParameter('memmap', kwargs)
self.set_celery = kwargs.get('set_celery', None)
self.get_celery = kwargs.get('get_celery', None)
#Adjust # of detectors based on keyword:
n_detectors = int(kwargs.get('detectors', len(self.DETECTOR_OFFSETS)))
self.DETECTOR_OFFSETS = self.DETECTOR_OFFSETS[:n_detectors]
self.OFFSET_NAMES = self.OFFSET_NAMES[:n_detectors]
if hasattr(self, "N_OFFSET"):
self.CENTRAL_OFFSET = self.N_OFFSET[n_detectors]
msg = "{} with {} detectors. Central offset {}"
self._log('info', msg.format(self.DETECTOR, n_detectors,
self.CENTRAL_OFFSET))
@classmethod
def initFromImage(cls, image, **kwargs):
"""
Takes an input AstroImage, and does an add-with-align for every detector present.
If units are present, does a unit conversion.
"""
        units = kwargs.get('unit', 'c')
        ins = cls(**kwargs)
        ins._log("info", "Initializing from image with units {}".format(units))
        img = image * ins.convertToCounts(units, scalex=image.scale[0], scaley=image.scale[1])
        ins._log("info", "Converted image units")
        for detector in ins.detectors:
            ins._log("info", "Adding image to detector {}".format(detector.name))
            detector.addWithAlignment(img)
        ins._log("info", "Finished initialization")
        return ins
@classmethod
def initFromCatalogue(cls,catalogue,**kwargs):
"""
Takes an input catalogue, and observes that catalogue with all detectors.
It currently assumes that the input catalogue has the following columns:
RA: RA of source
DEC: DEC of source
FLUX: flux of source
TYPE: type of source (point, sersic)
N: sersic index
Re: radius containing half of the light of the sersic profile
Phi: angle of the major axis of the sersic profile
Ratio: axial ratio of the Sersic profile
Obtaining the correct values for FLUX (if not done before initialization) is a job for
the subclasses.
"""
self._log("info","Initializing with catalogue {}".format(catalogue))
ins = cls(**kwargs)
self._log("info","Converting catalogue to internal format")
cat = ins.convertCatalogue(catalogue)
for detector in ins.detectors:
self._log("info","Adding image to detector {}".format(detector.name))
detector.addCatalogue(cat, dist=self.distortion)
self._log("info","Finished initialization")
return ins
def reset(self, ra, dec, pa, filter, obs_count, psf=True, detectors=True, celery=None):
"""
Reset instrument parameters.
"""
self._log("info","Resetting")
self.ra = ra
self.dec = dec
self.pa = pa
self.obs_count = obs_count
if filter != self.filter:
if filter not in self.FILTERS:
msg = "Filter {} is not a valid {} filter"
raise ValueError(msg.format(filter, self.instrument))
self.filter = filter
self.background = self.pixel_background
self.photfnu = self.PHOTFNU[self.filter]
self.photplam = self.PHOTPLAM[self.filter]
if hasattr(self, "_bp"):
del self._bp
if detectors:
self.resetDetectors(psf=psf)
def resetDetectors(self, psf=True):
if self.detectors is not None:
del self.detectors
#Create Detectors
self.detectors = []
for offset, name in zip(self.DETECTOR_OFFSETS, self.OFFSET_NAMES):
distortion = None
if self.distortion and hasattr(self, 'DISTORTION'):
distortion = self.DISTORTION[name]
(delta_ra, delta_dec, delta_pa) = offset
delta_ra = (delta_ra - self.CENTRAL_OFFSET[0])/3600.
delta_dec = (delta_dec - self.CENTRAL_OFFSET[1])/3600.
delta_pa = delta_pa - self.CENTRAL_OFFSET[2]
ra,dec = OffsetPosition(self.ra, self.dec, delta_ra, delta_dec)
pa = (self.pa + delta_pa)%360.
hdr = {"DETECTOR":name, "FILTER":self.filter}
msg = "Initialized {} Detector {} with filter {}"
hist = [msg.format(self.instrument, name, self.filter)]
msg = "Creating Detector {} with (RA,DEC,PA) = ({},{},{})"
self._log("info", msg.format(name, ra, dec, pa))
msg = "Creating Detector {} with offset ({},{})"
self._log("info", msg.format(name, delta_ra, delta_dec))
detector = AstroImage(parent=self, ra=ra, dec=dec, pa=pa, psf=psf,
header=hdr, history=hist, detname=name,
distortion=distortion)
self._log("info", "Detector {} created".format(name))
self.detectors.append(detector)
def toFits(self,outfile):
"""
Takes the detectors and turns them into a multi-extension FITS file.
"""
self._log("info","Converting to FITS file")
hdus = [pyfits.PrimaryHDU()]
for detector in self.detectors:
self._log("info","Converting detector {} to FITS extension".format(detector.name))
hdus.append(detector.imageHdu)
hdulist = pyfits.HDUList(hdus)
hdulist.writeto(outfile, overwrite=True)
self._log("info","Created FITS file {}".format(outfile))
def toMosaic(self,outfile):
"""
Creates a single FITS file from each detector, and then uses Montage to create a mosaic
of all of the said files.
"""
if len(self.detectors) > 1:
import montage_wrapper as montage
self._log("info","Converting to FITS mosaic")
tmp_dir = os.path.join(self.out_path,"tmp-"+str(uuid.uuid4()))
os.makedirs(tmp_dir)
tmp_out_dir = os.path.join(self.out_path,"tmp-out-"+str(uuid.uuid4()))
tmp_work_dir = os.path.join(self.out_path,"tmp-work-"+str(uuid.uuid4()))
self.toFits(os.path.join(tmp_dir,"image.fits"))
montage.mosaic(tmp_dir,tmp_out_dir,background_match=True,work_dir=tmp_work_dir)
self._log("info","Mosaic finished running")
shutil.copy(os.path.join(tmp_out_dir,"mosaic.fits"),outfile)
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
if os.path.exists(tmp_out_dir):
shutil.rmtree(tmp_out_dir)
if os.path.exists(tmp_work_dir):
shutil.rmtree(tmp_work_dir)
self._log("info","Created FITS file {} and cleaned up".format(outfile))
return [outfile]
else:
self._log("info","Not creating single-detector mosaic")
return []
def addImage(self, image, unit='c'):
"""
Takes an input AstroImage, and does an add-with-align for every detector present.
If units are present, does a unit conversion.
"""
self._log("info","Adding image with units {}".format(unit))
img = image * self.convertToCounts(unit,scalex=image.scale[0],scaley=image.scale[1])
self._log("info","Converted image count rate")
for detector in self.detectors:
self._log("info","Adding image to detector {}".format(detector.name))
detector.addWithAlignment(img)
self._log("info","Finished adding image")
def addCatalogue(self, catalogue, obs_num, *args, **kwargs):
"""
Takes an input catalogue, and observes that catalogue with all detectors.
It currently assumes that the input catalogue has the following columns:
RA: RA of source
DEC: DEC of source
FLUX: flux of source
TYPE: type of source (point, sersic)
N: sersic index
Re: radius containing half of the light of the sersic profile
Phi: angle of the major axis of the sersic profile
Ratio: axial ratio of the Sersic profile
Obtaining the correct values for FLUX (if not done before initialization) is a job for
the subclasses.
"""
base_state = self.getState()
self._log("info","Adding catalogue {}".format(catalogue))
self.updateState(base_state + "<br /><span class='indented'>Converting Catalogue to Internal Format</span>")
cat = self.convertCatalogue(catalogue, obs_num)
self._log("info","Finished converting catalogue to internal format")
cats = [cat]
for detector in self.detectors:
self.updateState(base_state + "<br /><span class='indented'>Detector {}</span>".format(detector.name))
self._log("info","Adding catalogue to detector {}".format(detector.name))
cats.append(detector.addCatalogue(cat, dist=self.distortion, *args, **kwargs))
self.updateState(base_state)
        self._log("info","Finished Adding Catalogue")
        return cats
def addTable(self, table, table_type, *args, **kwargs):
"""
Takes an input table (still in memory), and observes that table with all detectors.
It assumes that the table type is a supported type.
Obtaining the correct values for FLUX (if not done before initialization) is a job for
the subclasses.
"""
self._log("info","Adding {} table".format(table_type))
conversion_fn = self.getTableFunction(table_type)
internal_table, cache = conversion_fn(table, self.bandpass)
self._log("info","Finished converting table to internal format")
tables = [internal_table]
for detector in self.detectors:
self._log("info","Adding table to detector {}".format(detector.name))
tables.append(detector.addTable(internal_table, dist=self.distortion))
self._log("info","Finished Adding Catalogue")
return tables
def convertToCounts(self,unit,scalex=None,scaley=None):
"""
Convert input to Counts.
unit: one of 'p' (photons/s), 'e' (erg/s), 'c' (counts/s), 'j' (Jansky), 's' (W/m/m^2/Sr)
        scalex, scaley: needed for surface brightness conversions. Arcseconds/pixel
returns: factor (multiplicative conversion factor)
"""
units = ('p','e','c','j','s')
if unit not in units: raise ValueError("Unit {} is not one of {}".format(unit,units))
if unit == 'c':
return 1.
elif unit == 'j':
return 1./self.photfnu
elif unit =='p':
freq = 299792458000000. / self.photplam #c in um/s
energy = 6.6260755e-27 * freq # h in erg*s to give energy in ergs
return 1.e23 * energy / (self.photfnu * self.AREA * freq)
elif unit == 'e':
freq = 299792458000000. / self.photplam #c in um/s
return 1.e23 / (self.photfnu * self.AREA * freq)
else: #unit == 's'
#W/m/m^2/Sr -> Jy = 1.e14 * photplam^2 * (scalex*scaley/206265.^2) / 3.e8
# Jy -> counts = 1./photfnu
# Combined as follows
return 1.e14 * self.photplam**2 * (scalex*scaley/42545250225.) / (3.e8 * self.photfnu)
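    # Illustrative note: convertToCounts() returns a multiplicative factor, so a source flux
    # in Jansky would be converted with something like (the flux value is hypothetical)
    #
    #     counts_per_s = 2.5e-5 * self.convertToCounts('j')    # i.e. 2.5e-5 / self.photfnu
    #
    # Surface-brightness input ('s') additionally requires the pixel scale in arcsec/pixel via
    # scalex/scaley, as addImage() does when converting an input AstroImage.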
def convertCatalogue(self, catalogue, obs_num):
"""
Converts a catalogue to the expected format for AstroImage, including doing unit conversions
of columns if necessary. Acceptable formats are:
- Phoenix (models from the Phoenix stellar grid)
- BC95 (galaxy models from BC95)
- Internal (has columns RA/DEC/FLUX/TYPE/N/Re/Phi/Ratio/ID/Notes)
- Mixed (has columns RA/DEC/FLUX/UNITS/TYPE/N/Re/Phi/Ratio/ID/Notes)
- Generic (has columns RA/DEC/FILTER where FILTER == self.filter (and possibly also
other filters)
catalogue: catalogue name of input catalogue
returns: cat: new catalogue in Internal format
"""
(in_cat_path, in_cat_name) = os.path.split(catalogue)
(in_cat_base, ext) = os.path.splitext(in_cat_name)
obs_cat_name = "{}_{:02d}_conv_{}.{}".format(in_cat_base, obs_num, self.filter, self.cat_type)
obsname = os.path.join(self.out_path, obs_cat_name)
in_data_table = StipsDataTable.dataTableFromFile(catalogue)
cols = in_data_table.columns
if "keywords" in in_data_table.meta:
meta = {k.lower():v['value'] for k,v in in_data_table.meta['keywords'].items()}
else:
meta = {k.lower():v for k,v in in_data_table.meta.items()}
#Check for built-in metadata
table_type = ""
# if 'keywords' in in_meta:
# if 'type' in in_meta['keywords']:
# table_type = in_meta['keywords']['type']['value']
if 'type' in meta:
table_type = meta['type']
if table_type in ['phoenix', 'phoenix_realtime', 'bc95']:
pass
elif table_type == 'internal':
filter = meta['filter'].lower()
# filter = t.meta['keywords']['filter']['value'].lower()
if filter != self.filter.lower():
raise ValueError("Adding catalogue with filter {} to {} {}".format(filter, self.DETECTOR, self.filter))
return catalogue
elif table_type == 'mixed':
filter = meta['filter'].lower()
# filter = t.meta['keywords']['filter']['value'].lower()
if filter != self.filter.lower():
raise ValueError("Adding catalogue with filter {} to {} {}".format(filter, self.DETECTOR, self.filter))
elif table_type == 'multifilter':
if self.filter.lower() not in [c.lower() for c in in_data_table.columns]:
raise ValueError("Adding catalogue with filters {} to {} {}".format(in_data_table.columns, self.DETECTOR, self.filter))
else: #check for necessary columns
#We want RA, DEC, and count rate in the appropriate filter
if 'ra' not in cols or 'dec' not in cols or self.filter.lower() not in [c.lower() for c in cols]:
raise ValueError("Can't parse catalogue without proper columns")
return self.handleConversion(catalogue, table_type, obsname)
def getTableFunction(self, table_type):
if table_type == 'phoenix':
return self.readPhoenixTable
elif table_type == 'phoenix_realtime':
return self.readPhoenixRealtimeTable
elif table_type == 'pandeia':
return self.readPandeiaTable
elif table_type == 'bc95':
return self.readBC95Table
elif table_type == 'mixed':
return self.readMixedTable
elif table_type == 'multifilter':
return self.readMultiTable
return self.readGenericTable
def getTableFormat(self, table_type):
if table_type == 'phoenix':
return {'ra': ' %10g', 'dec': ' %10g', 'flux': ' %12g', 'type': '%6s', 'n': '%4s', 're': '%4s', 'phi': '%4s', 'ratio': '%6s', 'id': '%8d', 'notes': '%-25s'}
elif table_type == 'phoenix_realtime':
return {'ra': ' %10g', 'dec': ' %10g', 'flux': ' %12g', 'type': '%6s', 'n': '%4s', 're': '%4s', 'phi': '%4s', 'ratio': '%6s', 'id': '%8d', 'notes': '%-25s'}
elif table_type == 'pandeia':
return {'ra': ' %10g', 'dec': ' %10g', 'flux': ' %12g', 'type': '%6s', 'n': '%4s', 're': '%4s', 'phi': '%4s', 'ratio': '%6s', 'id': '%8d', 'notes': '%-25s'}
elif table_type == 'bc95':
return {'ra': ' %10g', 'dec': ' %10g', 'flux': ' %12g', 'type': '%6s', 'n': '%6.3g', 're': '%10g', 'phi': ' %10g', 'ratio': '%10g', 'id': '%8d', 'notes': '%-25s'}
elif table_type == 'mixed':
return {}
elif table_type == 'multifilter':
return {}
return {}
def handleConversion(self, catalogue, table_type, obsname):
"""
        Converts an input catalogue into an internal-format catalogue, calling the appropriate
        reader function (e.g. readPhoenixTable) for the table type.
"""
self._log("info","Converting {} catalogue".format(table_type))
self._log("info", "Preparing output table")
if os.path.isfile(obsname):
os.remove(obsname)
cat_function = self.getTableFunction(table_type)
in_data_table = StipsDataTable.dataTableFromFile(catalogue)
out_data_table = StipsDataTable.dataTableFromFile(obsname)
out_data_table.meta = {'name': 'Internal Format Catalogue', 'type': 'internal',
'filter': self.filter}
bp = self.bandpass
cached = -1
current_chunk = in_data_table.read_chunk()
while current_chunk is not None:
self._log("info", "Converting chunk {}".format(in_data_table.chunk))
out_chunk, cached = cat_function(current_chunk, bp, cached)
out_data_table.write_chunk(out_chunk)
current_chunk = in_data_table.read_chunk()
return obsname
def readPhoenixTable(self, table, bp, cached=-1):
"""
        Takes a table (or fraction of a table) with the data in the Phoenix catalogue format, and
        returns an output table (not yet in ascii form) in the internal format, with those sources
        in it.
"""
if table is None:
return None
self._log("info","Converting Phoenix Table to Internal format")
if isinstance(table['id'], MaskedColumn):
ids = table['id'].filled()
else:
ids = table['id']
if isinstance(table['dataset'], MaskedColumn):
datasets = table['dataset'].filled()
else:
datasets = table['dataset']
if isinstance(table['ra'], MaskedColumn):
ras = table['ra'].filled()
else:
ras = table['ra']
if isinstance(table['dec'], MaskedColumn):
decs = table['dec'].filled()
else:
decs = table['dec']
if isinstance(table['age'], MaskedColumn):
ages = table['age'].filled()
else:
ages = table['age']
if isinstance(table['metallicity'], MaskedColumn):
metallicities = table['metallicity'].filled()
else:
metallicities = table['metallicity']
if isinstance(table['mass'], MaskedColumn):
masses = table['mass'].filled()
else:
masses = table['mass']
if isinstance(table['distance'], MaskedColumn):
distances = table['distance'].filled()
else:
distances = table['distance']
if isinstance(table['binary'], MaskedColumn):
binaries = table['binary'].filled()
else:
binaries = table['binary']
rates = np.zeros_like(ras)
all_datasets = np.unique(datasets)
self._log("info","{} datasets".format(len(all_datasets)))
for dataset in all_datasets:
idx = np.where(datasets == dataset)
if len(idx[0]) > 0:
stargen = StarGenerator(ages[idx][0], metallicities[idx][0], seed=self.seed, logger=self.logger)
rates[idx] = stargen.make_cluster_rates(masses[idx], self, bp)
del stargen
rates = rates * 100 / (distances**2) #convert absolute to apparent rates
if cached > 0:
rates[0] += cached
cached = -1
#Now, deal with binaries. Remember that if Binary == 1, then the star below is the binary companion.
idx = np.where(binaries==1.0)[0] #Stars with binary companions
idxp = (idx+1) #binary companions
if len(idxp) > 0 and idxp[-1] >= len(rates): #last one is a binary. Cache it.
cached = rates[-1]
idx, idxp = idx[:-1], idxp[:-1]
ids, datasets, ras, decs, ages, metallicities, = ids[:-1], datasets[:-1], ras[:-1], decs[:-1], ages[:-1], metallicities[:-1]
masses, distances, binaries, rates = masses[:-1], distances[:-1], binaries[:-1], rates[:-1]
rates[idx] += rates[idxp] #add count rates together
# Now that we've added rates together, remove the binary companions
ras = np.delete(ras,idxp)
decs = np.delete(decs,idxp)
rates = np.delete(rates,idxp)
ids = np.delete(ids,idxp)
binaries = np.delete(binaries,idxp)
notes = np.empty_like(ras,dtype="S6")
notes[np.where(binaries==1)] = 'Binary'
notes[np.where(binaries!=1)] = 'None'
t = Table()
t['ra'] = Column(data=ras)
t['dec'] = Column(data=decs)
t['flux'] = Column(data=rates)
t['type'] = Column(data=np.full_like(ras,"point",dtype="S6"))
t['n'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['re'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['phi'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['ratio'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['id'] = Column(data=ids)
t['notes'] = Column(data=notes)
return t, cached
def readPhoenixRealtimeTable(self, table, bp, cached=-1):
"""
Converts a set of Phoenix sources specified as (id, ra, dec, T, Z, log(g), apparent) into individual stars, observes them,
and then produces an output catalogue
"""
if table is None:
return None
if isinstance(table['id'], MaskedColumn):
ids = table['id'].filled()
else:
ids = table['id']
if isinstance(table['ra'], MaskedColumn):
ras = table['ra'].filled()
else:
ras = table['ra']
if isinstance(table['dec'], MaskedColumn):
decs = table['dec'].filled()
else:
decs = table['dec']
if isinstance(table['teff'], MaskedColumn):
temps = table['teff'].filled()
else:
temps = table['teff']
if isinstance(table['log_g'], MaskedColumn):
gravs = table['log_g'].filled()
else:
gravs = table['log_g']
if isinstance(table['metallicity'], MaskedColumn):
metallicities = table['metallicity'].filled()
else:
metallicities = table['metallicity']
if isinstance(table['apparent'], MaskedColumn):
apparents = table['apparent'].filled()
else:
apparents = table['apparent']
norm_bp = table.meta['BANDPASS']
self._log("info", "Normalization Bandpass is {} ({})".format(norm_bp, type(norm_bp)))
if norm_bp == '' or norm_bp is None or norm_bp == 'None':
norm_bp = 'johnson,i'
self._log("info", "Normalization Bandpass is {}".format(norm_bp))
rates = np.zeros_like(ras)
for index in range(len(ids)):
# self._log("info", "Converting index={} of {}".format(index, len(ids)))
t, g, Z, a = temps[index], gravs[index], metallicities[index], apparents[index]
sp = stsyn.grid_to_spec('phoenix', t, Z, g)
sp = self.normalize(sp, a, norm_bp)
obs = syn.Observation(sp, bp, binset=sp.waveset)
rates[index] = obs.countrate(area=self.AREA).value
t = Table()
t['ra'] = Column(data=ras)
t['dec'] = Column(data=decs)
t['flux'] = Column(data=rates)
t['type'] = Column(data=np.full_like(ras,"point",dtype="S6"))
t['n'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['re'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['phi'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['ratio'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['id'] = Column(data=ids)
t['notes'] = Column(data=np.full_like(ras,"None",dtype="S6"))
return t, cached
def readPandeiaTable(self, table, bp, cached=-1):
"""
Converts a set of Pandeia phoenix sources specified as (id, ra, dec, key, apparent) into individual stars, observes them,
and then produces an output catalogue
"""
if table is None:
return None
from pandeia.engine.sed import SEDFactory
if isinstance(table['id'], MaskedColumn):
ids = table['id'].filled()
else:
ids = table['id']
if isinstance(table['ra'], MaskedColumn):
ras = table['ra'].filled()
else:
ras = table['ra']
if isinstance(table['dec'], MaskedColumn):
decs = table['dec'].filled()
else:
decs = table['dec']
if isinstance(table['key'], MaskedColumn):
keys = table['key'].filled()
else:
keys = table['key']
if isinstance(table['apparent'], MaskedColumn):
apparents = table['apparent'].filled()
else:
apparents = table['apparent']
norm_bp = table.meta['BANDPASS']
self._log("info", "Normalization Bandpass is {} ({})".format(norm_bp, type(norm_bp)))
if norm_bp == '' or norm_bp is None or norm_bp == 'None':
norm_bp = 'johnson,i'
self._log("info", "Normalization Bandpass is {}".format(norm_bp))
rates = np.array((), dtype='float32')
for a, key in zip(apparents, keys):
config = {'sed_type': 'phoenix', 'key': key}
spectrum = SEDFactory(config=config)
wave, flux = spectrum.get_spectrum()
sp = self.normalize((wave, flux), a, norm_bp)
            obs = syn.Observation(sp, bp, binset=sp.waveset)
rates = np.append(rates, obs.countrate(area=self.AREA).value)
t = Table()
t['ra'] = Column(data=ras)
t['dec'] = Column(data=decs)
t['flux'] = Column(data=rates)
t['type'] = Column(data=np.full_like(ras,"point",dtype="S6"))
t['n'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['re'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['phi'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['ratio'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['id'] = Column(data=ids)
t['notes'] = Column(data=np.full_like(ras,"None",dtype="S6"))
return t, cached
def readBC95Table(self, table, bp, cached=-1):
"""
Converts a BC95 galaxy grid of sources into the internal source table standard
"""
from pandeia.engine.custom_exceptions import SynphotError as pSynError
if table is None:
return None
# This function is needed because I can't get python not to read '50E8' as a number, or to output it as a correctly formatted string
def stringify(num):
num = float(num)
exponent = int(np.floor(np.log10(num)))
value = int(10*(num/(10**np.floor(np.log10(num)))))
return "{}E{}".format(value, exponent-1)
self._log("info", "Converting BC95 Catalogue")
proflist = {"expdisk":1,"devauc":4}
distance_type = "redshift"
if isinstance(table['id'], MaskedColumn):
ids = table['id'].filled()
else:
ids = table['id']
if isinstance(table['ra'], MaskedColumn):
ras = table['ra'].filled()
else:
ras = table['ra']
if isinstance(table['dec'], MaskedColumn):
decs = table['dec'].filled()
else:
decs = table['dec']
try:
if isinstance(table['redshift'], MaskedColumn):
zs = table['redshift'].filled()
else:
zs = table['redshift']
except KeyError: #distances instead
if isinstance(table['distance'], MaskedColumn):
zs = table['distance'].filled()
else:
zs = table['distance']
distance_type = "pc"
if isinstance(table['model'], MaskedColumn):
models = table['model'].filled()
else:
models = table['model']
if isinstance(table['age'], MaskedColumn):
ages = table['age'].filled()
else:
ages = table['age']
if isinstance(table['profile'], MaskedColumn):
profiles = table['profile'].filled()
else:
profiles = table['profile']
radii = table['radius']/self.SCALE[0] #at some point, may want to figure out multiple scales.
if isinstance(table['axial_ratio'], MaskedColumn):
ratios = table['axial_ratio'].filled()
else:
ratios = table['axial_ratio']
pas = (table['pa'] + (self.pa*180./np.pi) )%360.
if isinstance(table['apparent_surface_brightness'], MaskedColumn):
vmags = table['apparent_surface_brightness'].filled()
else:
vmags = table['apparent_surface_brightness']
norm_bp = table.meta['BANDPASS']
self._log("info", "Normalization Bandpass is {} ({})".format(norm_bp, type(norm_bp)))
if norm_bp == '' or norm_bp is None or norm_bp == 'None':
norm_bp = 'johnson,v'
self._log("info", "Normalization Bandpass is {}".format(norm_bp))
rates = np.array(())
indices = np.array(())
notes = np.array((),dtype='object')
total = len(models)
for i, (z,model,age,profile,radius,ratio,pa,mag) in enumerate(zip(zs,models,ages,profiles,radii,ratios,pas,vmags)):
# self._log("info", "{} of {}: {} {} {} {} {} {} {} {}".format(i, total, z, model, age, profile, radius, ratio, pa, mag))
fname = "bc95_{}_{}.fits".format(model, stringify(age))
try:
sp = syn.SourceSpectrum.from_file(os.path.join(os.environ['PYSYN_CDBS'],"grid","bc95","templates",fname))
if distance_type == "redshift":
sp = syn.SourceSpectrum(sp.model, z=z)
sp = self.normalize(sp, mag, norm_bp)
obs = syn.Observation(sp, self.bandpass, force='taper',
binset=sp.waveset)
rate = obs.countrate(area=self.AREA).value
except pSynError as e:
msg = 'Source {} of {}: Pysynphot Error {} encountered'
self._log('warning', msg.format(i, total, e))
rate = 0.
except syn.exceptions.SynphotError as e:
msg = 'Source {} of {}: Synphot Error {} encountered'
self._log('warning', msg.format(i, total, e))
rate = 0.
rates = np.append(rates, rate)
indices = np.append(indices, proflist[profile])
note = "BC95_{}_{}_{}".format(model, stringify(age), mag)
notes = np.append(notes, note)
t = Table()
        t['ra'] = Column(data=ras, dtype=float)
t['dec'] = Column(data=decs)
t['flux'] = Column(data=rates)
t['type'] = Column(data=np.full_like(ras, 'sersic', dtype='S7'))
t['n'] = Column(data=indices)
t['re'] = Column(data=radii)
t['phi'] = Column(data=pas)
t['ratio'] = Column(data=ratios)
t['id'] = Column(data=ids)
t['notes'] = Column(data=notes, dtype='S25')
return t, cached
def readMixedTable(self, table, bp, cached=-1):
"""
Converts a mixed internal list of sources into the internal source table standard
"""
if table is None:
return None
if isinstance(table['ra'], MaskedColumn):
ras = table['ra'].filled()
else:
ras = table['ra']
if isinstance(table['dec'], MaskedColumn):
decs = table['dec'].filled()
else:
decs = table['dec']
if isinstance(table['type'], MaskedColumn):
types = table['type'].filled()
else:
types = table['type']
if isinstance(table['n'], MaskedColumn):
indices = table['n'].filled()
else:
indices = table['n']
if isinstance(table['re'], MaskedColumn):
radii = table['re'].filled()
else:
radii = table['re']
if isinstance(table['phi'], MaskedColumn):
pas = table['phi'].filled()
else:
pas = table['phi']
if isinstance(table['ratio'], MaskedColumn):
ratios = table['ratio'].filled()
else:
ratios = table['ratio']
if isinstance(table['id'], MaskedColumn):
ids = table['id'].filled()
else:
ids = table['id']
if isinstance(table['notes'], MaskedColumn):
notes = table['notes'].filled()
else:
notes = table['notes']
if isinstance(table['flux'], MaskedColumn):
rates = table['flux'].filled()
else:
rates = table['flux']
if isinstance(table['units'], MaskedColumn):
units = table['units'].filled()
else:
units = table['units']
idxp = np.where(units == 'p')
rates[idxp] *= self.convertToCounts('p')
idxe = np.where(units == 'e')
rates[idxe] *= self.convertToCounts('e')
idxj = np.where(units == 'j')
rates[idxj] *= self.convertToCounts('j')
t = Table()
t['ra'] = Column(data=ras)
t['dec'] = Column(data=decs)
t['flux'] = Column(data=rates)
t['type'] = Column(data=types)
t['n'] = Column(data=indices)
t['re'] = Column(data=radii)
t['phi'] = Column(data=pas)
t['ratio'] = Column(data=ratios)
t['id'] = Column(data=ids)
t['notes'] = Column(data=notes)
return t, cached
def readMultiTable(self, table, bp, cached=-1):
"""
Converts an internal multifilter list of sources into the internal source table standard
"""
if table is None:
return None
if isinstance(table['ra'], MaskedColumn):
ras = table['ra'].filled()
else:
ras = table['ra']
if isinstance(table['dec'], MaskedColumn):
decs = table['dec'].filled()
else:
decs = table['dec']
if isinstance(table['type'], MaskedColumn):
types = table['type'].filled()
else:
types = table['type']
if isinstance(table['n'], MaskedColumn):
indices = table['n'].filled()
else:
indices = table['n']
radii = table['re']/self.SCALE[0] #at some point, may want to figure out multiple scales.
if isinstance(table['phi'], MaskedColumn):
pas = table['phi'].filled()
else:
pas = table['phi']
if isinstance(table['ratio'], MaskedColumn):
ratios = table['ratio'].filled()
else:
ratios = table['ratio']
if isinstance(table['id'], MaskedColumn):
ids = table['id'].filled()
else:
ids = table['id']
if isinstance(table['notes'], MaskedColumn):
notes = table['notes'].filled()
else:
notes = table['notes']
rates = table[self.filter]
t = Table()
t['ra'] = Column(data=ras)
t['dec'] = Column(data=decs)
t['flux'] = Column(data=rates)
t['type'] = Column(data=types)
t['n'] = Column(data=indices)
t['re'] = Column(data=radii)
t['phi'] = Column(data=pas)
t['ratio'] = Column(data=ratios)
t['id'] = Column(data=ids)
t['notes'] = Column(data=notes)
return t, cached
def readGenericTable(self, table, bp, cached=-1):
"""
Converts a generic list of point sources into the internal source table standard
"""
if table is None:
return None
if isinstance(table['ra'], MaskedColumn):
ras = table['ra'].filled()
else:
ras = table['ra']
if isinstance(table['dec'], MaskedColumn):
decs = table['dec'].filled()
else:
decs = table['dec']
rates = table[self.filter.lower()]
if 'id' in table:
if isinstance(table['id'], MaskedColumn):
ids = table['id'].filled()
else:
ids = table['id']
else:
ids = np.arange(len(ras),dtype=int)
t = Table()
t['ra'] = Column(data=ras)
t['dec'] = Column(data=decs)
t['flux'] = Column(data=rates)
t['type'] = Column(data=np.full_like(ras,"point",dtype="S6"))
t['n'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['re'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['phi'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['ratio'] = Column(data=np.full_like(ras,"N/A",dtype="S3"))
t['id'] = Column(data=ids)
t['notes'] = Column(data=np.full_like(ras, "N/A", dtype="S3"))
return t, cached
def generateReadnoise(self):
"""Base function for adding read noise"""
pass
@classmethod
def handleDithers(cls,form):
"""Base function for handling dither patterns"""
pass
@classmethod
def doSubpixel(cls,dithers,subpixels):
"""For each (x,y) in dithers, dither around that point for each point in subpixels. Return the full set"""
my_dithers = []
for (x,y) in dithers:
for (i,j) in subpixels:
my_dithers.append((x+i,y+j))
return my_dithers
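    # Illustrative note: with dithers = [(0, 0), (10, 0)] and subpixels = [(0, 0), (0.5, 0.5)]
    # (hypothetical offsets), doSubpixel() returns
    # [(0, 0), (0.5, 0.5), (10, 0), (10.5, 0.5)] -- each dither expanded by each subpixel offset.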
def addError(self, *args, **kwargs):
"""Base function for adding in residual error"""
self._log("info","Adding residual error")
cores = SelectParameter('cores', kwargs)
convolve = SelectParameter('convolve', kwargs)
poisson = SelectParameter('residual_poisson', kwargs)
readnoise = SelectParameter('residual_readnoise', kwargs)
flat = SelectParameter('residual_flat', kwargs)
dark = SelectParameter('residual_dark', kwargs)
cosmic = SelectParameter('residual_cosmic', kwargs)
parallel = SelectParameter('parallel_enable', kwargs)
snapshots = kwargs.get("snapshots", {})
base_state = self.getState()
if flat:
flat = AstroImage.initDataFromFits(self.flatfile,ext='COMPRESSED_IMAGE', psf=False, logger=self.logger)
if dark:
dark = AstroImage.initDataFromFits(self.darkfile,ext='COMPRESSED_IMAGE', psf=False, logger=self.logger)
dark *= self.exptime
if readnoise:
rn = self.generateReadnoise()
for detector in self.detectors:
if 'initial' in snapshots:
detector.toFits(self.imgbase+"_{}_{}_snapshot_initial.fits".format(self.obs_count, detector.name))
self.updateState(base_state + "<br /><span class='indented'>Detector {}: Adding Background</span>".format(detector.name))
self._log("info","Adding error to detector {}".format(detector.name))
self._log("info","Adding background")
self._log("info","Background is {} counts/s/pixel".format(self.pixel_background))
detector.addBackground(self.pixel_background)
if 'background' in snapshots or 'all' in snapshots:
detector.toFits(self.imgbase+"_{}_{}_snapshot_background.fits".format(self.obs_count, detector.name))
self._log("info","Inserting correct exposure time")
self.updateState(base_state + "<br /><span class='indented'>Detector {}: Applying Exposure Time</span>".format(detector.name))
detector.setExptime(self.exptime)
if 'exptime' in snapshots or 'all' in snapshots:
detector.toFits(self.imgbase+"_{}_{}_snapshot_exptime.fits".format(self.obs_count, detector.name))
self._log("info","Convolving with PSF")
convolve_state = base_state + "<br /><span class='indented'>Detector {}: Convolving PSF</span>".format(detector.name)
self.updateState(convolve_state)
detector.convolve_psf(max_size=self.convolve_size-1, parallel=parallel, cores=cores)
if 'convolve' in snapshots or 'all' in snapshots:
detector.toFits(self.imgbase+"_{}_{}_snapshot_convolve.fits".format(self.obs_count, detector.name))
if self.oversample != 1:
self._log("info","Binning oversampled image")
self.updateState(base_state + "<br /><span class='indented'>Detector {}: Binning oversampled image</span>".format(detector.name))
detector.bin(self.oversample)
if 'bin' in snapshots or 'all' in snapshots:
detector.toFits(self.imgbase+"_{}_{}_snapshot_bin.fits".format(self.obs_count, detector.name))
if poisson:
self._log("info","Adding poisson noise")
self.updateState(base_state + "<br /><span class='indented'>Detector {}: Adding Poisson Noise</span>".format(detector.name))
detector.introducePoissonNoise()
if 'poisson' in snapshots or 'all' in snapshots:
detector.toFits(self.imgbase+"_{}_{}_snapshot_poisson.fits".format(self.obs_count, detector.name))
if readnoise:
self._log("info","Adding readnoise")
self.updateState(base_state + "<br /><span class='indented'>Detector {}: Adding Readnoise</span>".format(detector.name))
detector.introduceReadnoise(rn)
if 'readnoise' in snapshots or 'all' in snapshots:
detector.toFits(self.imgbase+"_{}_{}_snapshot_readnoise.fits".format(self.obs_count, detector.name))
if flat:
self._log("info","Adding flatfield residual")
self.updateState(base_state + "<br /><span class='indented'>Detector {}: Adding Flatfield Residual</span>".format(detector.name))
detector.introduceFlatfieldResidual(flat)
if 'flat' in snapshots or 'all' in snapshots:
detector.toFits(self.imgbase+"_{}_{}_snapshot_flat.fits".format(self.obs_count, detector.name))
if dark:
self._log("info","Adding dark residual")
self.updateState(base_state + "<br /><span class='indented'>Detector {}: Adding Dark Residual</span>".format(detector.name))
detector.introduceDarkResidual(dark)
if 'dark' in snapshots or 'all' in snapshots:
detector.toFits(self.imgbase+"_{}_{}_snapshot_dark.fits".format(self.obs_count, detector.name))
if cosmic:
self._log("info","Adding cosmic ray residual")
self.updateState(base_state + "<br /><span class='indented'>Detector {}: Adding Cosmic Ray Residual</span>".format(detector.name))
detector.introduceCosmicRayResidual(self.PIXEL_SIZE)
if 'cr' in snapshots or 'all' in snapshots:
detector.toFits(self.imgbase+"_{}_{}_snapshot_cr.fits".format(self.obs_count, detector.name))
self.updateState(base_state)
self._log("info","Finished adding error")
def normalize(self, source_spectrum_or_wave_flux, norm_flux, bandpass):
if "," in bandpass:
bandpass = bandpass.replace(",", "_")
norm_type = self.get_type(bandpass)
from pandeia.engine.normalization import NormalizationFactory
norm = NormalizationFactory(type=norm_type, bandpass=bandpass,
norm_fluxunit='abmag', norm_flux=norm_flux)
if isinstance(source_spectrum_or_wave_flux, tuple):
wave, flux = source_spectrum_or_wave_flux
else:
wave = source_spectrum_or_wave_flux.waveset
flux = source_spectrum_or_wave_flux(wave)
try:
norm_wave, norm_flux = norm.normalize(wave, flux)
except DataConfigurationError as e:
try:
norm = syn.SpectralElement.from_filter(bandpass)
except FileNotFoundError as e:
band_path = os.path.join(os.environ["PYSYN_CDBS"], "comp", "nonhst")
band_name = "{}*syn.fits".format(bandpass.replace(",", "_"))
band_files = glob.glob(os.path.join(band_path, band_name))
if len(band_files) > 0:
band_file = sorted(band_files)[-1]
                    norm = syn.SpectralElement.from_file(band_file)
else:
msg = "Unable to find local {} spectrum at {}\n"
msg = msg.format(bandpass, os.environ["PYSYN_CDBS"])
msg += "Original exception was {}".format(e)
raise FileNotFoundError(msg)
sp = syn.SourceSpectrum(syn.Empirical1D, points=wave, lookup_table=flux)
norm_sp = sp.normalize(norm_flux*u.ABmag, band=norm)
return norm_sp
sp = syn.SourceSpectrum(syn.Empirical1D, points=norm_wave, lookup_table=norm_flux)
return sp
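    # Illustrative note: normalize() accepts either a synphot SourceSpectrum or a (wave, flux)
    # tuple, an AB magnitude, and a bandpass string, e.g. (magnitude value hypothetical)
    #
    #     sp = self.normalize(sp, 22.5, 'johnson,v')
    #
    # mirroring the calls made from readPhoenixRealtimeTable() and readBC95Table() above.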
def get_type(self, bandpass_str):
if 'miri' in bandpass_str or 'nircam' in bandpass_str:
return 'jwst'
#**WFIRST_REMNANT**
elif 'wfi' in bandpass_str or 'wfirst' in bandpass_str or 'roman' in bandpass_str:
return 'roman'
elif 'wfc3' in bandpass_str:
return 'hst'
return 'photsys'
@property
def bandpass(self):
if hasattr(self, "_bp"):
return self._bp
i = self.pandeia_instrument
det_params = i.get_detector_pars()
# 'rn_fudge': multiplied in to match IDT results.
# 'var_fudge': chromatic fudge factor. quantum yield squared.
# 'fullwell':
# 'ff_electrons':
# 'pix_size':
#
wr = i.get_wave_range()
wave = np.linspace(wr['wmin'], wr['wmax'], num=500)
pce = i.get_total_eff(wave)
if pce[0] != 0.:
wave = np.insert(wave, 0, wave[0]-(wave[1]-wave[0]))
pce = np.insert(pce, 0, 0.)
if pce[-1] != 0.:
wave = np.append(wave, wave[-1]+(wave[-1]-wave[-2]))
pce = np.append(pce, 0.)
self._bp = syn.SpectralElement(syn.Empirical1D, points=wave*u.micron,
lookup_table=pce)
return self._bp
@property
def pandeia_instrument(self):
if hasattr(self, "_instrument"):
return self._instrument
from pandeia.engine.calc_utils import build_default_calc
from pandeia.engine.instrument_factory import InstrumentFactory
translate_instrument = {
#**WFIRST_REMNANT**
# 'wfi': 'wfirstimager',
'nircamlong': 'nircam',
'nircamshort': 'nircam',
'miri': 'miri'
}
instrument = self.INSTRUMENT.lower()
if instrument in translate_instrument:
instrument = translate_instrument[instrument]
translate_telescope = {
# 'roman': 'wfirst'
}
telescope = self.TELESCOPE.lower()
if telescope in translate_telescope:
telescope = translate_telescope[telescope]
calc = build_default_calc(telescope, instrument, self.MODE)
conf = calc['configuration']
conf['instrument']['filter'] = self.filter.lower()
msg = "Creating Instrument with Configuration {}"
self.logger.info(msg.format(conf['instrument']))
self._instrument = InstrumentFactory(config=conf)
return self._instrument
@property
def zeropoint(self):
return self.zeropoint_unit.value
@property
def zeropoint_unit(self):
try:
sp = syn.SourceSpectrum.from_vega()
except FileNotFoundError as e:
vega_path = os.path.join(os.environ["PYSYN_CDBS"], "calspec")
vega_files = glob.glob(os.path.join(vega_path, "alpha_lyr*.fits"))
if len(vega_files) > 0:
vega_file = sorted(vega_files)[-1]
sp = syn.SourceSpectrum.from_file(vega_file)
else:
msg = "Unable to find local Vega spectrum at {}\n"
msg = msg.format(os.environ["PYSYN_CDBS"])
msg += "Original exception was {}".format(e)
raise FileNotFoundError(msg)
bp = self.bandpass
sp = sp.normalize(0.0*syn.units.VEGAMAG, band=bp, vegaspec=sp)
obs = syn.Observation(sp, bp, binset=sp.waveset)
zeropoint = obs.effstim(flux_unit=syn.units.OBMAG, area=self.AREA)
return zeropoint
@property
def photflam(self):
return self.photflam_unit.value
@property
def photflam_unit(self):
sp = syn.SourceSpectrum(syn.ConstFlux1D, amplitude=(0.*u.STmag))
bp = self.bandpass
obs = syn.Observation(sp, bp, binset=sp.waveset)
pf = (obs.effstim(flux_unit='flam') / obs.countrate(area=self.AREA))
return pf
@property
def pixel_background(self):
return self.pixel_background_unit.value
@property
def pixel_background_unit(self):
if isinstance(self.background_value, (int, float)):
msg = "Returning background {}."
self._log("info", msg.format(self.background_value))
return self.background_value*u.ct/u.s
elif self.background_value in ['none', 'low', 'avg', 'high']:
if self.background_value in self.BACKGROUND:
bkg = self.BACKGROUND[self.background_value][self.filter]*u.ct/u.s
else:
msg = "Background {} not found for {}. Using 0.0 for None"
self._log("warning", msg.format(self.background_value,
self.DETECTOR))
bkg = 0.*u.ct/u.s
msg = "Returning background {} for '{}'"
self._log("info", msg.format(bkg, self.background_value))
            return bkg
elif self.background_value == 'custom':
msg = "Returning background {} for 'custom'"
self._log("info", msg.format(self.custom_background))
return self.custom_background*u.ct/u.s
elif "jbt" in self.background_value:
if ":" in self.background_value:
bg_type = self.background_value.split(":")[-1]
else:
bg_type = "mean"
bg = None
if internet() and self.background_location == '$WEB':
from jwst_backgrounds import jbt
try:
bg = jbt.background(self.ra, self.dec,
self.PHOTPLAM[self.filter])
except Exception as e:
msg = "Accessing JBT background produced error {}"
self._log("error", msg.format(e))
self._log("warning", "Unable to connect to the JBT server")
if os.path.exists(self.background_location):
self._log("info", "Using local JBT background cache.")
from ..utilities import CachedJbtBackground
try:
bg = CachedJbtBackground(self.ra, self.dec,
self.PHOTPLAM[self.filter])
except Exception as e:
msg = "Retrieving local cache produced error {}"
self._log("error", msg.format(e))
self._log("info", "More complete error: {}".format(repr(e)))
msg = "Unable to retrieve local cache. Returning "
msg += "background 0.0 for '{}'"
self._log("warning", msg.format(self.background_value))
return 0.*u.ct/u.s
if bg is None:
msg = "Unable to retrieve JBT background data."
self._log("error", msg)
msg = "Falling back to zero background."
self._log("warning", msg)
return 0.*u.photon/u.second
wave_array = bg.bkg_data['wave_array']
combined_bg_array = bg.bkg_data['total_bg']
if bg_type in ['avg', 'mean']:
flux_array = np.mean(combined_bg_array, axis=0)
elif bg_type in ['med', 'median']:
flux_array = np.median(combined_bg_array, axis=0)
elif bg_type == 'max':
flux_array = np.max(combined_bg_array, axis=0)
elif bg_type == 'min':
flux_array = np.min(combined_bg_array, axis=0)
else:
flux_array = combined_bg_array[0]
            # Background flux from the JBT is in MJy/sr
            flux_data_array = 1e6 * flux_array * u.Jy / u.sr
            flux_data_array = flux_data_array.to(u.Jy / (u.arcsec * u.arcsec))
            # convert from arcsec^-2 to pixel^-2, leaving a per-pixel flux in Jy
            flux_data_array = flux_data_array.value * self.SCALE[0] * self.SCALE[1] * u.Jy
            sp = syn.SourceSpectrum(syn.Empirical1D, points=wave_array*u.micron,
                                    lookup_table=flux_data_array)
obs = syn.Observation(sp, self.bandpass, binset=sp.waveset,
force='taper')
bg = obs.countrate(area=self.AREA)
msg = "Returning background {} for '{}'"
self._log("info", msg.format(bg, self.background_value))
return bg
msg = "Unknown Background {}. Returning 0."
self._log("warning", msg.format(self.background_value))
return 0.*u.photon/u.second
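    # Illustrative summary of the background_value settings handled above (the attribute name
    # 'obs' is a placeholder for an instance of this class):
    #
    #     obs.background_value = 1.7            # numeric -> used directly as counts/s/pixel
    #     obs.background_value = 'avg'          # 'none'/'low'/'avg'/'high' -> BACKGROUND lookup
    #     obs.background_value = 'custom'       # -> uses obs.custom_background
    #     obs.background_value = 'jbt:median'   # -> JWST Backgrounds Tool (mean/median/max/min)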
def _log(self,mtype,message):
"""
Checks if a logger exists. Else prints.
"""
if hasattr(self,'logger'):
getattr(self.logger,mtype)(message)
else:
sys.stderr.write("{}: {}\n".format(mtype,message))
def updateState(self, state):
if self.set_celery is not None:
self.set_celery(state)
def getState(self):
if self.get_celery is not None:
return self.get_celery()
return ""
| 45.566096
| 174
| 0.568927
|
5dedc7dc5b87a794d992bee82fc8a0a55fd7b457
| 7,336
|
py
|
Python
|
utils/s3dis_utils/dataset_s3dis.py
|
Chenfeng1271/JSPNet-Learning-Joint-Semantic-Instance-Segmentation-of-Point-Clouds-via-Similarity-and-Probabili
|
1d50417431b5af1ad76b96749e841969d4dc1359
|
[
"MIT"
] | 78
|
2019-11-19T00:49:37.000Z
|
2022-02-15T04:21:49.000Z
|
utils/s3dis_utils/dataset_s3dis.py
|
Chenfeng1271/JSPNet-Learning-Joint-Semantic-Instance-Segmentation-of-Point-Clouds-via-Similarity-and-Probabili
|
1d50417431b5af1ad76b96749e841969d4dc1359
|
[
"MIT"
] | 15
|
2019-12-15T10:07:57.000Z
|
2022-02-18T09:21:28.000Z
|
utils/s3dis_utils/dataset_s3dis.py
|
Chenfeng1271/JSPNet
|
1d50417431b5af1ad76b96749e841969d4dc1359
|
[
"MIT"
] | 16
|
2019-12-03T15:50:03.000Z
|
2021-11-08T14:12:40.000Z
|
import os
import sys
import time
import gc
import numpy as np
import multiprocessing
from concurrent import futures
from functools import partial as functools_partial
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))
sys.path.append(BASE_DIR)
sys.path.append(ROOT_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import provider
import indoor3d_util
def data_sample(data_sample_queue, input_list, split, epoch, num_works, block_points=4096,
block_size=1.0, stride=0.5, random_sample=False, sample_num=None, sample_aug=1):
assert (input_list[0].endswith('npy') or input_list[0].endswith('h5')), "data format must be .npy or .h5"
input_list_length = len(input_list)
    num_work = min(min(num_works, multiprocessing.cpu_count()), input_list_length // 4)
    if input_list_length > 4:
        num_work = max(num_work, 4)
    # guard against a zero worker count (and a divide-by-zero in chunksize) for short file lists
    num_work = max(num_work, 1)
chunksize = input_list_length // num_work
print("num input_list: {}, num works: {}, chunksize: {}".format(input_list_length, num_work, chunksize))
if input_list[0].endswith('npy'):
data_sample_func = functools_partial(
indoor3d_util.room2blocks_wrapper_normalized, num_point=block_points, block_size=block_size,
stride=stride, random_sample=random_sample, sample_num=sample_num, sample_aug=sample_aug)
elif input_list[0].endswith('h5'):
def load_data_file(input_file):
cur_data, cur_group, _, cur_sem = provider.loadDataFile_with_groupseglabel_stanfordindoor(input_file)
return cur_data, cur_sem, cur_group
data_sample_func = load_data_file
def data_sample_single(input_file):
datalabel = data_sample_func(input_file)
if split == 'train':
datalabel = provider.shuffle_data(*datalabel)
return datalabel
for _ in range(epoch):
np.random.shuffle(input_list)
for idx in range(chunksize + 1):
start_idx = min(idx * num_work, input_list_length)
end_idx = min((idx + 1) * num_work, input_list_length)
if start_idx >= input_list_length or end_idx > input_list_length:
continue
with futures.ThreadPoolExecutor(num_work) as pool:
data_sem_ins = list(pool.map(data_sample_single, input_list[start_idx:end_idx], chunksize=1))
for dsi in data_sem_ins:
shuffle_dsi = provider.shuffle_data(*dsi)
data_sample_queue.put(shuffle_dsi)
del dsi
gc.collect()
pool.shutdown()
gc.collect()
def data_prepare(data_sample_queue, data_queue, blocks, epoch, batch_size):
data_list = list()
sem_label_list = list()
ins_label_list = list()
total_batch = (blocks // batch_size) * epoch
while total_batch > 0:
data, sem_label, ins_label = data_sample_queue.get()
data_list.append(data)
sem_label_list.append(sem_label)
ins_label_list.append(ins_label)
del data
del sem_label
del ins_label
batch_data = np.concatenate(data_list, axis=0)
batch_sem_label = np.concatenate(sem_label_list, axis=0)
batch_ins_label = np.concatenate(ins_label_list, axis=0)
batch_data_length = batch_data.shape[0]
num_batch_size = batch_data_length // batch_size
for idx in range(num_batch_size):
total_batch -= 1
start_idx = idx * batch_size
end_idx = (idx + 1) * batch_size
data_queue.put((batch_data[start_idx: end_idx, ...],
batch_sem_label[start_idx: end_idx],
batch_ins_label[start_idx: end_idx]))
remainder = batch_data_length % batch_size
if remainder:
data_list = [batch_data[-remainder:]]
sem_label_list = [batch_sem_label[-remainder:]]
ins_label_list = [batch_ins_label[-remainder:]]
else:
data_list = list()
sem_label_list = list()
ins_label_list = list()
del batch_data
del batch_sem_label
del batch_ins_label
gc.collect()
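# Illustrative sketch (not executed here): data_sample() and data_prepare() form a
# producer/consumer pair joined by multiprocessing.Manager() queues; S3DISDataset below
# wires them up essentially as follows (file_list and num_blocks are placeholders):
#
#     manager = multiprocessing.Manager()
#     sample_q, batch_q = manager.Queue(3), manager.Queue(30)
#     producer = multiprocessing.Process(target=data_sample,
#                                        args=(sample_q, file_list, 'train', 1, 8))
#     consumer = multiprocessing.Process(target=data_prepare,
#                                        args=(sample_q, batch_q, num_blocks, 1, 24))
#     producer.start(); consumer.start()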
class S3DISDataset(object):
def __init__(self, data_root, input_list_txt, split='train', epoch=1, batch_size=24, num_works=8,
data_type='numpy', block_points=4096, block_size=1.0, stride=0.5, random_sample=False,
sample_num=None, sample_aug=1, with_rgb=True):
self.input_list_txt = input_list_txt
self.split = split
self.data_root = data_root
self.data_type = data_type
self.capacity = 30
self.length = 0
assert (data_type == 'numpy' or data_type == 'hdf5'), 'data_type must be "numpy" or "hdf5"'
self.input_list = self.get_input_list()
self.manager = multiprocessing.Manager()
self.data_sample_queue = self.manager.Queue(3)
self.data_queue = multiprocessing.Manager().Queue(self.capacity)
self.producer_process = multiprocessing.Process(target=data_sample, args=(
self.data_sample_queue, self.input_list, split, epoch, num_works,
block_points, block_size, stride, random_sample, sample_num, sample_aug))
self.consumer_process = multiprocessing.Process(target=data_prepare, args=(
self.data_sample_queue, self.data_queue, self.length, epoch, batch_size))
self.producer_process.start()
self.consumer_process.start()
def __del__(self):
while not self.data_sample_queue.empty() and not self.data_queue.empty():
self.data_queue.get_nowait()
if self.producer_process.is_alive():
self.producer_process.join()
if self.consumer_process.is_alive():
self.consumer_process.join()
def __len__(self):
return self.length
def get_input_list(self):
input_list = [line.strip() for line in open(self.input_list_txt, 'r')]
        # os.path.splitext is used because str.strip() removes characters, not a suffix
        temp_list = [os.path.splitext(os.path.basename(item))[0] for item in input_list]
temp_input_list = [line.strip() for line in
open(os.path.join(self.data_root, 'data/indoor3d_ins_seg_hdf5/room_filelist.txt'), 'r')]
cnt_length = 0
for item in temp_input_list:
if item in temp_list:
cnt_length += 1
del temp_input_list
self.length = cnt_length
input_list = [os.path.join(self.data_root, item) for item in input_list]
return input_list
def get_batch(self, data_aug=False):
data, sem_label, ins_label = self.data_queue.get()
if data_aug and self.split == 'train':
data[:, :, 0:3] = provider.jitter_point_cloud(data[:, :, 0:3])
return data, sem_label, ins_label
def get_length(self):
return self.__len__()
if __name__ == '__main__':
batch_size = 24
data_set = S3DISDataset(ROOT_DIR, 'data/test_file_list_Area1.txt', epoch=2)
num_batch = data_set.get_length() // batch_size
for epoch in range(2):
for idx in range(num_batch):
_, _, _ = data_set.get_batch()
print('epoch/num_epoch: {}/{}; batch/num_batch: {}/{};'.format(epoch, 2, idx, num_batch))
time.sleep(1)
print('finish')
| 36.68
| 115
| 0.645447
|
8f55885df8274328198901e751ce368d67567548
| 11,817
|
py
|
Python
|
python/ccxt/async/btcchina.py
|
born2net/ccxt
|
9995e50ca28513b9a68f774a3517f2c396cc0001
|
[
"MIT"
] | null | null | null |
python/ccxt/async/btcchina.py
|
born2net/ccxt
|
9995e50ca28513b9a68f774a3517f2c396cc0001
|
[
"MIT"
] | null | null | null |
python/ccxt/async/btcchina.py
|
born2net/ccxt
|
9995e50ca28513b9a68f774a3517f2c396cc0001
|
[
"MIT"
] | 1
|
2018-08-09T18:11:13.000Z
|
2018-08-09T18:11:13.000Z
|
# -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import base64
import hashlib
from ccxt.base.errors import AuthenticationError
class btcchina (Exchange):
def describe(self):
return self.deep_extend(super(btcchina, self).describe(), {
'id': 'btcchina',
'name': 'BTCChina',
'countries': 'CN',
'rateLimit': 1500,
'version': 'v1',
'hasCORS': True,
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766368-465b3286-5ed6-11e7-9a11-0f6467e1d82b.jpg',
'api': {
'plus': 'https://plus-api.btcchina.com/market',
'public': 'https://data.btcchina.com/data',
'private': 'https://api.btcchina.com/api_trade_v1.php',
},
'www': 'https://www.btcchina.com',
'doc': 'https://www.btcchina.com/apidocs'
},
'api': {
'plus': {
'get': [
'orderbook',
'ticker',
'trade',
],
},
'public': {
'get': [
'historydata',
'orderbook',
'ticker',
'trades',
],
},
'private': {
'post': [
'BuyIcebergOrder',
'BuyOrder',
'BuyOrder2',
'BuyStopOrder',
'CancelIcebergOrder',
'CancelOrder',
'CancelStopOrder',
'GetAccountInfo',
'getArchivedOrder',
'getArchivedOrders',
'GetDeposits',
'GetIcebergOrder',
'GetIcebergOrders',
'GetMarketDepth',
'GetMarketDepth2',
'GetOrder',
'GetOrders',
'GetStopOrder',
'GetStopOrders',
'GetTransactions',
'GetWithdrawal',
'GetWithdrawals',
'RequestWithdrawal',
'SellIcebergOrder',
'SellOrder',
'SellOrder2',
'SellStopOrder',
],
},
},
'markets': {
'BTC/CNY': {'id': 'btccny', 'symbol': 'BTC/CNY', 'base': 'BTC', 'quote': 'CNY', 'api': 'public', 'plus': False},
'LTC/CNY': {'id': 'ltccny', 'symbol': 'LTC/CNY', 'base': 'LTC', 'quote': 'CNY', 'api': 'public', 'plus': False},
'LTC/BTC': {'id': 'ltcbtc', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'api': 'public', 'plus': False},
'BCH/CNY': {'id': 'bcccny', 'symbol': 'BCH/CNY', 'base': 'BCH', 'quote': 'CNY', 'api': 'plus', 'plus': True},
'ETH/CNY': {'id': 'ethcny', 'symbol': 'ETH/CNY', 'base': 'ETH', 'quote': 'CNY', 'api': 'plus', 'plus': True},
},
})
async def fetch_markets(self):
markets = await self.publicGetTicker({
'market': 'all',
})
result = []
keys = list(markets.keys())
for p in range(0, len(keys)):
key = keys[p]
market = markets[key]
parts = key.split('_')
id = parts[1]
base = id[0:3]
quote = id[3:6]
base = base.upper()
quote = quote.upper()
symbol = base + '/' + quote
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
})
return result
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privatePostGetAccountInfo()
balances = response['result']
result = {'info': balances}
for c in range(0, len(self.currencies)):
currency = self.currencies[c]
lowercase = currency.lower()
account = self.account()
if lowercase in balances['balance']:
account['total'] = float(balances['balance'][lowercase]['amount'])
if lowercase in balances['frozen']:
account['used'] = float(balances['frozen'][lowercase]['amount'])
account['free'] = account['total'] - account['used']
result[currency] = account
return self.parse_balance(result)
def create_market_request(self, market):
request = {}
field = 'symbol' if (market['plus']) else 'market'
request[field] = market['id']
return request
async def fetch_order_book(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['api'] + 'GetOrderbook'
request = self.createMarketRequest(market)
orderbook = await getattr(self, method)(self.extend(request, params))
timestamp = orderbook['date'] * 1000
result = self.parse_order_book(orderbook, timestamp)
result['asks'] = self.sort_by(result['asks'], 0)
return result
def parse_ticker(self, ticker, market):
timestamp = ticker['date'] * 1000
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['high']),
'low': float(ticker['low']),
'bid': float(ticker['buy']),
'ask': float(ticker['sell']),
'vwap': float(ticker['vwap']),
'open': float(ticker['open']),
'close': float(ticker['prev_close']),
'first': None,
'last': float(ticker['last']),
'change': None,
'percentage': None,
'average': None,
'baseVolume': float(ticker['vol']),
'quoteVolume': None,
'info': ticker,
}
def parse_ticker_plus(self, ticker, market):
timestamp = ticker['Timestamp']
symbol = None
if market:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['High']),
'low': float(ticker['Low']),
'bid': float(ticker['BidPrice']),
'ask': float(ticker['AskPrice']),
'vwap': None,
'open': float(ticker['Open']),
'close': float(ticker['PrevCls']),
'first': None,
'last': float(ticker['Last']),
'change': None,
'percentage': None,
'average': None,
'baseVolume': float(ticker['Volume24H']),
'quoteVolume': None,
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['api'] + 'GetTicker'
request = self.createMarketRequest(market)
tickers = await getattr(self, method)(self.extend(request, params))
ticker = tickers['ticker']
if market['plus']:
return self.parseTickerPlus(ticker, market)
return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market):
timestamp = int(trade['date']) * 1000
return {
'id': trade['tid'],
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': None,
'price': trade['price'],
'amount': trade['amount'],
}
def parse_trade_plus(self, trade, market):
timestamp = self.parse8601(trade['timestamp'])
return {
'id': None,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'type': None,
'side': trade['side'].lower(),
'price': trade['price'],
'amount': trade['size'],
}
def parse_trades_plus(self, trades, market=None):
result = []
for i in range(0, len(trades)):
result.append(self.parseTradePlus(trades[i], market))
return result
async def fetch_trades(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
method = market['api'] + 'GetTrade'
request = self.createMarketRequest(market)
if market['plus']:
now = self.milliseconds()
request['start_time'] = now - 86400 * 1000
request['end_time'] = now
else:
method += 's' # trades vs trade
response = await getattr(self, method)(self.extend(request, params))
if market['plus']:
return self.parseTradesPlus(response['trades'], market)
return self.parse_trades(response, market)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
method = 'privatePost' + self.capitalize(side) + 'Order2'
order = {}
id = market['id'].upper()
if type == 'market':
order['params'] = [None, amount, id]
else:
order['params'] = [price, amount, id]
response = await getattr(self, method)(self.extend(order, params))
return {
'info': response,
'id': response['id'],
}
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
market = params['market'] # TODO fixme
return await self.privatePostCancelOrder(self.extend({
'params': [id, market],
}, params))
def nonce(self):
return self.microseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + path
if api == 'private':
if not self.apiKey:
raise AuthenticationError(self.id + ' requires `' + self.id + '.apiKey` property for authentication')
if not self.secret:
raise AuthenticationError(self.id + ' requires `' + self.id + '.secret` property for authentication')
p = []
if 'params' in params:
p = params['params']
nonce = self.nonce()
request = {
'method': path,
'id': nonce,
'params': p,
}
p = ','.join(p)
body = self.json(request)
            # tonce and id must be strings when building the query and headers
            query = (
                'tonce=' + str(nonce) +
                '&accesskey=' + self.apiKey +
                '&requestmethod=' + method.lower() +
                '&id=' + str(nonce) +
                '&method=' + path +
                '&params=' + p
            )
            signature = self.hmac(self.encode(query), self.encode(self.secret), hashlib.sha1)
            auth = self.apiKey + ':' + signature
            headers = {
                'Authorization': 'Basic ' + base64.b64encode(self.encode(auth)).decode(),
                'Json-Rpc-Tonce': str(nonce),
            }
else:
if params:
url += '?' + self.urlencode(params)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
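# Illustrative sketch of the private-API signing performed in sign() above, using only the
# standard library (the key, secret, and tonce values are placeholders):
#
#     import base64, hashlib, hmac
#     query = 'tonce=1500000000000000&accesskey=APIKEY&requestmethod=post' \
#             '&id=1500000000000000&method=GetAccountInfo&params='
#     signature = hmac.new(b'SECRET', query.encode(), hashlib.sha1).hexdigest()
#     authorization = 'Basic ' + base64.b64encode(('APIKEY:' + signature).encode()).decode()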
| 37.39557
| 128
| 0.469832
|
c86e5d39d9d33a463da10b86e7449392f36eb36f
| 2,161
|
py
|
Python
|
chainer/functions/array/get_item.py
|
mingxiaoh/chainer-v3
|
815ff00f5eaf7944d6e8a75662ff64a2fe046a4d
|
[
"BSD-3-Clause"
] | 7
|
2017-05-08T07:02:40.000Z
|
2018-12-02T18:35:39.000Z
|
chainer/functions/array/get_item.py
|
mingxiaoh/chainer-v3
|
815ff00f5eaf7944d6e8a75662ff64a2fe046a4d
|
[
"BSD-3-Clause"
] | null | null | null |
chainer/functions/array/get_item.py
|
mingxiaoh/chainer-v3
|
815ff00f5eaf7944d6e8a75662ff64a2fe046a4d
|
[
"BSD-3-Clause"
] | 1
|
2021-05-27T16:52:11.000Z
|
2021-05-27T16:52:11.000Z
|
import collections
import numpy
import chainer
from chainer import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
from chainer import variable
class GetItem(function.Function):
"""Function that slices array and extract elements."""
def __init__(self, slices):
if not isinstance(slices, collections.Iterable):
slices = tuple([slices])
if chainer.is_debug():
n_ellipses = 0
for s in slices:
if numpy.isscalar(s) or s is None or isinstance(s, slice):
pass
elif s is Ellipsis:
n_ellipses += 1
else:
raise ValueError('Only basic indexing is supported')
if n_ellipses > 1:
raise ValueError('Only one Ellipsis is allowed')
self.slices = slices
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
valid_slice = len(self.slices) - self.slices.count(None)
type_check.expect(in_types[0].ndim >= valid_slice)
def forward(self, xs):
ary = xs[0]
return utils.force_array(ary[tuple(self.slices)]),
def backward(self, xs, gys):
xp = cuda.get_array_module(*xs)
gy = gys[0]
gx = xp.zeros_like(xs[0])
gx[tuple(self.slices)] = gy
return gx,
def get_item(x, slices):
"""Extract elements from array with specified shape, axes and offsets.
Args:
        x (Variable): A variable to be sliced.
slices (int, slice, None or Ellipsis or tuple of them): Basic slicing
to slice a variable. It supports ``int``, ``slice``, ``newaxis``
(equivalent to ``None``) and ``Ellipsis``.
Returns:
Variable: :class:`~chainer.Variable` object
which contains sliced array of ``x``.
.. note::
See NumPy document for details of `indexing
<http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html>`_.
"""
return GetItem(slices)(x)
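# Example usage (a minimal sketch; the array contents are arbitrary):
#
#     import numpy as np
#     from chainer import Variable
#     x = Variable(np.arange(12, dtype=np.float32).reshape(3, 4))
#     y = get_item(x, (slice(None), 1))       # behaves like x[:, 1]
#     y.grad = np.ones(3, dtype=np.float32)
#     y.backward()                            # x.grad is zero except in the sliced column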
def install_variable_get_item():
variable.Variable.__getitem__ = get_item
| 28.434211
| 77
| 0.610366
|
300dae5aaf07fcc8d5e73a6e0025f1da7a895f68
| 6,397
|
py
|
Python
|
venv1/Lib/site-packages/tensorflow/contrib/opt/python/training/powersign.py
|
Soum-Soum/Tensorflow_Face_Finder
|
fec6c15d2df7012608511ad87f4b55731bf99478
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
venv1/Lib/site-packages/tensorflow/contrib/opt/python/training/powersign.py
|
Soum-Soum/Tensorflow_Face_Finder
|
fec6c15d2df7012608511ad87f4b55731bf99478
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-05-20T00:58:04.000Z
|
2021-05-20T00:58:04.000Z
|
venv1/Lib/site-packages/tensorflow/contrib/opt/python/training/powersign.py
|
Soum-Soum/Tensorflow_Face_Finder
|
fec6c15d2df7012608511ad87f4b55731bf99478
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of PowerSign."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class PowerSignOptimizer(optimizer.Optimizer):
"""Optimizer that implements the PowerSign update.
See [Bello et al., ICML2017],
[Neural Optimizer Search with RL](https://arxiv.org/abs/1709.07417).
"""
def __init__(self,
learning_rate=0.1,
base=math.e,
beta=0.9,
sign_decay_fn=None,
use_locking=False,
name='PowerSignOptimizer'):
"""Constructs a new PowerSignOptimizer object.
Initialization:
```
m_0 <- 0 (Initialize initial 1st moment vector)
t <- 0 (Initialize timestep)
```
Update:
```
t <- t + 1
m_t <- beta1 * m_{t-1} + (1 - beta1) * g
sign_decay <- sign_decay_fn(t)
update <- base ** (sign_decay * sign(g) * sign(m)) * g
variable <- variable - lr_t * update
```
    Example usage for PowerSign-ld (PowerSign with linear sign decay)
```
decay_steps = 1000
linear_decay_fn = sign_decays.get_linear_decay_fn(decay_steps)
opt = PowerSignOptimizer(learning_rate=0.1, sign_decay_fn=linear_decay_fn)
```
Args:
learning_rate: learning_rate used when taking a step.
base: base used in optimizer.
beta: decay used for computing the moving average m.
sign_decay_fn: decay function applied to the sign(g) sign(m) quantity.
Takes global_step as an argument. See sign_decay.py for some examples.
use_locking: If True, use locks for update operations.
      name: Optional name for the operations created when applying gradients.
Defaults to "PowerSignOptimizer".
"""
super(PowerSignOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._beta = beta
self._logbase = math.log(base)
self._sign_decay_fn = sign_decay_fn
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._beta_t = None
self._logbase_t = None
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
if self._sign_decay_fn is not None:
self._sign_decay_t = ops.convert_to_tensor(
self._sign_decay_fn(global_step), name='sign_decay')
return super(PowerSignOptimizer, self).apply_gradients(
grads_and_vars, global_step=global_step, name=name)
def _create_slots(self, var_list):
# Create slots for the first moment.
for v in var_list:
self._zeros_slot(v, 'm', self._name)
def _prepare(self):
self._lr_t = ops.convert_to_tensor(self._lr, name='learning_rate')
self._beta_t = ops.convert_to_tensor(self._beta, name='beta')
self._logbase_t = ops.convert_to_tensor(self._logbase, name='logbase')
if self._sign_decay_fn is None:
self._sign_decay_t = ops.convert_to_tensor(1.0, name='sign_decay')
def _apply_dense(self, grad, var):
m = self.get_slot(var, 'm')
return training_ops.apply_power_sign(
var,
m,
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._logbase_t, var.dtype.base_dtype),
math_ops.cast(self._sign_decay_t, var.dtype.base_dtype),
math_ops.cast(self._beta_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking).op
def _resource_apply_dense(self, grad, var):
m = self.get_slot(var, 'm')
return training_ops.resource_apply_power_sign(
var.handle,
m.handle,
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._logbase_t, var.dtype.base_dtype),
math_ops.cast(self._sign_decay_t, var.dtype.base_dtype),
math_ops.cast(self._beta_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta_t = math_ops.cast(self._beta_t, var.dtype.base_dtype)
logbase_t = math_ops.cast(self._logbase_t, var.dtype.base_dtype)
e_t = math_ops.cast(math.e, var.dtype.base_dtype)
m = self.get_slot(var, 'm')
m_t = state_ops.assign(
m, (m * beta_t) + (grad * (1 - beta_t)), use_locking=self._use_locking)
sign_g = ops.IndexedSlices(
math_ops.sign(grad.values), grad.indices, dense_shape=grad.dense_shape)
sign_gm = ops.IndexedSlices(
array_ops.gather(math_ops.sign(m_t), sign_g.indices) * sign_g.values,
sign_g.indices,
dense_shape=sign_g.dense_shape)
sign_decayed = math_ops.cast(
self._sign_decay_t, var.dtype.base_dtype)
multiplier_values = math_ops.pow(
e_t, logbase_t * sign_decayed * sign_gm.values)
multiplier = ops.IndexedSlices(
multiplier_values, sign_gm.indices, dense_shape=sign_gm.dense_shape)
final_update = ops.IndexedSlices(
lr_t * multiplier.values * grad.values,
multiplier.indices,
dense_shape=multiplier.dense_shape)
var_update = state_ops.scatter_sub(
var,
final_update.indices,
final_update.values,
use_locking=self._use_locking)
return control_flow_ops.group(* [var_update, m_t])
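# Editor's usage sketch (not part of the original file): minimize a toy
# quadratic with the optimizer defined above.  Assumes a TF 1.x graph-mode
# session with contrib available; the loss, step count and learning rate are
# illustrative only.
def _powersign_demo():
  import tensorflow as tf
  w = tf.Variable(5.0)
  loss = tf.square(w - 3.0)
  opt = PowerSignOptimizer(learning_rate=0.1)
  train_op = opt.minimize(loss, global_step=tf.train.get_or_create_global_step())
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
      sess.run(train_op)
    return sess.run(w)  # should approach 3.0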
| 36.976879
| 81
| 0.67094
|
8407f25b56b32abc6538ad9e9dcc41c210ae29fc
| 152
|
py
|
Python
|
project/markdown_files/long/docs/function_programming/func01.py
|
mintlov3r/oh-my-python
|
b99e65ebe31926d92d825d8ad3294e970d9dc722
|
[
"Apache-2.0"
] | null | null | null |
project/markdown_files/long/docs/function_programming/func01.py
|
mintlov3r/oh-my-python
|
b99e65ebe31926d92d825d8ad3294e970d9dc722
|
[
"Apache-2.0"
] | null | null | null |
project/markdown_files/long/docs/function_programming/func01.py
|
mintlov3r/oh-my-python
|
b99e65ebe31926d92d825d8ad3294e970d9dc722
|
[
"Apache-2.0"
] | null | null | null |
# Anonymous function
f = lambda x: x+2
# Equivalent to the named function below, which is easier to extend
def f2(x):
    # print(x)
    return x+2
# Also equivalent to
# def f3(x): return x+2
print('anonymous function f =', f(2))
print('regular function f2 =', f2(2))
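# Editor's follow-on sketch (not part of the original file): anonymous functions
# are most useful as throwaway arguments, e.g. as a sort key or a map() rule.
pairs = [(1, 'b'), (3, 'a'), (2, 'c')]
print('sorted by second element:', sorted(pairs, key=lambda p: p[1]))
print('map with the same x+2 rule:', list(map(lambda x: x + 2, [1, 2, 3])))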
| 13.818182
| 23
| 0.546053
|
01608d54693c40caf9d57bd8fbbfb181c027065b
| 4,403
|
py
|
Python
|
calc_loc_from_loc_and_bearing_and_dist.py
|
Guymer/PyGuymer
|
7970659645f363788d371d00e2128f0cc3a47362
|
[
"Apache-2.0"
] | 6
|
2018-09-18T00:48:03.000Z
|
2019-12-15T23:48:58.000Z
|
calc_loc_from_loc_and_bearing_and_dist.py
|
Guymer/PyGuymer
|
7970659645f363788d371d00e2128f0cc3a47362
|
[
"Apache-2.0"
] | 1
|
2019-11-19T03:17:25.000Z
|
2019-11-19T17:24:45.000Z
|
calc_loc_from_loc_and_bearing_and_dist.py
|
Guymer/PyGuymer
|
7970659645f363788d371d00e2128f0cc3a47362
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
##############################################################################################
# This file is deprecated because Python 2.x is deprecated #
# A Python 3.x version of this file can be found at: #
# #
# https://github.com/Guymer/PyGuymer3/blob/master/calc_loc_from_loc_and_bearing_and_dist.py #
##############################################################################################
def calc_loc_from_loc_and_bearing_and_dist(lon1_deg, lat1_deg, alpha1_deg, s_m, nmax = 100, eps = 1.0e-12):
"""
    This function reads in coordinates (in degrees) on the surface of the Earth,
    a heading (in degrees) and a distance (in metres), and then calculates the
    coordinates (in degrees) at the end of that vector.
"""
# NOTE: https://en.wikipedia.org/wiki/Vincenty%27s_formulae
# NOTE: https://www.movable-type.co.uk/scripts/latlong-vincenty.html
# NOTE: "lambda" is a reserved word in Python so I use "lam" as my variable
# name.
# Import modules ...
import math
# Convert to radians ...
lon1 = math.radians(lon1_deg) # [rad]
lat1 = math.radians(lat1_deg) # [rad]
alpha1 = math.radians(alpha1_deg) # [rad]
# Set constants ...
a = 6378137.0 # [m]
f = 1.0 / 298.257223563
b = (1.0 - f) * a # [m]
u1 = math.atan((1.0 - f) * math.tan(lat1)) # [rad]
sigma1 = math.atan2(
math.tan(u1),
math.cos(alpha1)
)
sin_alpha = math.cos(u1) * math.sin(alpha1)
cosSq_alpha = 1.0 - sin_alpha ** 2
c = f * cosSq_alpha * (4.0 + f * (4.0 - 3.0 * cosSq_alpha)) / 16.0
uSq = cosSq_alpha * (a ** 2 - b ** 2) / b ** 2
bigA = 1.0 + uSq * (4096.0 + uSq * (-768.0 + uSq * (320.0 - 175.0 * uSq))) / 16384.0
bigB = uSq * (256.0 + uSq * (-128.0 + uSq * (74.0 - 47.0 * uSq))) / 1024.0
# Set initial value of sigma and initialize counter ...
sigma = s_m / (b * bigA)
i = 0
# Start infinite loop ...
while True:
# Stop looping if the function has been called too many times ...
if i >= nmax:
raise Exception("failed to converge")
# Find new value of sigma and increment counter ...
two_sigma_m = 2.0 * sigma1 + sigma
delta_sigma = bigB * math.sin(sigma) * (math.cos(two_sigma_m) + 0.25 * bigB * (math.cos(sigma) * (2.0 * math.cos(two_sigma_m) ** 2 - 1.0) - bigB * math.cos(two_sigma_m) * (4.0 * math.sin(sigma) ** 2 - 3.0) * (4.0 * math.cos(two_sigma_m) ** 2 - 3.0) / 6.0))
sigmaNew = s_m / (b * bigA) + delta_sigma
i += 1
# Only check the solution after at least 3 function calls ...
if i >= 3:
if abs(sigmaNew - sigma) / abs(sigmaNew) <= eps:
break
# Replace old sigma with new sigma ...
sigma = sigmaNew
# Calculate end point and forward azimuth ...
lat2 = math.atan2(
math.sin(u1) * math.cos(sigma) + math.cos(u1) * math.sin(sigma) * math.cos(alpha1),
(1.0 - f) * math.hypot(
sin_alpha,
math.sin(u1) * math.sin(sigma) - math.cos(u1) * math.cos(sigma) * math.cos(alpha1)
)
)
lam = math.atan2(
math.sin(sigma) * math.sin(alpha1),
math.cos(u1) * math.cos(sigma) - math.sin(u1) * math.sin(sigma) * math.cos(alpha1)
)
l = lam - (1.0 - c) * f * sin_alpha * (sigma + c * math.sin(sigma) * (math.cos(two_sigma_m) + c * math.cos(sigma) * (2.0 * math.cos(two_sigma_m) ** 2 - 1.0)))
lon2 = (l + lon1 + 3.0 * math.pi) % (2.0 * math.pi) - math.pi # NOTE: Normalize to -180 <--> +180
alpha2 = math.atan2(
sin_alpha,
math.cos(u1) * math.cos(sigma) * math.cos(alpha1) - math.sin(u1) * math.sin(sigma)
)
alpha2 = (alpha2 + 2.0 * math.pi) % (2.0 * math.pi) # NOTE: Normalize to +0 <--> +360
# Return end point and forward azimuth ...
return math.degrees(lon2), math.degrees(lat2), math.degrees(alpha2)
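# Editor's usage sketch (not part of the original file): head due east for
# 100 km from an illustrative starting point near Greenwich; the printed
# values are examples, not reference numbers.
if __name__ == "__main__":
    lon2_deg, lat2_deg, alpha2_deg = calc_loc_from_loc_and_bearing_and_dist(-0.0015, 51.4778, 90.0, 100.0e3)
    print("end point: lon = %.4f deg, lat = %.4f deg, forward azimuth = %.2f deg" % (lon2_deg, lat2_deg, alpha2_deg))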
| 47.858696
| 264
| 0.488531
|
16d42cd98e83852f95112644d2a01e6a30c1fc1c
| 1,577
|
py
|
Python
|
programming/python/abbreviate.py
|
ngie-cooper/scratch
|
c0d806505c2ca5146e2cc04a352a8b2a8c9024e8
|
[
"BSD-2-Clause"
] | 3
|
2019-07-03T02:16:39.000Z
|
2020-10-25T23:37:00.000Z
|
programming/python/abbreviate.py
|
ngie-cooper/scratch
|
c0d806505c2ca5146e2cc04a352a8b2a8c9024e8
|
[
"BSD-2-Clause"
] | null | null | null |
programming/python/abbreviate.py
|
ngie-cooper/scratch
|
c0d806505c2ca5146e2cc04a352a8b2a8c9024e8
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
"""Abbreviate buffer content repetitions into a more human readable format.
This also demonstrates how to use line_profiler, memory_profiler, and timeit to
measure computation and memory performance, as well as quantify wall time spent
computing the repetitions.
Examples:
% python -m kernprof -bl abbreviate.py
% python -m line_profiler abbreviate.py.lprof
% python -m memory_profiler abbreviate.py
% python -m timeit 'import abbreviate; abbreviate.run()'
IMPORTANT:
This method of compressing text only works with strings that don't contain digits.
"""
from functools import reduce
from itertools import groupby
try:
#from memory_profiler import profile
profile
except NameError:
profile = lambda x: x
@profile
def abbreviate(buf):
return reduce(lambda x, y: x + y[0] + str(len(list(y[1]))), groupby(buf), "")
@profile
def abbreviate_loop(buf):
result = ""
if not buf:
return ""
c0 = buf[0]
count = 0
for c in buf:
if c == c0:
count += 1
else:
result += c0 + str(count) if count > 2 else c0
count = 1
c0 = c
result += c0 + str(count) if count > 2 else c0
return result
a_buf = "abbcccddddeeeeeffffffggggggghhhhhhhh" * 100
repeat_buf = "a" * 1000
# Build a buffer with no adjacent repeats (letters only, per the IMPORTANT note above).
non_repeat_buf = "".join(chr(ord("A") + i % 26) for i in range(1000))
def run():
abbreviate(a_buf)
abbreviate_loop(a_buf)
abbreviate(repeat_buf)
abbreviate_loop(repeat_buf)
abbreviate(non_repeat_buf)
abbreviate_loop(non_repeat_buf)
if __name__ == "__main__":
run()
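# Editor's sketch (not part of the original file): what the two variants return
# for a short input -- abbreviate() always appends the run length, while
# abbreviate_loop() only annotates runs longer than two characters.
if __name__ == "__main__":
    print(abbreviate("abbcccdddd"))       # a1b2c3d4
    print(abbreviate_loop("abbcccdddd"))  # abc3d4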
| 22.211268
| 82
| 0.675967
|
ab56a65a8eee168844db66f0bc8edadd1d3c1d41
| 7,096
|
py
|
Python
|
testing/test_phpcompiler.py
|
jweinraub/hippyvm
|
09c7643aaa1c4ade566e8681abd2543f12bf874c
|
[
"MIT"
] | 289
|
2015-01-01T15:36:55.000Z
|
2022-03-27T00:22:27.000Z
|
testing/test_phpcompiler.py
|
jweinraub/hippyvm
|
09c7643aaa1c4ade566e8681abd2543f12bf874c
|
[
"MIT"
] | 26
|
2015-01-21T16:34:41.000Z
|
2020-08-26T15:12:54.000Z
|
testing/test_phpcompiler.py
|
jweinraub/hippyvm
|
09c7643aaa1c4ade566e8681abd2543f12bf874c
|
[
"MIT"
] | 35
|
2015-01-05T12:09:41.000Z
|
2022-03-16T09:30:16.000Z
|
import py
from hippy.phpcompiler import compile_php, PHPLexerWrapper
from hippy.objspace import ObjSpace
from testing.directrunner import run_php_source, DirectRunner
from testing.test_interpreter import BaseTestInterpreter, MockInterpreter
class LiteralInterpreter(MockInterpreter):
def run_bytecode(self, bc, expected_warnings=None):
output_w = MockInterpreter.run_bytecode(self, bc)
space = self.space
output = [space.str_w(v) for v in output_w]
return ''.join(output)
def compile(self, source):
return self.compile_bytecode('<input>', source)
class LiteralRunner(DirectRunner):
def _run(self, source, expected_warnings=None, **ignored):
s = run_php_source(source)
return s
class BaseTestPHP(BaseTestInterpreter):
interpreter = LiteralInterpreter
DirectRunner = LiteralRunner
def test_phplexerwrapper():
phplexerwrapper = PHPLexerWrapper(
'Foo\n<?php echo 5 ?>\nBar\nBaz\n<? echo')
for expected in [('B_LITERAL_BLOCK', 'Foo\n', 1),
('T_ECHO', 'echo', 2),
('T_LNUMBER', '5', 2),
(';', ';', 2),
('B_LITERAL_BLOCK', 'Bar\nBaz\n', 3),
('T_ECHO', 'echo', 5)]:
tok = phplexerwrapper.next()
assert (tok.name, tok.source, tok.source_pos.lineno) == expected
tok = phplexerwrapper.next()
assert tok is None
def test_line_start_offset():
space = ObjSpace()
MockInterpreter(space)
bc = compile_php('<input>', 'Hi there\n', space)
assert bc.startlineno == 1
class TestPHPCompiler(BaseTestPHP):
def test_simple(self):
output = self.run('Foo <?php echo 5; ?> Bar')
assert output == 'Foo 5 Bar'
def test_simple_2(self):
output = self.run('Foo <? echo 5; ?> Bar')
assert output == 'Foo 5 Bar'
output = self.run('Foo<?echo 5;?>Bar')
assert output == 'Foo5Bar'
def test_windows_line_ending(self):
output = self.run("Foo<?php\r\necho 5;\r\n?>Bar")
assert output == "Foo5Bar"
def test_case_insensitive(self):
output = self.run('Foo <?phP echo 5; ?> Bar')
assert output == 'Foo 5 Bar'
def test_no_php_code(self):
output = self.run('Foo\n')
assert output == 'Foo\n'
output = self.run('\nFoo')
assert output == '\nFoo'
def test_eol_after_closing_tag(self):
output = self.run('Foo <?phP echo 5; ?>\nBar')
assert output == 'Foo 5Bar'
output = self.run('Foo <?phP echo 5; ?> \nBar')
assert output == 'Foo 5 \nBar'
output = self.run('Foo <?phP echo 5; ?>\n')
assert output == 'Foo 5'
output = self.run('Foo <?phP echo 5; ?>\n\n')
assert output == 'Foo 5\n'
output = self.run('Foo <?phP echo 5; ?> \n')
assert output == 'Foo 5 \n'
def test_end_in_comment_ignored_1(self):
output = self.run('Foo <?php echo 5; /* ?> */ echo 6; ?> Bar')
assert output == 'Foo 56 Bar'
def test_end_in_comment_not_ignored_1(self):
output = self.run('Foo <?php echo 5; //?>\necho 6; ?> Bar')
assert output == 'Foo 5echo 6; ?> Bar'
def test_end_in_comment_not_ignored_2(self):
output = self.run('Foo <?php echo 5; #?>\necho 6; ?> Bar')
assert output == 'Foo 5echo 6; ?> Bar'
def test_double_end(self):
output = self.run('<?php echo 5; ?> echo 6; ?>\n')
assert output == '5 echo 6; ?>\n'
def test_multiple_blocks(self):
output = self.run('-<?php echo 5;?>+<?php echo 6;?>*')
assert output == '-5+6*'
def test_non_closing_last_block_of_code(self):
output = self.run('-<?php echo 5;?>+<?php echo 6;')
assert output == '-5+6'
def test_missing_semicolon_before_end(self):
output = self.run('-<?php echo 5?>+')
assert output == '-5+'
def test_reuse_var(self):
output = self.run('<?php $x=5?>----<?php echo $x;')
assert output == '----5'
def test_multiple_use_of_block_of_text(self):
output = self.run('<?php for($x=0; $x<5; $x++){?>-+-+-\n<?php }')
assert output == '-+-+-\n' * 5
def test_automatic_echo_1(self):
output = self.run('abc<?=2+3?>def')
assert output == 'abc5def'
def test_automatic_echo_2(self):
output = self.run('abc<?=2+3,7-1?>def')
assert output == 'abc56def'
def test_automatic_echo_3(self):
output = self.run('abc<?=2+3,7-1; echo 8+1;?>def')
assert output == 'abc569def'
def test_automatic_echo_4(self):
output = self.run('abc<?=2+3?><?=6*7?>def')
assert output == 'abc542def'
def test_automatic_echo_5(self):
py.test.raises(Exception, self.run, 'abc<? =2+3?>def')
def test_automatic_echo_6(self):
output = self.run('abc<?=2+3?>\ndef<?=6*7?> \nghi')
assert output == 'abc5def42 \nghi'
def test_automatic_echo_7(self):
output = self.run('abc<?=2+3;')
assert output == 'abc5'
py.test.raises(Exception, self.run, 'abc<?=2+3')
def test_halt_compiler(self):
output = self.run('abc<?php echo 5;__halt_compiler();]]]]]]]]]]?>def')
assert output == 'abc5'
output = self.run('abc<?php echo 5;__halt_compiler()?>def')
assert output == 'abc5'
output = self.run('abc<?php echo __COMPILER_HALT_OFFSET__;\n'
'__halt_compiler() ;]]]]]]]]]]?>def')
assert output == 'abc59'
output = self.run('abc<?php echo __COMPILER_HALT_OFFSET__;\n'
'__halt_compiler() ?> def')
assert output == 'abc62'
output = self.run('abc<?php echo __COMPILER_HALT_OFFSET__;\n'
'__halt_compiler() ?>\n def')
assert output == 'abc63'
def test_heredoc(self):
output = self.run('''<?php $x = <<< \tPHP
Hello World
PHP;
echo $x;
?>''')
assert output == 'Hello World'
def test_heredoc_2(self):
output = self.run('''<?php $x = <<<PHP
Hello World
12
;;
"hello"
19x333
class var
PHP;
echo $x;
?>''')
assert output == 'Hello World\n12\n;;\n"hello"\n19x333\nclass var'
def test_heredoc_error(self):
input = '''<?php $x = <<<PHP
Hello World
PH;
echo $x;
?>'''
py.test.raises(Exception, self.run, input)
def test_heredoc_escape(self):
output = self.run(r'''<?php $x = <<<EOS
\n
\$variable
\"quotes\
EOS;
echo $x;
?>''')
assert output == '\n\n$variable\n\\"quotes\\'
def test_heredoc_NUL(self):
output = self.run(r'''<?php $x = <<<EOS
Hello\0world
EOS;
echo $x;
?>''')
assert output == "Hello\0world"
output = self.run('''<?php $x = <<<EOS
Hello\0world
EOS;
echo $x;
?>''')
assert output == "Hello\0world"
def test_heredoc_unfinished(self):
output = self.run(r'''<?php
class T {
public function test($var) {
echo $var;
}
}
$t = new T;
$t->test(<<<HTML
test
HTML
);
?>''')
assert output == "test\n"
| 29.940928
| 78
| 0.568348
|
a37c8ad0a8931db56dd078b4c5d35b71ccf1ef66
| 1,160
|
py
|
Python
|
env/Lib/site-packages/OpenGL/GLES2/EXT/color_buffer_half_float.py
|
5gconnectedbike/Navio2
|
8c3f2b5d8bbbcea1fc08739945183c12b206712c
|
[
"BSD-3-Clause"
] | 210
|
2016-04-09T14:26:00.000Z
|
2022-03-25T18:36:19.000Z
|
env/Lib/site-packages/OpenGL/GLES2/EXT/color_buffer_half_float.py
|
5gconnectedbike/Navio2
|
8c3f2b5d8bbbcea1fc08739945183c12b206712c
|
[
"BSD-3-Clause"
] | 72
|
2016-09-04T09:30:19.000Z
|
2022-03-27T17:06:53.000Z
|
env/Lib/site-packages/OpenGL/GLES2/EXT/color_buffer_half_float.py
|
5gconnectedbike/Navio2
|
8c3f2b5d8bbbcea1fc08739945183c12b206712c
|
[
"BSD-3-Clause"
] | 64
|
2016-04-09T14:26:49.000Z
|
2022-03-21T11:19:47.000Z
|
'''OpenGL extension EXT.color_buffer_half_float
This module customises the behaviour of the
OpenGL.raw.GLES2.EXT.color_buffer_half_float to provide a more
Python-friendly API
Overview (from the spec)
This extension allows 16-bit floating point formats as defined in
OES_texture_half_float to be rendered to via framebuffer objects.
When using floating-point formats, certain color clamps are disabled.
This extension also updates the framebuffer object API to allow querying
attachment component types.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/EXT/color_buffer_half_float.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES2 import _types, _glgets
from OpenGL.raw.GLES2.EXT.color_buffer_half_float import *
from OpenGL.raw.GLES2.EXT.color_buffer_half_float import _EXTENSION_NAME
def glInitColorBufferHalfFloatEXT():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
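# Editor's usage sketch (not part of the original file): the init helper only
# reports whether the extension is exposed and needs a current GL context.
# GLUT is used below purely as an illustrative way to obtain one.
def _check_half_float_support():
    from OpenGL.GLUT import glutInit, glutCreateWindow
    glutInit()
    glutCreateWindow(b'extension probe')
    return bool(glInitColorBufferHalfFloatEXT())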
| 35.151515
| 73
| 0.817241
|
b08b26dd9a9c0549c026084ddf9e722a380cd38a
| 6,337
|
py
|
Python
|
docs/conf.py
|
drexelwireless/dragonradio
|
885abd68d56af709e7a53737352641908005c45b
|
[
"MIT"
] | 8
|
2020-12-05T20:30:54.000Z
|
2022-01-22T13:32:14.000Z
|
docs/conf.py
|
drexelwireless/dragonradio
|
885abd68d56af709e7a53737352641908005c45b
|
[
"MIT"
] | 3
|
2020-10-28T22:15:27.000Z
|
2021-01-27T14:43:41.000Z
|
docs/conf.py
|
drexelwireless/dragonradio
|
885abd68d56af709e7a53737352641908005c45b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
# -- Project information -----------------------------------------------------
project = 'DragonRadio'
copyright = '2018-2020, Drexel University'
author = 'Geoffrey Mainland'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'breathe',
'sphinx.ext.autodoc',
'sphinx.ext.githubpages',
'sphinx.ext.graphviz',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinxarg.ext',
]
autodoc_mock_imports = ['google.protobuf']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'dragonradio'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DragonRadio.tex', 'DragonRadio Documentation',
'Geoffrey Mainland', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dragonradio', 'DragonRadio Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DragonRadio', 'DragonRadio Documentation',
author, 'DragonRadio', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# Breathe Configuration
breathe_projects = {'DragonRadio': 'build/doxygenxml/'}
breathe_default_project = 'DragonRadio'
breathe_domain_by_extension = {'hh': 'cpp'}
inheritance_graph_attrs = dict(rankdir="TB", size='""')
def generate_doxygen_xml(app):
build_dir = os.path.join(app.confdir, 'build')
if not os.path.exists(build_dir):
os.mkdir(build_dir)
try:
subprocess.call(['doxygen', '--version'])
retcode = subprocess.call(['doxygen'], cwd=app.confdir)
if retcode < 0:
sys.stderr.write("doxygen error code: {}\n".format(-retcode))
except OSError as e:
sys.stderr.write("doxygen execution failed: {}\n".format(e))
def setup(app):
"""Add hook for building doxygen xml when needed"""
app.connect("builder-inited", generate_doxygen_xml)
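# Editor's sketch (not part of the original file): a full build exercises the
# builder-inited hook above, so doxygen output lands in build/doxygenxml/
# before breathe reads it.  Assumes Sphinx is installed and the working
# directory is this docs/ folder.
def build_html_docs():
    subprocess.check_call([sys.executable, '-m', 'sphinx', '-b', 'html', '.', '_build/html'])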
| 30.033175
| 79
| 0.654095
|
0ce622fef89857e09828a092962b4bb7bd3b581c
| 2,505
|
py
|
Python
|
algorithms/random.py
|
abacusai/intraprocessing_debiasing
|
b4f0c35e299022b1e71e26686220e90440687100
|
[
"Apache-2.0"
] | 7
|
2020-12-06T17:05:37.000Z
|
2021-09-23T10:59:42.000Z
|
algorithms/random.py
|
abacusai/intraprocessing_debiasing
|
b4f0c35e299022b1e71e26686220e90440687100
|
[
"Apache-2.0"
] | 2
|
2021-03-01T19:59:50.000Z
|
2021-09-25T22:23:27.000Z
|
algorithms/random.py
|
abacusai/intraprocessing_debiasing
|
b4f0c35e299022b1e71e26686220e90440687100
|
[
"Apache-2.0"
] | 2
|
2020-12-06T12:13:44.000Z
|
2021-02-16T13:50:00.000Z
|
"""
Random Intraprocessing Algorithm.
"""
import copy
import logging
import math
import numpy as np
import torch
from models import load_model
from utils import get_best_thresh, get_test_objective, get_valid_objective
logger = logging.getLogger("Debiasing")
def random_debiasing(model_state_dict, data, config, device, verbose=True):
logger.info('Generating Random Debiased models.')
rand_model = load_model(data.num_features, config.get('hyperparameters', {}))
rand_model.to(device)
rand_result = {'objective': -math.inf, 'model': rand_model.state_dict(), 'thresh': -1}
for iteration in range(config['random']['num_trials']):
rand_model.load_state_dict(model_state_dict)
for param in rand_model.parameters():
param.data = param.data * (torch.randn_like(param) * config['random']['stddev'] + 1)
rand_model.eval()
with torch.no_grad():
scores = rand_model(data.X_valid_gpu)[:, 0].reshape(-1).cpu().numpy()
threshs = np.linspace(0, 1, 501)
best_rand_thresh, best_obj = get_best_thresh(scores, threshs, data, config, valid=False, margin=config['random']['margin'])
if best_obj > rand_result['objective']:
rand_result = {'objective': best_obj, 'model': copy.deepcopy(rand_model.state_dict()), 'thresh': best_rand_thresh}
rand_model.eval()
with torch.no_grad():
y_pred = (rand_model(data.X_test_gpu)[:, 0] > best_rand_thresh).reshape(-1).cpu().numpy()
best_test_result = get_test_objective(y_pred, data, config)['objective']
if iteration % 10 == 0 and verbose:
logger.info(f'{iteration} / {config["random"]["num_trials"]} trials have been sampled.')
logger.info(f'Best result so far = {rand_result["objective"]}')
logger.info(f'Best test result so = {best_test_result}')
logger.info('Evaluating best random debiased model.')
rand_model.load_state_dict(rand_result['model'])
rand_model.eval()
with torch.no_grad():
y_pred = (rand_model(data.X_valid_gpu)[:, 0] > rand_result['thresh']).reshape(-1).cpu().numpy()
results_valid = get_valid_objective(y_pred, data, config)
logger.info(f'Results: {results_valid}')
rand_model.eval()
with torch.no_grad():
y_pred = (rand_model(data.X_test_gpu)[:, 0] > rand_result['thresh']).reshape(-1).cpu().numpy()
results_test = get_test_objective(y_pred, data, config)
return results_valid, results_test
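# Editor's usage sketch (not part of the original file).  `model_state_dict`,
# `data` and `device` come from the surrounding project; the config keys below
# mirror the ones read above and their values are illustrative only.
EXAMPLE_CONFIG = {
    'hyperparameters': {},
    'random': {'num_trials': 101, 'stddev': 0.1, 'margin': 0.01},
}
# results_valid, results_test = random_debiasing(
#     model_state_dict, data, EXAMPLE_CONFIG, torch.device('cpu'))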
| 42.457627
| 131
| 0.67505
|
e06f3e512041d8d841c57138b912e18e55a25ef0
| 5,567
|
py
|
Python
|
planetary_computer/sas.py
|
QPC-database/planetary-computer-sdk-for-python
|
ae8fbf6eb9e743100339195cb883e7f8b07d4190
|
[
"MIT"
] | 1
|
2021-07-06T14:47:18.000Z
|
2021-07-06T14:47:18.000Z
|
planetary_computer/sas.py
|
QPC-database/planetary-computer-sdk-for-python
|
ae8fbf6eb9e743100339195cb883e7f8b07d4190
|
[
"MIT"
] | null | null | null |
planetary_computer/sas.py
|
QPC-database/planetary-computer-sdk-for-python
|
ae8fbf6eb9e743100339195cb883e7f8b07d4190
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timezone
from typing import Any, Dict
from functools import singledispatch
import requests
from pydantic import BaseModel, Field
from pystac import Asset, Item, ItemCollection
from pystac.utils import datetime_to_str
from pystac_client import ItemSearch
from planetary_computer.settings import Settings
from planetary_computer.utils import parse_blob_url
class SASBase(BaseModel):
"""Base model for responses."""
expiry: datetime = Field(alias="msft:expiry")
"""RFC339 datetime format of the time this token will expire"""
class Config:
json_encoders = {datetime: datetime_to_str}
allow_population_by_field_name = True
class SignedLink(SASBase):
"""Signed SAS URL response"""
href: str
"""The HREF in the format of a URL that can be used in HTTP GET operations"""
class SASToken(SASBase):
"""SAS Token response"""
token: str
"""The Shared Access (SAS) Token that can be used to access the data
in, for example, Azure's Python SDK"""
def sign(self, href: str) -> SignedLink:
"""Signs an href with this token"""
return SignedLink(href=f"{href}?{self.token}", expiry=self.expiry)
def ttl(self) -> float:
"""Number of seconds the token is still valid for"""
return (self.expiry - datetime.now(timezone.utc)).total_seconds()
# Cache of signing requests so we can reuse them
# Key is the signing URL, value is the SAS token
TOKEN_CACHE: Dict[str, SASToken] = {}
@singledispatch
def sign(obj: Any) -> Any:
"""Sign the relevant URLs belonging to any supported object with a
Shared Access (SAS) Token, which allows for read access.
Args:
obj (Any): The object to sign. Must be one of:
str (URL), Asset, Item, ItemCollection, or ItemSearch
Returns:
Any: A copy of the object where all relevant URLs have been signed
"""
raise TypeError(
"Invalid type, must be one of: str, Asset, Item, ItemCollection, or ItemSearch"
)
@sign.register(str)
def _sign_url(url: str) -> str:
"""Sign a URL with a Shared Access (SAS) Token, which allows for read access.
Args:
url (str): The HREF of the asset in the format of a URL.
This can be found on STAC Item's Asset 'href'
value.
Returns:
str: The signed HREF
"""
settings = Settings.get()
account, container = parse_blob_url(url)
token_request_url = f"{settings.sas_url}/{account}/{container}"
token = TOKEN_CACHE.get(token_request_url)
# Refresh the token if there's less than a minute remaining,
# in order to give a small amount of buffer
if not token or token.ttl() < 60:
headers = (
{"Ocp-Apim-Subscription-Key": settings.subscription_key}
if settings.subscription_key
else None
)
response = requests.get(token_request_url, headers=headers)
response.raise_for_status()
token = SASToken(**response.json())
if not token:
raise ValueError(f"No token found in response: {response.json()}")
TOKEN_CACHE[token_request_url] = token
return token.sign(url).href
@sign.register(Item)
def _sign_item(item: Item) -> Item:
"""Sign all assets within a PySTAC item
Args:
item (Item): The Item whose assets that will be signed
Returns:
Item: A new copy of the Item where all assets' HREFs have
been replaced with a signed version. In addition, a "msft:expiry"
property is added to the Item properties indicating the earliest
expiry time for any assets that were signed.
"""
signed_item = item.clone()
for key in signed_item.assets:
signed_item.assets[key] = sign(signed_item.assets[key])
return signed_item
@sign.register(Asset)
def _sign_asset(asset: Asset) -> Asset:
"""Sign a PySTAC asset
Args:
asset (Asset): The Asset to sign
Returns:
Asset: A new copy of the Asset where the HREF is replaced with a
signed version.
"""
signed_asset = asset.clone()
signed_asset.href = sign(signed_asset.href)
return signed_asset
@sign.register(ItemCollection)
def _sign_item_collection(item_collection: ItemCollection) -> ItemCollection:
"""Sign a PySTAC item collection
Args:
item_collection (ItemCollection): The ItemCollection whose assets will be signed
Returns:
ItemCollection: A new copy of the ItemCollection where all assets'
HREFs for each item have been replaced with a signed version. In addition,
a "msft:expiry" property is added to the Item properties indicating the
earliest expiry time for any assets that were signed.
"""
return ItemCollection.from_dict(
{
"type": "FeatureCollection",
"features": [sign(item).to_dict() for item in item_collection],
}
)
@sign.register(ItemSearch)
def _search_and_sign(search: ItemSearch) -> ItemCollection:
"""Perform a PySTAC Client search, and sign the resulting item collection
Args:
search (ItemSearch): The ItemSearch whose resulting item assets will be signed
Returns:
ItemCollection: The resulting ItemCollection of the search where all assets'
HREFs for each item have been replaced with a signed version. In addition,
a "msft:expiry" property is added to the Item properties indicating the
earliest expiry time for any assets that were signed.
"""
return sign(search.get_all_items())
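# Editor's usage sketch (not part of the original file): sign() dispatches on
# the argument type, so a plain Azure blob URL, a pystac Item/ItemCollection or
# a pystac-client search all go through the same entry point.  The URL below is
# purely illustrative and a real call needs network access to the SAS endpoint.
if __name__ == "__main__":
    example_href = "https://exampleaccount.blob.core.windows.net/examplecontainer/item/band1.tif"
    print(sign(example_href))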
| 31.994253
| 88
| 0.678283
|
31d2573564b5c2caa252000def708938c7741569
| 568
|
py
|
Python
|
demos/nyu_finger_double_calibration_hwp.py
|
open-dynamic-robot-initiative/nyu_finger
|
6a80c9186cf78e98cb4b6edc2c7531a440266910
|
[
"BSD-3-Clause"
] | 2
|
2021-02-17T16:14:20.000Z
|
2021-04-26T03:14:30.000Z
|
demos/nyu_finger_double_calibration_hwp.py
|
open-dynamic-robot-initiative/nyu_finger
|
6a80c9186cf78e98cb4b6edc2c7531a440266910
|
[
"BSD-3-Clause"
] | 3
|
2021-06-23T18:05:33.000Z
|
2021-08-12T15:55:14.000Z
|
demos/nyu_finger_double_calibration_hwp.py
|
open-dynamic-robot-initiative/nyu_finger
|
6a80c9186cf78e98cb4b6edc2c7531a440266910
|
[
"BSD-3-Clause"
] | 3
|
2021-02-27T17:48:12.000Z
|
2021-07-13T21:14:37.000Z
|
import os.path
import numpy as np
from robot_properties_nyu_finger.config import (
NYUFingerDoubleConfig0, NYUFingerDoubleConfig1)
from nyu_finger.nyu_finger_hwp_cpp import NYUFingerHWP
if __name__ == "__main__":
finger0 = NYUFingerHWP()
finger1 = NYUFingerHWP()
finger0.initialize(NYUFingerDoubleConfig0.dgm_yaml_path)
finger1.initialize(NYUFingerDoubleConfig1.dgm_yaml_path)
finger0.run()
finger1.run()
print()
input("Press enter to start calibration.")
finger0.calibrate(np.zeros(3))
finger1.calibrate(np.zeros(3))
| 24.695652
| 60
| 0.757042
|
cb3e295a1b7fb40b72a5855cb8e060dea2dfb7b0
| 221
|
py
|
Python
|
game/engine/__init__.py
|
afreeorange/clueless
|
b6365e15989763fe6f3f7b4031a8ca3ce3a6420e
|
[
"MIT"
] | null | null | null |
game/engine/__init__.py
|
afreeorange/clueless
|
b6365e15989763fe6f3f7b4031a8ca3ce3a6420e
|
[
"MIT"
] | null | null | null |
game/engine/__init__.py
|
afreeorange/clueless
|
b6365e15989763fe6f3f7b4031a8ca3ce3a6420e
|
[
"MIT"
] | null | null | null |
import logging
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
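# Editor's sketch (not part of the original file): the NullHandler above keeps
# the package silent by default; an application opts in to log output itself.
def _enable_engine_logging(level=logging.DEBUG):
    """Illustrative helper only: route this package's records to the root handlers."""
    logging.basicConfig(level=level)
    return logging.getLogger(__name__)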
| 20.090909
| 53
| 0.710407
|
c610bdcbbfa382f6145058ebf299b88c82b3d90f
| 118
|
py
|
Python
|
moma_example/bootstrapform/__init__.py
|
gadio/moma-django
|
13265379be1dbab18697e5f42f38b3b37f928aa9
|
[
"Apache-2.0"
] | 12
|
2015-03-29T05:31:25.000Z
|
2019-06-13T16:17:37.000Z
|
moma_example/bootstrapform/__init__.py
|
antoniotaranto/moma-django
|
13265379be1dbab18697e5f42f38b3b37f928aa9
|
[
"Apache-2.0"
] | 8
|
2015-09-04T21:00:50.000Z
|
2021-06-10T17:39:44.000Z
|
moma_example/bootstrapform/__init__.py
|
antoniotaranto/moma-django
|
13265379be1dbab18697e5f42f38b3b37f928aa9
|
[
"Apache-2.0"
] | 3
|
2015-03-25T21:52:14.000Z
|
2021-01-11T03:02:29.000Z
|
from bootstrapform.meta import VERSION
"""
This is the updated version, not the original one.
"""
__version__ = str(VERSION)
| 16.857143
| 42
| 0.754237
|
7924d1c0994842ae8b1861b4c5edeece6d79e5ed
| 1,424
|
py
|
Python
|
recipe/serializers.py
|
Asko-Dev/REST-API---Recipe
|
e6bff4a0a733b541106c4b30150eb5b327b8f7e2
|
[
"MIT"
] | null | null | null |
recipe/serializers.py
|
Asko-Dev/REST-API---Recipe
|
e6bff4a0a733b541106c4b30150eb5b327b8f7e2
|
[
"MIT"
] | 5
|
2020-06-06T01:34:08.000Z
|
2022-03-12T00:21:49.000Z
|
recipe/serializers.py
|
Asko-Dev/REST-API---Recipe
|
e6bff4a0a733b541106c4b30150eb5b327b8f7e2
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from core.models import Tag, Ingredient, Recipe
class TagSerializer(serializers.ModelSerializer):
"""Serializer for tag objects"""
class Meta:
model = Tag
fields = ('id', 'name')
read_only_fields = ('id',)
class IngredientSerializer(serializers.ModelSerializer):
"""Serializer for ingredient objects"""
class Meta:
model = Ingredient
fields = ('id', 'name')
read_only_fields = ('id',)
class RecipeSerializer(serializers.ModelSerializer):
"""Serializer for Recipe object"""
ingredients = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Ingredient.objects.all()
)
tags = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Tag.objects.all()
)
class Meta:
model = Recipe
fields = ('id', 'title', 'ingredients', 'tags', 'time_minutes',
'price', 'link')
read_only_fields = ('id',)
class RecipeDetailSerializer(RecipeSerializer):
"""Serialize a recipe detail"""
ingredients = IngredientSerializer(many=True, read_only=True)
tags = TagSerializer(many=True, read_only=True)
class RecipeImageSerializer(serializers.ModelSerializer):
"""Serializer for uploading images to recipes"""
class Meta:
model = Recipe
fields = ('id', 'image')
read_only_fields = ('id',)
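# Editor's sketch (not part of the original file): the list serializer exposes
# ingredient/tag primary keys, while the detail serializer nests full objects.
# The payloads below are illustrative shapes only, not fixture data.
EXAMPLE_LIST_PAYLOAD = {
    'id': 1, 'title': 'Pancakes', 'ingredients': [2, 3], 'tags': [1],
    'time_minutes': 15, 'price': '4.50', 'link': '',
}
EXAMPLE_DETAIL_PAYLOAD = {
    'id': 1, 'title': 'Pancakes',
    'ingredients': [{'id': 2, 'name': 'Flour'}, {'id': 3, 'name': 'Milk'}],
    'tags': [{'id': 1, 'name': 'Breakfast'}],
    'time_minutes': 15, 'price': '4.50', 'link': '',
}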
| 25.890909
| 71
| 0.648174
|
f6c33111a33d19235c151e4953325ac3112d6650
| 17,698
|
py
|
Python
|
test/test_readme_examples.py
|
cnheider/imgaug
|
f0a7432205975e4435c81894a63bb147af67c476
|
[
"MIT"
] | 4
|
2018-11-24T15:31:36.000Z
|
2020-06-23T02:52:45.000Z
|
test/test_readme_examples.py
|
cnheider/imgaug
|
f0a7432205975e4435c81894a63bb147af67c476
|
[
"MIT"
] | null | null | null |
test/test_readme_examples.py
|
cnheider/imgaug
|
f0a7432205975e4435c81894a63bb147af67c476
|
[
"MIT"
] | 2
|
2020-01-14T14:29:49.000Z
|
2021-02-20T07:47:02.000Z
|
"""
Script to verify all examples in the readme.
Simply execute
python test_readme_examples.py
"""
from __future__ import print_function, division
import numpy as np
def main():
example_standard_situation()
example_heavy_augmentations()
example_show()
#example_grayscale()
example_determinism()
example_keypoints()
example_single_augmenters()
example_withchannels()
example_unusual_distributions()
example_hooks()
example_background_augment_batches()
example_background_classes()
def example_standard_situation():
print("Example: Standard Situation")
# -------
# dummy functions to make the example runnable here
def load_batch(batch_idx):
return np.random.randint(0, 255, (1, 16, 16, 3), dtype=np.uint8)
def train_on_images(images):
pass
# -------
from imgaug import augmenters as iaa
seq = iaa.Sequential([
iaa.Crop(px=(0, 16)), # crop images from each side by 0 to 16px (randomly chosen)
iaa.Fliplr(0.5), # horizontally flip 50% of the images
iaa.GaussianBlur(sigma=(0, 3.0)) # blur images with a sigma of 0 to 3.0
])
for batch_idx in range(1000):
# 'images' should be either a 4D numpy array of shape (N, height, width, channels)
# or a list of 3D numpy arrays, each having shape (height, width, channels).
# Grayscale images must have shape (height, width, 1) each.
# All images must have numpy's dtype uint8. Values are expected to be in
# range 0-255.
images = load_batch(batch_idx)
images_aug = seq.augment_images(images)
train_on_images(images_aug)
# -----
# Make sure that the example really does something
if batch_idx == 0:
assert not np.array_equal(images, images_aug)
def example_heavy_augmentations():
print("Example: Heavy Augmentations")
import imgaug as ia
from imgaug import augmenters as iaa
# random example images
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
# Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
# e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.
st = lambda aug: iaa.Sometimes(0.5, aug)
# Define our sequence of augmentation steps that will be applied to every image
# All augmenters with per_channel=0.5 will sample one value _per image_
# in 50% of all cases. In all other cases they will sample new values
# _per channel_.
seq = iaa.Sequential([
iaa.Fliplr(0.5), # horizontally flip 50% of all images
iaa.Flipud(0.5), # vertically flip 50% of all images
st(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width
st(iaa.GaussianBlur((0, 3.0))), # blur images with a sigma between 0 and 3.0
st(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5)), # add gaussian noise to images
st(iaa.Dropout((0.0, 0.1), per_channel=0.5)), # randomly remove up to 10% of the pixels
st(iaa.Add((-10, 10), per_channel=0.5)), # change brightness of images (by -10 to 10 of original value)
st(iaa.Multiply((0.5, 1.5), per_channel=0.5)), # change brightness of images (50-150% of original value)
st(iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5)), # improve or worsen the contrast
st(iaa.Grayscale((0.0, 1.0))), # blend with grayscale image
st(iaa.Affine(
scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
translate_px={"x": (-16, 16), "y": (-16, 16)}, # translate by -16 to +16 pixels (per axis)
rotate=(-45, 45), # rotate by -45 to +45 degrees
shear=(-16, 16), # shear by -16 to +16 degrees
order=[0, 1], # use scikit-image's interpolation orders 0 (nearest neighbour) and 1 (bilinear)
            cval=(0, 255), # if mode is constant, use a cval between 0 and 255
mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
)),
st(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)) # apply elastic transformations with random strengths
],
random_order=True # do all of the above in random order
)
images_aug = seq.augment_images(images)
# -----
# Make sure that the example really does something
assert not np.array_equal(images, images_aug)
def example_show():
print("Example: Show")
from imgaug import augmenters as iaa
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
seq = iaa.Sequential([iaa.Fliplr(0.5), iaa.GaussianBlur((0, 3.0))])
# show an image with 8*8 augmented versions of image 0
seq.show_grid(images[0], cols=8, rows=8)
# Show an image with 8*8 augmented versions of image 0 and 8*8 augmented
# versions of image 1. The identical augmentations will be applied to
# image 0 and 1.
seq.show_grid([images[0], images[1]], cols=8, rows=8)
# this example is no longer necessary as the library can now handle 2D images
"""
def example_grayscale():
print("Example: Grayscale")
from imgaug import augmenters as iaa
images = np.random.randint(0, 255, (16, 128, 128), dtype=np.uint8)
seq = iaa.Sequential([iaa.Fliplr(0.5), iaa.GaussianBlur((0, 3.0))])
# The library expects a list of images (3D inputs) or a single array (4D inputs).
# So we add an axis to our grayscale array to convert it to shape (16, 128, 128, 1).
images_aug = seq.augment_images(images[:, :, :, np.newaxis])
# -----
# Make sure that the example really does something
assert not np.array_equal(images, images_aug)
"""
def example_determinism():
print("Example: Determinism")
from imgaug import augmenters as iaa
# Standard scenario: You have N RGB-images and additionally 21 heatmaps per image.
# You want to augment each image and its heatmaps identically.
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
heatmaps = np.random.randint(0, 255, (16, 128, 128, 21), dtype=np.uint8)
seq = iaa.Sequential([iaa.GaussianBlur((0, 3.0)), iaa.Affine(translate_px={"x": (-40, 40)})])
# Convert the stochastic sequence of augmenters to a deterministic one.
# The deterministic sequence will always apply the exactly same effects to the images.
seq_det = seq.to_deterministic() # call this for each batch again, NOT only once at the start
images_aug = seq_det.augment_images(images)
heatmaps_aug = seq_det.augment_images(heatmaps)
# -----
# Make sure that the example really does something
import imgaug as ia
assert not np.array_equal(images, images_aug)
assert not np.array_equal(heatmaps, heatmaps_aug)
images_show = []
for img_idx in range(len(images)):
images_show.extend([images[img_idx], images_aug[img_idx], heatmaps[img_idx][..., 0:3], heatmaps_aug[img_idx][..., 0:3]])
ia.show_grid(images_show, cols=4)
def example_keypoints():
print("Example: Keypoints")
import imgaug as ia
from imgaug import augmenters as iaa
import random
images = np.random.randint(0, 50, (4, 128, 128, 3), dtype=np.uint8)
# Generate random keypoints.
# The augmenters expect a list of imgaug.KeypointsOnImage.
keypoints_on_images = []
for image in images:
height, width = image.shape[0:2]
keypoints = []
for _ in range(4):
x = random.randint(0, width-1)
y = random.randint(0, height-1)
keypoints.append(ia.Keypoint(x=x, y=y))
keypoints_on_images.append(ia.KeypointsOnImage(keypoints, shape=image.shape))
seq = iaa.Sequential([iaa.GaussianBlur((0, 3.0)), iaa.Affine(scale=(0.5, 0.7))])
seq_det = seq.to_deterministic() # call this for each batch again, NOT only once at the start
# augment keypoints and images
images_aug = seq_det.augment_images(images)
keypoints_aug = seq_det.augment_keypoints(keypoints_on_images)
# Example code to show each image and print the new keypoints coordinates
for img_idx, (image_before, image_after, keypoints_before, keypoints_after) in enumerate(zip(images, images_aug, keypoints_on_images, keypoints_aug)):
image_before = keypoints_before.draw_on_image(image_before)
image_after = keypoints_after.draw_on_image(image_after)
ia.imshow(np.concatenate((image_before, image_after), axis=1)) # before and after
for kp_idx, keypoint in enumerate(keypoints_after.keypoints):
keypoint_old = keypoints_on_images[img_idx].keypoints[kp_idx]
x_old, y_old = keypoint_old.x, keypoint_old.y
x_new, y_new = keypoint.x, keypoint.y
print("[Keypoints for image #%d] before aug: x=%d y=%d | after aug: x=%d y=%d" % (img_idx, x_old, y_old, x_new, y_new))
def example_single_augmenters():
print("Example: Single Augmenters")
from imgaug import augmenters as iaa
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
flipper = iaa.Fliplr(1.0) # always horizontally flip each input image
images[0] = flipper.augment_image(images[0]) # horizontally flip image 0
vflipper = iaa.Flipud(0.9) # vertically flip each input image with 90% probability
images[1] = vflipper.augment_image(images[1]) # probably vertically flip image 1
blurer = iaa.GaussianBlur(3.0)
images[2] = blurer.augment_image(images[2]) # blur image 2 by a sigma of 3.0
images[3] = blurer.augment_image(images[3]) # blur image 3 by a sigma of 3.0 too
translater = iaa.Affine(translate_px={"x": -16}) # move each input image by 16px to the left
images[4] = translater.augment_image(images[4]) # move image 4 to the left
scaler = iaa.Affine(scale={"y": (0.8, 1.2)}) # scale each input image to 80-120% on the y axis
images[5] = scaler.augment_image(images[5]) # scale image 5 by 80-120% on the y axis
def example_withchannels():
print("Example: WithChannels")
from imgaug import augmenters as iaa
import numpy as np
# fake RGB images
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
# add a random value from the range (-30, 30) to the first two channels of
# input images (e.g. to the R and G channels)
aug = iaa.WithChannels(
channels=[0, 1],
children=iaa.Add((-30, 30))
)
images_aug = aug.augment_images(images)
def example_unusual_distributions():
print("Example: Unusual Distributions")
from imgaug import augmenters as iaa
from imgaug import parameters as iap
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
# Blur by a value sigma which is sampled from a uniform distribution
# of range 0.1 <= x < 3.0.
# The convenience shortcut for this is: iaa.GaussianBlur((0.1, 3.0))
blurer = iaa.GaussianBlur(iap.Uniform(0.1, 3.0))
images_aug = blurer.augment_images(images)
# Blur by a value sigma which is sampled from a normal distribution N(1.0, 0.1),
# i.e. sample a value that is usually around 1.0.
# Clip the resulting value so that it never gets below 0.1 or above 3.0.
blurer = iaa.GaussianBlur(iap.Clip(iap.Normal(1.0, 0.1), 0.1, 3.0))
images_aug = blurer.augment_images(images)
# Same again, but this time the mean of the normal distribution is not constant,
# but comes itself from a uniform distribution between 0.5 and 1.5.
blurer = iaa.GaussianBlur(iap.Clip(iap.Normal(iap.Uniform(0.5, 1.5), 0.1), 0.1, 3.0))
images_aug = blurer.augment_images(images)
# Use for sigma one of exactly three allowed values: 0.5, 1.0 or 1.5.
blurer = iaa.GaussianBlur(iap.Choice([0.5, 1.0, 1.5]))
images_aug = blurer.augment_images(images)
# Sample sigma from a discrete uniform distribution of range 1 <= sigma <= 5,
# i.e. sigma will have any of the following values: 1, 2, 3, 4, 5.
blurer = iaa.GaussianBlur(iap.DiscreteUniform(1, 5))
images_aug = blurer.augment_images(images)
def example_hooks():
print("Example: Hooks")
import imgaug as ia
from imgaug import augmenters as iaa
import numpy as np
# images and heatmaps, just arrays filled with value 30
images = np.ones((16, 128, 128, 3), dtype=np.uint8) * 30
heatmaps = np.ones((16, 128, 128, 21), dtype=np.uint8) * 30
# add vertical lines to see the effect of flip
images[:, 16:128-16, 120:124, :] = 120
heatmaps[:, 16:128-16, 120:124, :] = 120
seq = iaa.Sequential([
iaa.Fliplr(0.5, name="Flipper"),
iaa.GaussianBlur((0, 3.0), name="GaussianBlur"),
iaa.Dropout(0.02, name="Dropout"),
iaa.AdditiveGaussianNoise(scale=0.01*255, name="MyLittleNoise"),
iaa.AdditiveGaussianNoise(loc=32, scale=0.0001*255, name="SomeOtherNoise"),
iaa.Affine(translate_px={"x": (-40, 40)}, name="Affine")
])
# change the activated augmenters for heatmaps
def activator_heatmaps(images, augmenter, parents, default):
if augmenter.name in ["GaussianBlur", "Dropout", "MyLittleNoise"]:
return False
else:
# default value for all other augmenters
return default
hooks_heatmaps = ia.HooksImages(activator=activator_heatmaps)
seq_det = seq.to_deterministic() # call this for each batch again, NOT only once at the start
images_aug = seq_det.augment_images(images)
heatmaps_aug = seq_det.augment_images(heatmaps, hooks=hooks_heatmaps)
# -----------
ia.show_grid(images_aug)
ia.show_grid(heatmaps_aug[..., 0:3])
def example_background_augment_batches():
print("Example: Background Augmentation via augment_batches()")
import imgaug as ia
from imgaug import augmenters as iaa
import numpy as np
from skimage import data
# Number of batches and batch size for this example
nb_batches = 10
batch_size = 32
# Example augmentation sequence to run in the background
augseq = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.CoarseDropout(p=0.1, size_percent=0.1)
])
# For simplicity, we use the same image here many times
astronaut = data.astronaut()
astronaut = ia.imresize_single_image(astronaut, (64, 64))
# Make batches out of the example image (here: 10 batches, each 32 times
# the example image)
batches = []
for _ in range(nb_batches):
batches.append(
np.array(
[astronaut for _ in range(batch_size)],
dtype=np.uint8
)
)
# Show the augmented images.
# Note that augment_batches() returns a generator.
for images_aug in augseq.augment_batches(batches, background=True):
ia.imshow(ia.draw_grid(images_aug, cols=8))
def example_background_classes():
print("Example: Background Augmentation via Classes")
import imgaug as ia
from imgaug import augmenters as iaa
import numpy as np
from skimage import data
# Example augmentation sequence to run in the background.
augseq = iaa.Sequential([
iaa.Fliplr(0.5),
iaa.CoarseDropout(p=0.1, size_percent=0.1)
])
# A generator that loads batches from the hard drive.
def load_batches():
# Here, load 10 batches of size 4 each.
# You can also load an infinite amount of batches, if you don't train
# in epochs.
batch_size = 4
nb_batches = 10
# Here, for simplicity we just always use the same image.
astronaut = data.astronaut()
astronaut = ia.imresize_single_image(astronaut, (64, 64))
for i in range(nb_batches):
# A list containing all images of the batch.
batch_images = []
# A list containing IDs per image. This is not necessary for the
# background augmentation and here just used to showcase that you
# can transfer additional information.
batch_data = []
# Add some images to the batch.
for b in range(batch_size):
batch_images.append(astronaut)
batch_data.append((i, b))
# Create the batch object to send to the background processes.
batch = ia.Batch(
images=np.array(batch_images, dtype=np.uint8),
data=batch_data
)
yield batch
# background augmentation consists of two components:
# (1) BatchLoader, which runs in a Thread and calls repeatedly a user-defined
# function (here: load_batches) to load batches (optionally with keypoints
# and additional information) and sends them to a queue of batches.
# (2) BackgroundAugmenter, which runs several background processes (on other
# CPU cores). Each process takes batches from the queue defined by (1),
# augments images/keypoints and sends them to another queue.
# The main process can then read augmented batches from the queue defined
# by (2).
batch_loader = ia.BatchLoader(load_batches)
bg_augmenter = ia.BackgroundAugmenter(batch_loader, augseq)
# Run until load_batches() returns nothing anymore. This also allows infinite
# training.
while True:
print("Next batch...")
batch = bg_augmenter.get_batch()
if batch is None:
print("Finished epoch.")
break
images_aug = batch.images_aug
print("Image IDs: ", batch.data)
ia.imshow(np.hstack(list(images_aug)))
batch_loader.terminate()
bg_augmenter.terminate()
if __name__ == "__main__":
main()
| 41.544601
| 154
| 0.660696
|
bff424ad495922db84e5e6a1d48960fa485e863d
| 443
|
py
|
Python
|
db.py
|
M4RFF/perfd
|
4a4dfaf7b5c82438b1759e4699b192ab6b510823
|
[
"MIT"
] | null | null | null |
db.py
|
M4RFF/perfd
|
4a4dfaf7b5c82438b1759e4699b192ab6b510823
|
[
"MIT"
] | 1
|
2021-12-12T03:29:52.000Z
|
2021-12-17T21:20:40.000Z
|
db.py
|
M4RFF/perfd
|
4a4dfaf7b5c82438b1759e4699b192ab6b510823
|
[
"MIT"
] | 2
|
2021-12-09T17:37:53.000Z
|
2021-12-19T05:45:49.000Z
|
#!/usr/bin/python3
from sql30 import db
class Database(db.Model):
TABLE = 'database'
DB_SCHEMA = {
'db_name': './perfd.db',
'tables': [
{
'name': TABLE,
'fields': {
'perf_name': 'text',
'name_obj_file': 'text',
'amount_of_samples': 'int'
},
}
]
}
VALIDATE_WRITE = True
| 21.095238
| 46
| 0.395034
|
776b516f9473b58c71ad0e22d5d3f01faf133d64
| 6,837
|
py
|
Python
|
test/functional/rpc_users.py
|
jkbao21/bitcoin
|
de78eb66ba67325852b351b1128b3cef6fd4abf3
|
[
"MIT"
] | null | null | null |
test/functional/rpc_users.py
|
jkbao21/bitcoin
|
de78eb66ba67325852b351b1128b3cef6fd4abf3
|
[
"MIT"
] | null | null | null |
test/functional/rpc_users.py
|
jkbao21/bitcoin
|
de78eb66ba67325852b351b1128b3cef6fd4abf3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
get_datadir_path,
str_to_b64str,
)
import os
import http.client
import urllib.parse
import subprocess
from random import SystemRandom
import string
import configparser
import sys
def call_with_auth(node, user, password):
url = urllib.parse.urlparse(node.url)
headers = {"Authorization": "Basic " + str_to_b64str('{}:{}'.format(user, password))}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
conn.close()
return resp
class HTTPBasicsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.supports_cli = False
def setup_chain(self):
super().setup_chain()
self.authinfo = []
#Append rpcauth to bitcoin.conf before initialization
self.rtpassword = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
self.rpcuser = "rpcuser💻"
self.rpcpassword = "rpcpassword🔑"
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
gen_rpcauth = config['environment']['RPCAUTH']
# Generate RPCAUTH with specified password
self.rt2password = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
p = subprocess.Popen([sys.executable, gen_rpcauth, 'rt2', self.rt2password], stdout=subprocess.PIPE, universal_newlines=True)
lines = p.stdout.read().splitlines()
rpcauth2 = lines[1]
# Generate RPCAUTH without specifying password
self.user = ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
p = subprocess.Popen([sys.executable, gen_rpcauth, self.user], stdout=subprocess.PIPE, universal_newlines=True)
lines = p.stdout.read().splitlines()
rpcauth3 = lines[1]
self.password = lines[3]
# Generate rpcauthfile with one entry
username = 'rpcauth_single_' + ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
p = subprocess.Popen([sys.executable, gen_rpcauth, "--output", os.path.join(self.options.tmpdir, "rpcauth_single"), username], stdout=subprocess.PIPE, universal_newlines=True)
lines = p.stdout.read().splitlines()
self.authinfo.append( (username, lines[1]) )
# Generate rpcauthfile with two entries
username = 'rpcauth_multi1_' + ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
p = subprocess.Popen([sys.executable, gen_rpcauth, "--output", os.path.join(self.options.tmpdir, "rpcauth_multi"), username], stdout=subprocess.PIPE, universal_newlines=True)
lines = p.stdout.read().splitlines()
self.authinfo.append( (username, lines[1]) )
# Blank lines in between should get ignored
with open(os.path.join(self.options.tmpdir, "rpcauth_multi"), "a", encoding='utf8') as f:
f.write("\n\n")
username = 'rpcauth_multi2_' + ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
p = subprocess.Popen([sys.executable, gen_rpcauth, "--output", os.path.join(self.options.tmpdir, "rpcauth_multi"), username], stdout=subprocess.PIPE, universal_newlines=True)
lines = p.stdout.read().splitlines()
self.authinfo.append( (username, lines[1]) )
# Hand-generated rpcauthfile with one entry and no newline
username = 'rpcauth_nonewline_' + ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
p = subprocess.Popen([sys.executable, gen_rpcauth, username], stdout=subprocess.PIPE, universal_newlines=True)
lines = p.stdout.read().splitlines()
assert "\n" not in lines[1]
assert lines[1][:8] == 'rpcauth='
with open(os.path.join(self.options.tmpdir, "rpcauth_nonewline"), "a", encoding='utf8') as f:
f.write(lines[1][8:])
self.authinfo.append( (username, lines[3]) )
with open(os.path.join(get_datadir_path(self.options.tmpdir, 0), "bitcoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth + "\n")
f.write(rpcauth2 + "\n")
f.write(rpcauth3 + "\n")
f.write("rpcauthfile=rpcauth_single\n")
f.write("rpcauthfile=rpcauth_multi\n")
f.write("rpcauthfile=rpcauth_nonewline\n")
with open(os.path.join(get_datadir_path(self.options.tmpdir, 1), "bitcoin.conf"), 'a', encoding='utf8') as f:
f.write("rpcuser={}\n".format(self.rpcuser))
f.write("rpcpassword={}\n".format(self.rpcpassword))
def test_auth(self, node, user, password):
self.log.info('Correct...')
assert_equal(200, call_with_auth(node, user, password).status)
self.log.info('Wrong...')
assert_equal(401, call_with_auth(node, user, password + 'wrong').status)
self.log.info('Wrong...')
assert_equal(401, call_with_auth(node, user + 'wrong', password).status)
self.log.info('Wrong...')
assert_equal(401, call_with_auth(node, user + 'wrong', password + 'wrong').status)
def run_test(self):
self.log.info('Check correctness of the rpcauth config option')
url = urllib.parse.urlparse(self.nodes[0].url)
self.test_auth(self.nodes[0], url.username, url.password)
self.test_auth(self.nodes[0], 'rt', self.rtpassword)
self.test_auth(self.nodes[0], 'rt2', self.rt2password)
self.test_auth(self.nodes[0], self.user, self.password)
for info in self.authinfo:
self.test_auth(self.nodes[0], *info)
self.log.info('Check correctness of the rpcuser/rpcpassword config options')
url = urllib.parse.urlparse(self.nodes[1].url)
self.test_auth(self.nodes[1], self.rpcuser, self.rpcpassword)
self.log.info('Check that failure to write cookie file will abort the node gracefully')
self.stop_node(0)
cookie_file = os.path.join(get_datadir_path(self.options.tmpdir, 0), self.chain, '.cookie.tmp')
os.mkdir(cookie_file)
init_error = 'Error: Unable to start HTTP server. See debug log for details.'
self.nodes[0].assert_start_raises_init_error(expected_msg=init_error)
if __name__ == '__main__':
HTTPBasicsTest().main()
| 46.195946
| 183
| 0.674419
|
13196dd8532e3b83c7cfd8e3b063431eb18cdeef
| 48,684
|
py
|
Python
|
tools/gen_app_code/app_code_src.py
|
junitas/coreflightexec
|
fdeb437e3052f4c89632dfcfe540140ddb8c70b3
|
[
"NASA-1.3"
] | null | null | null |
tools/gen_app_code/app_code_src.py
|
junitas/coreflightexec
|
fdeb437e3052f4c89632dfcfe540140ddb8c70b3
|
[
"NASA-1.3"
] | null | null | null |
tools/gen_app_code/app_code_src.py
|
junitas/coreflightexec
|
fdeb437e3052f4c89632dfcfe540140ddb8c70b3
|
[
"NASA-1.3"
] | null | null | null |
#========================================================================================
# File: app_code_src.py
# Author: Tam Ngo/JSC
# Date: 2012-02-22
#
# Modification History:
# Date | Author | Description
# ---------------------------
# 10/7/15 | Susanne Strege | Added msg.h header definition and generation. Updated
# app.h and app.c definitions.
#
# 03/31/16 | Michael Rosburg | Updated app.h and app.c definitions. Changed SCH pipe
# definition.
# 05/25/16 | Michael Rosburg | Removed pipe depth macros definition, put in _platform_cfg.h
# 05/26/16 | Michael Rosburg | Add CFE_ES_WaitForStartupSync to _AppMain 1000msec and
# Enhance Performance Monitoring for the application
#========================================================================================
import os, app_code_configs, app_utils
#========================================================================================
# Global Variables - only global to this file
g_Date = "na"
g_Owner = "na"
g_Mission = "na"
g_OutDir = "."
g_Tbls = []
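# g_Tbls holds the optional table options read from the configs; entries such as
# "iload" and "cds" switch on the corresponding generated table-handling code below.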
#========================================================================================
def inits():
global g_OutDir, g_Mission, g_Date, g_Owner, g_Tbls
g_Date = app_code_configs.get_date()
g_Owner = app_code_configs.get_owner()
g_Mission = app_code_configs.get_mission()
g_OutDir = app_code_configs.get_outdir()
g_Tbls.extend(app_code_configs.get_tables())
#========================================================================================
def construct_msgheader_content(tgtApp):
global g_Mission, g_Owner, g_Date
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
# Construct file description
head = """\
/*=======================================================================================
** File Name: %s_msg.h
**
** Title: Message Definition Header File for %s Application
**
** $Author: %s
** $Revision: 1.1 $
** $Date: %s
**
** Purpose: To define %s's command and telemetry message definitions
**
** Modification History:
** Date | Author | Description
** ---------------------------
** %s | %s | Build #: Code Started
**
**=====================================================================================*/
""" % (lcApp, ucApp, g_Owner, g_Date, ucApp, g_Date, g_Owner)
# Construct file content
tmpStr = ucApp + "_MSG_H"
part1 = """
#ifndef _%s_
#define _%s_
/*
** Pragmas
*/
/*
** Include Files
*/
""" % (tmpStr, tmpStr)
part2 = ""
if "iload" in g_Tbls:
part2 = """
#include \"%s_iload_utils.h\"""" % (lcApp)
part3 = ""
if "cds" in g_Tbls:
part3 = """
#include \"%s_cds_utils.h\"""" % (lcApp)
part4 = """
/*
** Local Defines
*/
/*
** %s command codes
*/
#define %s_NOOP_CC 0
#define %s_RESET_CC 1
/*
** Local Structure Declarations
*/
typedef struct
{
uint8 TlmHeader[CFE_SB_TLM_HDR_SIZE];
uint8 usCmdCnt;
uint8 usCmdErrCnt;
/* TODO: Add declarations for additional housekeeping data here */
} %s_HkTlm_t;
#endif /* _%s_ */
/*=======================================================================================
** End of file %s_msg.h
**=====================================================================================*/
""" % (ucApp, ucApp, ucApp, ucApp, tmpStr, lcApp)
content = head + part1 + part2 + part3 + part4
return content
#========================================================================================
def construct_header_content(tgtApp):
global g_Mission, g_Owner, g_Date
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
# Construct file description
head = """\
/*=======================================================================================
** File Name: %s_app.h
**
** Title: Header File for %s Application
**
** $Author: %s
** $Revision: 1.1 $
** $Date: %s
**
** Purpose: To define %s's internal macros, data types, global variables and
** function prototypes
**
** Modification History:
** Date | Author | Description
** ---------------------------
** %s | %s | Build #: Code Started
**
**=====================================================================================*/
""" % (lcApp, ucApp, g_Owner, g_Date, ucApp, g_Date, g_Owner)
# Construct file content
tmpStr = ucApp + "_APP_H"
part1 = """
#ifndef _%s_
#define _%s_
/*
** Pragmas
*/
/*
** Include Files
*/
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include \"%s_platform_cfg.h\"
#include \"%s_mission_cfg.h\"
#include \"%s_private_ids.h\"
#include \"%s_private_types.h\"
#include \"%s_perfids.h\"
#include \"%s_msgids.h\"
#include \"%s_msg.h\"
""" % (tmpStr, tmpStr, lcApp, lcApp, lcApp, lcApp, lcApp, lcApp, lcApp)
part2 = ""
if "iload" in g_Tbls:
part2 = """
#include \"%s_iload_utils.h\"""" % (lcApp)
part3 = ""
if "cds" in g_Tbls:
part3 = """
#include \"%s_cds_utils.h\"""" % (lcApp)
part4 = """
/*
** Local Defines
*/
#define %s_TIMEOUT_MSEC 1000
/*
** Local Structure Declarations
*/
typedef struct
{
/* CFE Event table */
CFE_EVS_BinFilter_t EventTbl[%s_EVT_CNT];
/* CFE scheduling pipe */
CFE_SB_PipeId_t SchPipeId;
uint16 usSchPipeDepth;
char cSchPipeName[OS_MAX_API_NAME];
/* CFE command pipe */
CFE_SB_PipeId_t CmdPipeId;
uint16 usCmdPipeDepth;
char cCmdPipeName[OS_MAX_API_NAME];
/* CFE telemetry pipe */
CFE_SB_PipeId_t TlmPipeId;
uint16 usTlmPipeDepth;
char cTlmPipeName[OS_MAX_API_NAME];
/* Task-related */
uint32 uiRunStatus;
""" % (ucApp, ucApp)
part5 = ""
if "iload" in g_Tbls:
part5 = """
/* ILoad table-related */
CFE_TBL_Handle_t ILoadTblHdl;
%s_ILoadTblEntry_t* ILoadTblPtr;
""" % (ucApp)
part6 = ""
if "cds" in g_Tbls:
part6 = """
/* Critical Data Storage (CDS) table-related */
CFE_ES_CDSHandle_t CdsTblHdl;
%s_CdsTbl_t CdsTbl;
""" % (ucApp)
part7 = """
/* Input data - from I/O devices or subscribed from other apps' output data.
Data structure should be defined in %s/fsw/src/%s_private_types.h */
%s_InData_t InData;
/* Output data - to be published at the end of a Wakeup cycle.
Data structure should be defined in %s/fsw/src/%s_private_types.h */
%s_OutData_t OutData;
/* Housekeeping telemetry - for downlink only.
Data structure should be defined in %s/fsw/src/%s_msg.h */
%s_HkTlm_t HkTlm;
/* TODO: Add declarations for additional private data here */
} %s_AppData_t;
/*
** External Global Variables
*/
/*
** Global Variables
*/
/*
** Local Variables
*/
/*
** Local Function Prototypes
*/
int32 %s_InitApp(void);
int32 %s_InitEvent(void);
int32 %s_InitData(void);
int32 %s_InitPipe(void);
void %s_AppMain(void);
void %s_CleanupCallback(void);
int32 %s_RcvMsg(int32 iBlocking);
void %s_ProcessNewData(void);
void %s_ProcessNewCmds(void);
void %s_ProcessNewAppCmds(CFE_SB_Msg_t*);
void %s_ReportHousekeeping(void);
void %s_SendOutData(void);
boolean %s_VerifyCmdLength(CFE_SB_Msg_t*, uint16);
#endif /* _%s_ */
/*=======================================================================================
** End of file %s_app.h
**=====================================================================================*/
""" % (lcApp, lcApp, ucApp, lcApp, lcApp, ucApp, lcApp, lcApp, \
ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, \
ucApp, ucApp, ucApp, ucApp, ucApp, tmpStr, lcApp)
content = head + part1 + part2 + part3 + part4 + part5 + part6 + part7
return content
#========================================================================================
def construct_source_head_content(tgtApp):
global g_Owner, g_Date
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
head = """\
/*=======================================================================================
** File Name: %s_app.c
**
** Title: Function Definitions for %s Application
**
** $Author: %s
** $Revision: 1.1 $
** $Date: %s
**
** Purpose: This source file contains all necessary function definitions to run %s
** application.
**
** Functions Defined:
** Function X - Brief purpose of function X
** Function Y - Brief purpose of function Y
** Function Z - Brief purpose of function Z
**
** Limitations, Assumptions, External Events, and Notes:
** 1. List assumptions that are made that apply to all functions in the file.
** 2. List the external source(s) and event(s) that can cause the funcs in this
** file to execute.
** 3. List known limitations that apply to the funcs in this file.
** 4. If there are no assumptions, external events, or notes then enter NONE.
** Do not omit the section.
**
** Modification History:
** Date | Author | Description
** ---------------------------
** %s | %s | Build #: Code Started
**
**=====================================================================================*/
/*
** Pragmas
*/
/*
** Include Files
*/
#include <string.h>
#include \"cfe.h\"
#include \"%s_platform_cfg.h\"
#include \"%s_mission_cfg.h\"
#include \"%s_app.h\"
/*
** Local Defines
*/
/*
** Local Structure Declarations
*/
/*
** External Global Variables
*/
/*
** Global Variables
*/
%s_AppData_t g_%s_AppData;
/*
** Local Variables
*/
/*
** Local Function Definitions
*/
""" % (lcApp, ucApp, g_Owner, g_Date, ucApp, g_Date, g_Owner, lcApp, \
lcApp, lcApp, ucApp, ucApp)
return head
#========================================================================================
def construct_source_init_app_content(tgtApp):
global g_Owner, g_Date, g_Tbls
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
part1 = """
/*=====================================================================================
** Name: %s_InitApp
**
** Purpose: To initialize all data local to and used by %s application
**
** Arguments:
** None
**
** Returns:
** int32 iStatus - Status of initialization
**
** Routines Called:
** CFE_ES_RegisterApp
** CFE_ES_WriteToSysLog
** CFE_EVS_SendEvent
** OS_TaskInstallDeleteHandler
** %s_InitEvent
** %s_InitPipe
** %s_InitData""" % (ucApp, ucApp, ucApp, ucApp, ucApp)
part2 = ""
if "iload-table" in g_Tbls:
part2 = """
** %s_ILoadInit""" % (ucApp)
part3 = ""
if "cds-table" in g_Tbls:
part3 = """
** %s_CdsInit""" % (ucApp)
part4 = """
**
** Called By:
** %s_AppMain
**
** Global Inputs/Reads:
** TBD
**
** Global Outputs/Writes:
** TBD
**
** Limitations, Assumptions, External Events, and Notes:
** 1. List assumptions that are made that apply to this function.
** 2. List the external source(s) and event(s) that can cause this function to execute.
** 3. List known limitations that apply to this function.
** 4. If there are no assumptions, external events, or notes then enter NONE.
** Do not omit the section.
**
** Algorithm:
** Pseudo-code or description of basic algorithm
**
** Author(s): %s
**
** History: Date Written %s
** Unit Tested yyyy-mm-dd
**=====================================================================================*/
int32 %s_InitApp()
{
int32 iStatus=CFE_SUCCESS;
g_%s_AppData.uiRunStatus = CFE_ES_APP_RUN;
iStatus = CFE_ES_RegisterApp();
if (iStatus != CFE_SUCCESS)
{
CFE_ES_WriteToSysLog(\"%s - Failed to register the app (0x%%08X)\\n\", iStatus);
goto %s_InitApp_Exit_Tag;
}
if ((%s_InitEvent() != CFE_SUCCESS) ||
(%s_InitPipe() != CFE_SUCCESS) ||
(%s_InitData() != CFE_SUCCESS)""" \
% (ucApp, g_Owner, g_Date, ucApp, ucApp, ucApp, ucApp, ucApp, \
ucApp, ucApp)
part5 = ""
if "iload" in g_Tbls:
part5 = """ ||
(%s_InitILoadTbl() != CFE_SUCCESS)""" % (ucApp)
part6 = ""
if "cds" in g_Tbls:
part6 = """ ||
(%s_InitCdsTbl() != CFE_SUCCESS)""" % (ucApp)
part7 = """)
{
iStatus = -1;
goto %s_InitApp_Exit_Tag;
}
/* Install the cleanup callback */
OS_TaskInstallDeleteHandler((void*)&%s_CleanupCallback);
%s_InitApp_Exit_Tag:
if (iStatus == CFE_SUCCESS)
{
CFE_EVS_SendEvent(%s_INIT_INF_EID, CFE_EVS_INFORMATION,
\"%s - Application initialized\");
}
else
{
CFE_ES_WriteToSysLog(\"%s - Application failed to initialize\\n\");
}
return (iStatus);
}
""" % (ucApp, ucApp, ucApp, ucApp, ucApp, ucApp)
content = part1 + part2 + part3 + part4 + part5 + part6 + part7
return content
#========================================================================================
def construct_source_init_event_content(tgtApp):
global g_Owner, g_Date
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
content = """
/*=====================================================================================
** Name: %s_InitEvent
**
** Purpose: To initialize and register event table for %s application
**
** Arguments:
** None
**
** Returns:
** int32 iStatus - Status of initialization
**
** Routines Called:
** CFE_EVS_Register
** CFE_ES_WriteToSysLog
**
** Called By:
** %s_InitApp
**
** Global Inputs/Reads:
** TBD
**
** Global Outputs/Writes:
** g_%s_AppData.EventTbl
**
** Limitations, Assumptions, External Events, and Notes:
** 1. List assumptions that are made that apply to this function.
** 2. List the external source(s) and event(s) that can cause this function to execute.
** 3. List known limitations that apply to this function.
** 4. If there are no assumptions, external events, or notes then enter NONE.
** Do not omit the section.
**
** Algorithm:
** Pseudo-code or description of basic algorithm
**
** Author(s): %s
**
** History: Date Written %s
** Unit Tested yyyy-mm-dd
**=====================================================================================*/
int32 %s_InitEvent()
{
int32 iStatus=CFE_SUCCESS;
/* Create the event table */
memset((void*)g_%s_AppData.EventTbl, 0x00, sizeof(g_%s_AppData.EventTbl));
g_%s_AppData.EventTbl[0].EventID = %s_RESERVED_EID;
g_%s_AppData.EventTbl[1].EventID = %s_INF_EID;
g_%s_AppData.EventTbl[2].EventID = %s_INIT_INF_EID;
g_%s_AppData.EventTbl[3].EventID = %s_ILOAD_INF_EID;
g_%s_AppData.EventTbl[4].EventID = %s_CDS_INF_EID;
g_%s_AppData.EventTbl[5].EventID = %s_CMD_INF_EID;
g_%s_AppData.EventTbl[ 6].EventID = %s_ERR_EID;
g_%s_AppData.EventTbl[ 7].EventID = %s_INIT_ERR_EID;
g_%s_AppData.EventTbl[ 8].EventID = %s_ILOAD_ERR_EID;
g_%s_AppData.EventTbl[ 9].EventID = %s_CDS_ERR_EID;
g_%s_AppData.EventTbl[10].EventID = %s_CMD_ERR_EID;
g_%s_AppData.EventTbl[11].EventID = %s_PIPE_ERR_EID;
g_%s_AppData.EventTbl[12].EventID = %s_MSGID_ERR_EID;
g_%s_AppData.EventTbl[13].EventID = %s_MSGLEN_ERR_EID;
/* Register the table with CFE */
iStatus = CFE_EVS_Register(g_%s_AppData.EventTbl,
%s_EVT_CNT, CFE_EVS_BINARY_FILTER);
if (iStatus != CFE_SUCCESS)
{
CFE_ES_WriteToSysLog(\"%s - Failed to register with EVS (0x%%08X)\\n\", iStatus);
}
return (iStatus);
}
""" % (ucApp, ucApp, ucApp, ucApp, g_Owner, g_Date, ucApp, \
ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, \
ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, \
ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, \
ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, \
ucApp, ucApp, ucApp, ucApp, ucApp)
return content
#========================================================================================
def construct_source_init_pipe_content(tgtApp):
global g_Owner, g_Date
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
content = """
/*=====================================================================================
** Name: %s_InitPipe
**
** Purpose: To initialize all message pipes and subscribe to messages for %s application
**
** Arguments:
** None
**
** Returns:
** int32 iStatus - Status of initialization
**
** Routines Called:
** CFE_SB_CreatePipe
** CFE_SB_Subscribe
** CFE_ES_WriteToSysLog
**
** Called By:
** %s_InitApp
**
** Global Inputs/Reads:
** None
**
** Global Outputs/Writes:
** g_%s_AppData.usSchPipeDepth
** g_%s_AppData.cSchPipeName
** g_%s_AppData.SchPipeId
** g_%s_AppData.usCmdPipeDepth
** g_%s_AppData.cCmdPipeName
** g_%s_AppData.CmdPipeId
** g_%s_AppData.usTlmPipeDepth
** g_%s_AppData.cTlmPipeName
** g_%s_AppData.TlmPipeId
**
** Limitations, Assumptions, External Events, and Notes:
** 1. List assumptions that are made that apply to this function.
** 2. List the external source(s) and event(s) that can cause this function to execute.
** 3. List known limitations that apply to this function.
** 4. If there are no assumptions, external events, or notes then enter NONE.
** Do not omit the section.
**
** Algorithm:
** Pseudo-code or description of basic algorithm
**
** Author(s): %s
**
** History: Date Written %s
** Unit Tested yyyy-mm-dd
**=====================================================================================*/
int32 %s_InitPipe()
{
int32 iStatus=CFE_SUCCESS;
/* Init schedule pipe */
g_%s_AppData.usSchPipeDepth = %s_SCH_PIPE_DEPTH;
memset((void*)g_%s_AppData.cSchPipeName, '\\0', sizeof(g_%s_AppData.cSchPipeName));
strncpy(g_%s_AppData.cSchPipeName, \"%s_SCH_PIPE\", OS_MAX_API_NAME-1);
/* Subscribe to Wakeup messages */
iStatus = CFE_SB_CreatePipe(&g_%s_AppData.SchPipeId,
g_%s_AppData.usSchPipeDepth,
g_%s_AppData.cSchPipeName);
if (iStatus == CFE_SUCCESS)
{
iStatus = CFE_SB_SubscribeEx(%s_WAKEUP_MID, g_%s_AppData.SchPipeId, CFE_SB_Default_Qos, 1);
if (iStatus != CFE_SUCCESS)
{
CFE_ES_WriteToSysLog(\"%s - Sch Pipe failed to subscribe to %s_WAKEUP_MID. (0x%%08X)\\n\", iStatus);
goto %s_InitPipe_Exit_Tag;
}
}
else
{
CFE_ES_WriteToSysLog(\"%s - Failed to create SCH pipe (0x%%08X)\\n\", iStatus);
goto %s_InitPipe_Exit_Tag;
}
/* Init command pipe */
g_%s_AppData.usCmdPipeDepth = %s_CMD_PIPE_DEPTH ;
memset((void*)g_%s_AppData.cCmdPipeName, '\\0', sizeof(g_%s_AppData.cCmdPipeName));
strncpy(g_%s_AppData.cCmdPipeName, \"%s_CMD_PIPE\", OS_MAX_API_NAME-1);
/* Subscribe to command messages */
iStatus = CFE_SB_CreatePipe(&g_%s_AppData.CmdPipeId,
g_%s_AppData.usCmdPipeDepth,
g_%s_AppData.cCmdPipeName);
if (iStatus == CFE_SUCCESS)
{
/* Subscribe to command messages */
iStatus = CFE_SB_Subscribe(%s_CMD_MID, g_%s_AppData.CmdPipeId);
if (iStatus != CFE_SUCCESS)
{
CFE_ES_WriteToSysLog(\"%s - CMD Pipe failed to subscribe to %s_CMD_MID. (0x%%08X)\\n\", iStatus);
goto %s_InitPipe_Exit_Tag;
}
iStatus = CFE_SB_Subscribe(%s_SEND_HK_MID, g_%s_AppData.CmdPipeId);
if (iStatus != CFE_SUCCESS)
{
CFE_ES_WriteToSysLog(\"%s - CMD Pipe failed to subscribe to %s_SEND_HK_MID. (0x%%08X)\\n\", iStatus);
goto %s_InitPipe_Exit_Tag;
}
}
else
{
CFE_ES_WriteToSysLog(\"%s - Failed to create CMD pipe (0x%%08X)\\n\", iStatus);
goto %s_InitPipe_Exit_Tag;
}
/* Init telemetry pipe */
g_%s_AppData.usTlmPipeDepth = %s_TLM_PIPE_DEPTH;
memset((void*)g_%s_AppData.cTlmPipeName, '\\0', sizeof(g_%s_AppData.cTlmPipeName));
strncpy(g_%s_AppData.cTlmPipeName, \"%s_TLM_PIPE\", OS_MAX_API_NAME-1);
/* Subscribe to telemetry messages on the telemetry pipe */
iStatus = CFE_SB_CreatePipe(&g_%s_AppData.TlmPipeId,
g_%s_AppData.usTlmPipeDepth,
g_%s_AppData.cTlmPipeName);
if (iStatus == CFE_SUCCESS)
{
/* TODO: Add CFE_SB_Subscribe() calls for other apps' output data here.
**
** Examples:
** CFE_SB_Subscribe(GNCEXEC_OUT_DATA_MID, g_%s_AppData.TlmPipeId);
*/
}
else
{
CFE_ES_WriteToSysLog(\"%s - Failed to create TLM pipe (0x%%08X)\\n\", iStatus);
goto %s_InitPipe_Exit_Tag;
}
%s_InitPipe_Exit_Tag:
return (iStatus);
}
""" % (ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, \
ucApp, ucApp, ucApp, ucApp, ucApp, g_Owner, g_Date, \
ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp,\
ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp,\
ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp,\
ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, \
ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, \
ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp)
return content
#========================================================================================
def construct_source_init_data_content(tgtApp):
global g_Owner, g_Date
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
content = """
/*=====================================================================================
** Name: %s_InitData
**
** Purpose: To initialize global variables used by %s application
**
** Arguments:
** None
**
** Returns:
** int32 iStatus - Status of initialization
**
** Routines Called:
** CFE_SB_InitMsg
**
** Called By:
** %s_InitApp
**
** Global Inputs/Reads:
** TBD
**
** Global Outputs/Writes:
** g_%s_AppData.InData
** g_%s_AppData.OutData
** g_%s_AppData.HkTlm
**
** Limitations, Assumptions, External Events, and Notes:
** 1. List assumptions that are made that apply to this function.
** 2. List the external source(s) and event(s) that can cause this function to execute.
** 3. List known limitations that apply to this function.
** 4. If there are no assumptions, external events, or notes then enter NONE.
** Do not omit the section.
**
** Algorithm:
** Pseudo-code or description of basic algorithm
**
** Author(s): %s
**
** History: Date Written %s
** Unit Tested yyyy-mm-dd
**=====================================================================================*/
int32 %s_InitData()
{
int32 iStatus=CFE_SUCCESS;
/* Init input data */
memset((void*)&g_%s_AppData.InData, 0x00, sizeof(g_%s_AppData.InData));
/* Init output data */
memset((void*)&g_%s_AppData.OutData, 0x00, sizeof(g_%s_AppData.OutData));
CFE_SB_InitMsg(&g_%s_AppData.OutData,
%s_OUT_DATA_MID, sizeof(g_%s_AppData.OutData), TRUE);
/* Init housekeeping packet */
memset((void*)&g_%s_AppData.HkTlm, 0x00, sizeof(g_%s_AppData.HkTlm));
CFE_SB_InitMsg(&g_%s_AppData.HkTlm,
%s_HK_TLM_MID, sizeof(g_%s_AppData.HkTlm), TRUE);
return (iStatus);
}
""" % (ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, g_Owner, \
g_Date, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, \
ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp)
return content
#========================================================================================
def construct_source_main_content(tgtApp):
global g_Owner, g_Date, g_Tbls
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
part1 = """
/*=====================================================================================
** Name: %s_AppMain
**
** Purpose: To define %s application's entry point and main process loop
**
** Arguments:
** None
**
** Returns:
** None
**
** Routines Called:
** CFE_ES_RegisterApp
** CFE_ES_RunLoop
** CFE_ES_PerfLogEntry
** CFE_ES_PerfLogExit
** CFE_ES_ExitApp
** CFE_ES_WaitForStartupSync
** %s_InitApp
** %s_RcvMsg
**
** Called By:
** TBD
**
** Global Inputs/Reads:
** TBD
**
** Global Outputs/Writes:
** TBD
**
** Limitations, Assumptions, External Events, and Notes:
** 1. List assumptions that are made that apply to this function.
** 2. List the external source(s) and event(s) that can cause this function to execute.
** 3. List known limitations that apply to this function.
** 4. If there are no assumptions, external events, or notes then enter NONE.
** Do not omit the section.
**
** Algorithm:
** Pseudo-code or description of basic algorithm
**
** Author(s): %s
**
** History: Date Written %s
** Unit Tested yyyy-mm-dd
**=====================================================================================*/
void %s_AppMain()
{
/* Register the application with Executive Services */
CFE_ES_RegisterApp();
/* Start Performance Log entry */
CFE_ES_PerfLogEntry(%s_MAIN_TASK_PERF_ID);
/* Perform application initializations */
if (%s_InitApp() != CFE_SUCCESS)
{
g_%s_AppData.uiRunStatus = CFE_ES_APP_ERROR;
} else {
/* Do not perform performance monitoring on startup sync */
CFE_ES_PerfLogExit(%s_MAIN_TASK_PERF_ID);
CFE_ES_WaitForStartupSync(%s_TIMEOUT_MSEC);
CFE_ES_PerfLogEntry(%s_MAIN_TASK_PERF_ID);
}
/* Application main loop */
while (CFE_ES_RunLoop(&g_%s_AppData.uiRunStatus) == TRUE)
{
%s_RcvMsg(CFE_SB_PEND_FOREVER);""" \
% (ucApp, ucApp, ucApp, ucApp, g_Owner, g_Date, ucApp, ucApp, ucApp, ucApp, \
ucApp, ucApp, ucApp, ucApp, ucApp)
part2 = ""
if "cds" in g_Tbls:
part2 = """
/* This is only a suggestion for when to update and save CDS table.
** Depends on the nature of the application, the frequency of update
** and save can be more or less independently.
*/
/* Start Performance Log entry */
CFE_ES_PerfLogEntry(%s_MAIN_TASK_PERF_ID);
%s_UpdateCdsTbl();
%s_SaveCdsTbl();
/* Stop Performance Log entry */
CFE_ES_PerfLogExit(%s_MAIN_TASK_PERF_ID); """ % (ucApp, ucApp, ucApp, ucApp)
part3 = """
}
/* Stop Performance Log entry */
CFE_ES_PerfLogExit(%s_MAIN_TASK_PERF_ID);
/* Exit the application */
CFE_ES_ExitApp(g_%s_AppData.uiRunStatus);
}
""" % (ucApp, ucApp)
content = part1 + part2 + part3
return content
#========================================================================================
def construct_source_cleanup_content(tgtApp):
global g_Owner, g_Date
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
content = """
/*=====================================================================================
** Name: %s_CleanupCallback
**
** Purpose: To handle any necessary cleanup prior to application exit
**
** Arguments:
** None
**
** Returns:
** None
**
** Routines Called:
** TBD
**
** Called By:
** TBD
**
** Global Inputs/Reads:
** TBD
**
** Global Outputs/Writes:
** TBD
**
** Limitations, Assumptions, External Events, and Notes:
** 1. List assumptions that are made that apply to this function.
** 2. List the external source(s) and event(s) that can cause this function to execute.
** 3. List known limitations that apply to this function.
** 4. If there are no assumptions, external events, or notes then enter NONE.
** Do not omit the section.
**
** Algorithm:
** Pseudo-code or description of basic algorithm
**
** Author(s): %s
**
** History: Date Written %s
** Unit Tested yyyy-mm-dd
**=====================================================================================*/
void %s_CleanupCallback()
{
/* TODO: Add code to cleanup memory and other cleanup here */
}
""" % (ucApp, g_Owner, g_Date, ucApp)
return content
#========================================================================================
def construct_source_recv_msg_content(tgtApp):
global g_Owner, g_Date
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
content = """
/*=====================================================================================
** Name: %s_RcvMsg
**
** Purpose: To receive and process messages for %s application
**
** Arguments:
** None
**
** Returns:
** int32 iStatus - Status of initialization
**
** Routines Called:
** CFE_SB_RcvMsg
** CFE_SB_GetMsgId
** CFE_EVS_SendEvent
** CFE_ES_PerfLogEntry
** CFE_ES_PerfLogExit
** %s_ProcessNewCmds
** %s_ProcessNewData
** %s_SendOutData
**
** Called By:
** %s_Main
**
** Global Inputs/Reads:
** g_%s_AppData.SchPipeId
**
** Global Outputs/Writes:
** g_%s_AppData.uiRunStatus
**
** Limitations, Assumptions, External Events, and Notes:
** 1. List assumptions that are made that apply to this function.
** 2. List the external source(s) and event(s) that can cause this function to execute.
** 3. List known limitations that apply to this function.
** 4. If there are no assumptions, external events, or notes then enter NONE.
** Do not omit the section.
**
** Algorithm:
** Pseudo-code or description of basic algorithm
**
** Author(s): %s
**
** History: Date Written %s
** Unit Tested yyyy-mm-dd
**=====================================================================================*/
int32 %s_RcvMsg(int32 iBlocking)
{
int32 iStatus=CFE_SUCCESS;
CFE_SB_Msg_t* MsgPtr=NULL;
CFE_SB_MsgId_t MsgId;
/* Stop Performance Log entry */
CFE_ES_PerfLogExit(%s_MAIN_TASK_PERF_ID);
/* Wait for WakeUp messages from scheduler */
iStatus = CFE_SB_RcvMsg(&MsgPtr, g_%s_AppData.SchPipeId, iBlocking);
/* Start Performance Log entry */
CFE_ES_PerfLogEntry(%s_MAIN_TASK_PERF_ID);
if (iStatus == CFE_SUCCESS)
{
MsgId = CFE_SB_GetMsgId(MsgPtr);
switch (MsgId)
{
case %s_WAKEUP_MID:
%s_ProcessNewCmds();
%s_ProcessNewData();
/* TODO: Add more code here to handle other things when app wakes up */
/* The last thing to do at the end of this Wakeup cycle should be to
automatically publish new output. */
%s_SendOutData();
break;
default:
CFE_EVS_SendEvent(%s_MSGID_ERR_EID, CFE_EVS_ERROR,
\"%s - Recvd invalid SCH msgId (0x%%08X)\", MsgId);
}
}
else if (iStatus == CFE_SB_NO_MESSAGE)
{
/* If there's no incoming message, you can do something here, or nothing */
}
else
{
/* This is an example of exiting on an error.
** Note that a SB read error is not always going to result in an app quitting.
*/
CFE_EVS_SendEvent(%s_PIPE_ERR_EID, CFE_EVS_ERROR,
\"%s: SB pipe read error (0x%%08X), app will exit\", iStatus);
g_%s_AppData.uiRunStatus= CFE_ES_APP_ERROR;
}
return (iStatus);
}
""" % (ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, g_Owner, g_Date, \
ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, \
ucApp, ucApp)
return content
#========================================================================================
def construct_source_process_new_data_content(tgtApp):
global g_Owner, g_Date
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
content = """
/*=====================================================================================
** Name: %s_ProcessNewData
**
** Purpose: To process incoming data subscribed by %s application
**
** Arguments:
** None
**
** Returns:
** None
**
** Routines Called:
** CFE_SB_RcvMsg
** CFE_SB_GetMsgId
** CFE_EVS_SendEvent
**
** Called By:
** %s_RcvMsg
**
** Global Inputs/Reads:
** None
**
** Global Outputs/Writes:
** None
**
** Limitations, Assumptions, External Events, and Notes:
** 1. List assumptions that are made that apply to this function.
** 2. List the external source(s) and event(s) that can cause this function to execute.
** 3. List known limitations that apply to this function.
** 4. If there are no assumptions, external events, or notes then enter NONE.
** Do not omit the section.
**
** Algorithm:
** Pseudo-code or description of basic algorithm
**
** Author(s): %s
**
** History: Date Written %s
** Unit Tested yyyy-mm-dd
**=====================================================================================*/
void %s_ProcessNewData()
{
int iStatus = CFE_SUCCESS;
CFE_SB_Msg_t* TlmMsgPtr=NULL;
CFE_SB_MsgId_t TlmMsgId;
/* Process telemetry messages till the pipe is empty */
while (1)
{
iStatus = CFE_SB_RcvMsg(&TlmMsgPtr, g_%s_AppData.TlmPipeId, CFE_SB_POLL);
if (iStatus == CFE_SUCCESS)
{
TlmMsgId = CFE_SB_GetMsgId(TlmMsgPtr);
switch (TlmMsgId)
{
/* TODO: Add code to process all subscribed data here
**
** Example:
** case NAV_OUT_DATA_MID:
** %s_ProcessNavData(TlmMsgPtr);
** break;
*/
default:
CFE_EVS_SendEvent(%s_MSGID_ERR_EID, CFE_EVS_ERROR,
\"%s - Recvd invalid TLM msgId (0x%%08X)\", TlmMsgId);
break;
}
}
else if (iStatus == CFE_SB_NO_MESSAGE)
{
break;
}
else
{
CFE_EVS_SendEvent(%s_PIPE_ERR_EID, CFE_EVS_ERROR,
\"%s: CMD pipe read error (0x%%08X)\", iStatus);
g_%s_AppData.uiRunStatus = CFE_ES_APP_ERROR;
break;
}
}
}
""" % (ucApp, ucApp, ucApp, g_Owner, g_Date, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp)
return content
#========================================================================================
def construct_source_process_new_cmds_content(tgtApp):
global g_Owner, g_Date
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
content = """
/*=====================================================================================
** Name: %s_ProcessNewCmds
**
** Purpose: To process incoming command messages for %s application
**
** Arguments:
** None
**
** Returns:
** None
**
** Routines Called:
** CFE_SB_RcvMsg
** CFE_SB_GetMsgId
** CFE_EVS_SendEvent
** %s_ProcessNewAppCmds
** %s_ReportHousekeeping
**
** Called By:
** %s_RcvMsg
**
** Global Inputs/Reads:
** None
**
** Global Outputs/Writes:
** None
**
** Limitations, Assumptions, External Events, and Notes:
** 1. List assumptions that are made that apply to this function.
** 2. List the external source(s) and event(s) that can cause this function to execute.
** 3. List known limitations that apply to this function.
** 4. If there are no assumptions, external events, or notes then enter NONE.
** Do not omit the section.
**
** Algorithm:
** Pseudo-code or description of basic algorithm
**
** Author(s): %s
**
** History: Date Written %s
** Unit Tested yyyy-mm-dd
**=====================================================================================*/
void %s_ProcessNewCmds()
{
int iStatus = CFE_SUCCESS;
CFE_SB_Msg_t* CmdMsgPtr=NULL;
CFE_SB_MsgId_t CmdMsgId;
/* Process command messages till the pipe is empty */
while (1)
{
iStatus = CFE_SB_RcvMsg(&CmdMsgPtr, g_%s_AppData.CmdPipeId, CFE_SB_POLL);
if(iStatus == CFE_SUCCESS)
{
CmdMsgId = CFE_SB_GetMsgId(CmdMsgPtr);
switch (CmdMsgId)
{
case %s_CMD_MID:
%s_ProcessNewAppCmds(CmdMsgPtr);
break;
case %s_SEND_HK_MID:
%s_ReportHousekeeping();
break;
/* TODO: Add code to process other subscribed commands here
**
** Example:
** case CFE_TIME_DATA_CMD_MID:
** %s_ProcessTimeDataCmd(CmdMsgPtr);
** break;
*/
default:
CFE_EVS_SendEvent(%s_MSGID_ERR_EID, CFE_EVS_ERROR,
\"%s - Recvd invalid CMD msgId (0x%%08X)\", CmdMsgId);
break;
}
}
else if (iStatus == CFE_SB_NO_MESSAGE)
{
break;
}
else
{
CFE_EVS_SendEvent(%s_PIPE_ERR_EID, CFE_EVS_ERROR,
\"%s: CMD pipe read error (0x%%08X)\", iStatus);
g_%s_AppData.uiRunStatus = CFE_ES_APP_ERROR;
break;
}
}
}
""" % (ucApp, ucApp, ucApp, ucApp, ucApp, g_Owner, g_Date, ucApp, ucApp, ucApp, ucApp, \
ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp)
return content
#========================================================================================
def construct_source_process_new_app_cmds_content(tgtApp):
global g_Owner, g_Date
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
content = """
/*=====================================================================================
** Name: %s_ProcessNewAppCmds
**
** Purpose: To process command messages targeting %s application
**
** Arguments:
** CFE_SB_Msg_t* MsgPtr - new command message pointer
**
** Returns:
** None
**
** Routines Called:
** CFE_SB_GetCmdCode
** CFE_EVS_SendEvent
**
** Called By:
** %s_ProcessNewCmds
**
** Global Inputs/Reads:
** None
**
** Global Outputs/Writes:
** g_%s_AppData.HkTlm.usCmdCnt
** g_%s_AppData.HkTlm.usCmdErrCnt
**
** Limitations, Assumptions, External Events, and Notes:
** 1. List assumptions that are made that apply to this function.
** 2. List the external source(s) and event(s) that can cause this function to execute.
** 3. List known limitations that apply to this function.
** 4. If there are no assumptions, external events, or notes then enter NONE.
** Do not omit the section.
**
** Algorithm:
** Pseudo-code or description of basic algorithm
**
** Author(s): %s
**
** History: Date Written %s
** Unit Tested yyyy-mm-dd
**=====================================================================================*/
void %s_ProcessNewAppCmds(CFE_SB_Msg_t* MsgPtr)
{
uint32 uiCmdCode=0;
if (MsgPtr != NULL)
{
uiCmdCode = CFE_SB_GetCmdCode(MsgPtr);
switch (uiCmdCode)
{
case %s_NOOP_CC:
g_%s_AppData.HkTlm.usCmdCnt++;
CFE_EVS_SendEvent(%s_CMD_INF_EID, CFE_EVS_INFORMATION,
\"%s - Recvd NOOP cmd (%%d)\", uiCmdCode);
break;
case %s_RESET_CC:
g_%s_AppData.HkTlm.usCmdCnt = 0;
g_%s_AppData.HkTlm.usCmdErrCnt = 0;
CFE_EVS_SendEvent(%s_CMD_INF_EID, CFE_EVS_INFORMATION,
\"%s - Recvd RESET cmd (%%d)\", uiCmdCode);
break;
/* TODO: Add code to process the rest of the %s commands here */
default:
g_%s_AppData.HkTlm.usCmdErrCnt++;
CFE_EVS_SendEvent(%s_MSGID_ERR_EID, CFE_EVS_ERROR,
\"%s - Recvd invalid cmdId (%%d)", uiCmdCode);
break;
}
}
}
""" % (ucApp, ucApp, ucApp, ucApp, ucApp, g_Owner, g_Date, ucApp, ucApp, ucApp, \
ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp, ucApp)
return content
#========================================================================================
def construct_source_report_hk_content(tgtApp):
global g_Owner, g_Date
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
content = """
/*=====================================================================================
** Name: %s_ReportHousekeeping
**
** Purpose: To send housekeeping message
**
** Arguments:
** None
**
** Returns:
** None
**
** Routines Called:
** TBD
**
** Called By:
** %s_ProcessNewCmds
**
** Global Inputs/Reads:
** None
**
** Global Outputs/Writes:
** TBD
**
** Limitations, Assumptions, External Events, and Notes:
** 1. List assumptions that are made that apply to this function.
** 2. List the external source(s) and event(s) that can cause this function to execute.
** 3. List known limitations that apply to this function.
** 4. If there are no assumptions, external events, or notes then enter NONE.
** Do not omit the section.
**
** Algorithm:
** Pseudo-code or description of basic algorithm
**
** Author(s): GSFC, %s
**
** History: Date Written %s
** Unit Tested yyyy-mm-dd
**=====================================================================================*/
void %s_ReportHousekeeping()
{
/* TODO: Add code to update housekeeping data, if needed, here. */
CFE_SB_TimeStampMsg((CFE_SB_Msg_t*)&g_%s_AppData.HkTlm);
CFE_SB_SendMsg((CFE_SB_Msg_t*)&g_%s_AppData.HkTlm);
}
""" % (ucApp, ucApp, g_Owner, g_Date, ucApp, ucApp, ucApp)
return content
#========================================================================================
def construct_source_send_out_data_content(tgtApp):
global g_Owner, g_Date
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
content = """
/*=====================================================================================
** Name: %s_SendOutData
**
** Purpose: To publish 1-Wakeup cycle output data
**
** Arguments:
** None
**
** Returns:
** None
**
** Routines Called:
** TBD
**
** Called By:
** %s_RcvMsg
**
** Global Inputs/Reads:
** None
**
** Global Outputs/Writes:
** TBD
**
** Limitations, Assumptions, External Events, and Notes:
** 1. List assumptions that are made that apply to this function.
** 2. List the external source(s) and event(s) that can cause this function to execute.
** 3. List known limitations that apply to this function.
** 4. If there are no assumptions, external events, or notes then enter NONE.
** Do not omit the section.
**
** Algorithm:
** Psuedo-code or description of basic algorithm
**
** Author(s): %s
**
** History: Date Written %s
** Unit Tested yyyy-mm-dd
**=====================================================================================*/
void %s_SendOutData()
{
/* TODO: Add code to update output data, if needed, here. */
CFE_SB_TimeStampMsg((CFE_SB_Msg_t*)&g_%s_AppData.OutData);
CFE_SB_SendMsg((CFE_SB_Msg_t*)&g_%s_AppData.OutData);
}
""" % (ucApp, ucApp, g_Owner, g_Date, ucApp, ucApp, ucApp)
return content
#========================================================================================
def construct_source_verify_cmd_len_content(tgtApp):
global g_Owner, g_Date
lcApp = tgtApp.lower()
ucApp = tgtApp.upper()
content = """
/*=====================================================================================
** Name: %s_VerifyCmdLength
**
** Purpose: To verify command length for a particular command message
**
** Arguments:
** CFE_SB_Msg_t* MsgPtr - command message pointer
** uint16 usExpLength - expected command length
**
** Returns:
** boolean bResult - result of verification
**
** Routines Called:
** TBD
**
** Called By:
** %s_ProcessNewCmds
**
** Global Inputs/Reads:
** None
**
** Global Outputs/Writes:
** TBD
**
** Limitations, Assumptions, External Events, and Notes:
** 1. List assumptions that are made that apply to this function.
** 2. List the external source(s) and event(s) that can cause this function to execute.
** 3. List known limitations that apply to this function.
** 4. If there are no assumptions, external events, or notes then enter NONE.
** Do not omit the section.
**
** Algorithm:
** Pseudo-code or description of basic algorithm
**
** Author(s): %s
**
** History: Date Written %s
** Unit Tested yyyy-mm-dd
**=====================================================================================*/
boolean %s_VerifyCmdLength(CFE_SB_Msg_t* MsgPtr,
uint16 usExpectedLen)
{
boolean bResult=FALSE;
uint16 usMsgLen=0;
if (MsgPtr != NULL)
{
usMsgLen = CFE_SB_GetTotalMsgLength(MsgPtr);
if (usExpectedLen != usMsgLen)
{
CFE_SB_MsgId_t MsgId = CFE_SB_GetMsgId(MsgPtr);
uint16 usCmdCode = CFE_SB_GetCmdCode(MsgPtr);
CFE_EVS_SendEvent(%s_MSGLEN_ERR_EID, CFE_EVS_ERROR,
\"%s - Rcvd invalid msgLen: msgId=0x%%08X, cmdCode=%%d, \"
\"msgLen=%%d, expectedLen=%%d\",
MsgId, usCmdCode, usMsgLen, usExpectedLen);
g_%s_AppData.HkTlm.usCmdErrCnt++;
}
}
return (bResult);
}
""" % (ucApp, ucApp, g_Owner, g_Date, ucApp, ucApp, ucApp, ucApp)
return content
#========================================================================================
def construct_source_tail_content(tgtApp):
content = """
/*=======================================================================================
** End of file %s_app.c
**=====================================================================================*/
""" % (tgtApp.lower())
return content
#========================================================================================
def generate_msgheader(tgtApp):
global g_OutDir
# Construct file path
name = tgtApp.lower() + "_msg.h"
filePath = os.path.join(g_OutDir, tgtApp.lower(), "fsw", "src", name)
fileExist_p = os.path.exists(filePath)
# Open file
fileHdl = app_utils.open_file_for_writing(filePath, False)
# Write to file
fileHdl.write(construct_msgheader_content(tgtApp))
# Close file
app_utils.close_file_from_writing(fileHdl, filePath, fileExist_p)
#========================================================================================
def generate_header(tgtApp):
global g_OutDir
# Construct file path
name = tgtApp.lower() + "_app.h"
filePath = os.path.join(g_OutDir, tgtApp.lower(), "fsw", "src", name)
fileExist_p = os.path.exists(filePath)
# Open file
fileHdl = app_utils.open_file_for_writing(filePath, False)
# Write to file
fileHdl.write(construct_header_content(tgtApp))
# Close file
app_utils.close_file_from_writing(fileHdl, filePath, fileExist_p)
#========================================================================================
def generate_source(tgtApp):
global g_OutDir
# Construct file path
name = tgtApp.lower() + "_app.c"
filePath = os.path.join(g_OutDir, tgtApp.lower(), "fsw", "src", name)
fileExist_p = os.path.exists(filePath)
# Open file
fileHdl = app_utils.open_file_for_writing(filePath, False)
# Write to file
fileHdl.write(construct_source_head_content(tgtApp))
fileHdl.write(construct_source_init_event_content(tgtApp))
fileHdl.write(construct_source_init_pipe_content(tgtApp))
fileHdl.write(construct_source_init_data_content(tgtApp))
fileHdl.write(construct_source_init_app_content(tgtApp))
fileHdl.write(construct_source_cleanup_content(tgtApp))
fileHdl.write(construct_source_recv_msg_content(tgtApp))
fileHdl.write(construct_source_process_new_data_content(tgtApp))
fileHdl.write(construct_source_process_new_cmds_content(tgtApp))
fileHdl.write(construct_source_process_new_app_cmds_content(tgtApp))
fileHdl.write(construct_source_report_hk_content(tgtApp))
fileHdl.write(construct_source_send_out_data_content(tgtApp))
fileHdl.write(construct_source_verify_cmd_len_content(tgtApp))
fileHdl.write(construct_source_main_content(tgtApp))
fileHdl.write(construct_source_tail_content(tgtApp))
# Close file
app_utils.close_file_from_writing(fileHdl, filePath, fileExist_p)
#========================================================================================
# End of app_code_src.py
#========================================================================================
| 28.77305
| 113
| 0.548106
|
aff5d1fb8c3f2b1ced5a7ee6e563dd5a18e6fd72
| 2,650
|
py
|
Python
|
src/systems/crafting_system.py
|
CGirdlestone/TextAdventure
|
6127d6c98ce3ad4c6e4d4fd0262310e74f2e4fad
|
[
"MIT"
] | null | null | null |
src/systems/crafting_system.py
|
CGirdlestone/TextAdventure
|
6127d6c98ce3ad4c6e4d4fd0262310e74f2e4fad
|
[
"MIT"
] | null | null | null |
src/systems/crafting_system.py
|
CGirdlestone/TextAdventure
|
6127d6c98ce3ad4c6e4d4fd0262310e74f2e4fad
|
[
"MIT"
] | null | null | null |
"""crafting_system.py
This class represents a simple crafting system. All recipe-related data is
stored externally in a JSON file.
"""
class CraftingSystem:
def __init__(self, event_queue, **kwargs):
self.event_queue = event_queue
self.event_queue.register_system(self)
self.__dict__.update(**kwargs)
def validate_components(self, container, recipe_number):
"""Checks whether the player has the necessary recipe components."""
if isinstance(self.recipe_components[recipe_number], int):
return self.recipe_components[recipe_number] in container
recipe_set = set(self.recipe_components[recipe_number])
has_components = True
for c in recipe_set:
component_count = self.recipe_components[recipe_number].count(c)
if container.count(c) != component_count:
has_components = False
return has_components
def remove_components(self, container, recipe_number):
"""Removes the recipe components from the player's inventory."""
if isinstance(self.recipe_components[recipe_number], int):
container.remove(self.recipe_components[recipe_number])
else:
for id in self.recipe_components[recipe_number]:
container.remove(id)
def add_output(self, container, recipe_number):
"""Adds the recipe output into the player's inventory."""
container.append(self.recipe_outputs[recipe_number])
if self.recipe_names[recipe_number][0] in ["a", "e", "i", "o", "u"]:
msg = ("Using your knowledge, you create an {}."
.format(self.recipe_names[recipe_number]))
self.event_queue.add_event({"message": msg})
else:
msg = ("Using your knowledge, you create a {}."
.format(self.recipe_names[recipe_number]))
self.event_queue.add_event({"message": msg})
def craft(self, event):
"""Crafts an item."""
recipe = list(event.values())[0][0]
player = list(event.values())[0][1]
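        # Menu selections arrive as single letters, so 'a' maps to recipe 0, 'b' to 1, and so on.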
recipe_number = ord(recipe) - 97
if self.validate_components(player.container, recipe_number):
self.remove_components(player.container, recipe_number)
self.add_output(player.container, recipe_number)
else:
msg = "You don't have the required components!"
self.event_queue.add_event({"message": msg})
def receive(self, event):
"""Handles the actioning of received events."""
if list(event.keys())[0] == self.action_word:
self.craft(event)
| 36.805556
| 76
| 0.637736
|
5655f0f163a104bdcb91d55a2cb544477adf77be
| 916
|
py
|
Python
|
tests/test_sentiments.py
|
sam1902/textblob-fr
|
8c3573659e35c8fe7809cb2ae3363268476352b7
|
[
"MIT"
] | null | null | null |
tests/test_sentiments.py
|
sam1902/textblob-fr
|
8c3573659e35c8fe7809cb2ae3363268476352b7
|
[
"MIT"
] | null | null | null |
tests/test_sentiments.py
|
sam1902/textblob-fr
|
8c3573659e35c8fe7809cb2ae3363268476352b7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from nose.tools import * # PEP8 asserts
from textblob import TextBlob
from textblob_fr import PatternAnalyzer as FrAnalyzer
class TestPatternAnalyzer(unittest.TestCase):
def setUp(self):
self.analyzer = FrAnalyzer()
self.neg = u"C'est une voiture terribles."
self.pos = u"Quelle belle matinée!"
def test_analyze(self):
pos_sentiment = self.analyzer.analyze(self.pos)
assert_true(pos_sentiment[0] > 0.0)
neg_sentiment = self.analyzer.analyze(self.neg)
assert_true(neg_sentiment[0] < 0.0)
def test_blob_analyze(self):
pos_blob = TextBlob(self.pos, analyzer=self.analyzer)
assert_true(pos_blob.sentiment[0] > 0.0)
neg_blob = TextBlob(self.neg, analyzer=self.analyzer)
assert_true(neg_blob.sentiment[0] < 0.0)
if __name__ == '__main__':
unittest.main()
| 29.548387
| 61
| 0.677948
|
4c7d7415d9e665c94e2bc5dde05d4d078468d8d3
| 1,186
|
py
|
Python
|
test/test_threading.py
|
jhidding/parallel-python-workshop
|
631851558324be67a10963c6742490314d011c80
|
[
"Apache-2.0"
] | 3
|
2020-11-30T15:52:20.000Z
|
2021-07-22T10:28:27.000Z
|
test/test_threading.py
|
jhidding/parallel-python-workshop
|
631851558324be67a10963c6742490314d011c80
|
[
"Apache-2.0"
] | 5
|
2021-11-12T11:39:35.000Z
|
2022-01-14T10:47:05.000Z
|
test/test_threading.py
|
jhidding/parallel-python-workshop
|
631851558324be67a10963c6742490314d011c80
|
[
"Apache-2.0"
] | 3
|
2021-04-09T09:55:55.000Z
|
2021-07-22T10:30:03.000Z
|
import queue
import threading
import numba
import random
def test_threading():
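    # Monte Carlo estimate of pi; nogil=True lets the numba-compiled kernel run
    # concurrently across plain Python threads.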
input_range = [10**6]*4
ncpus = 4
@numba.jit(nopython=True, nogil=True)
def calc_pi_numba(N):
M = 0
for i in range(N):
# Simulate impact coordinates
x = random.uniform(-1, 1)
y = random.uniform(-1, 1)
# True if impact happens inside the circle
if x**2 + y**2 < 1.0:
M += 1
return 4 * M / N
# We need to define a worker function that fetches jobs from the queue.
def worker(q):
while True:
try:
x = q.get(block=False)
print(calc_pi_numba(x), end=' ', flush=True)
except queue.Empty:
break
# Create the queue, and fill it with input values
work_queue = queue.Queue()
for i in input_range:
work_queue.put(i)
# Start a number of threads
threads = [
threading.Thread(target=worker, args=(work_queue,))
for i in range(ncpus)]
for t in threads:
t.start()
# Wait until all of them are done
for t in threads:
t.join()
print()
| 23.254902
| 75
| 0.542159
|
8efd2f8a2f6c80e6a3c911ae8f8840983de56b25
| 865
|
py
|
Python
|
leonardo_oscar_dashboard/apps/dashboard.py
|
leonardo-modules/leonardo-oscar-dashboard
|
752bfbc1b7bbffee1f32f756c2ec0066baed9e7a
|
[
"BSD-3-Clause"
] | null | null | null |
leonardo_oscar_dashboard/apps/dashboard.py
|
leonardo-modules/leonardo-oscar-dashboard
|
752bfbc1b7bbffee1f32f756c2ec0066baed9e7a
|
[
"BSD-3-Clause"
] | null | null | null |
leonardo_oscar_dashboard/apps/dashboard.py
|
leonardo-modules/leonardo-oscar-dashboard
|
752bfbc1b7bbffee1f32f756c2ec0066baed9e7a
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from django.conf.urls import include, url
from oscar.apps.dashboard.app import application
urlpatterns = []
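# Each optional dashboard app below is mounted only if its package can be imported.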
try:
from accounts.dashboard.app import application as accounts_app
urlpatterns += [
url(r'^accounts/', include(accounts_app.get_urls()))
]
except Exception as e:
pass
try:
from brand.dashboard.app import application as brand_app
urlpatterns += [
url(r'^brand/', include(brand_app.get_urls()))
]
except Exception as e:
pass
try:
from stores.dashboard.app import application as store_app
urlpatterns += [
url(r'^stores/', include(store_app.get_urls()))
]
except Exception as e:
pass
urlpatterns += [
url(r'^', include(application.get_urls()),)
]
| 20.595238
| 72
| 0.613873
|
fe2e86f70df87eaa47fdd63f89aff6dfc19a6360
| 6,403
|
py
|
Python
|
terex.py
|
kumaransanjay/Terex-a-voice-assistant
|
58a87d5f323a32b4e2c66efb9913b3c11add4e7d
|
[
"Apache-2.0"
] | null | null | null |
terex.py
|
kumaransanjay/Terex-a-voice-assistant
|
58a87d5f323a32b4e2c66efb9913b3c11add4e7d
|
[
"Apache-2.0"
] | null | null | null |
terex.py
|
kumaransanjay/Terex-a-voice-assistant
|
58a87d5f323a32b4e2c66efb9913b3c11add4e7d
|
[
"Apache-2.0"
] | null | null | null |
import random
import webbrowser
import pyttsx3
import datetime
import urllib.request
import re
import speech_recognition as sr
import wikipedia
import os
engine=pyttsx3.init('sapi5')
voices=engine.getProperty('voices')
engine.setProperty('voice',voices[0].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def joke():
speak("ok")
a="Why do we tell actors to break a leg?"+" Because every play has a cast"
b="What do dentists call their x-rays?"+" Tooth pics!"
c="Do you want to hear a construction joke?"+" Sorry, I\'m still working on it."
d="Why do ducks have feathers?"+" To cover their butt quacks!"
e="What does a nosey pepper do?"+" It gets jalapeño business. "
f="Why did the bullet end up losing his job?"+" He got fired."
g="How do you measure a snake?"+" In inches—they don\'t have feet."
    lst1=[a,b,c,d,e,f,g]
joke=random.choice(lst1)
print(joke)
speak(joke)
speak("ha ha ha ha ha")
def playinyt():
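    # Ask what to play, build a YouTube search URL from the spoken words, scrape
    # the first video id from the results page and open it in the browser.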
speak("what to play in youtube")
search=mycommand()
space_count=0
for character in search:
if character==" ":
space_count=space_count+1
number_of_words=space_count+1
lst=[]
for i in range(0,number_of_words):
num=search.split()[i]
lst.append(num)
a=len(lst)
castor=lst[0]+"+"
for j in range(1,a):
castor=castor+"+"+lst[j]
speak("playing"+search)
html=urllib.request.urlopen("https://www.youtube.com/results?search_query="+castor)
video_ids=re.findall(r"watch\?v=(\S{11})",html.read().decode())
webbrowser.open("https://www.youtube.com/watch?v="+video_ids[0])
def activity():
speak("I can search for you what do you ask")
def wishme():
hour=int(datetime.datetime.now().hour)
if hour>=0 and hour<12:
speak("Good morning master")
elif hour>=12 and hour<17:
speak("Good afternoon master")
elif hour>=17 and hour<19:
speak("Good evening master")
else:
speak("Good night master")
def whoamI():
speak("naan thaan TEREX")
speak("what help do you want")
"""def alarm():
Hh=int(input("set hour"))
mm=int(input("set minute"))
hour=int(datetime.datetime.now().hour)
if (Hh==hour):
speak("MASTER enthuringa")"""
def mycommand():
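    # Listen on the default microphone and return the recognized text,
    # retrying until recognition succeeds.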
r=sr.Recognizer()
with sr.Microphone() as source:
r.adjust_for_ambient_noise(source,duration=0.5)
# r.energy_threshold()
print("say anything : ")
audio= r.listen(source)
try:
query = r.recognize_google(audio)
print(query)
except:
print("sorry, could not recognise")
        query = mycommand()
return query
if __name__=="__main__":
wishme()
whoamI()
while True:
query=mycommand().lower()
if "wikipedia"in query:
speak("Searching in wikipeia")
query=query.replace("wikipedia","")
results=wikipedia.summary(query,sentences=1)
speak("according to wikipedia")
speak(results)
print(results)
break;
elif"tell me a joke"in query:
joke()
break;
elif "tell joke"in query:
joke()
break;
elif "joke"in query:
joke()
break;
elif"hai"in query:
speak("hi master")
break;
elif "open youtube"in query:
speak("opening youtube")
webbrowser.open("youtube.com")
break;
elif "open google"in query:
speak("opening google")
webbrowser.open("google.com")
break;
elif "open geeks for geeks"in query:
speak("opening geeks for geeks ")
webbrowser.open_new_tab("geeksforgeeks.org")
break;
elif "play music"in query:
speak("opening music player")
music="C:\\Users\\home\\Desktop\\songs"
songs=os.listdir(music)
print(songs)
a=random.choice(songs)
print(a)
os.startfile(os.path.join(music,a))
break;
elif "open whatsapp"in query:
speak("opening whatsapp")
webbrowser.open("web.whatsapp.com")
break;
elif "play movie"in query:
speak("playing a movie")
kmovie="C:\\Users\\home\\Desktop\\sanjay"
movie="C:\\Users\\home\\Desktop\\movie\\movie"
k=[kmovie,movie]
c=random.choice(k)
film=os.listdir(c)
print(film)
b=random.choice(film)
print(b)
            os.startfile(os.path.join(c,b))
break;
elif "open chrome"in query:
speak("opening chrome" )
codepath="C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe "
os.startfile(codepath)
break;
elif "time now"in query:
time=datetime.datetime.now().strftime("%H:%M")
speak("THE TIME IS")
speak(time)
break;
elif "nothing"in query:
speak("Bye master")
exit()
elif "search in youtube"in query:
speak("what to search in youtube")
search=mycommand()
            speak("searching for "+search)
webbrowser.open("https://www.youtube.com/results?search_query="+search)
break;
elif"play in youtube" in query:
playinyt()
break;
elif "play youtube songs"in query:
playinyt()
break;
elif "play youtube"in query:
playinyt()
break;
elif"youtube"in query:
playinyt()
break;
elif"search in google"in query:
speak("what to search in google")
search=mycommand()
            speak("searching for "+search)
webbrowser.open("https://www.google.com/search?q="+search)
break;
| 28.584821
| 92
| 0.532875
|
11a349a61828217b9352d352d0aa5b11d44c8093
| 1,561
|
py
|
Python
|
probe.py
|
karazijal/automatic_xcnn_topologies
|
3c17b8537650a1d3d0a95269ebce12c136a12dda
|
[
"MIT"
] | null | null | null |
probe.py
|
karazijal/automatic_xcnn_topologies
|
3c17b8537650a1d3d0a95269ebce12c136a12dda
|
[
"MIT"
] | null | null | null |
probe.py
|
karazijal/automatic_xcnn_topologies
|
3c17b8537650a1d3d0a95269ebce12c136a12dda
|
[
"MIT"
] | 1
|
2021-11-09T09:06:48.000Z
|
2021-11-09T09:06:48.000Z
|
from keras.models import Model
from keras.layers import Flatten, Dense, BatchNormalization, Dropout
def get_probe(model, layer_name):
for layer in model.layers:
layer.trainable = False
input_tensor = model.input
attach_tensor = model.get_layer(name=layer_name).output
nb_classes = int(model.output.shape[1])
# print(nb_classes)
# define probe
if len(attach_tensor.shape) >= 3:
bn = BatchNormalization(axis=3, name="pbn")(attach_tensor)
f = Flatten(name='pflat')(bn)
else:
f = BatchNormalization(axis=1,name="pbn")(attach_tensor)
# f = attach_tensor
drop = Dropout(.2, name='pdrop')(f)
d = Dense(nb_classes, activation='softmax', name='psoft')(drop)
prob = Model(input_tensor, d)
return prob
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
lr_reducer = ReduceLROnPlateau(monitor='val_acc', factor=0.5, patience=10, verbose=1, cooldown=4, min_lr=1e-7)
# e_stop = EarlyStopping(monitor='val_acc', min_delta=0.0002, patience=15, verbose=1)
def probe(probe, X_train, Y_train, X_test, Y_test, nb_batch=32, nb_epoch=80):
probe.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
hist = probe.fit(X_train, Y_train, batch_size=nb_batch, nb_epoch=nb_epoch, validation_data=(X_test, Y_test),
verbose=2, callbacks=[lr_reducer])
# accs = sorted(hist.history['val_acc'])[-10:]
# acc = max(accs)
mes = max(hist.history['val_acc'])
print(mes)
return mes
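# --- Illustrative usage sketch (added, not part of the original module) ---
# Assumes a trained Keras model `base_model` with a layer named 'block2_pool'
# (hypothetical) and one-hot encoded arrays X_train/Y_train/X_test/Y_test:
#
#   probe_model = get_probe(base_model, 'block2_pool')
#   val_acc = probe(probe_model, X_train, Y_train, X_test, Y_test,
#                   nb_batch=64, nb_epoch=40)
#   print('probe accuracy at block2_pool:', val_acc)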
| 36.302326
| 112
| 0.673927
|
899361774262a7ae9e54ace6bcdc328f8a9969d9
| 41,930
|
py
|
Python
|
tests/test_monitoring_tools.py
|
danielpops/paasta
|
12325d1e884a6d0ace8dcd510da98f6910d28cb0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_monitoring_tools.py
|
danielpops/paasta
|
12325d1e884a6d0ace8dcd510da98f6910d28cb0
|
[
"Apache-2.0"
] | null | null | null |
tests/test_monitoring_tools.py
|
danielpops/paasta
|
12325d1e884a6d0ace8dcd510da98f6910d28cb0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pysensu_yelp
import pytest
from paasta_tools import marathon_tools
from paasta_tools import monitoring_tools
from paasta_tools.utils import compose_job_id
class TestMonitoring_Tools:
general_page = True
fake_general_service_config = {
"team": "general_test_team",
"runbook": "y/general_test_runbook",
"tip": "general_test_tip",
"notification_email": "general_test_notification_email",
"page": general_page,
}
empty_service_config = marathon_tools.MarathonServiceConfig(
service="myservicename",
cluster="mycluster",
instance="myinstance",
config_dict={},
branch_dict=None,
)
job_page = False
fake_marathon_job_config = marathon_tools.MarathonServiceConfig(
service="myservicename",
cluster="myclustername",
instance="myinstance",
config_dict={
"team": "job_test_team",
"runbook": "y/job_test_runbook",
"tip": "job_test_tip",
"notification_email": "job_test_notification_email",
"page": job_page,
},
branch_dict=None,
)
empty_job_config = {}
monitor_page = True
fake_monitor_config = {
"team": "monitor_test_team",
"runbook": "y/monitor_test_runbook",
"tip": "monitor_test_tip",
"notification_email": "monitor_test_notification_email",
"page": monitor_page,
}
empty_monitor_config = {}
framework = "fake_framework"
overrides = {}
instance = "fake_instance"
service = "fake_service"
soa_dir = "/fake/soa/dir"
def test_get_team(self):
with mock.patch(
"paasta_tools.monitoring_tools.__get_monitoring_config_value", autospec=True
) as get_monitoring_config_value_patch:
monitoring_tools.get_team(self.overrides, self.service, self.soa_dir)
get_monitoring_config_value_patch.assert_called_once_with(
"team", self.overrides, self.service, self.soa_dir
)
def test_get_runbook(self):
with mock.patch(
"paasta_tools.monitoring_tools.__get_monitoring_config_value", autospec=True
) as get_monitoring_config_value_patch:
monitoring_tools.get_runbook(self.overrides, self.service, self.soa_dir)
get_monitoring_config_value_patch.assert_called_once_with(
"runbook", self.overrides, self.service, self.soa_dir
)
def test_get_tip(self):
with mock.patch(
"paasta_tools.monitoring_tools.__get_monitoring_config_value", autospec=True
) as get_monitoring_config_value_patch:
monitoring_tools.get_tip(self.overrides, self.service, self.soa_dir)
get_monitoring_config_value_patch.assert_called_once_with(
"tip", self.overrides, self.service, self.soa_dir
)
def test_get_notification_email(self):
with mock.patch(
"paasta_tools.monitoring_tools.__get_monitoring_config_value", autospec=True
) as get_monitoring_config_value_patch:
monitoring_tools.get_notification_email(
self.overrides, self.service, self.soa_dir
)
get_monitoring_config_value_patch.assert_called_once_with(
"notification_email", self.overrides, self.service, self.soa_dir
)
def test_get_page(self):
with mock.patch(
"paasta_tools.monitoring_tools.__get_monitoring_config_value", autospec=True
) as get_monitoring_config_value_patch:
monitoring_tools.get_page(self.overrides, self.service, self.soa_dir)
get_monitoring_config_value_patch.assert_called_once_with(
"page", self.overrides, self.service, self.soa_dir
)
def test_get_alert_after(self):
with mock.patch(
"paasta_tools.monitoring_tools.__get_monitoring_config_value", autospec=True
) as get_monitoring_config_value_patch:
monitoring_tools.get_alert_after(self.overrides, self.service, self.soa_dir)
get_monitoring_config_value_patch.assert_called_once_with(
"alert_after", self.overrides, self.service, self.soa_dir
)
def test_get_realert_every(self):
with mock.patch(
"paasta_tools.monitoring_tools.__get_monitoring_config_value", autospec=True
) as get_monitoring_config_value_patch:
monitoring_defaults = mock.Mock()
monitoring_tools.get_realert_every(
self.overrides, self.service, self.soa_dir, monitoring_defaults
)
get_monitoring_config_value_patch.assert_called_once_with(
"realert_every",
self.overrides,
self.service,
self.soa_dir,
monitoring_defaults,
)
def test_get_check_every(self):
with mock.patch(
"paasta_tools.monitoring_tools.__get_monitoring_config_value", autospec=True
) as get_monitoring_config_value_patch:
monitoring_tools.get_check_every(self.overrides, self.service, self.soa_dir)
get_monitoring_config_value_patch.assert_called_once_with(
"check_every", self.overrides, self.service, self.soa_dir
)
def test_get_irc_channels(self):
with mock.patch(
"paasta_tools.monitoring_tools.__get_monitoring_config_value", autospec=True
) as get_monitoring_config_value_patch:
monitoring_tools.get_irc_channels(
self.overrides, self.service, self.soa_dir
)
get_monitoring_config_value_patch.assert_called_once_with(
"irc_channels", self.overrides, self.service, self.soa_dir
)
def test_get_slack_channels(self):
with mock.patch(
"paasta_tools.monitoring_tools.__get_monitoring_config_value", autospec=True
) as get_monitoring_config_value_patch:
monitoring_tools.get_slack_channels(
self.overrides, self.service, self.soa_dir
)
get_monitoring_config_value_patch.assert_called_once_with(
"slack_channels", self.overrides, self.service, self.soa_dir
)
def test_get_dependencies(self):
with mock.patch(
"paasta_tools.monitoring_tools.__get_monitoring_config_value", autospec=True
) as get_monitoring_config_value_patch:
monitoring_tools.get_dependencies(
self.overrides, self.service, self.soa_dir
)
get_monitoring_config_value_patch.assert_called_once_with(
"dependencies", self.overrides, self.service, self.soa_dir
)
def test_get_ticket(self):
with mock.patch(
"paasta_tools.monitoring_tools.__get_monitoring_config_value", autospec=True
) as get_monitoring_config_value_patch:
monitoring_tools.get_ticket(self.overrides, self.service, self.soa_dir)
get_monitoring_config_value_patch.assert_called_once_with(
"ticket", self.overrides, self.service, self.soa_dir
)
def test_get_project(self):
with mock.patch(
"paasta_tools.monitoring_tools.__get_monitoring_config_value", autospec=True
) as get_monitoring_config_value_patch:
monitoring_tools.get_project(self.overrides, self.service, self.soa_dir)
get_monitoring_config_value_patch.assert_called_once_with(
"project", self.overrides, self.service, self.soa_dir
)
def test_get_monitoring_config_value_with_monitor_config(self):
expected = "monitor_test_team"
with mock.patch(
"service_configuration_lib.read_service_configuration",
autospec=True,
return_value=self.fake_general_service_config,
) as service_configuration_lib_patch, mock.patch(
"paasta_tools.monitoring_tools.read_monitoring_config",
autospec=True,
return_value=self.fake_monitor_config,
) as read_monitoring_patch, mock.patch(
"paasta_tools.monitoring_tools.load_system_paasta_config", autospec=True
) as load_system_paasta_config_patch:
load_system_paasta_config_patch.return_value.get_cluster = mock.Mock(
return_value="fake_cluster"
)
actual = monitoring_tools.get_team(
self.overrides, self.service, self.soa_dir
)
assert expected == actual
service_configuration_lib_patch.assert_called_once_with(
self.service, soa_dir=self.soa_dir
)
read_monitoring_patch.assert_called_once_with(
self.service, soa_dir=self.soa_dir
)
def test_get_monitoring_config_value_with_service_config(self):
expected = "general_test_team"
with mock.patch(
"service_configuration_lib.read_service_configuration",
autospec=True,
return_value=self.fake_general_service_config,
) as service_configuration_lib_patch, mock.patch(
"paasta_tools.monitoring_tools.read_monitoring_config",
autospec=True,
return_value=self.empty_monitor_config,
) as read_monitoring_patch, mock.patch(
"paasta_tools.monitoring_tools.load_system_paasta_config", autospec=True
) as load_system_paasta_config_patch:
load_system_paasta_config_patch.return_value.get_cluster = mock.Mock(
return_value="fake_cluster"
)
actual = monitoring_tools.get_team(
self.overrides, self.service, self.soa_dir
)
assert expected == actual
service_configuration_lib_patch.assert_called_once_with(
self.service, soa_dir=self.soa_dir
)
read_monitoring_patch.assert_called_once_with(
self.service, soa_dir=self.soa_dir
)
def test_get_monitoring_config_value_with_defaults(self):
expected = None
with mock.patch(
"service_configuration_lib.read_service_configuration",
autospec=True,
return_value=self.empty_job_config,
) as service_configuration_lib_patch, mock.patch(
"paasta_tools.monitoring_tools.read_monitoring_config",
autospec=True,
return_value=self.empty_monitor_config,
) as read_monitoring_patch, mock.patch(
"paasta_tools.monitoring_tools.load_system_paasta_config", autospec=True
) as load_system_paasta_config_patch:
load_system_paasta_config_patch.return_value.get_cluster = mock.Mock(
return_value="fake_cluster"
)
actual = monitoring_tools.get_team(
self.overrides, self.service, self.soa_dir
)
assert expected == actual
service_configuration_lib_patch.assert_called_once_with(
self.service, soa_dir=self.soa_dir
)
read_monitoring_patch.assert_called_once_with(
self.service, soa_dir=self.soa_dir
)
def test_send_event(self):
fake_service = "fake_service"
fake_monitoring_overrides = {}
fake_check_name = "fake_check_name"
fake_status = "42"
fake_output = "The http port is not open"
fake_team = "fake_team"
fake_tip = "fake_tip"
fake_notification_email = "fake@notify"
fake_irc = "#fake"
fake_slack = "#fake_slack"
fake_soa_dir = "/fake/soa/dir"
self.fake_cluster = "fake_cluster"
fake_sensu_host = "fake_sensu_host"
fake_sensu_port = 12345
expected_runbook = "http://y/paasta-troubleshooting"
expected_check_name = fake_check_name
expected_kwargs = {
"name": expected_check_name,
"runbook": expected_runbook,
"status": fake_status,
"output": fake_output,
"team": fake_team,
"page": True,
"tip": fake_tip,
"notification_email": fake_notification_email,
"check_every": "1m",
"realert_every": -1,
"alert_after": "5m",
"irc_channels": fake_irc,
"slack_channels": fake_slack,
"ticket": False,
"project": None,
"priority": None,
"source": "paasta-fake_cluster",
"tags": [],
"ttl": None,
"sensu_host": fake_sensu_host,
"sensu_port": fake_sensu_port,
"component": None,
"description": None,
}
with mock.patch(
"paasta_tools.monitoring_tools.get_team",
return_value=fake_team,
autospec=True,
) as get_team_patch, mock.patch(
"paasta_tools.monitoring_tools.get_tip",
return_value=fake_tip,
autospec=True,
) as get_tip_patch, mock.patch(
"paasta_tools.monitoring_tools.get_notification_email",
return_value=fake_notification_email,
autospec=True,
) as get_notification_email_patch, mock.patch(
"paasta_tools.monitoring_tools.get_irc_channels",
return_value=fake_irc,
autospec=True,
) as get_irc_patch, mock.patch(
"paasta_tools.monitoring_tools.get_slack_channels",
return_value=fake_slack,
autospec=True,
) as get_slack_patch, mock.patch(
"paasta_tools.monitoring_tools.get_ticket",
return_value=False,
autospec=True,
), mock.patch(
"paasta_tools.monitoring_tools.get_project",
return_value=None,
autospec=True,
), mock.patch(
"paasta_tools.monitoring_tools.get_page", return_value=True, autospec=True
) as get_page_patch, mock.patch(
"paasta_tools.monitoring_tools.get_priority",
return_value=None,
autospec=True,
), mock.patch(
"paasta_tools.monitoring_tools.get_tags", return_value=[], autospec=True
), mock.patch(
"paasta_tools.monitoring_tools.get_component",
return_value=None,
autospec=True,
), mock.patch(
"paasta_tools.monitoring_tools.get_description",
return_value=None,
autospec=True,
), mock.patch(
"pysensu_yelp.send_event", autospec=True
) as pysensu_yelp_send_event_patch, mock.patch(
"paasta_tools.monitoring_tools.load_system_paasta_config", autospec=True
) as load_system_paasta_config_patch:
load_system_paasta_config_patch.return_value.get_cluster = mock.Mock(
return_value=self.fake_cluster
)
load_system_paasta_config_patch.return_value.get_sensu_host = mock.Mock(
return_value=fake_sensu_host
)
load_system_paasta_config_patch.return_value.get_sensu_port = mock.Mock(
return_value=fake_sensu_port
)
monitoring_tools.send_event(
fake_service,
fake_check_name,
fake_monitoring_overrides,
fake_status,
fake_output,
fake_soa_dir,
)
get_team_patch.assert_called_once_with(
fake_monitoring_overrides, fake_service, fake_soa_dir
)
get_tip_patch.assert_called_once_with(
fake_monitoring_overrides, fake_service, fake_soa_dir
)
get_notification_email_patch.assert_called_once_with(
fake_monitoring_overrides, fake_service, fake_soa_dir
)
get_irc_patch.assert_called_once_with(
fake_monitoring_overrides, fake_service, fake_soa_dir
)
get_slack_patch.assert_called_once_with(
fake_monitoring_overrides, fake_service, fake_soa_dir
)
get_page_patch.assert_called_once_with(
fake_monitoring_overrides, fake_service, fake_soa_dir
)
pysensu_yelp_send_event_patch.assert_called_once_with(**expected_kwargs)
load_system_paasta_config_patch.return_value.get_cluster.assert_called_once_with()
def test_send_event_sensu_host_is_None(self):
fake_service = "fake_service"
fake_monitoring_overrides = {}
fake_check_name = "fake_check_name"
fake_status = "42"
fake_output = "The http port is not open"
fake_soa_dir = "/fake/soa/dir"
self.fake_cluster = "fake_cluster"
fake_sensu_port = 12345
with mock.patch(
"paasta_tools.monitoring_tools.get_team", autospec=True
), mock.patch(
"paasta_tools.monitoring_tools.get_tip", autospec=True
), mock.patch(
"paasta_tools.monitoring_tools.get_notification_email", autospec=True
), mock.patch(
"paasta_tools.monitoring_tools.get_irc_channels", autospec=True
), mock.patch(
"paasta_tools.monitoring_tools.get_ticket", autospec=True
), mock.patch(
"paasta_tools.monitoring_tools.get_project", autospec=True
), mock.patch(
"paasta_tools.monitoring_tools.get_page", autospec=True
), mock.patch(
"pysensu_yelp.send_event", autospec=True
) as pysensu_yelp_send_event_patch, mock.patch(
"paasta_tools.monitoring_tools.load_system_paasta_config", autospec=True
) as load_system_paasta_config_patch:
load_system_paasta_config_patch.return_value.get_sensu_host = mock.Mock(
return_value=None
)
load_system_paasta_config_patch.return_value.get_sensu_port = mock.Mock(
return_value=fake_sensu_port
)
monitoring_tools.send_event(
fake_service,
fake_check_name,
fake_monitoring_overrides,
fake_status,
fake_output,
fake_soa_dir,
)
assert pysensu_yelp_send_event_patch.call_count == 0
def test_read_monitoring_config(self):
fake_name = "partial"
fake_fname = "acronyms"
fake_path = "ever_patched"
fake_soa_dir = "/nail/cte/oas"
fake_dict = {"e": "quail", "v": "snail"}
with mock.patch(
"os.path.abspath", autospec=True, return_value=fake_path
) as abspath_patch, mock.patch(
"os.path.join", autospec=True, return_value=fake_fname
) as join_patch, mock.patch(
"service_configuration_lib.read_monitoring",
autospec=True,
return_value=fake_dict,
) as read_monitoring_patch:
actual = monitoring_tools.read_monitoring_config(fake_name, fake_soa_dir)
assert fake_dict == actual
abspath_patch.assert_called_once_with(fake_soa_dir)
join_patch.assert_called_once_with(fake_path, fake_name, "monitoring.yaml")
read_monitoring_patch.assert_called_once_with(fake_fname)
def test_list_teams():
fake_team_data = {
"team_data": {
"red_jaguars": {
"pagerduty_api_key": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"pages_slack_channel": "red_jaguars_pages",
"notifications_slack_channel": "red_jaguars_notifications",
"notification_email": "red_jaguars+alert@yelp.com",
"project": "REDJAGS",
},
"blue_barracudas": {
"pagerduty_api_key": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
"pages_slack_channel": "blue_barracudas_pages",
},
}
}
expected = {"red_jaguars", "blue_barracudas"}
with mock.patch(
"paasta_tools.monitoring_tools._load_sensu_team_data",
autospec=True,
return_value=fake_team_data,
):
actual = monitoring_tools.list_teams()
assert actual == expected
def test_send_event_users_monitoring_tools_send_event_properly(instance_config):
fake_status = "999999"
fake_output = "YOU DID IT"
instance_config.get_monitoring.return_value = {"fake_key": "fake_value"}
expected_check_name = (
"check_paasta_services_replication.%s" % instance_config.job_id
)
with mock.patch(
"paasta_tools.monitoring_tools.send_event", autospec=True
) as send_event_patch, mock.patch(
"paasta_tools.monitoring_tools._log", autospec=True
), mock.patch(
"paasta_tools.monitoring_tools.get_runbook",
autospec=True,
return_value="y/runbook",
):
monitoring_tools.send_replication_event(
instance_config=instance_config, status=fake_status, output=fake_output
)
send_event_patch.assert_called_once_with(
service=instance_config.service,
check_name=expected_check_name,
overrides={
"fake_key": "fake_value",
"runbook": "y/runbook",
"alert_after": "2m",
"check_every": "1m",
},
status=fake_status,
output=fake_output,
soa_dir=instance_config.soa_dir,
cluster=instance_config.cluster,
)
def test_send_replication_event_users_monitoring_tools_send_event_properly(
instance_config,
):
fake_status = "999999"
fake_output = "YOU DID IT"
instance_config.get_monitoring.return_value = {"fake_key": "fake_value"}
expected_check_name = (
"check_paasta_services_replication.%s" % instance_config.job_id
)
with mock.patch(
"paasta_tools.monitoring_tools.send_event", autospec=True
) as send_event_patch, mock.patch(
"paasta_tools.monitoring_tools._log", autospec=True
), mock.patch(
"paasta_tools.monitoring_tools.get_runbook",
autospec=True,
return_value="y/runbook",
):
monitoring_tools.send_replication_event(
instance_config=instance_config, status=fake_status, output=fake_output
)
send_event_patch.assert_called_once_with(
service=instance_config.service,
check_name=expected_check_name,
overrides={
"fake_key": "fake_value",
"runbook": "y/runbook",
"alert_after": "2m",
"check_every": "1m",
},
status=fake_status,
output=fake_output,
soa_dir=instance_config.soa_dir,
cluster=instance_config.cluster,
)
def test_send_replication_event_users_monitoring_tools_send_event_respects_alert_after(
instance_config,
):
fake_status = "999999"
fake_output = "YOU DID IT"
instance_config.get_monitoring.return_value = {"alert_after": "666m"}
expected_check_name = (
"check_paasta_services_replication.%s" % instance_config.job_id
)
with mock.patch(
"paasta_tools.monitoring_tools.send_event", autospec=True
) as send_event_patch, mock.patch(
"paasta_tools.monitoring_tools._log", autospec=True
), mock.patch(
"paasta_tools.monitoring_tools.get_runbook",
autospec=True,
return_value="y/runbook",
):
monitoring_tools.send_replication_event(
instance_config=instance_config, status=fake_status, output=fake_output
)
        assert send_event_patch.call_count == 1
send_event_patch.assert_called_once_with(
service=instance_config.service,
check_name=expected_check_name,
overrides={
"runbook": "y/runbook",
"alert_after": "666m",
"check_every": "1m",
},
status=fake_status,
output=fake_output,
soa_dir=instance_config.soa_dir,
cluster=instance_config.cluster,
)
@pytest.fixture
def instance_config():
service = "fake_service"
instance = "fake_instance"
job_id = compose_job_id(service, instance)
mock_instance_config = mock.Mock(
service=service,
instance=instance,
cluster="fake_cluster",
soa_dir="fake_soa_dir",
job_id=job_id,
)
mock_instance_config.get_replication_crit_percentage.return_value = 90
mock_instance_config.get_registrations.return_value = [job_id]
return mock_instance_config
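# Note (added): this fixture fabricates a minimal instance_config mock for the
# replication tests in this module; only the attributes the checks read
# (service, instance, cluster, soa_dir, job_id, registrations and the
# replication crit percentage) are defined.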
def test_check_smartstack_replication_for_instance_ok_when_expecting_zero(
instance_config,
):
expected_replication_count = 0
mock_smartstack_replication_checker = mock.Mock()
mock_smartstack_replication_checker.get_replication_for_instance.return_value = {
"fake_region": {"test.main": 1, "test.three": 4, "test.four": 8}
}
with mock.patch(
"paasta_tools.monitoring_tools.send_replication_event", autospec=True
) as mock_send_replication_event:
monitoring_tools.check_smartstack_replication_for_instance(
instance_config=instance_config,
expected_count=expected_replication_count,
smartstack_replication_checker=mock_smartstack_replication_checker,
)
mock_send_replication_event.assert_called_once_with(
instance_config=instance_config,
status=pysensu_yelp.Status.OK,
output=mock.ANY,
)
def test_check_smartstack_replication_for_instance_crit_when_absent(instance_config):
expected_replication_count = 8
mock_smartstack_replication_checker = mock.Mock()
mock_smartstack_replication_checker.get_replication_for_instance.return_value = {
"fake_region": {"test.two": 1, "test.three": 4, "test.four": 8}
}
with mock.patch(
"paasta_tools.monitoring_tools.send_replication_event", autospec=True
) as mock_send_replication_event:
monitoring_tools.check_smartstack_replication_for_instance(
instance_config=instance_config,
expected_count=expected_replication_count,
smartstack_replication_checker=mock_smartstack_replication_checker,
)
mock_send_replication_event.assert_called_once_with(
instance_config=instance_config,
status=pysensu_yelp.Status.CRITICAL,
output=mock.ANY,
)
def test_check_smartstack_replication_for_instance_crit_when_zero_replication(
instance_config,
):
expected_replication_count = 8
mock_smartstack_replication_checker = mock.Mock()
mock_smartstack_replication_checker.get_replication_for_instance.return_value = {
"fake_region": {
"fake_service.fake_instance": 0,
"test.main": 8,
"test.fully_replicated": 8,
}
}
with mock.patch(
"paasta_tools.monitoring_tools.send_replication_event", autospec=True
) as mock_send_replication_event:
monitoring_tools.check_smartstack_replication_for_instance(
instance_config=instance_config,
expected_count=expected_replication_count,
smartstack_replication_checker=mock_smartstack_replication_checker,
)
mock_send_replication_event.assert_called_once_with(
instance_config=instance_config,
status=pysensu_yelp.Status.CRITICAL,
output=mock.ANY,
)
_, send_replication_event_kwargs = mock_send_replication_event.call_args
alert_output = send_replication_event_kwargs["output"]
assert (
"Service {} has 0 out of 8 expected instances in fake_region".format(
instance_config.job_id
)
) in alert_output
assert (
"paasta status -s {} -i {} -c {} -vv".format(
instance_config.service,
instance_config.instance,
instance_config.cluster,
)
) in alert_output
def test_check_smartstack_replication_for_instance_crit_when_low_replication(
instance_config,
):
expected_replication_count = 8
mock_smartstack_replication_checker = mock.Mock()
mock_smartstack_replication_checker.get_replication_for_instance.return_value = {
"fake_region": {
"test.canary": 1,
"fake_service.fake_instance": 4,
"test.fully_replicated": 8,
}
}
with mock.patch(
"paasta_tools.monitoring_tools.send_replication_event", autospec=True
) as mock_send_replication_event:
monitoring_tools.check_smartstack_replication_for_instance(
instance_config=instance_config,
expected_count=expected_replication_count,
smartstack_replication_checker=mock_smartstack_replication_checker,
)
mock_send_replication_event.assert_called_once_with(
instance_config=instance_config,
status=pysensu_yelp.Status.CRITICAL,
output=mock.ANY,
)
_, send_replication_event_kwargs = mock_send_replication_event.call_args
alert_output = send_replication_event_kwargs["output"]
assert (
"Service {} has 4 out of 8 expected instances in fake_region".format(
instance_config.job_id
)
) in alert_output
assert (
"paasta status -s {} -i {} -c {} -vv".format(
instance_config.service,
instance_config.instance,
instance_config.cluster,
)
) in alert_output
def test_check_smartstack_replication_for_instance_ok_with_enough_replication(
instance_config,
):
expected_replication_count = 8
mock_smartstack_replication_checker = mock.Mock()
mock_smartstack_replication_checker.get_replication_for_instance.return_value = {
"fake_region": {
"test.canary": 1,
"test.low_replication": 4,
"fake_service.fake_instance": 8,
}
}
with mock.patch(
"paasta_tools.monitoring_tools.send_replication_event", autospec=True
) as mock_send_replication_event:
monitoring_tools.check_smartstack_replication_for_instance(
instance_config=instance_config,
expected_count=expected_replication_count,
smartstack_replication_checker=mock_smartstack_replication_checker,
)
mock_send_replication_event.assert_called_once_with(
instance_config=instance_config,
status=pysensu_yelp.Status.OK,
output=mock.ANY,
)
_, send_replication_event_kwargs = mock_send_replication_event.call_args
alert_output = send_replication_event_kwargs["output"]
assert (
"{} has 8 out of 8 expected instances in fake_region (OK: 100%)".format(
instance_config.job_id
)
) in alert_output
def test_check_smartstack_replication_for_instance_ok_with_enough_replication_multilocation(
instance_config,
):
expected_replication_count = 2
mock_smartstack_replication_checker = mock.Mock()
mock_smartstack_replication_checker.get_replication_for_instance.return_value = {
"fake_region": {"fake_service.fake_instance": 1},
"fake_other_region": {"fake_service.fake_instance": 1},
}
with mock.patch(
"paasta_tools.monitoring_tools.send_replication_event", autospec=True
) as mock_send_replication_event:
monitoring_tools.check_smartstack_replication_for_instance(
instance_config=instance_config,
expected_count=expected_replication_count,
smartstack_replication_checker=mock_smartstack_replication_checker,
)
mock_send_replication_event.assert_called_once_with(
instance_config=instance_config,
status=pysensu_yelp.Status.OK,
output=mock.ANY,
)
_, send_replication_event_kwargs = mock_send_replication_event.call_args
alert_output = send_replication_event_kwargs["output"]
assert (
"{} has 1 out of 1 expected instances in fake_region".format(
instance_config.job_id
)
) in alert_output
assert (
"{} has 1 out of 1 expected instances in fake_other_region".format(
instance_config.job_id
)
) in alert_output
def test_check_smartstack_replication_for_instance_crit_when_low_replication_multilocation(
instance_config,
):
expected_replication_count = 2
mock_smartstack_replication_checker = mock.Mock()
mock_smartstack_replication_checker.get_replication_for_instance.return_value = {
"fake_region": {"fake_service.fake_instance": 1},
"fake_other_region": {"fake_service.fake_instance": 0},
}
with mock.patch(
"paasta_tools.monitoring_tools.send_replication_event", autospec=True
) as mock_send_replication_event:
monitoring_tools.check_smartstack_replication_for_instance(
instance_config=instance_config,
expected_count=expected_replication_count,
smartstack_replication_checker=mock_smartstack_replication_checker,
)
mock_send_replication_event.assert_called_once_with(
instance_config=instance_config,
status=pysensu_yelp.Status.CRITICAL,
output=mock.ANY,
)
_, send_replication_event_kwargs = mock_send_replication_event.call_args
alert_output = send_replication_event_kwargs["output"]
assert (
"{} has 1 out of 1 expected instances in fake_region".format(
instance_config.job_id
)
) in alert_output
assert (
"{} has 0 out of 1 expected instances in fake_other_region".format(
instance_config.job_id
)
) in alert_output
assert (
"paasta status -s {} -i {} -c {} -vv".format(
instance_config.service,
instance_config.instance,
instance_config.cluster,
)
) in alert_output
def test_check_smartstack_replication_for_instance_crit_when_zero_replication_multilocation(
instance_config,
):
expected_replication_count = 2
mock_smartstack_replication_checker = mock.Mock()
mock_smartstack_replication_checker.get_replication_for_instance.return_value = {
"fake_region": {"fake_service.fake_instance": 0},
"fake_other_region": {"fake_service.fake_instance": 0},
}
with mock.patch(
"paasta_tools.monitoring_tools.send_replication_event", autospec=True
) as mock_send_replication_event:
monitoring_tools.check_smartstack_replication_for_instance(
instance_config=instance_config,
expected_count=expected_replication_count,
smartstack_replication_checker=mock_smartstack_replication_checker,
)
mock_send_replication_event.assert_called_once_with(
instance_config=instance_config,
status=pysensu_yelp.Status.CRITICAL,
output=mock.ANY,
)
_, send_replication_event_kwargs = mock_send_replication_event.call_args
alert_output = send_replication_event_kwargs["output"]
assert (
"{} has 0 out of 1 expected instances in fake_region".format(
instance_config.job_id
)
) in alert_output
assert (
"{} has 0 out of 1 expected instances in fake_other_region".format(
instance_config.job_id
)
) in alert_output
assert (
"paasta status -s {} -i {} -c {} -vv".format(
instance_config.service,
instance_config.instance,
instance_config.cluster,
)
) in alert_output
def test_check_smartstack_replication_for_instance_crit_when_missing_replication_multilocation(
instance_config,
):
expected_replication_count = 2
mock_smartstack_replication_checker = mock.Mock()
mock_smartstack_replication_checker.get_replication_for_instance.return_value = {
"fake_region": {"test.main": 0},
"fake_other_region": {"test.main": 0},
}
with mock.patch(
"paasta_tools.monitoring_tools.send_replication_event", autospec=True
) as mock_send_replication_event:
monitoring_tools.check_smartstack_replication_for_instance(
instance_config=instance_config,
expected_count=expected_replication_count,
smartstack_replication_checker=mock_smartstack_replication_checker,
)
mock_send_replication_event.assert_called_once_with(
instance_config=instance_config,
status=pysensu_yelp.Status.CRITICAL,
output=mock.ANY,
)
_, send_replication_event_kwargs = mock_send_replication_event.call_args
alert_output = send_replication_event_kwargs["output"]
assert (
"{} has 0 out of 1 expected instances in fake_region".format(
instance_config.job_id
)
) in alert_output
assert (
"{} has 0 out of 1 expected instances in fake_other_region".format(
instance_config.job_id
)
) in alert_output
def test_check_smartstack_replication_for_instance_crit_when_no_smartstack_info(
instance_config,
):
expected_replication_count = 2
mock_smartstack_replication_checker = mock.Mock()
mock_smartstack_replication_checker.get_replication_for_instance.return_value = {}
with mock.patch(
"paasta_tools.monitoring_tools.send_replication_event", autospec=True
) as mock_send_replication_event:
monitoring_tools.check_smartstack_replication_for_instance(
instance_config=instance_config,
expected_count=expected_replication_count,
smartstack_replication_checker=mock_smartstack_replication_checker,
)
mock_send_replication_event.assert_called_once_with(
instance_config=instance_config,
status=pysensu_yelp.Status.CRITICAL,
output=mock.ANY,
)
_, send_replication_event_kwargs = mock_send_replication_event.call_args
alert_output = send_replication_event_kwargs["output"]
assert (
f"{instance_config.job_id} has no Smartstack replication info."
) in alert_output
def test_send_replication_event_if_under_replication_handles_0_expected(
instance_config,
):
with mock.patch(
"paasta_tools.monitoring_tools.send_replication_event", autospec=True
) as mock_send_event:
monitoring_tools.send_replication_event_if_under_replication(
instance_config=instance_config, expected_count=0, num_available=0
)
mock_send_event.assert_called_once_with(
instance_config=instance_config, status=0, output=mock.ANY
)
_, send_event_kwargs = mock_send_event.call_args
alert_output = send_event_kwargs["output"]
assert (
"{} has 0 out of 0 expected instances available!\n(threshold: 90%)".format(
instance_config.job_id
)
) in alert_output
def test_send_replication_event_if_under_replication_good(instance_config):
with mock.patch(
"paasta_tools.monitoring_tools.send_replication_event", autospec=True
) as mock_send_event:
monitoring_tools.send_replication_event_if_under_replication(
instance_config=instance_config, expected_count=100, num_available=100
)
mock_send_event.assert_called_once_with(
instance_config=instance_config, status=0, output=mock.ANY
)
_, send_event_kwargs = mock_send_event.call_args
alert_output = send_event_kwargs["output"]
assert (
"{} has 100 out of 100 expected instances available!\n(threshold: 90%)".format(
instance_config.job_id
)
) in alert_output
def test_send_replication_event_if_under_replication_critical(instance_config):
with mock.patch(
"paasta_tools.monitoring_tools.send_replication_event", autospec=True
) as mock_send_event:
monitoring_tools.send_replication_event_if_under_replication(
instance_config=instance_config, expected_count=100, num_available=89
)
mock_send_event.assert_called_once_with(
instance_config=instance_config, status=2, output=mock.ANY
)
_, send_event_kwargs = mock_send_event.call_args
alert_output = send_event_kwargs["output"]
assert (
"{} has 89 out of 100 expected instances available!\n(threshold: 90%)".format(
instance_config.job_id
)
) in alert_output
assert (
"paasta status -s {} -i {} -c {} -vv".format(
instance_config.service,
instance_config.instance,
instance_config.cluster,
)
) in alert_output
| 40.047755
| 95
| 0.658145
|
dc24fbec378f0383676cb0bfb89b28831709aaea
| 1,961
|
py
|
Python
|
scicopia/app/parser/ScicopiaListener.py
|
pikatech/Scicopia
|
dcdb3b4f55b9111fa3b4fe78afdb07bb2ceb9985
|
[
"MIT"
] | null | null | null |
scicopia/app/parser/ScicopiaListener.py
|
pikatech/Scicopia
|
dcdb3b4f55b9111fa3b4fe78afdb07bb2ceb9985
|
[
"MIT"
] | 9
|
2021-07-24T16:12:03.000Z
|
2021-07-24T16:58:19.000Z
|
scicopia/app/parser/ScicopiaListener.py
|
pikatech/Scicopia
|
dcdb3b4f55b9111fa3b4fe78afdb07bb2ceb9985
|
[
"MIT"
] | 1
|
2021-06-18T16:00:06.000Z
|
2021-06-18T16:00:06.000Z
|
# Generated from Scicopia.g4 by ANTLR 4.9.2
from antlr4 import *
if __name__ is not None and "." in __name__:
from .ScicopiaParser import ScicopiaParser
else:
from ScicopiaParser import ScicopiaParser
# This class defines a complete listener for a parse tree produced by ScicopiaParser.
class ScicopiaListener(ParseTreeListener):
# Enter a parse tree produced by ScicopiaParser#query.
def enterQuery(self, ctx:ScicopiaParser.QueryContext):
pass
# Exit a parse tree produced by ScicopiaParser#query.
def exitQuery(self, ctx:ScicopiaParser.QueryContext):
pass
# Enter a parse tree produced by ScicopiaParser#part.
def enterPart(self, ctx:ScicopiaParser.PartContext):
pass
# Exit a parse tree produced by ScicopiaParser#part.
def exitPart(self, ctx:ScicopiaParser.PartContext):
pass
# Enter a parse tree produced by ScicopiaParser#exclude.
def enterExclude(self, ctx:ScicopiaParser.ExcludeContext):
pass
# Exit a parse tree produced by ScicopiaParser#exclude.
def exitExclude(self, ctx:ScicopiaParser.ExcludeContext):
pass
# Enter a parse tree produced by ScicopiaParser#quotes.
def enterQuotes(self, ctx:ScicopiaParser.QuotesContext):
pass
# Exit a parse tree produced by ScicopiaParser#quotes.
def exitQuotes(self, ctx:ScicopiaParser.QuotesContext):
pass
# Enter a parse tree produced by ScicopiaParser#prefixed.
def enterPrefixed(self, ctx:ScicopiaParser.PrefixedContext):
pass
# Exit a parse tree produced by ScicopiaParser#prefixed.
def exitPrefixed(self, ctx:ScicopiaParser.PrefixedContext):
pass
# Enter a parse tree produced by ScicopiaParser#term.
def enterTerm(self, ctx:ScicopiaParser.TermContext):
pass
# Exit a parse tree produced by ScicopiaParser#term.
def exitTerm(self, ctx:ScicopiaParser.TermContext):
pass
del ScicopiaParser
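# --- Illustrative usage sketch (added, not part of the generated file) ---
# Assumes the matching ScicopiaLexer/ScicopiaParser modules were generated from
# Scicopia.g4 and that `query` is the grammar's start rule:
#
#   from antlr4 import InputStream, CommonTokenStream, ParseTreeWalker
#   from ScicopiaLexer import ScicopiaLexer
#   from ScicopiaParser import ScicopiaParser
#
#   tokens = CommonTokenStream(ScicopiaLexer(InputStream('author:doe -draft "deep learning"')))
#   tree = ScicopiaParser(tokens).query()
#   ParseTreeWalker().walk(ScicopiaListener(), tree)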
| 29.712121
| 85
| 0.72718
|
acb17f388a24ac86fabb5959b2a3efa022a28e32
| 19,020
|
py
|
Python
|
pydantic/main.py
|
pydevd/pydantic
|
cd50601172462b49cdf09e4d988906ba8f14af87
|
[
"MIT"
] | 25
|
2019-06-30T04:37:49.000Z
|
2022-03-19T19:57:37.000Z
|
pydantic/main.py
|
pydevd/pydantic
|
cd50601172462b49cdf09e4d988906ba8f14af87
|
[
"MIT"
] | 1
|
2018-11-22T15:52:55.000Z
|
2018-11-22T15:57:42.000Z
|
pydantic/main.py
|
pydevd/pydantic
|
cd50601172462b49cdf09e4d988906ba8f14af87
|
[
"MIT"
] | 4
|
2021-06-25T06:34:49.000Z
|
2022-02-07T01:52:10.000Z
|
import json
import warnings
from abc import ABCMeta
from copy import deepcopy
from functools import partial
from itertools import chain
from pathlib import Path
from types import FunctionType
from typing import Any, Callable, Dict, Set, Type, Union
from .error_wrappers import ErrorWrapper, ValidationError
from .errors import ConfigError, ExtraError, MissingError
from .fields import Field, Validator
from .json import custom_pydantic_encoder, pydantic_encoder
from .parse import Protocol, load_file, load_str_bytes
from .types import StrBytes
from .utils import clean_docstring, truncate, validate_field_name
from .validators import dict_validator
class BaseConfig:
title = None
anystr_strip_whitespace = False
min_anystr_length = 0
max_anystr_length = 2 ** 16
validate_all = False
ignore_extra = True
allow_extra = False
allow_mutation = True
allow_population_by_alias = False
use_enum_values = False
fields = {}
validate_assignment = False
error_msg_templates: Dict[str, str] = {}
arbitrary_types_allowed = False
json_encoders = {}
@classmethod
def get_field_schema(cls, name):
field_config = cls.fields.get(name) or {}
if isinstance(field_config, str):
field_config = {'alias': field_config}
return field_config
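# Example (added): Config.fields may map a field name either to a dict of
# per-field options or to a bare string, which get_field_schema treats as an
# alias, e.g.
#
#   class Config:
#       fields = {'first_name': 'firstName'}  # same as {'first_name': {'alias': 'firstName'}}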
def inherit_config(self_config: Type[BaseConfig], parent_config: Type[BaseConfig]) -> Type[BaseConfig]:
if not self_config:
base_classes = parent_config,
elif self_config == parent_config:
base_classes = self_config,
else:
base_classes = self_config, parent_config
return type('Config', base_classes, {})
TYPE_BLACKLIST = FunctionType, property, type, classmethod, staticmethod
class ValidatorGroup:
def __init__(self, validators):
self.validators = validators
self.used_validators = {'*'}
def get_validators(self, name):
self.used_validators.add(name)
specific_validators = self.validators.get(name)
wildcard_validators = self.validators.get('*')
if specific_validators or wildcard_validators:
return (specific_validators or []) + (wildcard_validators or [])
def check_for_unused(self):
unused_validators = set(chain(*[(v.func.__name__ for v in self.validators[f] if v.check_fields)
for f in (self.validators.keys() - self.used_validators)]))
if unused_validators:
fn = ', '.join(unused_validators)
raise ConfigError(f"Validators defined with incorrect fields: {fn} "
f"(use check_fields=False if you're inheriting from the model and intended this)")
def _extract_validators(namespace):
validators = {}
for var_name, value in namespace.items():
validator_config = getattr(value, '__validator_config', None)
if validator_config:
fields, v = validator_config
for field in fields:
if field in validators:
validators[field].append(v)
else:
validators[field] = [v]
return validators
class MetaModel(ABCMeta):
def __new__(mcs, name, bases, namespace):
        fields: Dict[str, Field] = {}
config = BaseConfig
for base in reversed(bases):
if issubclass(base, BaseModel) and base != BaseModel:
fields.update(base.__fields__)
config = inherit_config(base.__config__, config)
config = inherit_config(namespace.get('Config'), config)
vg = ValidatorGroup(_extract_validators(namespace))
for f in fields.values():
f.set_config(config)
extra_validators = vg.get_validators(f.name)
if extra_validators:
f.class_validators += extra_validators
# re-run prepare to add extra validators
f.prepare()
annotations = namespace.get('__annotations__', {})
# annotation only fields need to come first in fields
for ann_name, ann_type in annotations.items():
if not ann_name.startswith('_') and ann_name not in namespace:
validate_field_name(bases, ann_name)
fields[ann_name] = Field.infer(
name=ann_name,
value=...,
annotation=ann_type,
class_validators=vg.get_validators(ann_name),
config=config,
)
for var_name, value in namespace.items():
if not var_name.startswith('_') and not isinstance(value, TYPE_BLACKLIST):
validate_field_name(bases, var_name)
fields[var_name] = Field.infer(
name=var_name,
value=value,
annotation=annotations.get(var_name),
class_validators=vg.get_validators(var_name),
config=config,
)
vg.check_for_unused()
if config.json_encoders:
json_encoder = partial(custom_pydantic_encoder, config.json_encoders)
else:
json_encoder = pydantic_encoder
new_namespace = {
'__config__': config,
'__fields__': fields,
'__validators__': vg.validators,
'_schema_cache': {},
'_json_encoder': staticmethod(json_encoder),
**{n: v for n, v in namespace.items() if n not in fields}
}
return super().__new__(mcs, name, bases, new_namespace)
_missing = object()
class BaseModel(metaclass=MetaModel):
# populated by the metaclass, defined here to help IDEs only
__fields__ = {}
__validators__ = {}
Config = BaseConfig
__slots__ = '__values__',
def __init__(self, **data):
self.__setstate__(self._process_values(data))
def __getattr__(self, name):
try:
return self.__values__[name]
except KeyError:
raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{name}'")
def __setattr__(self, name, value):
if not self.__config__.allow_extra and name not in self.__fields__:
raise ValueError(f'"{self.__class__.__name__}" object has no field "{name}"')
elif not self.__config__.allow_mutation:
raise TypeError(f'"{self.__class__.__name__}" is immutable and does not support item assignment')
elif self.__config__.validate_assignment:
value_, error_ = self.fields[name].validate(value, self.dict(exclude={name}), loc=name)
if error_:
raise ValidationError([error_])
else:
self.__values__[name] = value_
else:
self.__values__[name] = value
def __getstate__(self):
return self.__values__
def __setstate__(self, state):
object.__setattr__(self, '__values__', state)
def dict(self, *, include: Set[str]=None, exclude: Set[str]=set(), by_alias: bool = False) -> Dict[str, Any]:
"""
Generate a dictionary representation of the model, optionally specifying which fields to include or exclude.
"""
get_key = self._get_key_factory(by_alias)
get_key = partial(get_key, self.fields)
return {
get_key(k): v
for k, v in self._iter(by_alias=by_alias)
if k not in exclude and (not include or k in include)
}
def _get_key_factory(self, by_alias: bool) -> Callable:
if by_alias:
return lambda fields, key: fields[key].alias
return lambda _, key: key
def json(self, *, include: Set[str]=None, exclude: Set[str]=set(), by_alias: bool = False,
encoder=None, **dumps_kwargs) -> str:
"""
Generate a JSON representation of the model, `include` and `exclude` arguments as per `dict()`.
`encoder` is an optional function to supply as `default` to json.dumps(), other arguments as per `json.dumps()`.
"""
return json.dumps(
self.dict(include=include, exclude=exclude, by_alias=by_alias),
default=encoder or self._json_encoder, **dumps_kwargs
)
@classmethod
def parse_obj(cls, obj):
if not isinstance(obj, dict):
exc = TypeError(f'{cls.__name__} expected dict not {type(obj).__name__}')
raise ValidationError([ErrorWrapper(exc, loc='__obj__')])
return cls(**obj)
@classmethod
def parse_raw(cls, b: StrBytes, *,
content_type: str=None,
encoding: str='utf8',
proto: Protocol=None,
allow_pickle: bool=False):
try:
obj = load_str_bytes(b, proto=proto, content_type=content_type, encoding=encoding,
allow_pickle=allow_pickle)
except (ValueError, TypeError, UnicodeDecodeError) as e:
raise ValidationError([ErrorWrapper(e, loc='__obj__')])
return cls.parse_obj(obj)
@classmethod
def parse_file(cls, path: Union[str, Path], *,
content_type: str=None,
encoding: str='utf8',
proto: Protocol=None,
allow_pickle: bool=False):
obj = load_file(path, proto=proto, content_type=content_type, encoding=encoding, allow_pickle=allow_pickle)
return cls.parse_obj(obj)
@classmethod
def construct(cls, **values):
"""
Creates a new model and set __values__ without any validation, thus values should already be trusted.
Chances are you don't want to use this method directly.
"""
m = cls.__new__(cls)
m.__setstate__(values)
return m
def copy(self, *, include: Set[str]=None, exclude: Set[str]=None, update: Dict[str, Any]=None, deep: bool=False):
"""
Duplicate a model, optionally choose which fields to include, exclude and change.
:param include: fields to include in new model
:param exclude: fields to exclude from new model, as with values this takes precedence over include
:param update: values to change/add in the new model. Note: the data is not validated before creating
the new model: you should trust this data
:param deep: set to `True` to make a deep copy of the model
:return: new model instance
"""
if include is None and exclude is None and update is None:
# skip constructing values if no arguments are passed
v = self.__values__
else:
exclude = exclude or set()
v = {
**{k: v for k, v in self.__values__.items() if k not in exclude and (not include or k in include)},
**(update or {})
}
if deep:
v = deepcopy(v)
return self.__class__.construct(**v)
@property
def fields(self):
return self.__fields__
@classmethod
def type_schema(cls, by_alias):
return {
'type': 'object',
'properties': (
{f.alias: f.schema(by_alias) for f in cls.__fields__.values()}
if by_alias else
{k: f.schema(by_alias) for k, f in cls.__fields__.items()}
)
}
@classmethod
def schema(cls, by_alias=True) -> Dict[str, Any]:
cached = cls._schema_cache.get(by_alias)
if cached is not None:
return cached
s = {'title': cls.__config__.title or cls.__name__}
if cls.__doc__:
s['description'] = clean_docstring(cls.__doc__)
s.update(cls.type_schema(by_alias))
cls._schema_cache[by_alias] = s
return s
@classmethod
def schema_json(cls, *, by_alias=True, **dumps_kwargs) -> str:
from .json import pydantic_encoder
return json.dumps(cls.schema(by_alias=by_alias), default=pydantic_encoder, **dumps_kwargs)
@classmethod
def get_validators(cls):
yield dict_validator
yield cls.validate
@classmethod
def validate(cls, value):
return cls(**value)
def _process_values(self, input_data: dict) -> Dict[str, Any]: # noqa: C901 (ignore complexity)
return validate_model(self, input_data)
@classmethod
def _get_value(cls, v, by_alias=False):
if isinstance(v, BaseModel):
return v.dict(by_alias=by_alias)
elif isinstance(v, list):
return [cls._get_value(v_, by_alias=by_alias) for v_ in v]
elif isinstance(v, dict):
return {k_: cls._get_value(v_, by_alias=by_alias) for k_, v_ in v.items()}
elif isinstance(v, set):
return {cls._get_value(v_, by_alias=by_alias) for v_ in v}
elif isinstance(v, tuple):
return tuple(cls._get_value(v_, by_alias=by_alias) for v_ in v)
else:
return v
def __iter__(self):
"""
so `dict(model)` works
"""
yield from self._iter()
def _iter(self, by_alias=False):
for k, v in self.__values__.items():
yield k, self._get_value(v, by_alias=by_alias)
def __eq__(self, other):
if isinstance(other, BaseModel):
return self.dict() == other.dict()
else:
return self.dict() == other
def __repr__(self):
return f'<{self}>'
def to_string(self, pretty=False):
divider = '\n ' if pretty else ' '
return '{}{}{}'.format(
self.__class__.__name__,
divider,
divider.join('{}={}'.format(k, truncate(v)) for k, v in self.__values__.items()),
)
def __str__(self):
return self.to_string()
def create_model(
model_name: str, *,
__config__: Type[BaseConfig]=None,
__base__: Type[BaseModel]=None,
**field_definitions):
"""
Dynamically create a model.
:param model_name: name of the created model
:param __config__: config class to use for the new model
:param __base__: base class for the new model to inherit from
:param **field_definitions: fields of the model (or extra fields if a base is supplied) in the format
`<name>=(<type>, <default default>)` or `<name>=<default value> eg. `foobar=(str, ...)` or `foobar=123`
"""
if __base__:
fields = deepcopy(__base__.__fields__)
validators = __base__.__validators__
if __config__ is not None:
raise ConfigError('to avoid confusion __config__ and __base__ cannot be used together')
else:
__base__ = BaseModel
fields = {}
validators = {}
config = __config__ or BaseConfig
vg = ValidatorGroup(validators)
for f_name, f_def in field_definitions.items():
if isinstance(f_def, tuple):
try:
f_annotation, f_value = f_def
except ValueError as e:
raise ConfigError(f'field definitions should either be a tuple of (<type>, <default>) or just a '
f'default value, unfortunately this means tuples as '
f'default values are not allowed') from e
else:
f_annotation, f_value = None, f_def
if f_name.startswith('_'):
warnings.warn(f'fields may not start with an underscore, ignoring "{f_name}"', RuntimeWarning)
else:
fields[f_name] = Field.infer(
name=f_name,
value=f_value,
annotation=f_annotation,
class_validators=vg.get_validators(f_name),
config=config,
)
namespace = {
        'Config': config,
'__fields__': fields,
}
return type(model_name, (__base__,), namespace)
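# Example (added): dynamically build a model roughly equivalent to
# `class Foo(BaseModel): bar: str = ...; count = 0`:
#
#   Foo = create_model('Foo', bar=(str, ...), count=0)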
_FUNCS = set()
def validator(*fields, pre: bool=False, whole: bool=False, always: bool=False, check_fields: bool=True):
"""
Decorate methods on the class indicating that they should be used to validate fields
:param fields: which field(s) the method should be called on
:param pre: whether or not this validator should be called before the standard validators (else after)
:param whole: for complex objects (sets, lists etc.) whether to validate individual elements or the whole object
:param always: whether this method and other validators should be called even if the value is missing
:param check_fields: whether to check that the fields actually exist on the model
"""
if not fields:
raise ConfigError('validator with no fields specified')
elif isinstance(fields[0], FunctionType):
raise ConfigError("validators should be used with fields and keyword arguments, not bare. "
"E.g. usage should be `@validator('<field_name>', ...)`")
def dec(f):
ref = f.__module__ + '.' + f.__qualname__
if ref in _FUNCS:
raise ConfigError(f'duplicate validator function "{ref}"')
_FUNCS.add(ref)
f_cls = classmethod(f)
f_cls.__validator_config = fields, Validator(f, pre, whole, always, check_fields)
return f_cls
return dec
def validate_model(model, input_data: dict, raise_exc=True): # noqa: C901 (ignore complexity)
"""
validate data against a model.
"""
values = {}
errors = []
for name, field in model.__fields__.items():
value = input_data.get(field.alias, _missing)
if value is _missing and model.__config__.allow_population_by_alias and field.alt_alias:
value = input_data.get(field.name, _missing)
if value is _missing:
if model.__config__.validate_all or field.validate_always:
value = deepcopy(field.default)
else:
if field.required:
errors.append(ErrorWrapper(MissingError(), loc=field.alias, config=model.__config__))
else:
values[name] = deepcopy(field.default)
continue
v_, errors_ = field.validate(value, values, loc=field.alias, cls=model.__class__)
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
else:
values[name] = v_
if (not model.__config__.ignore_extra) or model.__config__.allow_extra:
extra = input_data.keys() - {f.alias for f in model.__fields__.values()}
if extra:
if model.__config__.allow_extra:
for field in extra:
values[field] = input_data[field]
else:
# config.ignore_extra is False
for field in sorted(extra):
errors.append(ErrorWrapper(ExtraError(), loc=field, config=model.__config__))
if not raise_exc:
return values, ValidationError(errors) if errors else None
if errors:
raise ValidationError(errors)
return values
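# --- Illustrative usage sketch (added, not part of the original module) ---
# A minimal model exercising field inference, a validator and serialisation:
#
#   class User(BaseModel):
#       id: int
#       name = 'Jane Doe'
#
#       @validator('name')
#       def name_not_blank(cls, v):
#           assert v.strip(), 'name may not be blank'
#           return v
#
#   user = User(id='42')            # '42' is coerced to int 42
#   print(user.dict())              # {'id': 42, 'name': 'Jane Doe'}
#   print(User.parse_raw('{"id": 1, "name": "Ada"}'))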
| 37.367387
| 120
| 0.613249
|
f828243bf3b0bfb48856c59dc9a616de6165ae25
| 8,288
|
py
|
Python
|
dashboard/dashboard/pinpoint/models/quest/find_isolate_test.py
|
bopopescu/chromium72-third-party-catapult
|
774e1355b871e13bb858147a136e9cb476f55030
|
[
"BSD-3-Clause"
] | 1
|
2019-01-04T10:08:58.000Z
|
2019-01-04T10:08:58.000Z
|
dashboard/dashboard/pinpoint/models/quest/find_isolate_test.py
|
kind-john/catapult
|
29635376119833f172a58a48a3282d353ce55d2b
|
[
"BSD-3-Clause"
] | null | null | null |
dashboard/dashboard/pinpoint/models/quest/find_isolate_test.py
|
kind-john/catapult
|
29635376119833f172a58a48a3282d353ce55d2b
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
from dashboard.pinpoint.models import isolate
from dashboard.pinpoint.models.change import change_test
from dashboard.pinpoint.models.quest import find_isolate
from dashboard.pinpoint import test
class FindIsolateQuestTest(unittest.TestCase):
def testMissingBuilder(self):
arguments = {'target': 'telemetry_perf_tests'}
with self.assertRaises(TypeError):
find_isolate.FindIsolate.FromDict(arguments)
def testMissingTarget(self):
arguments = {'builder': 'Mac Builder'}
with self.assertRaises(TypeError):
find_isolate.FindIsolate.FromDict(arguments)
def testAllArguments(self):
arguments = {
'builder': 'Mac Builder',
'target': 'telemetry_perf_tests',
}
expected = find_isolate.FindIsolate('Mac Builder', 'telemetry_perf_tests')
self.assertEqual(find_isolate.FindIsolate.FromDict(arguments), expected)
class _FindIsolateExecutionTest(test.TestCase):
def setUp(self):
super(_FindIsolateExecutionTest, self).setUp()
change = change_test.Change(123)
isolate.Put((
('Mac Builder', change, 'telemetry_perf_tests',
'https://isolate.server', '7c7e90be'),
))
def assertExecutionFailure(self, execution, exception_class):
self.assertTrue(execution.completed)
self.assertTrue(execution.failed)
self.assertIsInstance(execution.exception, basestring)
last_exception_line = execution.exception.splitlines()[-1]
self.assertTrue(last_exception_line.startswith(exception_class.__name__))
self.assertEqual(execution.result_arguments, {})
def assertExecutionSuccess(self, execution):
self.assertTrue(execution.completed)
self.assertFalse(execution.failed)
self.assertIsNone(execution.exception)
class IsolateLookupTest(_FindIsolateExecutionTest):
def testIsolateLookupSuccess(self):
quest = find_isolate.FindIsolate('Mac Builder', 'telemetry_perf_tests')
execution = quest.Start(change_test.Change(123))
execution.Poll()
expected_result_arguments = {
'isolate_server': 'https://isolate.server',
'isolate_hash': '7c7e90be',
}
expected_as_dict = {
'completed': True,
'exception': None,
'details': [
{
'key': 'builder',
'value': 'Mac Builder',
},
{
'key': 'isolate',
'value': '7c7e90be',
'url': 'https://isolate.server/browse?digest=7c7e90be',
},
],
}
self.assertExecutionSuccess(execution)
self.assertEqual(execution.result_values, ())
self.assertEqual(execution.result_arguments, expected_result_arguments)
self.assertEqual(execution.AsDict(), expected_as_dict)
@mock.patch('dashboard.services.buildbucket_service.GetJobStatus')
@mock.patch('dashboard.services.buildbucket_service.Put')
class BuildTest(_FindIsolateExecutionTest):
def testBuildLifecycle(self, put, get_job_status):
change = change_test.Change(123, 456, patch=True)
quest = find_isolate.FindIsolate('Mac Builder', 'telemetry_perf_tests')
execution = quest.Start(change)
# Request a build.
put.return_value = {'build': {'id': 'build_id'}}
execution.Poll()
self.assertFalse(execution.completed)
put.assert_called_once_with(find_isolate.BUCKET, {
'builder_name': 'Mac Builder',
'properties': {
'clobber': True,
'parent_got_revision': 'commit_123',
'deps_revision_overrides': {test.CATAPULT_URL: 'commit_456'},
'patch_gerrit_url': 'https://codereview.com',
'patch_issue': 567890,
'patch_project': 'project/name',
'patch_ref': 'refs/changes/90/567890/5',
'patch_repository_url': test.CHROMIUM_URL,
'patch_set': 5,
'patch_storage': 'gerrit',
}
})
# Check build status.
get_job_status.return_value = {'build': {
'status': 'STARTED',
'url': 'build_url',
}}
execution.Poll()
self.assertFalse(execution.completed)
get_job_status.assert_called_once_with('build_id')
# Look up isolate hash.
isolate.Put((
('Mac Builder', change, 'telemetry_perf_tests',
'https://isolate.server', 'isolate git hash'),
))
execution.Poll()
expected_result_arguments = {
'isolate_server': 'https://isolate.server',
'isolate_hash': 'isolate git hash',
}
expected_as_dict = {
'completed': True,
'exception': None,
'details': [
{
'key': 'builder',
'value': 'Mac Builder',
},
{
'key': 'build',
'value': 'build_id',
'url': 'build_url',
},
{
'key': 'isolate',
'value': 'isolate git hash',
'url': 'https://isolate.server/browse?digest=isolate git hash',
},
],
}
self.assertExecutionSuccess(execution)
self.assertEqual(execution.result_values, ())
self.assertEqual(execution.result_arguments, expected_result_arguments)
self.assertEqual(execution.AsDict(), expected_as_dict)
def testSimultaneousBuilds(self, put, get_job_status):
# Two builds started at the same time on the same Change should reuse the
# same build request.
change = change_test.Change(0)
quest = find_isolate.FindIsolate('Mac Builder', 'telemetry_perf_tests')
execution_1 = quest.Start(change)
execution_2 = quest.Start(change)
# Request a build.
put.return_value = {'build': {'id': 'build_id'}}
execution_1.Poll()
execution_2.Poll()
self.assertFalse(execution_1.completed)
self.assertFalse(execution_2.completed)
self.assertEqual(put.call_count, 1)
# Check build status.
get_job_status.return_value = {'build': {'status': 'STARTED'}}
execution_1.Poll()
execution_2.Poll()
self.assertFalse(execution_1.completed)
self.assertFalse(execution_2.completed)
self.assertEqual(get_job_status.call_count, 2)
# Look up isolate hash.
isolate.Put((
('Mac Builder', change, 'telemetry_perf_tests',
'https://isolate.server', 'isolate git hash'),
))
execution_1.Poll()
execution_2.Poll()
self.assertExecutionSuccess(execution_1)
self.assertExecutionSuccess(execution_2)
def testBuildFailure(self, put, get_job_status):
quest = find_isolate.FindIsolate('Mac Builder', 'telemetry_perf_tests')
execution = quest.Start(change_test.Change(0))
# Request a build.
put.return_value = {'build': {'id': 'build_id'}}
execution.Poll()
# Check build status.
get_job_status.return_value = {
'build': {
'status': 'COMPLETED',
'result': 'FAILURE',
'failure_reason': 'BUILD_FAILURE',
}
}
execution.Poll()
self.assertExecutionFailure(execution, find_isolate.BuildError)
def testBuildCanceled(self, put, get_job_status):
quest = find_isolate.FindIsolate('Mac Builder', 'telemetry_perf_tests')
execution = quest.Start(change_test.Change(0))
# Request a build.
put.return_value = {'build': {'id': 'build_id'}}
execution.Poll()
# Check build status.
get_job_status.return_value = {
'build': {
'status': 'COMPLETED',
'result': 'CANCELED',
'cancelation_reason': 'TIMEOUT',
}
}
execution.Poll()
self.assertExecutionFailure(execution, find_isolate.BuildError)
def testBuildSucceededButIsolateIsMissing(self, put, get_job_status):
quest = find_isolate.FindIsolate('Mac Builder', 'telemetry_perf_tests')
execution = quest.Start(change_test.Change(0))
# Request a build.
put.return_value = {'build': {'id': 'build_id'}}
execution.Poll()
# Check build status.
get_job_status.return_value = {
'build': {
'status': 'COMPLETED',
'result': 'SUCCESS',
}
}
with self.assertRaises(find_isolate.IsolateNotFoundError):
execution.Poll()
avg_line_length: 31.513308 | max_line_length: 79 | alphanum_fraction: 0.650338

hexsha: 64d1a39255de3f77b927f48dfce14da2df378ae8 | size: 32167 | ext: py | lang: Python
max_stars_repo: venv/Lib/site-packages/sqlalchemy/dialects/firebird/base.py @ OliviaNabbosa89/Disaster_Responses (1e66d77c303cec685dfc2ca94f4fca4cc9400570) | licenses: ["MIT"] | stars: null
max_issues_repo: venv/Lib/site-packages/sqlalchemy/dialects/firebird/base.py @ OliviaNabbosa89/Disaster_Responses (1e66d77c303cec685dfc2ca94f4fca4cc9400570) | licenses: ["MIT"] | issues: null
max_forks_repo: venv/Lib/site-packages/sqlalchemy/dialects/firebird/base.py @ OliviaNabbosa89/Disaster_Responses (1e66d77c303cec685dfc2ca94f4fca4cc9400570) | licenses: ["MIT"] | forks: 1 (2021-06-20T19:28:37.000Z to 2021-06-20T19:28:37.000Z)
# firebird/base.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
r"""
.. dialect:: firebird
:name: Firebird
.. note::
The Firebird dialect within SQLAlchemy **is not currently supported**.
It is not tested within continuous integration and is likely to have
many issues and caveats not currently handled. Consider using the
`external dialect <https://github.com/pauldex/sqlalchemy-firebird>`_
instead.
.. deprecated:: 1.4 The internal Firebird dialect is deprecated and will be
removed in a future version. Use the external dialect.
Firebird Dialects
-----------------
Firebird offers two distinct dialects_ (not to be confused with a
SQLAlchemy ``Dialect``):
dialect 1
This is the old syntax and behaviour, inherited from Interbase pre-6.0.
dialect 3
This is the newer and supported syntax, introduced in Interbase 6.0.
The SQLAlchemy Firebird dialect detects these versions and
adjusts its representation of SQL accordingly. However,
support for dialect 1 is not well tested and probably has
incompatibilities.
Locking Behavior
----------------
Firebird locks tables aggressively. For this reason, a DROP TABLE may
hang until other transactions are released. SQLAlchemy does its best
to release transactions as quickly as possible. The most common cause
of hanging transactions is a non-fully consumed result set, i.e.::
result = engine.execute(text("select * from table"))
row = result.fetchone()
return
Where above, the ``CursorResult`` has not been fully consumed. The
connection will be returned to the pool and the transactional state
rolled back once the Python garbage collector reclaims the objects
which hold onto the connection, which often occurs asynchronously.
The above use case can be alleviated by calling ``first()`` on the
``CursorResult`` which will fetch the first row and immediately close
all remaining cursor/connection resources.
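A minimal sketch of that pattern (illustrative, not part of the original
documentation)::
    result = engine.execute(text("select * from table"))
    row = result.first()  # fetches one row and releases the cursor/connection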
RETURNING support
-----------------
Firebird 2.0 supports returning a result set from inserts, and 2.1
extends that to deletes and updates. This is generically exposed by
the SQLAlchemy ``returning()`` method, such as::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\
values(name='foo')
print(result.fetchall())
# UPDATE..RETURNING
raises = empl.update().returning(empl.c.id, empl.c.salary).\
where(empl.c.sales>100).\
values(dict(salary=empl.c.salary * 1.1))
print(raises.fetchall())
.. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html
"""
import datetime
from sqlalchemy import exc
from sqlalchemy import sql
from sqlalchemy import types as sqltypes
from sqlalchemy import util
from sqlalchemy.engine import default
from sqlalchemy.engine import reflection
from sqlalchemy.sql import compiler
from sqlalchemy.sql import expression
from sqlalchemy.types import BIGINT
from sqlalchemy.types import BLOB
from sqlalchemy.types import DATE
from sqlalchemy.types import FLOAT
from sqlalchemy.types import INTEGER
from sqlalchemy.types import Integer
from sqlalchemy.types import NUMERIC
from sqlalchemy.types import SMALLINT
from sqlalchemy.types import TEXT
from sqlalchemy.types import TIME
from sqlalchemy.types import TIMESTAMP
RESERVED_WORDS = set(
[
"active",
"add",
"admin",
"after",
"all",
"alter",
"and",
"any",
"as",
"asc",
"ascending",
"at",
"auto",
"avg",
"before",
"begin",
"between",
"bigint",
"bit_length",
"blob",
"both",
"by",
"case",
"cast",
"char",
"character",
"character_length",
"char_length",
"check",
"close",
"collate",
"column",
"commit",
"committed",
"computed",
"conditional",
"connect",
"constraint",
"containing",
"count",
"create",
"cross",
"cstring",
"current",
"current_connection",
"current_date",
"current_role",
"current_time",
"current_timestamp",
"current_transaction",
"current_user",
"cursor",
"database",
"date",
"day",
"dec",
"decimal",
"declare",
"default",
"delete",
"desc",
"descending",
"disconnect",
"distinct",
"do",
"domain",
"double",
"drop",
"else",
"end",
"entry_point",
"escape",
"exception",
"execute",
"exists",
"exit",
"external",
"extract",
"fetch",
"file",
"filter",
"float",
"for",
"foreign",
"from",
"full",
"function",
"gdscode",
"generator",
"gen_id",
"global",
"grant",
"group",
"having",
"hour",
"if",
"in",
"inactive",
"index",
"inner",
"input_type",
"insensitive",
"insert",
"int",
"integer",
"into",
"is",
"isolation",
"join",
"key",
"leading",
"left",
"length",
"level",
"like",
"long",
"lower",
"manual",
"max",
"maximum_segment",
"merge",
"min",
"minute",
"module_name",
"month",
"names",
"national",
"natural",
"nchar",
"no",
"not",
"null",
"numeric",
"octet_length",
"of",
"on",
"only",
"open",
"option",
"or",
"order",
"outer",
"output_type",
"overflow",
"page",
"pages",
"page_size",
"parameter",
"password",
"plan",
"position",
"post_event",
"precision",
"primary",
"privileges",
"procedure",
"protected",
"rdb$db_key",
"read",
"real",
"record_version",
"recreate",
"recursive",
"references",
"release",
"reserv",
"reserving",
"retain",
"returning_values",
"returns",
"revoke",
"right",
"rollback",
"rows",
"row_count",
"savepoint",
"schema",
"second",
"segment",
"select",
"sensitive",
"set",
"shadow",
"shared",
"singular",
"size",
"smallint",
"snapshot",
"some",
"sort",
"sqlcode",
"stability",
"start",
"starting",
"starts",
"statistics",
"sub_type",
"sum",
"suspend",
"table",
"then",
"time",
"timestamp",
"to",
"trailing",
"transaction",
"trigger",
"trim",
"uncommitted",
"union",
"unique",
"update",
"upper",
"user",
"using",
"value",
"values",
"varchar",
"variable",
"varying",
"view",
"wait",
"when",
"where",
"while",
"with",
"work",
"write",
"year",
]
)
class _StringType(sqltypes.String):
"""Base for Firebird string types."""
def __init__(self, charset=None, **kw):
self.charset = charset
super(_StringType, self).__init__(**kw)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""Firebird VARCHAR type"""
__visit_name__ = "VARCHAR"
def __init__(self, length=None, **kwargs):
super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
"""Firebird CHAR type"""
__visit_name__ = "CHAR"
def __init__(self, length=None, **kwargs):
super(CHAR, self).__init__(length=length, **kwargs)
class _FBDateTime(sqltypes.DateTime):
def bind_processor(self, dialect):
def process(value):
if type(value) == datetime.date:
return datetime.datetime(value.year, value.month, value.day)
else:
return value
return process
colspecs = {sqltypes.DateTime: _FBDateTime}
ischema_names = {
"SHORT": SMALLINT,
"LONG": INTEGER,
"QUAD": FLOAT,
"FLOAT": FLOAT,
"DATE": DATE,
"TIME": TIME,
"TEXT": TEXT,
"INT64": BIGINT,
"DOUBLE": FLOAT,
"TIMESTAMP": TIMESTAMP,
"VARYING": VARCHAR,
"CSTRING": CHAR,
"BLOB": BLOB,
}
# TODO: date conversion types (should be implemented as _FBDateTime,
# _FBDate, etc. as bind/result functionality is required)
class FBTypeCompiler(compiler.GenericTypeCompiler):
def visit_boolean(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_datetime(self, type_, **kw):
return self.visit_TIMESTAMP(type_, **kw)
def visit_TEXT(self, type_, **kw):
return "BLOB SUB_TYPE 1"
def visit_BLOB(self, type_, **kw):
return "BLOB SUB_TYPE 0"
def _extend_string(self, type_, basic):
charset = getattr(type_, "charset", None)
if charset is None:
return basic
else:
return "%s CHARACTER SET %s" % (basic, charset)
def visit_CHAR(self, type_, **kw):
basic = super(FBTypeCompiler, self).visit_CHAR(type_, **kw)
return self._extend_string(type_, basic)
def visit_VARCHAR(self, type_, **kw):
if not type_.length:
raise exc.CompileError(
"VARCHAR requires a length on dialect %s" % self.dialect.name
)
basic = super(FBTypeCompiler, self).visit_VARCHAR(type_, **kw)
return self._extend_string(type_, basic)
class FBCompiler(sql.compiler.SQLCompiler):
"""Firebird specific idiosyncrasies"""
ansi_bind_rules = True
    # def visit_contains_op_binary(self, binary, operator, **kw):
    #     can't use CONTAINING because it's case-insensitive.
    # def visit_not_contains_op_binary(self, binary, operator, **kw):
    #     can't use NOT CONTAINING because it's case-insensitive.
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
def visit_startswith_op_binary(self, binary, operator, **kw):
return "%s STARTING WITH %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
)
def visit_not_startswith_op_binary(self, binary, operator, **kw):
return "%s NOT STARTING WITH %s" % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw),
)
def visit_mod_binary(self, binary, operator, **kw):
return "mod(%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw),
)
def visit_alias(self, alias, asfrom=False, **kwargs):
if self.dialect._version_two:
return super(FBCompiler, self).visit_alias(
alias, asfrom=asfrom, **kwargs
)
else:
# Override to not use the AS keyword which FB 1.5 does not like
if asfrom:
alias_name = (
isinstance(alias.name, expression._truncated_label)
and self._truncated_identifier("alias", alias.name)
or alias.name
)
return (
self.process(alias.element, asfrom=asfrom, **kwargs)
+ " "
+ self.preparer.format_alias(alias, alias_name)
)
else:
return self.process(alias.element, **kwargs)
def visit_substring_func(self, func, **kw):
s = self.process(func.clauses.clauses[0])
start = self.process(func.clauses.clauses[1])
if len(func.clauses.clauses) > 2:
length = self.process(func.clauses.clauses[2])
return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
else:
return "SUBSTRING(%s FROM %s)" % (s, start)
def visit_length_func(self, function, **kw):
if self.dialect._version_two:
return "char_length" + self.function_argspec(function)
else:
return "strlen" + self.function_argspec(function)
visit_char_length_func = visit_length_func
def function_argspec(self, func, **kw):
# TODO: this probably will need to be
# narrowed to a fixed list, some no-arg functions
# may require parens - see similar example in the oracle
# dialect
if func.clauses is not None and len(func.clauses):
return self.process(func.clause_expr, **kw)
else:
return ""
def default_from(self):
return " FROM rdb$database"
def visit_sequence(self, seq, **kw):
return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)
def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement, position is just
before column list Firebird puts the limit and offset right
after the ``SELECT``...
"""
result = ""
if select._limit_clause is not None:
result += "FIRST %s " % self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
result += "SKIP %s " % self.process(select._offset_clause, **kw)
result += super(FBCompiler, self).get_select_precolumns(select, **kw)
return result
def limit_clause(self, select, **kw):
"""Already taken care of in the `get_select_precolumns` method."""
return ""
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in expression._select_iterables(returning_cols)
]
return "RETURNING " + ", ".join(columns)
class FBDDLCompiler(sql.compiler.DDLCompiler):
"""Firebird syntactic idiosyncrasies"""
def visit_create_sequence(self, create):
"""Generate a ``CREATE GENERATOR`` statement for the sequence."""
# no syntax for these
# http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html
if create.element.start is not None:
raise NotImplementedError(
"Firebird SEQUENCE doesn't support START WITH"
)
if create.element.increment is not None:
raise NotImplementedError(
"Firebird SEQUENCE doesn't support INCREMENT BY"
)
if self.dialect._version_two:
return "CREATE SEQUENCE %s" % self.preparer.format_sequence(
create.element
)
else:
return "CREATE GENERATOR %s" % self.preparer.format_sequence(
create.element
)
def visit_drop_sequence(self, drop):
"""Generate a ``DROP GENERATOR`` statement for the sequence."""
if self.dialect._version_two:
return "DROP SEQUENCE %s" % self.preparer.format_sequence(
drop.element
)
else:
return "DROP GENERATOR %s" % self.preparer.format_sequence(
drop.element
)
def visit_computed_column(self, generated):
if generated.persisted is not None:
raise exc.CompileError(
"Firebird computed columns do not support a persistence "
"method setting; set the 'persisted' flag to None for "
"Firebird support."
)
return "GENERATED ALWAYS AS (%s)" % self.sql_compiler.process(
generated.sqltext, include_table=False, literal_binds=True
)
class FBIdentifierPreparer(sql.compiler.IdentifierPreparer):
"""Install Firebird specific reserved words."""
reserved_words = RESERVED_WORDS
illegal_initial_characters = compiler.ILLEGAL_INITIAL_CHARACTERS.union(
["_"]
)
def __init__(self, dialect):
super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True)
class FBExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
"""Get the next value from the sequence using ``gen_id()``."""
return self._execute_scalar(
"SELECT gen_id(%s, 1) FROM rdb$database"
% self.identifier_preparer.format_sequence(seq),
type_,
)
class FBDialect(default.DefaultDialect):
"""Firebird dialect"""
name = "firebird"
supports_statement_cache = True
max_identifier_length = 31
supports_sequences = True
sequences_optional = False
supports_default_values = True
postfetch_lastrowid = False
supports_native_boolean = False
requires_name_normalize = True
supports_empty_insert = False
statement_compiler = FBCompiler
ddl_compiler = FBDDLCompiler
preparer = FBIdentifierPreparer
type_compiler = FBTypeCompiler
execution_ctx_cls = FBExecutionContext
colspecs = colspecs
ischema_names = ischema_names
construct_arguments = []
    # defaults to dialect ver. 3;
    # the actual version is autodetected upon first connect
_version_two = True
def __init__(self, *args, **kwargs):
util.warn_deprecated(
"The firebird dialect is deprecated and will be removed "
"in a future version. This dialect is superseded by the external "
"dialect https://github.com/pauldex/sqlalchemy-firebird.",
version="1.4",
)
super(FBDialect, self).__init__(*args, **kwargs)
def initialize(self, connection):
super(FBDialect, self).initialize(connection)
self._version_two = (
"firebird" in self.server_version_info
and self.server_version_info >= (2,)
) or (
"interbase" in self.server_version_info
and self.server_version_info >= (6,)
)
if not self._version_two:
# TODO: whatever other pre < 2.0 stuff goes here
self.ischema_names = ischema_names.copy()
self.ischema_names["TIMESTAMP"] = sqltypes.DATE
self.colspecs = {sqltypes.DateTime: sqltypes.DATE}
self.implicit_returning = self._version_two and self.__dict__.get(
"implicit_returning", True
)
def has_table(self, connection, table_name, schema=None):
"""Return ``True`` if the given table exists, ignoring
the `schema`."""
self._ensure_has_table_connection(connection)
tblqry = """
SELECT 1 AS has_table FROM rdb$database
WHERE EXISTS (SELECT rdb$relation_name
FROM rdb$relations
WHERE rdb$relation_name=?)
"""
c = connection.exec_driver_sql(
tblqry, [self.denormalize_name(table_name)]
)
return c.first() is not None
def has_sequence(self, connection, sequence_name, schema=None):
"""Return ``True`` if the given sequence (generator) exists."""
genqry = """
SELECT 1 AS has_sequence FROM rdb$database
WHERE EXISTS (SELECT rdb$generator_name
FROM rdb$generators
WHERE rdb$generator_name=?)
"""
c = connection.exec_driver_sql(
genqry, [self.denormalize_name(sequence_name)]
)
return c.first() is not None
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
# there are two queries commonly mentioned for this.
# this one, using view_blr, is at the Firebird FAQ among other places:
# http://www.firebirdfaq.org/faq174/
s = """
select rdb$relation_name
from rdb$relations
where rdb$view_blr is null
and (rdb$system_flag is null or rdb$system_flag = 0);
"""
# the other query is this one. It's not clear if there's really
# any difference between these two. This link:
# http://www.alberton.info/firebird_sql_meta_info.html#.Ur3vXfZGni8
# states them as interchangeable. Some discussion at [ticket:2898]
# SELECT DISTINCT rdb$relation_name
# FROM rdb$relation_fields
# WHERE rdb$system_flag=0 AND rdb$view_context IS NULL
return [
self.normalize_name(row[0])
for row in connection.exec_driver_sql(s)
]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
# see http://www.firebirdfaq.org/faq174/
s = """
select rdb$relation_name
from rdb$relations
where rdb$view_blr is not null
and (rdb$system_flag is null or rdb$system_flag = 0);
"""
return [
self.normalize_name(row[0])
for row in connection.exec_driver_sql(s)
]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
qry = """
SELECT rdb$view_source AS view_source
FROM rdb$relations
WHERE rdb$relation_name=?
"""
rp = connection.exec_driver_sql(
qry, [self.denormalize_name(view_name)]
)
row = rp.first()
if row:
return row["view_source"]
else:
return None
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
# Query to extract the PK/FK constrained fields of the given table
keyqry = """
SELECT se.rdb$field_name AS fname
FROM rdb$relation_constraints rc
JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name
WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
"""
tablename = self.denormalize_name(table_name)
# get primary key fields
c = connection.exec_driver_sql(keyqry, ["PRIMARY KEY", tablename])
pkfields = [self.normalize_name(r["fname"]) for r in c.fetchall()]
return {"constrained_columns": pkfields, "name": None}
@reflection.cache
def get_column_sequence(
self, connection, table_name, column_name, schema=None, **kw
):
tablename = self.denormalize_name(table_name)
colname = self.denormalize_name(column_name)
        # Heuristic query to determine the generator associated with a PK field
genqry = """
SELECT trigdep.rdb$depended_on_name AS fgenerator
FROM rdb$dependencies tabdep
JOIN rdb$dependencies trigdep
ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name
AND trigdep.rdb$depended_on_type=14
AND trigdep.rdb$dependent_type=2
JOIN rdb$triggers trig ON
trig.rdb$trigger_name=tabdep.rdb$dependent_name
WHERE tabdep.rdb$depended_on_name=?
AND tabdep.rdb$depended_on_type=0
AND trig.rdb$trigger_type=1
AND tabdep.rdb$field_name=?
AND (SELECT count(*)
FROM rdb$dependencies trigdep2
WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2
"""
genr = connection.exec_driver_sql(genqry, [tablename, colname]).first()
if genr is not None:
return dict(name=self.normalize_name(genr["fgenerator"]))
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
# Query to extract the details of all the fields of the given table
tblqry = """
SELECT r.rdb$field_name AS fname,
r.rdb$null_flag AS null_flag,
t.rdb$type_name AS ftype,
f.rdb$field_sub_type AS stype,
f.rdb$field_length/
COALESCE(cs.rdb$bytes_per_character,1) AS flen,
f.rdb$field_precision AS fprec,
f.rdb$field_scale AS fscale,
COALESCE(r.rdb$default_source,
f.rdb$default_source) AS fdefault
FROM rdb$relation_fields r
JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
JOIN rdb$types t
ON t.rdb$type=f.rdb$field_type AND
t.rdb$field_name='RDB$FIELD_TYPE'
LEFT JOIN rdb$character_sets cs ON
f.rdb$character_set_id=cs.rdb$character_set_id
WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=?
ORDER BY r.rdb$field_position
"""
# get the PK, used to determine the eventual associated sequence
pk_constraint = self.get_pk_constraint(connection, table_name)
pkey_cols = pk_constraint["constrained_columns"]
tablename = self.denormalize_name(table_name)
# get all of the fields for this table
c = connection.exec_driver_sql(tblqry, [tablename])
cols = []
while True:
row = c.fetchone()
if row is None:
break
name = self.normalize_name(row["fname"])
orig_colname = row["fname"]
# get the data type
colspec = row["ftype"].rstrip()
coltype = self.ischema_names.get(colspec)
if coltype is None:
util.warn(
"Did not recognize type '%s' of column '%s'"
% (colspec, name)
)
coltype = sqltypes.NULLTYPE
elif issubclass(coltype, Integer) and row["fprec"] != 0:
coltype = NUMERIC(
precision=row["fprec"], scale=row["fscale"] * -1
)
elif colspec in ("VARYING", "CSTRING"):
coltype = coltype(row["flen"])
elif colspec == "TEXT":
coltype = TEXT(row["flen"])
elif colspec == "BLOB":
if row["stype"] == 1:
coltype = TEXT()
else:
coltype = BLOB()
else:
coltype = coltype()
# does it have a default value?
defvalue = None
if row["fdefault"] is not None:
# the value comes down as "DEFAULT 'value'": there may be
# more than one whitespace around the "DEFAULT" keyword
# and it may also be lower case
# (see also http://tracker.firebirdsql.org/browse/CORE-356)
defexpr = row["fdefault"].lstrip()
assert defexpr[:8].rstrip().upper() == "DEFAULT", (
"Unrecognized default value: %s" % defexpr
)
defvalue = defexpr[8:].strip()
if defvalue == "NULL":
# Redundant
defvalue = None
col_d = {
"name": name,
"type": coltype,
"nullable": not bool(row["null_flag"]),
"default": defvalue,
"autoincrement": "auto",
}
if orig_colname.lower() == orig_colname:
col_d["quote"] = True
# if the PK is a single field, try to see if its linked to
# a sequence thru a trigger
if len(pkey_cols) == 1 and name == pkey_cols[0]:
seq_d = self.get_column_sequence(connection, tablename, name)
if seq_d is not None:
col_d["sequence"] = seq_d
cols.append(col_d)
return cols
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
# Query to extract the details of each UK/FK of the given table
fkqry = """
SELECT rc.rdb$constraint_name AS cname,
cse.rdb$field_name AS fname,
ix2.rdb$relation_name AS targetrname,
se.rdb$field_name AS targetfname
FROM rdb$relation_constraints rc
JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name
JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key
JOIN rdb$index_segments cse ON
cse.rdb$index_name=ix1.rdb$index_name
JOIN rdb$index_segments se
ON se.rdb$index_name=ix2.rdb$index_name
AND se.rdb$field_position=cse.rdb$field_position
WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=?
ORDER BY se.rdb$index_name, se.rdb$field_position
"""
tablename = self.denormalize_name(table_name)
c = connection.exec_driver_sql(fkqry, ["FOREIGN KEY", tablename])
fks = util.defaultdict(
lambda: {
"name": None,
"constrained_columns": [],
"referred_schema": None,
"referred_table": None,
"referred_columns": [],
}
)
for row in c:
cname = self.normalize_name(row["cname"])
fk = fks[cname]
if not fk["name"]:
fk["name"] = cname
fk["referred_table"] = self.normalize_name(row["targetrname"])
fk["constrained_columns"].append(self.normalize_name(row["fname"]))
fk["referred_columns"].append(
self.normalize_name(row["targetfname"])
)
return list(fks.values())
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
qry = """
SELECT ix.rdb$index_name AS index_name,
ix.rdb$unique_flag AS unique_flag,
ic.rdb$field_name AS field_name
FROM rdb$indices ix
JOIN rdb$index_segments ic
ON ix.rdb$index_name=ic.rdb$index_name
LEFT OUTER JOIN rdb$relation_constraints
ON rdb$relation_constraints.rdb$index_name =
ic.rdb$index_name
WHERE ix.rdb$relation_name=? AND ix.rdb$foreign_key IS NULL
AND rdb$relation_constraints.rdb$constraint_type IS NULL
ORDER BY index_name, ic.rdb$field_position
"""
c = connection.exec_driver_sql(
qry, [self.denormalize_name(table_name)]
)
indexes = util.defaultdict(dict)
for row in c:
indexrec = indexes[row["index_name"]]
if "name" not in indexrec:
indexrec["name"] = self.normalize_name(row["index_name"])
indexrec["column_names"] = []
indexrec["unique"] = bool(row["unique_flag"])
indexrec["column_names"].append(
self.normalize_name(row["field_name"])
)
return list(indexes.values())
avg_line_length: 32.491919 | max_line_length: 80 | alphanum_fraction: 0.555663

hexsha: 045388143ae66491ed902a53869a393d4a182a00 | size: 625 | ext: py | lang: Python
max_stars_repo: agents/load_ppo_params.py @ matthiasgruber/supervisor (f7ab5f5ee67170256033e62e61a5be3410fcf6a8) | licenses: ["MIT"] | stars: 3 (2021-08-08T20:22:13.000Z to 2022-01-27T17:22:32.000Z)
max_issues_repo: agents/load_ppo_params.py @ matthiasgruber/supervisor (f7ab5f5ee67170256033e62e61a5be3410fcf6a8) | licenses: ["MIT"] | issues: null
max_forks_repo: agents/load_ppo_params.py @ matthiasgruber/supervisor (f7ab5f5ee67170256033e62e61a5be3410fcf6a8) | licenses: ["MIT"] | forks: null
import pickle
def load_ppo_params():
file = open("logs/baseline_tuning/study.obj", 'rb')
optuna_object = pickle.load(file)
file.close()
model_params = optuna_object.best_params
if model_params["n_steps"] < model_params["batch_size"]:
model_params["nminibatches"] = 1
else:
model_params["nminibatches"] = int(model_params["n_steps"] / model_params["batch_size"])
del model_params["batch_size"]
model_params["lam"] = model_params["lambda"]
del model_params["lambda"]
model_params["learning_rate"] = model_params["lr"]
del model_params["lr"]
return model_params
avg_line_length: 32.894737 | max_line_length: 96 | alphanum_fraction: 0.6912

hexsha: 68a6f06d752214ba9649ec9bb8fc2912e60ecad7 | size: 5069 | ext: py | lang: Python
max_stars_repo: download_imdb.py @ WillSkywalker/scholar (68a8f1680ccb150b005e440b501c05d7339e43e8) | licenses: ["Apache-2.0"] | stars: null
max_issues_repo: download_imdb.py @ WillSkywalker/scholar (68a8f1680ccb150b005e440b501c05d7339e43e8) | licenses: ["Apache-2.0"] | issues: null
max_forks_repo: download_imdb.py @ WillSkywalker/scholar (68a8f1680ccb150b005e440b501c05d7339e43e8) | licenses: ["Apache-2.0"] | forks: null
from optparse import OptionParser
import os
import errno
import tarfile
from torchvision.datasets.utils import download_url
import file_handling as fh
class IMDB:
"""`IMDB <http://ai.stanford.edu/~amaas/data/sentiment/>`_ Dataset.
Args:
root (string): Root directory of dataset where ``processed/training.pt``
and ``processed/test.pt`` exist.
train (bool, optional): If True, load the training data, otherwise test
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
strip_html (bool, optional): If True, remove html tags during preprocessing; default=True
"""
url = 'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
raw_filename = 'aclImdb_v1.tar.gz'
train_file = 'train.jsonlist'
test_file = 'test.jsonlist'
unlabeled_file = 'unlabeled.jsonlist'
def __init__(self, root, download=True):
super().__init__()
self.root = os.path.expanduser(root)
if download:
self.download()
if not self._check_raw_exists():
raise RuntimeError('Dataset not found. You can use download=True to download it')
self.preprocess()
def _check_processed_exists(self):
return os.path.exists(os.path.join(self.root, self.train_file)) and \
os.path.exists(os.path.join(self.root, self.test_file)) and \
os.path.exists(os.path.join(self.root, self.unlabeled_file))
def _check_raw_exists(self):
return os.path.exists(os.path.join(self.root, self.raw_filename))
def download(self):
"""Download the IMDB data if it doesn't exist in processed_folder already."""
if self._check_raw_exists():
return
# download files
try:
os.makedirs(os.path.join(self.root))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
download_url(self.url, root=self.root,
filename=self.raw_filename, md5=None)
if not self._check_raw_exists():
raise RuntimeError("Unable to find downloaded file. Please try again.")
else:
print("Download finished.")
def preprocess(self):
"""Preprocess the raw data file"""
if self._check_processed_exists():
return
train_lines = []
test_lines = []
unlabeled_lines = []
print("Opening tar file")
# read in the raw data
tar = tarfile.open(os.path.join(self.root, self.raw_filename), "r:gz")
# process all the data files in the archive
print("Processing documents")
for m_i, member in enumerate(tar.getmembers()):
            # Display occasional progress
if (m_i + 1) % 5000 == 0:
print("Processed {:d} / 100000".format(m_i+1))
# get the internal file name
parts = member.name.split('/')
if len(parts) > 3:
split = parts[1] # train or test
label = parts[2] # pos, neg, or unsup
name = parts[3].split('.')[0]
doc_id, rating = name.split('_')
doc_id = int(doc_id)
rating = int(rating)
# read the text from the archive
f = tar.extractfile(member)
bytes = f.read()
text = bytes.decode("utf-8")
                # route the raw text to its split (no tokenization is done here)
if label != 'unsup':
# save the text, label, and original file name
doc = {'id': split + '_' + str(doc_id), 'text': text, 'sentiment': label, 'orig': member.name, 'rating': rating}
if split == 'train':
train_lines.append(doc)
elif split == 'test':
test_lines.append(doc)
else:
doc = {'id': 'unlabeled_' + str(doc_id), 'text': text, 'sentiment': None, 'orig': member.name, 'rating': rating}
unlabeled_lines.append(doc)
print("Saving processed data to {:s}".format(self.root))
fh.write_jsonlist(train_lines, os.path.join(self.root, self.train_file))
fh.write_jsonlist(test_lines, os.path.join(self.root, self.test_file))
fh.write_jsonlist(unlabeled_lines, os.path.join(self.root, self.unlabeled_file))
def main():
usage = "%prog"
parser = OptionParser(usage=usage)
parser.add_option('--root-dir', type=str, default='./data/imdb',
help='Destination directory: default=%default')
#parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
# help='Keyword argument: default=%default')
(options, args) = parser.parse_args()
root_dir = options.root_dir
IMDB(root_dir, download=False)
if __name__ == '__main__':
main()
avg_line_length: 36.467626 | max_line_length: 132 | alphanum_fraction: 0.58118

hexsha: 70432a8f0297752867f9c0e6e62e0d1257413f3e | size: 1885 | ext: py | lang: Python
max_stars_repo: app/head/acts/calculation/impulse.py @ Matexer/BSPR (a503a8795cb0f4cebe2eedd148aa00aea75b570e) | licenses: ["MIT"] | stars: null
max_issues_repo: app/head/acts/calculation/impulse.py @ Matexer/BSPR (a503a8795cb0f4cebe2eedd148aa00aea75b570e) | licenses: ["MIT"] | issues: null
max_forks_repo: app/head/acts/calculation/impulse.py @ Matexer/BSPR (a503a8795cb0f4cebe2eedd148aa00aea75b570e) | licenses: ["MIT"] | forks: null
from typing import Tuple
from statistics import mean
from ..templates import CalculationActTemplate
from ....core import Impulse, ImpulseOutput
from ....gui.frames import ResultsFrame
class ImpulseAct(CalculationActTemplate):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fuel_name = args[1]
data = args[2]
config = args[3]
output = Impulse(data, config).get_results()
self.generate_report(self.frame, output)
def generate_report(self,
frame: ResultsFrame, output: ImpulseOutput):
title = frame.create_title(frame.interior,
f"WYNIKI OBLICZEŃ IMPULSU JEDNOSTKOWEGO DLA PALIWA {self.fuel_name}")
data = self.get_table_data(output)
table = frame.create_table(frame.interior, data)
export_btn = frame.create_export_btn(frame.interior)
title.pack(fill="both")
table.pack(pady=20)
export_btn.pack(pady=5)
export_btn.configure(command=lambda: self.export_data(data))
def get_table_data(self, output: ImpulseOutput) -> Tuple[tuple, ...]:
def get_a(item):
if item.a:
return round(item.a, 2)
else:
return "-"
dms = tuple(item.jet_d for item in output)
min_precision = self.get_dm_precision(dms)
headings = ("Nr\npomiaru", "Impuls jednostkowy\n[N⋅s/kg]",
"Impuls całkowity\n[N⋅s]", "a\n[-]", "dm [mm]",
"Dł. komory\nspalania [mm]", "Śr. komory\nspalania [mm]")
data = [headings]
for i, item in enumerate(output, start=1):
row = (i, int(round(item.unit_impulse, 0)), round(item.total_impulse, 1),
get_a(item), format(item.jet_d, f'.{min_precision}f'), item.chamber_length,
item.chamber_d)
data.append(row)
return data
avg_line_length: 34.907407 | max_line_length: 91 | alphanum_fraction: 0.615385

hexsha: 02fc2676e4ef7daafb169ac5c65d32461e778e2c | size: 12168 | ext: py | lang: Python
max_stars_repo: databroker/assets/core.py @ ericdill/DataBroker (970e9148dfab5e77101d40f059ecb30d064eac81) | licenses: ["BSD-3-Clause"] | stars: 15 (2019-09-07T13:18:43.000Z to 2022-03-25T07:03:47.000Z)
max_issues_repo: databroker/assets/core.py @ ericdill/DataBroker (970e9148dfab5e77101d40f059ecb30d064eac81) | licenses: ["BSD-3-Clause"] | issues: 258 (2019-05-13T23:11:57.000Z to 2022-03-08T22:09:08.000Z)
max_forks_repo: databroker/assets/core.py @ ericdill/DataBroker (970e9148dfab5e77101d40f059ecb30d064eac81) | licenses: ["BSD-3-Clause"] | forks: 15 (2019-06-03T20:25:36.000Z to 2022-03-25T14:08:29.000Z)
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from jsonschema import validate as js_validate
import warnings
import uuid
import time as ttime
import pandas as pd
from ..utils import sanitize_np, apply_to_dict_recursively
class DatumNotFound(Exception):
"""
Raised if a Datum id is not found.
"""
def __init__(self, datum_id, msg=None, *args):
if msg is None:
msg = f"No datum found with datum id {datum_id}"
super().__init__(msg, *args)
self.datum_id = datum_id
class EventDatumNotFound(Exception):
"""
Raised if an Event document is found to have an unknown Datum id.
"""
def __init__(self, event_uid, datum_id, msg=None, *args):
if msg is None:
msg = (
f"Event with uid {event_uid} references "
f"unknown Datum with datum id {datum_id}"
)
super().__init__(msg, *args)
self.event_uid = event_uid
self.datum_id = datum_id
def doc_or_uid_to_uid(doc_or_uid):
"""Given Document or uid return the uid
Parameters
----------
doc_or_uid : dict or str
If str, then assume uid and pass through, if not, return
the 'uid' field
Returns
-------
uid : str
A string version of the uid of the given document
"""
if not isinstance(doc_or_uid, six.string_types):
doc_or_uid = doc_or_uid['uid']
return doc_or_uid
def _get_datum_from_datum_id(col, datum_id, datum_cache, logger):
try:
datum = datum_cache[datum_id]
except KeyError:
# find the current document
edoc = col.find_one({'datum_id': datum_id})
if edoc is None:
raise DatumNotFound(datum_id=datum_id)
# save it for later
datum = dict(edoc)
res = edoc['resource']
count = 0
for dd in col.find({'resource': res}):
count += 1
d_id = dd['datum_id']
if d_id not in datum_cache:
datum_cache[d_id] = dict(dd)
if count > datum_cache.max_size:
logger.warn("More datum in a resource than your "
"datum cache can hold.")
datum.pop('_id', None)
return datum
def retrieve(col, datum_id, datum_cache, get_spec_handler, logger):
datum = _get_datum_from_datum_id(col, datum_id, datum_cache, logger)
handler = get_spec_handler(datum['resource'])
return handler(**datum['datum_kwargs'])
def resource_given_datum_id(col, datum_id, datum_cache, logger):
datum_id = doc_or_uid_to_uid(datum_id)
datum = _get_datum_from_datum_id(col, datum_id, datum_cache, logger)
res = datum['resource']
return res
def resource_given_uid(col, resource):
uid = doc_or_uid_to_uid(resource)
ret = col.find_one({'uid': uid})
ret.pop('_id', None)
ret['id'] = ret['uid']
return ret
def bulk_insert_datum(col, resource, datum_ids,
datum_kwarg_list):
resource_id = doc_or_uid_to_uid(resource)
def datum_factory():
for d_id, d_kwargs in zip(datum_ids, datum_kwarg_list):
datum = dict(resource=resource_id,
datum_id=str(d_id),
datum_kwargs=dict(d_kwargs))
apply_to_dict_recursively(datum, sanitize_np)
yield datum
col.insert(datum_factory())
def bulk_register_datum_table(datum_col,
resource_uid,
dkwargs_table,
validate):
if validate:
        # kwarg validation is not implemented; fail loudly instead of a bare raise
        raise NotImplementedError('validate=True is not supported')
d_ids = [str(uuid.uuid4()) for j in range(len(dkwargs_table))]
dkwargs_table = pd.DataFrame(dkwargs_table)
bulk_insert_datum(datum_col, resource_uid, d_ids, [
dict(r) for _, r in dkwargs_table.iterrows()])
return d_ids
def register_datum(col, resource_uid, datum_kwargs):
datum_uid = str(uuid.uuid4())
datum = insert_datum(col, resource_uid, datum_uid, datum_kwargs, {}, None)
return datum['datum_id']
def insert_datum(col, resource, datum_id, datum_kwargs, known_spec,
resource_col, ignore_duplicate_error=False,
duplicate_exc=None):
if ignore_duplicate_error:
assert duplicate_exc is not None
if duplicate_exc is None:
class _PrivateException(Exception):
pass
duplicate_exc = _PrivateException
try:
resource['spec']
spec = resource['spec']
if spec in known_spec:
js_validate(datum_kwargs, known_spec[spec]['datum'])
except (AttributeError, TypeError):
pass
resource_uid = doc_or_uid_to_uid(resource)
datum = dict(resource=resource_uid,
datum_id=str(datum_id),
datum_kwargs=dict(datum_kwargs))
apply_to_dict_recursively(datum, sanitize_np)
# We are transitioning from ophyd objects inserting directly into a
# Registry to ophyd objects passing documents to the RunEngine which in
# turn inserts them into a Registry. During the transition period, we allow
# an ophyd object to attempt BOTH so that configuration files are
# compatible with both the new model and the old model. Thus, we need to
# ignore the second attempt to insert.
try:
col.insert_one(datum)
except duplicate_exc:
if ignore_duplicate_error:
warnings.warn("Ignoring attempt to insert Resource with duplicate "
"uid, assuming that both ophyd and bluesky "
"attempted to insert this document. Remove the "
"Registry (`reg` parameter) from your ophyd "
"instance to remove this warning.")
else:
raise
# do not leak mongo objectID
datum.pop('_id', None)
return datum
def insert_resource(col, spec, resource_path, resource_kwargs,
known_spec, root, path_semantics='posix', uid=None,
run_start=None, id=None,
ignore_duplicate_error=False, duplicate_exc=None):
"""Insert resource into a databroker.
Parameters
----------
col : pymongo.Collection instance
Collection to insert data into
spec : str
The resource data spec
resource_path : str
The path to the resource files
resource_kwargs : dict
The kwargs for the resource
known_spec : set
The known specs
root : str
The root of the file path
path_semantics : str, optional
The name of the path semantics, e.g. ``posix`` for Linux systems
uid : str, optional
The unique ID for the resource
run_start : str, optional
The unique ID for the start document the resource is associated with
id : str, optional
Dummy variable so that we round trip resources, same as ``uid``
Returns
-------
resource_object : dict
The resource
"""
if ignore_duplicate_error:
assert duplicate_exc is not None
if duplicate_exc is None:
class _PrivateException(Exception):
pass
duplicate_exc = _PrivateException
resource_kwargs = dict(resource_kwargs)
if spec in known_spec:
js_validate(resource_kwargs, known_spec[spec]['resource'])
if uid is None:
uid = str(uuid.uuid4())
resource_object = dict(spec=str(spec),
resource_path=str(resource_path),
root=str(root),
resource_kwargs=resource_kwargs,
path_semantics=path_semantics,
uid=uid)
# This is special-cased because it was added later.
# Someday this may be required and no longer special-cased.
if run_start is not None:
resource_object['run_start'] = run_start
# We are transitioning from ophyd objects inserting directly into a
# Registry to ophyd objects passing documents to the RunEngine which in
# turn inserts them into a Registry. During the transition period, we allow
# an ophyd object to attempt BOTH so that configuration files are
# compatible with both the new model and the old model. Thus, we need to
# ignore the second attempt to insert.
try:
col.insert_one(resource_object)
except duplicate_exc:
if ignore_duplicate_error:
warnings.warn("Ignoring attempt to insert Datum with duplicate "
"datum_id, assuming that both ophyd and bluesky "
"attempted to insert this document. Remove the "
"Registry (`reg` parameter) from your ophyd "
"instance to remove this warning.")
else:
raise
resource_object['id'] = resource_object['uid']
resource_object.pop('_id', None)
return resource_object
def update_resource(update_col, resource_col, old, new, cmd, cmd_kwargs):
'''Update a resource document
Parameters
----------
update_col : Collection
The collection to record audit trail in
resource_col : Collection
The resource collection
old : dict
The old resource document
new : dict
The new resource document
cmd : str
The name of the operation which generated this update
cmd_kwargs : dict
The arguments that went into the update (excluding the resource id)
Returns
-------
ret : dict
The new resource document
log_object : dict
The history object inserted (with oid removed)
'''
if old['uid'] != new['uid']:
raise RuntimeError('must not change the resource uid')
uid = old['uid']
log_object = {'resource': uid,
'old': old,
'new': new,
'time': ttime.time(),
'cmd': cmd,
'cmd_kwargs': cmd_kwargs}
update_col.insert_one(log_object)
result = resource_col.replace_one({'uid': uid}, new)
ret = resource_given_uid(resource_col, uid)
# TODO look inside of result
del result
log_object.pop('_id', None)
return ret, log_object
def get_resource_history(col, resource):
uid = doc_or_uid_to_uid(resource)
cursor = col.find({'resource': uid})
for doc in cursor:
for k in ['new', 'old']:
d = doc[k]
d.pop('_id', None)
d['id'] = d['uid']
doc[k] = d
doc.pop('_id', None)
yield doc
def get_datumkw_by_resuid_gen(datum_col, resource_uid):
'''Given a resource uid, get all datum_kwargs
No order is guaranteed.
Internally the result of this is passed to the `get_file_list` method
of the handler object in `change_root`
Parameters
----------
datum_col : Collection
The Datum collection
resource_uid : dict or str
The resource to work on
Yields
------
datum_kwarg : dict
'''
resource_uid = doc_or_uid_to_uid(resource_uid)
cur = datum_col.find({'resource': resource_uid})
for d in cur:
yield d['datum_kwargs']
def get_datum_by_res_gen(datum_col, resource_uid):
'''Given a resource uid, get all datums
No order is guaranteed.
Internally the result of this is passed to the `get_file_list` method
of the handler object in `change_root`
Parameters
----------
datum_col : Collection
The Datum collection
resource_uid : dict or str
The resource to work on
Yields
------
datum : dict
'''
resource_uid = doc_or_uid_to_uid(resource_uid)
cur = datum_col.find({'resource': resource_uid})
for d in cur:
yield d
def get_file_list(resource, datum_kwarg_gen, get_spec_handler):
"""
Given a resource and an iterable of datum kwargs, get a list of
associated files.
DO NOT USE FOR COPYING OR MOVING. This is for debugging only.
See the methods for moving and copying on the Registry object.
"""
handler = get_spec_handler(resource['uid'])
return handler.get_file_list(datum_kwarg_gen)
avg_line_length: 30.805063 | max_line_length: 79 | alphanum_fraction: 0.62311

hexsha: ae8d7c089f31a437f6f83868c879ed93ab95eadb | size: 6803 | ext: py | lang: Python
max_stars_repo: model.py @ bearcatt/single-shot-detector (649d55aa84f1c988afd920ed8abc601512405825) | licenses: ["MIT"] | stars: 1 (2020-01-31T09:28:54.000Z to 2020-01-31T09:28:54.000Z)
max_issues_repo: model.py @ bearcatt/single-shot-detector (649d55aa84f1c988afd920ed8abc601512405825) | licenses: ["MIT"] | issues: null
max_forks_repo: model.py @ bearcatt/single-shot-detector (649d55aa84f1c988afd920ed8abc601512405825) | licenses: ["MIT"] | forks: null
import tensorflow as tf
from detector import SSD
from detector.anchor_generator import AnchorGenerator
from detector.box_predictor import RetinaNetBoxPredictor
from detector.feature_extractor import RetinaNetFeatureExtractor
from detector.backbones import mobilenet_v1, shufflenet_v2, resnet, hrnet
from metrics import Evaluator
MOVING_AVERAGE_DECAY = 0.993
def model_fn(features, labels, mode, params):
"""
This is a function for creating a computational tensorflow graph.
The function is in format required by tf.estimator.
"""
is_training = mode == tf.estimator.ModeKeys.TRAIN
# the base network
def backbone(images, is_training):
if params['backbone'] == 'mobilenet':
return mobilenet_v1(
images, is_training,
depth_multiplier=params['depth_multiplier']
)
elif params['backbone'] == 'shufflenet':
return shufflenet_v2(
images, is_training,
depth_multiplier=str(params['depth_multiplier'])
)
elif params['backbone'] == 'resnet':
return resnet(
images, is_training,
block_sizes=params['block_sizes'],
enableBN=params['enableBN']
)
# elif params['backbone'] == 'hrnet':
# return hrnet(
# images, is_training,
# width=params['width'],
# )
else:
raise NotImplementedError
# add additional layers to the base network
feature_extractor = RetinaNetFeatureExtractor(is_training, backbone)
# ssd anchor maker
anchor_generator = AnchorGenerator(
strides=[8, 16, 32, 64, 128],
scales=[32, 64, 128, 256, 512],
scale_multipliers=[1.0, 1.4142],
aspect_ratios=[1.0, 2.0, 0.5]
)
num_anchors_per_location = anchor_generator.num_anchors_per_location
# add layers that predict boxes and labels
box_predictor = RetinaNetBoxPredictor(is_training, params['num_classes'], num_anchors_per_location)
# collect everything on one place
ssd = SSD(
features['images'], feature_extractor,
anchor_generator, box_predictor,
params['num_classes']
)
# add nms to the graph
if not is_training:
predictions = ssd.get_predictions(
score_threshold=params['score_threshold'],
iou_threshold=params['iou_threshold'],
max_boxes_per_class=params['max_boxes_per_class']
)
if mode == tf.estimator.ModeKeys.PREDICT:
# because images are resized before
# feeding them to the network
box_scaler = features['box_scaler']
predictions['boxes'] /= box_scaler
export_outputs = tf.estimator.export.PredictOutput({
name: tf.identity(tensor, name)
for name, tensor in predictions.items()
})
return tf.estimator.EstimatorSpec(
mode, predictions=predictions,
export_outputs={'outputs': export_outputs}
)
# add l2 regularization
with tf.name_scope('weight_decay'):
add_weight_decay(params['weight_decay'])
regularization_loss = tf.losses.get_regularization_loss()
# create localization and classification losses
losses = ssd.loss(labels, params)
tf.losses.add_loss(params['localization_loss_weight'] * losses['localization_loss'])
tf.losses.add_loss(params['classification_loss_weight'] * losses['classification_loss'])
tf.summary.scalar('regularization_loss', regularization_loss)
tf.summary.scalar('localization_loss', losses['localization_loss'])
tf.summary.scalar('classification_loss', losses['classification_loss'])
total_loss = tf.losses.get_total_loss(add_regularization_losses=True)
if mode == tf.estimator.ModeKeys.EVAL:
batch_size = features['images'].shape[0].value
assert batch_size == 1
evaluator = Evaluator(num_classes=params['num_classes'])
eval_metric_ops = evaluator.get_metric_ops(labels, predictions)
return tf.estimator.EstimatorSpec(
mode, loss=total_loss,
eval_metric_ops=eval_metric_ops
)
assert mode == tf.estimator.ModeKeys.TRAIN
with tf.variable_scope('learning_rate'):
global_step = tf.train.get_global_step()
learning_rate = tf.train.cosine_decay(
params['initial_learning_rate'],
global_step, decay_steps=params['num_steps']
)
tf.summary.scalar('learning_rate', learning_rate)
# TODO: SyncBN support
if params['enableBN']:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops), tf.variable_scope('optimizer'):
var_list = tf.trainable_variables()
        if 'freeze_at' in params:  # dict.has_key() does not exist in Python 3
            # remove frozen vars from var_list
            pass
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)
grads_and_vars = optimizer.compute_gradients(total_loss, var_list)
train_op = optimizer.apply_gradients(grads_and_vars, global_step)
for g, v in grads_and_vars:
tf.summary.histogram(v.name[:-2] + '_hist', v)
tf.summary.histogram(v.name[:-2] + '_grad_hist', g)
    # TODO: check if EMA helps.
with tf.control_dependencies([train_op]), tf.name_scope('ema'):
ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY, num_updates=global_step)
train_op = ema.apply(tf.trainable_variables())
return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op)
def add_weight_decay(weight_decay):
"""Add L2 regularization to all (or some) trainable kernel weights."""
weight_decay = tf.constant(
weight_decay, tf.float32,
[], 'weight_decay'
)
trainable_vars = tf.trainable_variables()
kernels = [
v for v in trainable_vars
if ('weights' in v.name or 'kernel' in v.name) and 'depthwise_weights' not in v.name
]
for K in kernels:
x = tf.multiply(weight_decay, tf.nn.l2_loss(K))
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, x)
class RestoreMovingAverageHook(tf.train.SessionRunHook):
def __init__(self, model_dir):
super(RestoreMovingAverageHook, self).__init__()
self.model_dir = model_dir
def begin(self):
ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY)
variables_to_restore = ema.variables_to_restore()
self.load_ema = tf.contrib.framework.assign_from_checkpoint_fn(
tf.train.latest_checkpoint(self.model_dir), variables_to_restore
)
def after_create_session(self, sess, coord):
tf.logging.info('Loading EMA weights...')
self.load_ema(sess)
avg_line_length: 37.174863 | max_line_length: 103 | alphanum_fraction: 0.668676

hexsha: 090f7a6bd4941a17df6b010eeff613baa549ee60 | size: 1461 | ext: py | lang: Python
max_stars_repo: tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/spatial/tests/test__plotutils.py @ globusgenomics/galaxy (7caf74d9700057587b3e3434c64e82c5b16540f1) | licenses: ["CC-BY-3.0"] | stars: 1 (2021-02-05T13:19:58.000Z to 2021-02-05T13:19:58.000Z)
max_issues_repo: tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/spatial/tests/test__plotutils.py @ globusgenomics/galaxy (7caf74d9700057587b3e3434c64e82c5b16540f1) | licenses: ["CC-BY-3.0"] | issues: 1 (2018-04-15T22:59:15.000Z to 2018-04-15T22:59:15.000Z)
max_forks_repo: tools/intogen/runtime/pyenv/lib/python2.7/site-packages/scipy/spatial/tests/test__plotutils.py @ globusgenomics/galaxy (7caf74d9700057587b3e3434c64e82c5b16540f1) | licenses: ["CC-BY-3.0"] | forks: null
from __future__ import division, print_function, absolute_import
from numpy.testing import dec, assert_, assert_array_equal
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
has_matplotlib = True
except:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
class TestPlotting:
points = [(0,0), (0,1), (1,0), (1,1)]
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_delaunay(self):
# Smoke test
fig = plt.figure()
obj = Delaunay(self.points)
s_before = obj.simplices.copy()
r = delaunay_plot_2d(obj, ax=fig.gca())
assert_array_equal(obj.simplices, s_before) # shouldn't modify
assert_(r is fig)
delaunay_plot_2d(obj, ax=fig.gca())
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_voronoi(self):
# Smoke test
fig = plt.figure()
obj = Voronoi(self.points)
r = voronoi_plot_2d(obj, ax=fig.gca())
assert_(r is fig)
voronoi_plot_2d(obj)
@dec.skipif(not has_matplotlib, "Matplotlib not available")
def test_convex_hull(self):
# Smoke test
fig = plt.figure()
tri = ConvexHull(self.points)
r = convex_hull_plot_2d(tri, ax=fig.gca())
assert_(r is fig)
convex_hull_plot_2d(tri)
avg_line_length: 30.4375 | max_line_length: 70 | alphanum_fraction: 0.652293

hexsha: 1c3a31952cc8585ec814b199d27d13eef56bbe9b | size: 2964 | ext: py | lang: Python
max_stars_repo: osspeak/recognition/actions/library/stdlib.py @ OSSpeak/OSSpeak (327c38a37684165f87bf8d76ab2ca135b43b8ab7) | licenses: ["MIT"] | stars: 1 (2020-03-17T10:24:41.000Z to 2020-03-17T10:24:41.000Z)
max_issues_repo: osspeak/recognition/actions/library/stdlib.py @ OSSpeak/OSSpeak (327c38a37684165f87bf8d76ab2ca135b43b8ab7) | licenses: ["MIT"] | issues: 12 (2016-09-28T05:16:00.000Z to 2020-11-27T22:32:40.000Z)
max_forks_repo: osspeak/recognition/actions/library/stdlib.py @ OSSpeak/OSSpeak (327c38a37684165f87bf8d76ab2ca135b43b8ab7) | licenses: ["MIT"] | forks: null
from recognition.actions.library import (window, thread, engine,
extensions, general, text, clipboard, macro, osspeak, conditionals,
fsystem, math, directinput, flow, process)
from recognition.actions.library import _mouse as mouse
from recognition.actions.library import _keyboard as keyboard
from recognition.actions.library import screengrid
from recognition.actions import astree
from types import SimpleNamespace
import operator
import re
import time
def wait(n):
try:
time.sleep(float(n))
except TypeError:
pass
def assign_var(context, name, value):
context.argument_frames[-1][name.evaluate(context)] = value.evaluate(context)
def osspeak_lambda(context, params, action):
fn = astree.FunctionDefinition(None, params, action)
return fn
class _Nil:
pass
def not_none(val, default):
if val is not None:
return val
return default
def parse_int(val, default=1):
if isinstance(val, str):
val = val.replace(' ', '')
try:
return int(val)
except (ValueError, TypeError) as e:
return default
def initialize():
# avoid circular import timing issue
macro._restore_saved()
namespace = {
'active_window': lambda: window.active_window_name().title(),
'between': flow.between,
'camel_case': text.camel_case,
'click': mouse.click,
'clipboard': clipboard,
'dict': dict,
'directinput': directinput,
'engine': engine,
'error': general.error,
'eval': lambda context, x: eval(str(x.evaluate(context)), {}, context.namespace),
'extensions': extensions,
'false': lambda: False,
'if': flow.osspeak_if,
'int': int,
'in': lambda a, b: a in b,
'is': lambda a, b: a is b,
'keyboard': keyboard,
'lambda': osspeak_lambda,
'len': len,
'loop': flow.loop,
'macro': macro,
'mouse': mouse,
'none': lambda: None,
'not_none': not_none,
'parse_int': parse_int,
'print': print,
'process': process,
're': re,
'read': fsystem.read_file,
'run': process.run,
'run_sync': process.run_sync,
'screengrid': screengrid,
'set': set,
'setattr': setattr,
'setState': lambda name, value: setattr(namespace['state'], name, value),
'snake_case': text.snake_case,
'state': SimpleNamespace(),
'str': str,
'text': text,
'pascal_case': text.pascal_case,
'true': lambda: True,
'var': assign_var,
'wait': wait,
'while': flow.osspeak_while,
'window': window,
}
class Namespace:
def __init__(self):
self.stdlib = namespace.copy()
self._frames = [{}]
def __getattr__(self, k):
pass
deferred_arguments_eval = {
flow.osspeak_if: flow.osspeak_if_gen,
flow.osspeak_while: flow.osspeak_while_gen,
flow.loop: flow.loop_gen,
flow.between: None,
keyboard.add_delay: None,
keyboard.remove_delay: None,
namespace['eval']: None,
assign_var: None,
osspeak_lambda: None,
}
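# Illustrative sketch (added for clarity, not part of the original module): how the
# small module-level helpers above behave. Wrapped in a __main__ guard so importing
# this module is unaffected.
if __name__ == '__main__':
    print(parse_int('4 2'))      # whitespace is stripped first -> 42
    print(parse_int('oops'))     # unparseable input falls back to the default -> 1
    print(not_none(None, 'x'))   # -> 'x'
    print(not_none(0, 'x'))      # 0 is not None, so it is kept -> 0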
| 26 | 85 | 0.654858 |
1e53ef4245037cf760e12089c0360b5bda84a10d | 1,945 | py | Python |
selfdrive/test/helpers.py | tomidbsimcha/openpilot | 5663c116edbea0235b2a439257ebc6c0d39f4435 | ["MIT"] | 2 | 2019-09-04T14:31:54.000Z | 2019-09-13T13:18:46.000Z |
selfdrive/test/helpers.py | tomidbsimcha/openpilot | 5663c116edbea0235b2a439257ebc6c0d39f4435 | ["MIT"] | null | null | null |
selfdrive/test/helpers.py | tomidbsimcha/openpilot | 5663c116edbea0235b2a439257ebc6c0d39f4435 | ["MIT"] | 1 | 2020-09-16T07:13:00.000Z | 2020-09-16T07:13:00.000Z |
import subprocess
from functools import wraps
from nose.tools import nottest
from common.android import ANDROID
from common.apk import update_apks, start_offroad, pm_apply_packages, android_packages
from common.params import Params
from selfdrive.version import training_version, terms_version
from selfdrive.manager import start_managed_process, kill_managed_process, get_running
def set_params_enabled():
params = Params()
params.put("HasAcceptedTerms", terms_version)
params.put("HasCompletedSetup", "1")
params.put("OpenpilotEnabledToggle", "1")
params.put("CommunityFeaturesToggle", "1")
params.put("Passive", "0")
params.put("CompletedTrainingVersion", training_version)
def phone_only(x):
if ANDROID:
return x
else:
return nottest(x)
def with_processes(processes):
def wrapper(func):
@wraps(func)
def wrap():
# start and assert started
[start_managed_process(p) for p in processes]
assert all(get_running()[name].exitcode is None for name in processes)
# call the function
try:
func()
# assert processes are still started
assert all(get_running()[name].exitcode is None for name in processes)
finally:
# kill and assert all stopped
[kill_managed_process(p) for p in processes]
assert len(get_running()) == 0
return wrap
return wrapper
def with_apks():
def wrapper(func):
@wraps(func)
def wrap():
update_apks()
pm_apply_packages('enable')
start_offroad()
func()
try:
for package in android_packages:
apk_is_running = (subprocess.call(["pidof", package]) == 0)
assert apk_is_running, package
finally:
pm_apply_packages('disable')
for package in android_packages:
apk_is_not_running = (subprocess.call(["pidof", package]) == 1)
assert apk_is_not_running, package
return wrap
return wrapper
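# Illustrative usage sketch (added for clarity, not part of openpilot): the
# decorators above wrap nose-style test functions. The process name below is a
# hypothetical placeholder and must match a managed process on a real device.
# Defining the function does not start anything; processes are only started
# when the wrapped test is actually called.
if __name__ == "__main__":
    @phone_only
    @with_processes(['loggerd'])
    def _example_test():
        pass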
| 28.602941 | 86 | 0.693059 |
ceba1096a1a5193dd72b090ff7a77ae03e2e2be9 | 1,650 | py | Python |
nova/virt/powervm/constants.py | bopopescu/OpenStack-DNRM-Nova | 7354f378398850113ac93b511547ed05218dc770 | ["Apache-2.0"] | 1 | 2021-04-08T10:13:03.000Z | 2021-04-08T10:13:03.000Z |
nova/virt/powervm/constants.py | bopopescu/OpenStack-DNRM-Nova | 7354f378398850113ac93b511547ed05218dc770 | ["Apache-2.0"] | 1 | 2018-01-19T07:50:49.000Z | 2018-01-19T07:50:49.000Z |
nova/virt/powervm/constants.py | bopopescu/OpenStack-DNRM-Nova | 7354f378398850113ac93b511547ed05218dc770 | ["Apache-2.0"] | 1 | 2020-07-24T09:07:58.000Z | 2020-07-24T09:07:58.000Z |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.compute import power_state
POWERVM_NOSTATE = ''
POWERVM_RUNNING = 'Running'
POWERVM_STARTING = 'Starting'
POWERVM_SHUTDOWN = 'Not Activated'
POWERVM_SHUTTING_DOWN = 'Shutting Down'
POWERVM_ERROR = 'Error'
POWERVM_NOT_AVAILABLE = 'Not Available'
POWERVM_OPEN_FIRMWARE = 'Open Firmware'
POWERVM_POWER_STATE = {
POWERVM_NOSTATE: power_state.NOSTATE,
POWERVM_RUNNING: power_state.RUNNING,
POWERVM_STARTING: power_state.RUNNING,
POWERVM_SHUTDOWN: power_state.SHUTDOWN,
POWERVM_SHUTTING_DOWN: power_state.SHUTDOWN,
POWERVM_ERROR: power_state.CRASHED,
POWERVM_OPEN_FIRMWARE: power_state.CRASHED,
POWERVM_NOT_AVAILABLE: power_state.CRASHED
}
POWERVM_CPU_INFO = ('ppc64', 'powervm', '3940')
POWERVM_HYPERVISOR_TYPE = 'powervm'
POWERVM_HYPERVISOR_VERSION = '7.1'
POWERVM_SUPPORTED_INSTANCES = [('ppc64', 'powervm', 'hvm')]
POWERVM_MIN_ROOT_GB = 10
POWERVM_MIN_MEM = 512
POWERVM_MAX_MEM = 1024
POWERVM_MAX_CPUS = 1
POWERVM_MIN_CPUS = 1
POWERVM_CONNECTION_TIMEOUT = 60
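# Illustrative lookup sketch (added for clarity, not part of the original module):
# translating a state string reported by the hypervisor into a nova power_state.
# The state value below is a hypothetical example.
if __name__ == '__main__':
    lpar_state = POWERVM_RUNNING  # e.g. as reported for an LPAR
    print(POWERVM_POWER_STATE.get(lpar_state, power_state.NOSTATE))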
| 33 | 78 | 0.765455 |
5da9c99efb383c6d0f20fb00340fe17aa6a13d4a | 2,031 | py | Python |
realworld_benchmark/node_information.py | FedericoStazi/pna | 86581e52f13bdce80a72c941076a7cb136089c9b | ["MIT"] | 1 | 2020-11-26T18:16:50.000Z | 2020-11-26T18:16:50.000Z |
realworld_benchmark/node_information.py | FedericoStazi/pna | 86581e52f13bdce80a72c941076a7cb136089c9b | ["MIT"] | null | null | null |
realworld_benchmark/node_information.py | FedericoStazi/pna | 86581e52f13bdce80a72c941076a7cb136089c9b | ["MIT"] | null | null | null |
import networkx
import random
import numpy
import scipy
import dgl
def normalize(l):
return (l - numpy.mean(l)) / (numpy.std(l) if numpy.std(l) > 1e-6 else 1)
def get_nodes_degree(graph):
return list(graph.in_degrees())
def get_nodes_closeness_centrality(graph):
return list(networkx.closeness_centrality(graph.to_networkx().to_undirected()).values())
def get_nodes_betweenness_centrality(graph):
return list(networkx.betweenness_centrality(graph.to_networkx().to_undirected()).values())
def get_nodes_pagerank(graph):
return list(networkx.algorithms.link_analysis.pagerank_alg.pagerank(graph.to_networkx().to_undirected()).values())
def get_nodes_triangles(graph):
return list(networkx.algorithms.cluster.triangles(graph.to_networkx().to_undirected()).values())
def get_nodes_random(graph):
return list([random.random() for _ in graph.nodes()])
def get_nodes_eigenvector(graph, k=1):
A = graph.adjacency_matrix_scipy(return_edge_ids=False).astype(float)
N = scipy.sparse.diags(dgl.backend.asnumpy(graph.in_degrees()).clip(1), dtype=float)
L = N * scipy.sparse.eye(graph.number_of_nodes()) - A
EigVal, EigVec = scipy.sparse.linalg.eigs(L, k+1, which='SR', tol=5e-1)
EigVec = EigVec[:, EigVal.argsort()]
return numpy.absolute(numpy.real(EigVec[:, -1]))
NODE_INFORMATION = {'degree' : get_nodes_degree, 'closeness_centrality' : get_nodes_closeness_centrality,
'betweenness_centrality' : get_nodes_betweenness_centrality, 'pagerank' : get_nodes_pagerank,
'triangles' : get_nodes_triangles, 'random' : get_nodes_random,
'eig1' : (lambda g : get_nodes_eigenvector(g, 1)),
'eig2' : (lambda g : get_nodes_eigenvector(g, 2)),
'eig3' : (lambda g : get_nodes_eigenvector(g, 3)),
'degree_normalized' : (lambda g : normalize(get_nodes_degree(g))),
'triangles_normalized' : (lambda g : normalize(get_nodes_triangles(g)))
}
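# Illustrative usage sketch (added for clarity, not part of the original module):
# computing a couple of the descriptors above for a small graph. The DGL graph
# construction shown is an assumption and may differ between DGL versions.
if __name__ == '__main__':
    g = dgl.DGLGraph(networkx.cycle_graph(4))  # 4-node cycle (assumed constructor)
    print(NODE_INFORMATION['degree'](g))       # in-degree of each node
    print(NODE_INFORMATION['triangles'](g))    # triangle count per node (all zero here)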
| 44.152174 | 118 | 0.69227 |
d66792c15fb84a6215c306744121af81f2d15398 | 2,072 | py | Python |
src/bandersnatch_filter_plugins/latest_name.py | hugovk/bandersnatch | ce95eb0d64ba582194618edde6e16d55f0ec3249 | ["AFL-3.0"] | null | null | null |
src/bandersnatch_filter_plugins/latest_name.py | hugovk/bandersnatch | ce95eb0d64ba582194618edde6e16d55f0ec3249 | ["AFL-3.0"] | null | null | null |
src/bandersnatch_filter_plugins/latest_name.py | hugovk/bandersnatch | ce95eb0d64ba582194618edde6e16d55f0ec3249 | ["AFL-3.0"] | null | null | null |
import logging
from operator import itemgetter
from typing import Dict, Sequence, Tuple, Union
from packaging.version import LegacyVersion, Version, parse
from bandersnatch.filter import FilterReleasePlugin
logger = logging.getLogger("bandersnatch")
class LatestReleaseFilter(FilterReleasePlugin):
"""
Plugin to download only latest releases
"""
name = "latest_release"
keep = 0 # by default, keep 'em all
latest: Sequence[str] = []
def initialize_plugin(self) -> None:
"""
Initialize the plugin reading patterns from the config.
"""
if self.keep:
return
try:
self.keep = int(self.configuration["latest_release"]["keep"])
except KeyError:
return
except ValueError:
return
if self.keep > 0:
logger.info(f"Initialized latest releases plugin with keep={self.keep}")
def filter(self, metadata: Dict) -> bool:
"""
Returns False if version fails the filter, i.e. is not a latest/current release
"""
if self.keep == 0:
return True
if not self.latest:
info = metadata["info"]
releases = metadata["releases"]
versions = list(releases.keys())
before = len(versions)
if before <= self.keep:
# not enough releases: do nothing
return True
versions_pair = map(lambda v: (parse(v), v), versions)
latest_sorted: Sequence[Tuple[Union[LegacyVersion, Version], str]] = sorted(
versions_pair
)[
-self.keep : # noqa: E203
]
self.latest = list(map(itemgetter(1), latest_sorted))
current_version = info.get("version")
if current_version and (current_version not in self.latest):
# never remove the stable/official version
self.latest[0] = current_version
version = metadata["version"]
return version in self.latest
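# Illustrative configuration sketch (added for clarity, not part of this module):
# the plugin reads its setting from a [latest_release] section of the bandersnatch
# configuration, e.g.
#
#     [latest_release]
#     keep = 3
#
# With keep = 3, only the three highest parsed versions pass the filter, and the
# package's current version from info["version"] is always retained as well.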
| 30.028986 | 88 | 0.585425 |
ff21b80306e05676b292cdb27daafc15392c5c16 | 1,467 | py | Python |
cheritest/trunk/tests/cp2/test_cp2_clcr_otype.py | tupipa/beri | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | ["Apache-2.0"] | 36 | 2015-05-29T16:47:19.000Z | 2022-02-08T21:16:26.000Z |
cheritest/trunk/tests/cp2/test_cp2_clcr_otype.py | tupipa/beri | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | ["Apache-2.0"] | 1 | 2015-10-14T13:05:21.000Z | 2015-10-19T20:34:03.000Z |
cheritest/trunk/tests/cp2/test_cp2_clcr_otype.py | tupipa/beri | cef1b41d52592cfa7454ddf59f9f2994e447cd66 | ["Apache-2.0"] | 15 | 2015-06-11T07:10:58.000Z | 2021-06-18T05:14:54.000Z |
#-
# Copyright (c) 2016 Michael Roe
# All rights reserved.
#
# This software was developed by the University of Cambridge Computer
# Laboratory as part of the Rigorous Engineering of Mainstream Systems (REMS)
# project, funded by EPSRC grant EP/K008528/1.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_cp2_clcr_otype(BaseBERITestCase):
@attr('cap256')
@attr('capabilities')
def test_cp2_clcr_otype_1(self):
self.assertRegisterEqual(self.MIPS.a0, 0xffffff, "CGetType of an all 1's untagged capability did not return the expected value")
| 39.648649 | 136 | 0.770279 |
413bc26df93e44b5af570d98dfedfbe0de18836c | 346 | py | Python |
untitled/classes/eletronico.py | walterps/python_intermediario | f282ac4ad7fa34ead6f096797f0e90beb9ec326e | ["MIT"] | null | null | null |
untitled/classes/eletronico.py | walterps/python_intermediario | f282ac4ad7fa34ead6f096797f0e90beb9ec326e | ["MIT"] | null | null | null |
untitled/classes/eletronico.py | walterps/python_intermediario | f282ac4ad7fa34ead6f096797f0e90beb9ec326e | ["MIT"] | null | null | null |
class Eletronico:
def __init__(self, nome):
self._nome = nome
self._ligado = False
def ligar(self):
if self._ligado:
return
self._ligado = True
        print(f'{self._nome} is turning on!')
def desligar(self):
if self._ligado:
self._ligado = False
return
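# Illustrative usage sketch (added for clarity, not part of the original module):
if __name__ == '__main__':
    tv = Eletronico('TV')
    tv.ligar()      # prints the power-on message
    tv.ligar()      # already on: returns without printing again
    tv.desligar()   # turns the device back off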
| 18.210526 | 47 | 0.534682 |
93fdb5e9e425d2438fb69dcc39d346449670f6a1 | 374 | py | Python |
libs/WebDriverUtil.py | bugbound/Angularrrgh | 30b31e86cd6678c0619e1bbf4077c62edcbd02a7 | ["MIT"] | null | null | null |
libs/WebDriverUtil.py | bugbound/Angularrrgh | 30b31e86cd6678c0619e1bbf4077c62edcbd02a7 | ["MIT"] | null | null | null |
libs/WebDriverUtil.py | bugbound/Angularrrgh | 30b31e86cd6678c0619e1bbf4077c62edcbd02a7 | ["MIT"] | null | null | null |
from selenium import webdriver
from selenium.webdriver import Chrome
class WebDriverUtil:
@staticmethod
def getWebDriver(proxy):
chrome_options = webdriver.ChromeOptions()
        if proxy:
chrome_options.add_argument("--proxy-server={0}".format(proxy))
driver = Chrome(chrome_options=chrome_options)
return driver
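# Illustrative usage sketch (added for clarity, not part of the original module):
# the proxy address is a hypothetical placeholder; a chromedriver binary must be
# available on PATH for Chrome to start.
if __name__ == '__main__':
    driver = WebDriverUtil.getWebDriver('127.0.0.1:8080')
    driver.get('https://example.com')
    driver.quit()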
| 24.933333 | 75 | 0.679144 |
4abba340f0cd417866d78f2a24ad8f736149fdd4 | 367 | py | Python |
scripts/gen_key.py | ki-tools/sls_ki_synapse | 8c726a9ec568e3d416049a8813c21bbe87740f16 | ["Apache-2.0"] | 1 | 2018-11-21T19:54:34.000Z | 2018-11-21T19:54:34.000Z |
scripts/gen_key.py | pcstout/sls_ki_synapse | 8c726a9ec568e3d416049a8813c21bbe87740f16 | ["Apache-2.0"] | 5 | 2019-03-12T16:44:35.000Z | 2019-03-15T21:46:00.000Z |
scripts/gen_key.py | ki-tools/sls_ki_synapse | 8c726a9ec568e3d416049a8813c21bbe87740f16 | ["Apache-2.0"] | 2 | 2019-02-28T23:16:32.000Z | 2019-03-05T22:16:39.000Z |
#!/usr/bin/env python3
import secrets
import argparse
def main():
"""
Generates a random key.
"""
parser = argparse.ArgumentParser()
parser.add_argument('bytes', type=int, nargs='?', help='How many bytes to generate.', default=64)
args = parser.parse_args()
print(secrets.token_hex(args.bytes))
if __name__ == "__main__":
main()
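# Illustrative invocations (added for clarity, not part of the original script):
#   $ python3 gen_key.py        # default: 64 random bytes -> 128 hex characters
#   $ python3 gen_key.py 32     # 32 random bytes -> 64 hex characters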
| 18.35 | 101 | 0.651226 |
88584d3418494bb775487753c985969226144b4f | 9,487 | py | Python |
dashboard/tests/tests_templatetags.py | svenvdmeer/babybuddy | d9485f0208b529c1e19ccfeb8a52e78c76615767 | ["BSD-2-Clause"] | null | null | null |
dashboard/tests/tests_templatetags.py | svenvdmeer/babybuddy | d9485f0208b529c1e19ccfeb8a52e78c76615767 | ["BSD-2-Clause"] | null | null | null |
dashboard/tests/tests_templatetags.py | svenvdmeer/babybuddy | d9485f0208b529c1e19ccfeb8a52e78c76615767 | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
import pytz
from django.contrib.auth.models import User
from django.test import TestCase
from django.utils import timezone
from babybuddy.models import Settings
from core import models
from dashboard.templatetags import cards
class MockUserRequest:
def __init__(self, user):
self.user = user
class TemplateTagsTestCase(TestCase):
fixtures = ['tests.json']
@classmethod
def setUpClass(cls):
super(TemplateTagsTestCase, cls).setUpClass()
cls.child = models.Child.objects.first()
cls.context = {'request': MockUserRequest(User.objects.first())}
# Ensure timezone matches the one defined by fixtures.
user_timezone = Settings.objects.first().timezone
timezone.activate(pytz.timezone(user_timezone))
# Test file data uses a basis date of 2017-11-18.
date = timezone.localtime().strptime('2017-11-18', '%Y-%m-%d')
cls.date = timezone.make_aware(date)
def test_hide_empty(self):
request = MockUserRequest(User.objects.first())
request.user.settings.dashboard_hide_empty = True
context = {'request': request}
hide_empty = cards._hide_empty(context)
self.assertTrue(hide_empty)
def test_card_diaperchange_last(self):
data = cards.card_diaperchange_last(self.context, self.child)
self.assertEqual(data['type'], 'diaperchange')
self.assertFalse(data['empty'])
self.assertFalse(data['hide_empty'])
self.assertIsInstance(data['change'], models.DiaperChange)
self.assertEqual(data['change'], models.DiaperChange.objects.first())
def test_card_diaperchange_types(self):
data = cards.card_diaperchange_types(
self.context,
self.child,
self.date)
self.assertEqual(data['type'], 'diaperchange')
stats = {
0: {'wet_pct': 50.0, 'solid_pct': 50.0, 'solid': 1, 'wet': 1},
1: {'wet_pct': 0.0, 'solid_pct': 100.0, 'solid': 2, 'wet': 0},
2: {'wet_pct': 100.0, 'solid_pct': 0.0, 'solid': 0, 'wet': 2},
3: {'wet_pct': 75.0, 'solid_pct': 25.0, 'solid': 1, 'wet': 3},
4: {'wet_pct': 100.0, 'solid_pct': 0.0, 'solid': 0, 'wet': 1},
5: {'wet_pct': 100.0, 'solid_pct': 0.0, 'solid': 0, 'wet': 2},
6: {'wet_pct': 100.0, 'solid_pct': 0.0, 'solid': 0, 'wet': 1}
}
self.assertEqual(data['stats'], stats)
def test_card_feeding_day(self):
data = cards.card_feeding_day(self.context, self.child, self.date)
self.assertEqual(data['type'], 'feeding')
self.assertFalse(data['empty'])
self.assertFalse(data['hide_empty'])
self.assertEqual(data['total'], 2.5)
self.assertEqual(data['count'], 3)
def test_card_feeding_last(self):
data = cards.card_feeding_last(self.context, self.child)
self.assertEqual(data['type'], 'feeding')
self.assertFalse(data['empty'])
self.assertFalse(data['hide_empty'])
self.assertIsInstance(data['feeding'], models.Feeding)
self.assertEqual(data['feeding'], models.Feeding.objects.first())
def test_card_feeding_last_method(self):
data = cards.card_feeding_last_method(self.context, self.child)
self.assertEqual(data['type'], 'feeding')
self.assertFalse(data['empty'])
self.assertFalse(data['hide_empty'])
self.assertEqual(len(data['feedings']), 3)
for feeding in data['feedings']:
self.assertIsInstance(feeding, models.Feeding)
self.assertEqual(
data['feedings'][2].method,
models.Feeding.objects.first().method)
def test_card_sleep_last(self):
data = cards.card_sleep_last(self.context, self.child)
self.assertEqual(data['type'], 'sleep')
self.assertFalse(data['empty'])
self.assertFalse(data['hide_empty'])
self.assertIsInstance(data['sleep'], models.Sleep)
self.assertEqual(data['sleep'], models.Sleep.objects.first())
def test_card_sleep_last_empty(self):
models.Sleep.objects.all().delete()
data = cards.card_sleep_last(self.context, self.child)
self.assertEqual(data['type'], 'sleep')
self.assertTrue(data['empty'])
self.assertFalse(data['hide_empty'])
def test_card_sleep_day(self):
data = cards.card_sleep_day(self.context, self.child, self.date)
self.assertEqual(data['type'], 'sleep')
self.assertFalse(data['empty'])
self.assertFalse(data['hide_empty'])
self.assertEqual(data['total'], timezone.timedelta(2, 7200))
self.assertEqual(data['count'], 4)
def test_card_sleep_naps_day(self):
data = cards.card_sleep_naps_day(self.context, self.child, self.date)
self.assertEqual(data['type'], 'sleep')
self.assertFalse(data['empty'])
self.assertFalse(data['hide_empty'])
self.assertEqual(data['total'], timezone.timedelta(0, 9000))
self.assertEqual(data['count'], 2)
def test_card_statistics(self):
data = cards.card_statistics(self.context, self.child)
stats = [
{
'title': 'Diaper change frequency',
'stat': timezone.timedelta(0, 44228, 571429),
'type': 'duration'
},
# Statistics date basis is not particularly strong to these feeding
# examples.
# TODO: Improve testing of feeding frequency statistics.
{
'type': 'duration',
'stat': 0.0,
'title': 'Feeding frequency (past 3 days)'
},
{
'type': 'duration',
'stat': 0.0,
'title': 'Feeding frequency (past 2 weeks)'},
{
'type': 'duration',
'stat': timezone.timedelta(0, 7200),
'title': 'Feeding frequency'
},
{
'title': 'Average nap duration',
'stat': timezone.timedelta(0, 4500),
'type': 'duration'
},
{
'title': 'Average naps per day',
'stat': 2.0,
'type': 'float'
},
{
'title': 'Average sleep duration',
'stat': timezone.timedelta(0, 6750),
'type': 'duration'
},
{
'title': 'Average awake duration',
'stat': timezone.timedelta(0, 19200),
'type': 'duration'
},
{
'title': 'Weight change per week',
'stat': 1.0, 'type':
'float'
}
]
self.assertEqual(data['stats'], stats)
self.assertFalse(data['empty'])
self.assertFalse(data['hide_empty'])
def test_card_timer_list(self):
user = User.objects.first()
child = models.Child.objects.first()
child_two = models.Child.objects.create(
first_name='Child',
last_name='Two',
birth_date=timezone.localdate()
)
timers = {
'no_child': models.Timer.objects.create(
user=user,
start=timezone.localtime() - timezone.timedelta(hours=3)
),
'child': models.Timer.objects.create(
user=user,
child=child,
start=timezone.localtime() - timezone.timedelta(hours=2)
),
'child_two': models.Timer.objects.create(
user=user,
child=child_two,
start=timezone.localtime() - timezone.timedelta(hours=1)
),
}
data = cards.card_timer_list(self.context)
self.assertIsInstance(data['instances'][0], models.Timer)
self.assertEqual(len(data['instances']), 3)
data = cards.card_timer_list(self.context, child)
self.assertIsInstance(data['instances'][0], models.Timer)
self.assertTrue(timers['no_child'] in data['instances'])
self.assertTrue(timers['child'] in data['instances'])
self.assertFalse(timers['child_two'] in data['instances'])
data = cards.card_timer_list(self.context, child_two)
self.assertIsInstance(data['instances'][0], models.Timer)
self.assertTrue(timers['no_child'] in data['instances'])
self.assertTrue(timers['child_two'] in data['instances'])
self.assertFalse(timers['child'] in data['instances'])
def test_card_tummytime_last(self):
data = cards.card_tummytime_last(self.context, self.child)
self.assertEqual(data['type'], 'tummytime')
self.assertFalse(data['empty'])
self.assertFalse(data['hide_empty'])
self.assertIsInstance(data['tummytime'], models.TummyTime)
self.assertEqual(data['tummytime'], models.TummyTime.objects.first())
def test_card_tummytime_day(self):
data = cards.card_tummytime_day(self.context, self.child, self.date)
self.assertEqual(data['type'], 'tummytime')
self.assertFalse(data['empty'])
self.assertFalse(data['hide_empty'])
self.assertIsInstance(data['instances'].first(), models.TummyTime)
self.assertIsInstance(data['last'], models.TummyTime)
stats = {'count': 3, 'total': timezone.timedelta(0, 300)}
self.assertEqual(data['stats'], stats)
| 39.529167 | 79 | 0.586803 |
544a1d84239db962772d50db6ae0a32024b5d77d | 50,440 | py | Python |
core_orthogonal_archived/sdfrenderer/renderer.py | trisct/DIST-Renderer | 18e8494f07bfa6487710afacda563a899d74e5d2 | ["MIT"] | null | null | null |
core_orthogonal_archived/sdfrenderer/renderer.py | trisct/DIST-Renderer | 18e8494f07bfa6487710afacda563a899d74e5d2 | ["MIT"] | null | null | null |
core_orthogonal_archived/sdfrenderer/renderer.py | trisct/DIST-Renderer | 18e8494f07bfa6487710afacda563a899d74e5d2 | ["MIT"] | null | null | null |
import os, sys
import torch
import torch.nn as nn
import numpy as np
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
from core.utils.decoder_utils import decode_sdf, decode_sdf_gradient
from core.visualize.profiler import Profiler
from core.utils.render_utils import depth2normal
import copy
import time
class SDFRenderer(object):
def __init__(self, decoder, intrinsic, img_hw=None, transform_matrix=None, march_step=50, buffer_size=5, ray_marching_ratio=1.5, use_depth2normal=False, max_sample_dist=0.2, radius=1.0, threshold=5e-5, scale_list=[4, 2, 1], march_step_list=[3, 3, -1], use_gpu=True, is_eval=True):
self.decoder = decoder
print(f'[In renderer] Setting device to {next(self.decoder.parameters()).device}')
self.device = next(self.decoder.parameters()).device
if is_eval:
self.decoder.eval()
self.march_step = march_step
self.buffer_size = buffer_size
self.max_sample_dist = max_sample_dist
self.ray_marching_ratio = ray_marching_ratio
self.use_depth2normal=use_depth2normal
self.radius = radius
self.threshold = threshold
self.scale_list = scale_list
self.march_step_list = march_step_list
if type(intrinsic) == torch.Tensor:
intrinsic = intrinsic.detach().cpu().numpy()
self.intrinsic = intrinsic
if img_hw is None:
img_h, img_w = int(intrinsic[1,2] * 2), int(intrinsic[0,2] * 2) # this tells us that intrinsic matrix is 2-dimensional and [1,2], [0,2] are respectively cy, cx
self.img_hw = (img_h, img_w)
else:
self.img_hw = img_hw
self.homo_2d = self.init_grid_homo_2d(self.img_hw)
self.K, self.K_inv = self.init_intrinsic(intrinsic)
print(f'[In renderer] showing candidate devices at line 40...')
print(f'[In renderer] | K_inv = {self.K_inv.device}')
print(f'[In renderer] | homo_2d = {self.homo_2d.device}')
self.homo_calib = torch.matmul(self.K_inv, self.homo_2d) # (3, H*W)
self.homo_calib.requires_grad=False
self.imgmap_init = self.init_imgmap(self.img_hw)
self.imgmap_init.requires_grad=False
if transform_matrix is None:
self.transform_matrix = np.array([[1., 0., 0.], [0., 0., -1.], [0., 1., 0.]])
else:
self.transform_matrix = transform_matrix
self.transform_matrix = torch.from_numpy(self.transform_matrix).float()
if use_gpu:
if torch.cuda.device_count() == 0:
raise ValueError('No GPU device found.')
self.homo_2d = self.homo_2d.to(self.device)
self.homo_calib = self.homo_calib.to(self.device) # (3, H*W)
self.imgmap_init = self.imgmap_init.to(self.device) # (H*W)
self.transform_matrix = self.transform_matrix.to(self.device) # (3,3)
self.K, self.K_inv = self.K.to(self.device), self.K_inv.to(self.device)
self.calib_map = self.normalize_vectors(self.homo_calib)[2,:]
def get_intrinsic(self):
return self.intrinsic
def get_threshold(self):
return self.threshold
def get_img_hw(self):
return self.img_hw
def visualize_calib_map(self, fname='calib_map_vis.png'):
import cv2
data = self.calib_map.detach().cpu().numpy()
min_, max_ = data.min(), data.max()
data = (data - min_) / (max_ - min_)
data = (data * 255.0).reshape(self.img_hw[0], self.img_hw[1]).astype(np.uint8)
cv2.imwrite(fname, data)
def apply_3Dsim(self, points,sim3_mtrx,inv=False):
sR,t = sim3_mtrx[:,:3],sim3_mtrx[:,3]
points = (points-t)@sR.inverse().t() if inv else \
points@sR.t()+t
return points
def transform_points(self, points):
'''
transformation for point coordinates.
Input:
- points type: torch.Tensor (3, H*W)
Return:
- points_new type: torch.Tensor (3, H*W)
'''
if self.transform_matrix.shape[1] == 4:
# sR, t = self.transform_matrix[:,:3], self.transform_matrix[:,3]
# points_new = sR @ points + t[:, None]
points_new = self.apply_3Dsim(points.t(), self.transform_matrix).t()
else:
points_new = torch.matmul(self.transform_matrix, points)
return points_new
def inv_transform_points(self, points):
'''
inverse transformation for point coordinates.
Input:
- points type: torch.Tensor (3, H*W)
Return:
- points_new type: torch.Tensor (3, H*W)
'''
if self.transform_matrix.shape[1] == 4:
# sR, t = self.transform_matrix[:,:3], self.transform_matrix[:,3]
# points_new = sR.inverse() @ (points-t[:, None])
#pdb.set_trace()
#points = np.array([0.419, 1.837, 2.495])
#points = torch.from_numpy(points)
#points = points[:, None]
            # pdb.set_trace()  # debug leftover; 'pdb' is not imported in this module
points_new = self.apply_3Dsim(points.t(), self.transform_matrix, inv=True).t()
else:
points_new = torch.matmul(self.transform_matrix.transpose(1,0), points)
return points_new
def get_meshgrid(self, img_hw):
'''
To get meshgrid:
Input:
- img_hw (h, w)
Return:
- grid_map type: torch.Tensor (H, W, 2)
'''
h, w = img_hw
Y, X = torch.meshgrid(torch.arange(0, h), torch.arange(0, w))
grid_map = torch.cat([X[:,:,None], Y[:,:,None]], 2) # (h, w, 2)
grid_map = grid_map.float()
return grid_map.to(self.device)
def get_homo_2d_from_xy(self, xy):
'''
get homo 2d from xy
Input:
- xy type: torch.Tensor (H, W, 2)
Return:
- homo type: torch.Tensor (H, W, 3)
'''
H, W = xy.shape[0], xy.shape[1]
homo_ones = torch.ones(H, W, 1)
if xy.get_device() != -1:
homo_ones = homo_ones.to(xy.device)
homo_2d = torch.cat([xy, homo_ones], 2)
return homo_2d
def get_homo_2d(self, img_hw):
xy = self.get_meshgrid(img_hw)
homo_2d = self.get_homo_2d_from_xy(xy)
return homo_2d
def init_grid_homo_2d(self, img_hw):
homo_2d = self.get_homo_2d(img_hw)
homo_2d = homo_2d.reshape(-1, 3).transpose(1,0) # (3, H*W)
return homo_2d
def init_intrinsic(self, intrinsic):
K = torch.from_numpy(intrinsic).float().to(self.device)
K_inv = torch.from_numpy(np.linalg.inv(intrinsic)).float().to(self.device)
return K, K_inv
def init_imgmap(self, img_hw):
h, w = img_hw
imgmap_init = torch.zeros(h, w)
return imgmap_init
def normalize_vectors(self, x):
'''
normalize the vector by the first dim
'''
norm = torch.norm(x, p=2, dim=0).expand_as(x)
eps = 1e-12
x = x.div(norm + eps)
return x
def get_camera_location(self, R, T):
'''
Input:
- R type: torch.Tensor (3,3)
- T type: torch.Tensor (3)
'''
pos = torch.matmul(-R.transpose(1,0), T[:,None]) # (3,1)
pos = pos.squeeze(1) # (3)
return pos
def get_camera_rays(self, R, homo=None):
'''
Input:
- R type: torch.Tensor (3,3)
- T type: torch.Tensor (3)
'''
if homo is None:
homo = self.homo_calib
rays = torch.matmul(R.transpose(1,0), homo) # (3, H*W)
rays = self.normalize_vectors(rays) # (3, H*W)
return rays
def generate_point_samples(self, cam_pos, cam_rays, Zdepth, inv_transform=True, has_zdepth_grad=False):
'''
Input:
- cam_pos type torch.Tensor (3)
- cam_ays type torch.Tensor (3, N)
- Zdepth type torch.Tensor (N)
Return:
- points type torch.Tensor (3, N)
'''
if not has_zdepth_grad:
Zdepth = Zdepth.detach()
N = Zdepth.shape[0]
if N == 0:
raise ValueError('No valid depth.')
cam_pos_pad = cam_pos[:,None].repeat(1,N) # (3, N)
Zdepth_pad = Zdepth[None,:].repeat(3,1) # (3, N)
points = cam_rays * Zdepth_pad + cam_pos_pad # (3, N)
if inv_transform:
points = self.inv_transform_points(points)
if not points.requires_grad:
points.requires_grad=True
return points
def get_distance_from_origin(self, cam_pos, cam_rays):
'''
get_distance_from_origin
Input:
- cam_pos type torch.FloatTensor (3)
- cam_rays type torch.FloatTensor (3, H*W)
'''
N = cam_rays.shape[1]
cam_pos_pad = cam_pos[:,None].expand_as(cam_rays) # (3, N)
p, q = cam_pos_pad, cam_rays # (3, N), (3, N)
ptq = (p * q).sum(0) # (N)
dist = p - ptq[None,:].repeat(3,1) * q # (3, N)
dist = torch.norm(dist, p=2, dim=0) # (N)
return dist
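    # Note (added for clarity): with p the camera position and q a unit ray
    # direction, p - dot(p, q) * q is the component of p orthogonal to the ray,
    # so `dist` is the perpendicular distance from the origin to each viewing ray.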
def get_maxbound_zdepth_from_dist(self, dist):
'''
Input:
- dist type torch.FloatTensor (N)
'''
with torch.no_grad():
value = self.radius ** 2 - dist ** 2
valid_mask = (value >= 0)
maxbound_zdepth = torch.zeros_like(dist)
maxbound_zdepth[valid_mask] = 2 * torch.sqrt(value[valid_mask])
return maxbound_zdepth
def get_intersections_with_unit_spheres(self, cam_pos, cam_rays):
'''
get_intersections_with_unit_sphere
Input:
- cam_pos type torch.FloatTensor (3)
- cam_rays type torch.FloatTensor (3, H*W)
'''
with torch.no_grad():
dist = self.get_distance_from_origin(cam_pos, cam_rays)
valid_mask = (dist <= self.radius)
maxbound_marching_zdepth = self.get_maxbound_zdepth_from_dist(dist) # (H*W)
cam_pos_dist = torch.sqrt((cam_pos ** 2).sum())
if torch.nonzero((cam_pos_dist < self.radius).unsqueeze(0)).shape[0] != 0:
init_zdepth = torch.zeros_like(dist)
else:
init_zdepth_valid = torch.sqrt(cam_pos_dist ** 2 - dist[valid_mask] ** 2) - maxbound_marching_zdepth[valid_mask] / 2.0 # (N)
init_zdepth = torch.ones_like(dist) * init_zdepth_valid.max() # (H*W)
init_zdepth = self.copy_index(init_zdepth, valid_mask, init_zdepth_valid)
return init_zdepth, valid_mask
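    # Note (added for clarity): for a ray at perpendicular distance d from the
    # origin, the chord through the sphere of radius r has length 2*sqrt(r^2 - d^2),
    # so the entry depth computed above is sqrt(|cam_pos|^2 - d^2) - sqrt(r^2 - d^2)
    # whenever the camera lies outside the sphere.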
def get_maxbound_zdepth(self, cam_pos, valid_cam_rays):
with torch.no_grad():
init_zdepth, _ = self.get_intersections_with_unit_spheres(cam_pos, valid_cam_rays) # (N)
dist = self.get_distance_from_origin(cam_pos, valid_cam_rays) # (N)
maxbound_marching_zdepth = self.get_maxbound_zdepth_from_dist(dist) # (N)
max_zdepth = init_zdepth + maxbound_marching_zdepth # (N)
return max_zdepth
def copy_index(self, inputs, mask, src):
'''
out-of-place copy index.
Input:
- inputs: torch.Tensor (H*W) / (H, W) / (H, W, k)
- mask: torch.Tensor (H*W)
- src: torch.Tensor (N) / (N, k)
'''
inputs_shape = inputs.shape
if len(inputs_shape) <= 2:
inputs, mask = inputs.reshape(-1), mask.reshape(-1)
elif len(inputs_shape) == 3:
inputs, mask = inputs.reshape(-1, inputs_shape[-1]), mask.reshape(-1)
else:
raise NotImplementedError
index = torch.nonzero(mask).reshape(-1).long()
outputs = inputs.index_copy(0, index, src)
outputs = outputs.reshape(inputs_shape)
return outputs
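    # Example (illustrative): with inputs = [0., 0., 0., 0.], mask = [F, T, F, T]
    # and src = [1., 2.], copy_index returns [0., 1., 0., 2.]; `inputs` itself is
    # left untouched because index_copy (non-underscore variant) is out-of-place.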
def get_index_from_sdf_list(self, sdf_list, index_size, index_type='min', clamp_dist=0.1):
'''
get index with certain method.
Input:
- sdf_list: type: torch.Tensor (self.march_step, N)
Return:
- sdf: type: torch.Tensor (N, index_size)
- index: type: torch.Tensor (N, index_size). Note: the first dimension (index[0]) is always the min index.
'''
if index_type == 'min':
sdf, index = torch.topk(-sdf_list.transpose(1,0), index_size, dim=1)
sdf = -sdf
elif index_type == 'min_abs':
sdf_list_new = torch.abs(sdf_list)
_, index = torch.topk(-sdf_list_new.transpose(1,0), index_size, dim=1)
sdf = self.collect_data_from_index(sdf_list, index)
elif index_type == 'max_neg':
sdf_list_new = sdf_list.clone()
sdf_list_pos = (sdf_list_new >= 0)
sdf_list_new[sdf_list_pos] = sdf_list_new[sdf_list_pos].clone() * (-1) - 2
sdf, index = torch.topk(sdf_list_new.transpose(1,0), index_size, dim=1) # (N, index_size)
sdf_pos = (sdf <= -2)
sdf[sdf_pos] = sdf[sdf_pos].clone() * (-1) - 2
elif index_type == 'last_valid':
march_step, N = sdf_list.shape[0], sdf_list.shape[1]
valid = (torch.abs(sdf_list) < clamp_dist)
idx_list = torch.arange(0, march_step)[:,None].repeat(1,N).to(sdf_list.device)
idx_list = idx_list.float() * valid.float()
_, index = torch.topk(idx_list.transpose(1,0), index_size, dim=1) # (N, index_size)
sdf = self.collect_data_from_index(sdf_list, index)[0].transpose(1,0)
elif index_type == 'last':
march_step, N = sdf_list.shape[0], sdf_list.shape[1]
sdf = sdf_list[-index_size:, :].transpose(1,0)
index = torch.arange(march_step - index_size, march_step)[None,:].repeat(N, 1)
index = index.to(sdf.device)
else:
raise NotImplementedError
return sdf, index
def collect_data_from_index(self, data, index):
'''
Input:
- data: type: torch.Tensor (self.march_step, N) / (self.march_step, N, k)
- index: type: torch.Tensor (N, index_size)
Return:
- data_sampled: type: torch.Tensor (index_size, N) / (index_size, N, k)
'''
index_size = index.shape[1]
count_index = torch.arange(index.shape[0]).repeat(index_size).to(index.device)
point_index = index.transpose(1,0).reshape(-1) * data.shape[1] + count_index
if len(data.shape) == 3:
data_shape = data.shape
data_sampled = data.reshape(-1, data_shape[-1])[point_index].reshape(index_size, -1, data_shape[-1]).clone() # (index_size, N, 3)
elif len(data.shape) == 2:
data_sampled = data.reshape(-1)[point_index].reshape(index_size, -1).clone() # (index_size, N)
else:
raise NotImplementedError
return data_sampled
def sample_points_uniform(self, points, cam_rays, num_samples=None):
'''
Input:
points: type: torch.Tensor (N, 3)
cam_rays: type: torch.Tensor (3, N)
Return:
points_sampled: type: torch.Tensor (num_samples, N, 3)
'''
        if num_samples is None:
num_samples = self.buffer_size
N = points.shape[0]
points = points[None,:,:].repeat(num_samples, 1, 1) # (num_samples, N, 3)
cam_rays = cam_rays.transpose(1, 0)[None,:,:].repeat(num_samples, 1, 1) # (num_samples, N, 3)
delta_depth = torch.linspace(0, -self.max_sample_dist, num_samples).to(points.device) # (num_samples)
delta_depth = delta_depth[:,None,None].repeat(1, N, 3) # (num_samples, N, 3)
points_sampled = delta_depth * cam_rays + points # (num_smaples, N, 3)
return points_sampled
def get_min_sdf_sample(self, sdf_list, points_list, latent, index_type='min_abs', clamp_dist=0.1, profile=False, no_grad=False):
profiler = Profiler(silent = not profile)
_, index = self.get_index_from_sdf_list(sdf_list, 1, index_type=index_type)
points = self.collect_data_from_index(points_list, index)[0] # (N, 3)
min_sdf_sample = decode_sdf(self.decoder, latent, points, clamp_dist=None, no_grad=no_grad).squeeze(-1)
profiler.report_process('[DEPTH] [SAMPLING] sample min sdf time\t')
if no_grad:
min_sdf_sample = min_sdf_sample.detach()
return min_sdf_sample
def get_sample_on_marching_zdepth_along_ray(self, marching_zdepth_list, sdf_list, points_list, cam_rays, latent, index_type='min_abs', use_uniform_sample=False, clamp_dist=0.1, profile=False, no_grad=False):
# initialization
profiler = Profiler(silent = not profile)
# collect points
if use_uniform_sample:
sdf_selected, index_selected = self.get_index_from_sdf_list(sdf_list, 1, index_type=index_type, clamp_dist=clamp_dist)
points = self.collect_data_from_index(points_list, index_selected)[0] # (N, 3)
points_sampled = self.sample_points_uniform(points, cam_rays)
else:
sdf_selected, index_selected = self.get_index_from_sdf_list(sdf_list, self.buffer_size, index_type=index_type, clamp_dist=clamp_dist)
points_sampled = self.collect_data_from_index(points_list, index_selected)
profiler.report_process('[DEPTH] [SAMPLING] collect points time\t')
# generate new marching zdepth
marching_zdepth = self.collect_data_from_index(marching_zdepth_list, index_selected[:,[0]])[0] # (N)
marching_zdepth = marching_zdepth + (1 - self.ray_marching_ratio) * torch.clamp(sdf_selected[0,:], -clamp_dist, clamp_dist) # (N)
if no_grad:
marching_zdepth_final = marching_zdepth
else:
marching_zdepth_new = marching_zdepth
for i in range(self.buffer_size):
sdf = decode_sdf(self.decoder, latent, points_sampled[i], clamp_dist=clamp_dist, no_grad=no_grad).squeeze(-1)
marching_zdepth_new = marching_zdepth_new - sdf.detach() * self.ray_marching_ratio
marching_zdepth_new = marching_zdepth_new + sdf * self.ray_marching_ratio
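                # Note (added for clarity): subtracting sdf.detach() and adding sdf back
                # leaves the depth value numerically unchanged while attaching it to the
                # autograd graph, so gradients can flow through these re-evaluated SDF queries.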
profiler.report_process('[DEPTH] [SAMPLING] re-ray marching time')
marching_zdepth_final = marching_zdepth_new
return marching_zdepth_final
def ray_marching_trivial_non_parallel(self, cam_pos, cam_rays, init_zdepth, valid_mask, latent, march_step=None, clamp_dist=0.1, no_grad=False, use_transform=True):
valid_cam_rays = cam_rays[:, valid_mask]
init_zdepth = init_zdepth[valid_mask]
if march_step is None:
march_step = self.march_step
marching_zdepth = torch.zeros_like(init_zdepth)
marching_zdepth_list, sdf_list, points_list = [], [], []
for j in range(valid_cam_rays.shape[1]):
marching_zdepth_list_per_ray, sdf_list_per_ray, points_list_per_ray = [], [], []
marching_zdepth_per_ray = marching_zdepth[[j]]
for i in range(march_step):
# get corresponding sdf value
points = self.generate_point_samples(cam_pos, valid_cam_rays[:,[j]], init_zdepth[[j]] + marching_zdepth_per_ray, inv_transform=use_transform)
sdf = decode_sdf(self.decoder, latent, points.transpose(1,0), clamp_dist=None, no_grad=no_grad).squeeze(-1)
points_list_per_ray.append(points.transpose(1,0)[None,:])
# clamp sdf from below if the flag is invalid, which means that it has not meet any sdf < 0
sdf = sdf.detach()
sdf_list_per_ray.append(sdf[None,:])
sdf_marching = torch.clamp(sdf, -clamp_dist, clamp_dist)
# aggressive ray marching
marching_zdepth_per_ray = marching_zdepth_per_ray + sdf_marching * self.ray_marching_ratio
marching_zdepth_list_per_ray.append(marching_zdepth_per_ray[None,:])
# concat ray marching info
marching_zdepth_list_per_ray = torch.cat(marching_zdepth_list_per_ray, 0) # (self.march_step, N)
sdf_list_per_ray = torch.cat(sdf_list_per_ray, 0)
points_list_per_ray = torch.cat(points_list_per_ray, 0)
marching_zdepth_list.append(marching_zdepth_list_per_ray)
sdf_list.append(sdf_list_per_ray)
points_list.append(points_list_per_ray)
marching_zdepth_list = torch.cat(marching_zdepth_list, 1)
sdf_list = torch.cat(sdf_list, 1)
points_list = torch.cat(points_list, 1)
# get valid mask
maxbound_zdepth = self.get_maxbound_zdepth(cam_pos, valid_cam_rays)
valid_mask_max_marching_zdepth = (marching_zdepth_list[-1] + init_zdepth < maxbound_zdepth)
min_sdf, _ = torch.abs(sdf_list).min(0)
valid_mask_ray_marching = (min_sdf <= self.threshold)
# get corner case: the first query is lower than threshold.
valid_mask_first_query = sdf_list[0] > self.threshold
valid_mask_render = valid_mask_max_marching_zdepth & valid_mask_ray_marching & valid_mask_first_query # (N)
return sdf_list, marching_zdepth_list, points_list, valid_mask_render
def ray_marching_trivial(self, cam_pos, cam_rays, init_zdepth, valid_mask, latent, march_step=None, clamp_dist=0.1, no_grad=False, use_transform=True):
valid_cam_rays = cam_rays[:, valid_mask]
init_zdepth = init_zdepth[valid_mask]
if march_step is None:
march_step = self.march_step
marching_zdepth = torch.zeros_like(init_zdepth)
marching_zdepth_list, sdf_list, points_list = [], [], []
for i in range(march_step):
# get corresponding sdf value
points = self.generate_point_samples(cam_pos, valid_cam_rays, init_zdepth + marching_zdepth, inv_transform=use_transform)
sdf = decode_sdf(self.decoder, latent, points.transpose(1,0), clamp_dist=None, no_grad=no_grad).squeeze(-1)
points_list.append(points.transpose(1,0)[None,:])
# clamp sdf from below if the flag is invalid, which means that it has not meet any sdf < 0
sdf = sdf.detach()
sdf_list.append(sdf[None,:])
sdf_marching = torch.clamp(sdf, -clamp_dist, clamp_dist)
# aggressive ray marching
marching_zdepth = marching_zdepth + sdf_marching * self.ray_marching_ratio
marching_zdepth_list.append(marching_zdepth[None,:])
# concat ray marching info
marching_zdepth_list = torch.cat(marching_zdepth_list, 0) # (self.march_step, N)
sdf_list = torch.cat(sdf_list, 0)
points_list = torch.cat(points_list, 0)
# get valid mask
maxbound_zdepth = self.get_maxbound_zdepth(cam_pos, valid_cam_rays)
valid_mask_max_marching_zdepth = (marching_zdepth_list[-1] + init_zdepth < maxbound_zdepth)
min_sdf, _ = torch.abs(sdf_list).min(0)
valid_mask_ray_marching = (min_sdf <= self.threshold)
# get corner case: the first query is lower than threshold.
valid_mask_first_query = sdf_list[0] > self.threshold
valid_mask_render = valid_mask_max_marching_zdepth & valid_mask_ray_marching & valid_mask_first_query # (N)
return sdf_list, marching_zdepth_list, points_list, valid_mask_render
def ray_marching_recursive(self, cam_pos, cam_rays, init_zdepth, valid_mask, latent, march_step=None, stop_threshold=None, clamp_dist=0.1, no_grad=False, use_transform=True, use_first_query_check=True):
if stop_threshold is None:
stop_threshold = self.threshold
valid_cam_rays = cam_rays[:, valid_mask]
init_zdepth = init_zdepth[valid_mask]
if march_step is None:
march_step = self.march_step
maxbound_zdepth = self.get_maxbound_zdepth(cam_pos, valid_cam_rays)
marching_zdepth_list, sdf_list, points_list = [], [], []
marching_zdepth = torch.zeros_like(init_zdepth)
valid_mask_max_marching_zdepth = (marching_zdepth + init_zdepth < maxbound_zdepth)
unfinished_mask = valid_mask_max_marching_zdepth # (N)
for i in range(march_step):
# get unfinished
cam_rays_now = valid_cam_rays[:, unfinished_mask] # (3, K)
init_zdepth_now = init_zdepth[unfinished_mask] # (K)
marching_zdepth_now = marching_zdepth[unfinished_mask] # (K)
# get corresponding sdf value
points_now = self.generate_point_samples(cam_pos, cam_rays_now, init_zdepth_now + marching_zdepth_now, inv_transform=use_transform) # (3, K)
if no_grad:
points_now = points_now.detach()
sdf_now = decode_sdf(self.decoder, latent, points_now.transpose(1,0), clamp_dist=None, no_grad=no_grad).squeeze(-1) # (K)
points = torch.zeros_like(marching_zdepth)[:,None].repeat(1,3)
points[unfinished_mask,:] = points_now.transpose(1,0)
if no_grad:
points = points.detach()
points_list.append(points[None,:])
# clamp sdf from below if the flag is invalid, which means that it has not meet any sdf < 0
sdf = torch.zeros_like(marching_zdepth)
sdf[unfinished_mask] = sdf_now.detach()
sdf_marching = torch.clamp(sdf, -clamp_dist, clamp_dist)
# aggressive ray marching
marching_zdepth = marching_zdepth + sdf_marching * self.ray_marching_ratio
marching_zdepth_list.append(marching_zdepth[None,:])
# update sdf list
sdf[~unfinished_mask] = 1.0
sdf_list.append(sdf[None,:])
# update unfinised mask
valid_mask_max_marching_zdepth = (marching_zdepth + init_zdepth < maxbound_zdepth)
unstop_mask = torch.abs(sdf) >= stop_threshold
unfinished_mask = unfinished_mask & valid_mask_max_marching_zdepth & unstop_mask
if torch.nonzero(unfinished_mask).shape[0] == 0:
while(len(marching_zdepth_list) < self.buffer_size):
marching_zdepth_list.append(marching_zdepth[None,:])
sdf_list.append(sdf[None,:])
points_list.append(points[None,:])
break
# concat ray marching info
marching_zdepth_list = torch.cat(marching_zdepth_list, 0) # (self.march_step, N)
sdf_list = torch.cat(sdf_list, 0)
points_list = torch.cat(points_list, 0)
# get valid mask
valid_mask_max_marching_zdepth = (marching_zdepth_list[-1] + init_zdepth < maxbound_zdepth)
min_sdf, _ = torch.abs(sdf_list).min(0)
valid_mask_ray_marching = (min_sdf <= self.threshold)
# get corner case: the first query is lower than threshold.
valid_mask_render = valid_mask_max_marching_zdepth & valid_mask_ray_marching # (N)
if use_first_query_check:
valid_mask_first_query = sdf_list[0] > self.threshold
valid_mask_render = valid_mask_render & valid_mask_first_query
return sdf_list, marching_zdepth_list, points_list, valid_mask_render
def index_sample(self, basemap, indexmap):
'''
To use indexmap to index basemap.
Inputs:
- basemap type: torch.Tensor (H', W', C)
- indexmap type: torch.Tensor (H, W, 2)
Returns:
- newmap type: torch.Tensor (H, W, C)
'''
h, w, c = basemap.shape[0], basemap.shape[1], basemap.shape[2]
h_index, w_index = indexmap.shape[0], indexmap.shape[1]
index = indexmap.reshape(-1, 2)
index = (index[:,0] + index[:,1] * w).type(torch.long)
newmap = basemap.reshape(-1, c)[index]
newmap = newmap.reshape(h_index, w_index, c)
return newmap
def get_downscaled_grid_map(self, grid_map, scale=2.0):
'''
Inputs:
- grid_map type: torch.Tensor (H, W, 2)
Returns:
- new_grid_map type: torch.Tensor (H/scale, W/scale, 2)
- index_map type: torch.Tensor (H, W, 2)
'''
h, w = grid_map.shape[0], grid_map.shape[1]
stride = grid_map[0,1,0] - grid_map[0,0,0]
new_h, new_w = np.ceil(h / scale), np.ceil(w / scale)
new_grid_map = self.get_meshgrid((new_h, new_w))
print(f'[In renderer] showing candidate devices at line 618...')
#print(f'[In renderer] | scale = {scale.device}')
print(f'[In renderer] | stride = {stride.device}')
print(f'[In renderer] | new_grid_map = {new_grid_map.device}')
new_grid_map = (scale * stride) * new_grid_map + ((scale * stride) - 1) / 2
new_grid_map = new_grid_map.to(grid_map.device)
if stride == 1:
grid_map_meshgrid = grid_map
else:
grid_map_meshgrid = self.get_meshgrid((h,w))
grid_map_meshgrid = grid_map_meshgrid.to(grid_map.device)
index_map = torch.ceil((grid_map_meshgrid + 1) / scale) - 1
if (index_map[:,:,0] + index_map[:,:,1] * new_grid_map.shape[1]).max().detach().cpu().numpy() > new_grid_map.reshape(-1,2).shape[0]:
raise ValueError('Error! Index map out of bound.')
return new_grid_map, index_map
def get_rays_from_grid_map(self, grid_map, R):
homo_2d = self.get_homo_2d_from_xy(grid_map) # (H', W', 3)
homo_calib = torch.matmul(self.K_inv, homo_2d.reshape(-1,3).transpose(1,0)) # (3, H'*W')
rays = self.get_camera_rays(R, homo=homo_calib)
calib_map = self.normalize_vectors(homo_calib)[2,:]
return rays, calib_map
def get_downscaled_camera_rays(self, grid_map, R, scale=2.0):
'''
To get downscaled camera rays along with related infos
Inputs:
- grid_map type: torch.Tensor (H, W, 2)
- R type: torch.Tensor (3, 3)
Returns:
- new_grid_map type: torch.Tensor (H', W', 2)
- new_rays type: torch.Tensor (3, H'*W')
- index_map type: torch.Tensor (H*W, 2) (easy index-based upsampling is available simply with new_rays[:, index_map])
- recalib_map type: torch.Tensor (H*W)
'''
# get downsampled grid map and corresponding index map
new_grid_map, index_map = self.get_downscaled_grid_map(grid_map, scale=scale)
new_img_hw = new_grid_map.shape[:2]
# get downsampled camera rays
new_rays, new_calib_map = self.get_rays_from_grid_map(new_grid_map, R)
# get corresponding index
h, w = new_grid_map.shape[0], new_grid_map.shape[1]
index_map = index_map.reshape(-1, 2)
index_map = (index_map[:,0] + index_map[:,1] * w).type(torch.long)
# upsample downsampled camera rays and compute angles
rays, calib_map = self.get_rays_from_grid_map(grid_map, R)
new_calib_map_upsampled = new_calib_map[index_map]
recalib_map = new_calib_map_upsampled / calib_map
return new_grid_map, new_rays, index_map, recalib_map
def maxpool_valid_mask_with_index(self, valid_mask, index_map):
'''
to max pooling a binary mask (0/1 float tensor)
Inputs:
- valid_mask: type: torch.Tensor (H*W)
- index_map: type: torch.Tensor (H*W) with max entries (H'*W' - 1)
Returns:
- new_valid_mask: type: torch.Tensor (H'*W')
'''
from torch_scatter import scatter_max
print(f'[In renderer.maxpool_valid_mask_with_index] valid_mask: {valid_mask.dtype}, index_map: {index_map.dtype}')
with torch.no_grad():
new_valid_mask, _ = scatter_max(valid_mask.int(), index_map)
return new_valid_mask.bool()
def upsample_zdepth_and_recalib(self, zdepth_lowres, index_map, recalib_map):
zdepth_highres = zdepth_lowres[:, index_map]
zdepth_highres = zdepth_highres * recalib_map
return zdepth_highres
def unmap_tensor_with_valid_mask(self, tensor, valid_mask, fill_value=0.):
'''
Inputs:
- tensor: type: torch.Tensor (C, N) / (C, N, 3)
- valid_mask: type: torch.Tensor (H*W) with N valid entries
Returns:
- output: type: torch.Tensor (C, H*W) / (C, H*W, 3)
'''
C, N = tensor.shape[0], tensor.shape[1]
N_new = valid_mask.shape[0]
if len(tensor.shape) == 2:
if fill_value == 0:
output = torch.zeros(C, N_new).to(tensor.device)
else:
output = (torch.ones(C, N_new) * fill_value).to(tensor.device)
output[:, valid_mask] = tensor
else:
M = tensor.shape[2]
if fill_value == 0:
output = torch.zeros(C, N_new, M).to(tensor.device)
else:
output = (torch.ones(C, N_new, M) * fill_value).to(tensor.device)
output[:, valid_mask, :] = tensor
return output
def ray_marching_pyramid_recursive(self, cam_pos, R, valid_mask, latent, scale_list=None, march_step_list=None, march_step=None, stop_threshold=None, clamp_dist=0.1, no_grad=False, use_transform=True, split_type='raydepth'):
if stop_threshold is None:
stop_threshold = self.threshold
# initialization
if march_step is None:
march_step = self.march_step
if scale_list is None:
scale_list = copy.deepcopy(self.scale_list)
if march_step_list is None:
march_step_list = copy.deepcopy(self.march_step_list)
if march_step_list[-1] == -1:
march_step_list[-1] = march_step - sum(march_step_list[:-1])
assert (scale_list[-1] == 1)
# get pyramid rays, index maps, recalib maps and downscaled valid masks.
grid_map_list, rays_list, index_map_list, recalib_map_list, img_hw_list = [], [], [], [], []
valid_mask_list = []
scale_list_rev, march_step_list_rev = scale_list[::-1], march_step_list[::-1]
for idx, scale in enumerate(scale_list_rev):
if idx == 0: # the original scale
grid_map_now = self.homo_2d.reshape(-1, self.img_hw[0], self.img_hw[1])[:2].permute(1,2,0) # [h,w,2]
rays_now = self.get_camera_rays(R)
index_map_now, recalib_map_now = None, None
valid_mask_now = valid_mask
else:
grid_map_now, rays_now, index_map_now, recalib_map_now = self.get_downscaled_camera_rays(grid_map_list[idx-1], R, scale / scale_list_rev[idx-1])
if split_type == 'raydepth':
recalib_map_now = torch.ones_like(recalib_map_now)
valid_mask_now = self.maxpool_valid_mask_with_index(valid_mask_list[idx-1], index_map_now)
grid_map_list.append(grid_map_now)
img_hw_list.append(grid_map_now.shape[:2])
rays_list.append(rays_now)
index_map_list.append(index_map_now)
recalib_map_list.append(recalib_map_now)
valid_mask_list.append(valid_mask_now)
# get init zdepth
init_zdepth_lowres, _ = self.get_intersections_with_unit_spheres(cam_pos, rays_list[-1])
init_zdepth_original, _ = self.get_intersections_with_unit_spheres(cam_pos, rays_list[0])
# pyramid recursive ray marching
sdf_list, zdepth_list, points_list = None, None, None
num_scales = len(rays_list)
for idx in range(num_scales - 1, -1, -1):
cam_rays_now = rays_list[idx]
valid_mask_now = valid_mask_list[idx]
index_map_now = index_map_list[idx]
recalib_map_now = recalib_map_list[idx]
march_step_now = march_step_list_rev[idx]
if idx == num_scales - 1: # first (large) scale: initialization
init_zdepth_now = init_zdepth_lowres
else:
init_zdepth_now = zdepth_list[-1]
# single-scale ray marching
if idx != 0: # first (large) scale: initialization
sdf_list_now, marching_zdepth_list_now, points_list_now, _ = self.ray_marching_trivial(cam_pos, cam_rays_now, init_zdepth_now, valid_mask_now, latent, march_step=march_step_now, clamp_dist=clamp_dist, no_grad=no_grad, use_transform=use_transform)
# unmap
sdf_list_now = self.unmap_tensor_with_valid_mask(sdf_list_now, valid_mask_now, fill_value=1.0)
points_list_now = self.unmap_tensor_with_valid_mask(points_list_now, valid_mask_now)
marching_zdepth_list_now = self.unmap_tensor_with_valid_mask(marching_zdepth_list_now, valid_mask_now)
zdepth_list_now = marching_zdepth_list_now + init_zdepth_now
if idx != num_scales - 1: # not first iteration
sdf_list_now = torch.cat([sdf_list, sdf_list_now], 0)
points_list_now = torch.cat([points_list, points_list_now], 0)
zdepth_list_now = torch.cat([zdepth_list, zdepth_list_now], 0)
# upsample (and recalib)
sdf_list_now = sdf_list_now[:, index_map_now]
points_list_now = points_list_now[:, index_map_now, :]
zdepth_list_now = self.upsample_zdepth_and_recalib(zdepth_list_now, index_map_now, recalib_map_now)
# update global info
sdf_list, zdepth_list, points_list = sdf_list_now, zdepth_list_now, points_list_now
else: # i.e. idx == 0: final (original) scale: recursive ray marching.
sdf_list_now, marching_zdepth_list_now, points_list_now, valid_mask_render = self.ray_marching_recursive(cam_pos, cam_rays_now, init_zdepth_now, valid_mask_now, latent, march_step=march_step_now, clamp_dist=clamp_dist, no_grad=no_grad, use_first_query_check=False, use_transform=use_transform)
# map down global info to (N)
sdf_list = torch.cat([sdf_list[:, valid_mask], sdf_list_now], 0)
points_list = torch.cat([points_list[:, valid_mask, :], points_list_now], 0)
zdepth_list_now = marching_zdepth_list_now + init_zdepth_now[valid_mask]
zdepth_list = torch.cat([zdepth_list[:, valid_mask], zdepth_list_now], 0)
# get the corresponding marching zdepth
marching_zdepth_list = zdepth_list - init_zdepth_original[valid_mask][None,:]
return sdf_list, marching_zdepth_list, points_list, valid_mask_render
def ray_marching(self, cam_pos, R, init_zdepth, valid_mask, latent, march_step=None, clamp_dist=0.1, no_grad=False, use_transform=True, ray_marching_type='recursive', split_type='raydepth'):
'''
ray marching function
Input:
- init_zdepth type: torch.Tensor (H*W)
- valid_mask type: torch.Tensor (H*W) with N valid entries
- split_type ['depth', 'raydepth'], which is the spliting strategy for pyramid recursive marching
Return:
- sdf_list type: torch.Tensor (march_step, N)
- marching_zdepth_list type: torch.Tensor (march_step, N)
- points_list type: torch.Tensor (march_step, N, 3)
- valid_mask_render type: torch.Tensor (N)
'''
if not (split_type in ['depth', 'raydepth']):
raise NotImplementedError
if ray_marching_type == 'trivial_non_parallel':
cam_rays = self.get_camera_rays(R)
return self.ray_marching_trivial_non_parallel(cam_pos, cam_rays, init_zdepth, valid_mask, latent, march_step=None, clamp_dist=clamp_dist, no_grad=no_grad, use_transform=use_transform)
elif ray_marching_type == 'trivial':
cam_rays = self.get_camera_rays(R)
return self.ray_marching_trivial(cam_pos, cam_rays, init_zdepth, valid_mask, latent, march_step=None, clamp_dist=clamp_dist, no_grad=no_grad, use_transform=use_transform)
elif ray_marching_type == 'recursive':
cam_rays = self.get_camera_rays(R)
            return self.ray_marching_recursive(cam_pos, cam_rays, init_zdepth, valid_mask, latent, march_step=march_step, clamp_dist=clamp_dist, no_grad=no_grad, use_transform=use_transform)
elif ray_marching_type == 'pyramid_recursive':
            return self.ray_marching_pyramid_recursive(cam_pos, R, valid_mask, latent, march_step=march_step, clamp_dist=clamp_dist, no_grad=no_grad, use_transform=use_transform, split_type=split_type)
else:
raise ValueError('Error! Invalid type of ray marching: {}.'.format(ray_marching_type))
def render_depth(self, latent, R, T, clamp_dist=0.1, sample_index_type='min_abs', profile=False, no_grad=False, no_grad_depth=False, no_grad_mask=False, no_grad_camera=False, ray_marching_type='recursive', use_transform=True):
if no_grad:
no_grad_depth, no_grad_mask, no_grad_camera = True, True, True
# Getting camera pos and rays
print(f'Getting camera pos and rays')
cam_pos = self.get_camera_location(R, T)
cam_rays = self.get_camera_rays(R)
dist = self.get_distance_from_origin(cam_pos, cam_rays)
print(f'| cam_pos = {cam_pos.shape}, {cam_pos}')
print(f'| cam_rays = {cam_rays.shape}, ray viewing direction. For our orthogonal case, we only need a single direction!')
dbgtmpvar_camraylen = (cam_rays**2).sum(0)
print(f'| cam_rays lengths: min = {dbgtmpvar_camraylen.min()}, max = {dbgtmpvar_camraylen.max()}, so these are unit vectors.')
print(f'| dist = {dist.shape}')
profiler = Profiler(silent = not profile)
# initialization on the unit sphere
h, w = self.img_hw
print(f'Getting initial zdepth and valid mask')
init_zdepth, valid_mask = self.get_intersections_with_unit_spheres(cam_pos, cam_rays)
print(f'| init_zdepth = {init_zdepth.shape}')
print(f'| valid_mask = {valid_mask.shape}')
profiler.report_process('[DEPTH] initialization time')
# ray marching
print(f'Marching rays. Clearly the most important marching step happens here.')
sdf_list, marching_zdepth_list, points_list, valid_mask_render = self.ray_marching(cam_pos, R, init_zdepth, valid_mask, latent, clamp_dist=clamp_dist, no_grad=no_grad_camera, ray_marching_type=ray_marching_type, use_transform=use_transform)
print(f'| sdf_list = {sdf_list.shape}, the sdfs at all 50 marching steps')
print(f'| marching_zdepth_list = {marching_zdepth_list.shape}, the depth at all 50 marching steps')
print(f'| points_list = {points_list.shape}, the points at all 50 marching steps')
print(f'| valid_mask_render = {valid_mask_render.shape}, only a single image')
profiler.report_process('[DEPTH] ray marching time')
        # get differentiable samples
min_sdf_sample = self.get_min_sdf_sample(sdf_list, points_list, latent, index_type='min_abs', clamp_dist=clamp_dist, profile=profile, no_grad=no_grad_mask)
marching_zdepth = self.get_sample_on_marching_zdepth_along_ray(marching_zdepth_list, sdf_list, points_list, cam_rays[:, valid_mask], latent, use_uniform_sample=False, index_type=sample_index_type, clamp_dist=clamp_dist, profile=profile, no_grad=no_grad_depth)
profiler.report_process('[DEPTH] re-sampling time')
# generate output
min_sdf_sample_new = torch.zeros_like(valid_mask).float() # (H, W)
min_sdf_sample_new.requires_grad = True
min_sdf_sample_new = self.copy_index(min_sdf_sample_new, valid_mask, min_sdf_sample)
min_sdf_sample_new = self.copy_index(min_sdf_sample_new, ~valid_mask, dist[~valid_mask] + self.threshold - self.radius) # help handle camera gradient
## get zdepth
Zdepth = torch.ones_like(self.imgmap_init) * 1e11 # (H, W)
Zdepth.requires_grad = True
src_zdepth = init_zdepth[valid_mask] + marching_zdepth # (N)
Zdepth = self.copy_index(Zdepth, valid_mask, src_zdepth)
Zdepth = Zdepth.reshape(-1) # (H*W)
## update valid_mask
valid_mask = valid_mask.clone()
valid_mask[valid_mask] = valid_mask_render
profiler.report_process('[DEPTH] finalize time\t')
if no_grad_depth:
Zdepth = Zdepth.detach()
return Zdepth, valid_mask, min_sdf_sample_new # (H*W), (H*W), (H*W)
def render_normal(self, latent, R, T, Zdepth, valid_mask, clamp_dist=0.1, MAX_POINTS=100000, no_grad=False, normalize=True, use_transform=True):
cam_pos = self.get_camera_location(R, T)
cam_rays = self.get_camera_rays(R)
h, w = self.img_hw
Znormal = torch.zeros_like(self.imgmap_init)[None,:,:].repeat(3, 1, 1) # (3, H, W)
Znormal.requires_grad = True
# initialization
valid_cam_rays = cam_rays[:, valid_mask]
valid_zdepth = Zdepth[valid_mask]
if valid_zdepth.shape[0] == 0:
return Znormal.reshape(3, -1) # (3, H*W)
# compute normal
points = self.generate_point_samples(cam_pos, valid_cam_rays, valid_zdepth, has_zdepth_grad=False, inv_transform=use_transform)
gradient = decode_sdf_gradient(self.decoder, latent, points.transpose(1,0), clamp_dist=clamp_dist, no_grad=no_grad, MAX_POINTS=MAX_POINTS) # (N, 3)
gradient = gradient.transpose(1,0) # (3, N)
if normalize:
valid_normal_untransformed = self.normalize_vectors(gradient) # (3, N)
else:
valid_normal_untransformed = gradient
valid_normal = self.transform_points(valid_normal_untransformed)
# generate output
Znormal = self.copy_index(Znormal.permute(1,2,0), valid_mask, valid_normal.transpose(1,0)) # (H, W, 3)
Znormal = Znormal.reshape(-1, 3).transpose(1,0)
if no_grad:
Znormal = Znormal.detach()
return Znormal # (3, H*W)
def forward_sampling(self, latent, R, T, Zdepth, valid_mask, clamp_dist=0.1, num_forward_sampling=1, no_grad=False, use_transform=True):
'''
To sample forward along the ray (sampling inside)
This function should be used when the latent space is not pretrained.
Returns:
- inside_samples torch.Tensor (H*W, num_forward_sampling) (sdf + offset, should be negative)
'''
assert (num_forward_sampling > 0)
cam_pos = self.get_camera_location(R, T)
cam_rays = self.get_camera_rays(R)
# initialization
h, w = self.img_hw
        inside_samples = torch.zeros_like(self.imgmap_init).reshape(-1)[:,None].repeat(1, num_forward_sampling) # (H*W, num_forward_sampling)
valid_cam_rays = cam_rays[:, valid_mask]
valid_zdepth = Zdepth[valid_mask]
if valid_zdepth.shape[0] == 0:
            return inside_samples # (H*W, num_forward_sampling)
grid_list = 0.5 * clamp_dist * (torch.arange(num_forward_sampling).float() + 1) / num_forward_sampling
        if cam_pos.get_device() != -1:
            grid_list = grid_list.to(cam_pos.device) # Tensor.to() is not in-place; rebind so the offsets actually live on the same device
inside_samples_list = []
for idx in range(num_forward_sampling):
grid = grid_list[idx]
points = self.generate_point_samples(cam_pos, valid_cam_rays, valid_zdepth + grid, has_zdepth_grad=False, inv_transform=use_transform)
sdf = decode_sdf(self.decoder, latent, points.transpose(1,0), clamp_dist=None, no_grad=no_grad).squeeze(-1)
inside_samples_list.append(sdf[:,None] + grid)
inside_samples[valid_mask] = torch.cat(inside_samples_list, 1)
return inside_samples
def render(self, latent, R, T, clamp_dist=0.1, sample_index_type='min_abs', profile=False, no_grad=False, no_grad_depth=False, no_grad_normal=False, no_grad_mask=False, no_grad_camera=False, normalize_normal=True, use_transform=True, ray_marching_type='pyramid_recursive', num_forward_sampling=0):
'''
differentiable rendering.
Input:
- latent type torch.Tensor (1, latent_size)
- R type torch.Tensor (3,3)
- T type torch.Tensor (3)
Return:
- Zdepth type torch.Tensor (H, W) - rendered depth
- Znormal type torch.Tensor (H, W, 3) - rendered normal
        - valid_mask type torch.Tensor (H, W) - rendered silhouette
- min_sdf_sample type torch.Tensor (H, W) - minimum_depth_sample
'''
if no_grad:
no_grad_depth, no_grad_normal, no_grad_mask, no_grad_camera = True, True, True, True
profiler = Profiler(silent = not profile)
h, w = self.img_hw
profiler.report_process('\ninitialization time')
# render depth
Zdepth, valid_mask, min_abs_query = self.render_depth(latent, R, T, clamp_dist=clamp_dist, sample_index_type=sample_index_type, profile=profile, no_grad=no_grad, no_grad_depth=no_grad_depth, no_grad_mask=no_grad_mask, no_grad_camera=no_grad_camera, ray_marching_type=ray_marching_type, use_transform=use_transform) # (H*W), (H*W), (H*W)
profiler.report_process('render depth time')
depth = torch.ones_like(Zdepth) * 1e11
depth[valid_mask] = Zdepth[valid_mask].clone() * self.calib_map[valid_mask]
depth = depth.reshape(h, w)
# render normal
if self.use_depth2normal:
f_x_pix = self.K.detach().cpu().numpy()[0,0]
f_y_pix = self.K.detach().cpu().numpy()[1,1]
normal = depth2normal(depth, f_x_pix, f_y_pix)
else:
normal = self.render_normal(latent, R, T, Zdepth, valid_mask, clamp_dist=clamp_dist, no_grad=no_grad_normal, normalize=normalize_normal, use_transform=use_transform) # (3, H*W)
normal = torch.matmul(R, normal) # (3, H*W)
normal[0,:] = normal[0,:].clone() * (-1) # transformed the direction to align with rendering engine (left-hand sided).
normal = normal.reshape(3, h, w).permute(1,2,0)
profiler.report_process('render normal time')
# (optional) forward sampling inside the surface
if num_forward_sampling != 0:
inside_samples = self.forward_sampling(latent, R, T, Zdepth, valid_mask, clamp_dist=clamp_dist, num_forward_sampling=num_forward_sampling, use_transform=use_transform) # (H*W, k)
inside_samples = inside_samples.reshape(h, w, num_forward_sampling)
profiler.report_process('forward sampling time')
# reshape mask and return
binary_mask = valid_mask.reshape(h, w).type(torch.uint8)
min_abs_query = min_abs_query.reshape(h, w)
profiler.report_process('finalization time')
profiler.report_all('total time')
if profile:
pdb.set_trace()
if num_forward_sampling == 0:
return depth, normal, binary_mask, min_abs_query
else:
return depth, normal, binary_mask, min_abs_query, inside_samples
if __name__ == '__main__':
pass
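# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module). `renderer` stands
# for an instance of the renderer class defined above; `latent_size` and the
# camera pose values are placeholders supplied by the surrounding code.
#
#   latent = torch.randn(1, latent_size)        # shape expected by render()
#   R = torch.eye(3)                            # camera rotation, (3, 3)
#   T = torch.tensor([0.0, 0.0, 2.5])           # camera translation, (3,)
#   depth, normal, mask, min_sdf = renderer.render(
#       latent, R, T,
#       clamp_dist=0.1,
#       ray_marching_type='pyramid_recursive',  # or 'recursive' / 'trivial'
#       no_grad=True)                           # pure inference, no gradients
# ---------------------------------------------------------------------------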
| 48.970874
| 344
| 0.64086
|
f917bb82c52df5f1f895cc458b4760919a6bc36f
| 281
|
py
|
Python
|
tf2utils/market.py
|
offish/tf2-rgb-values
|
7a526e9918afef9f79b7be37e81cd374df66c57d
|
[
"MIT"
] | 1
|
2019-06-16T15:55:38.000Z
|
2019-06-16T15:55:38.000Z
|
tf2utils/market.py
|
offish/tf2utils
|
7a526e9918afef9f79b7be37e81cd374df66c57d
|
[
"MIT"
] | null | null | null |
tf2utils/market.py
|
offish/tf2utils
|
7a526e9918afef9f79b7be37e81cd374df66c57d
|
[
"MIT"
] | 2
|
2019-03-14T18:58:32.000Z
|
2019-04-12T11:02:14.000Z
|
from tf2utils.methods import request
def get_price(name: str, cur: int = 1, appid: int = 440) -> dict:
overview = 'https://steamcommunity.com/market/priceoverview/'
params = {'currency': cur, 'appid': appid, 'market_hash_name': name}
return request(overview, params)
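# Minimal usage sketch (not part of the original module). The item name below is
# just a well-known market_hash_name; the fields of the returned dict come from
# Steam's priceoverview endpoint and may vary.
if __name__ == '__main__':
    print(get_price('Mann Co. Supply Crate Key'))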
| 35.125
| 72
| 0.697509
|
5578ddf46fad16683a050f25237e829ec5f2c212
| 16,393
|
py
|
Python
|
django/utils/feedgenerator.py
|
benjaoming/django
|
6dbe979b4d9396e1b307c7d27388c97c13beb21c
|
[
"BSD-3-Clause"
] | 2
|
2015-01-21T15:45:07.000Z
|
2015-02-21T02:38:13.000Z
|
django/utils/feedgenerator.py
|
HenriqueLR/django
|
d1ca70110f49f0be90206c8da516ac16aebc8c75
|
[
"BSD-3-Clause"
] | null | null | null |
django/utils/feedgenerator.py
|
HenriqueLR/django
|
d1ca70110f49f0be90206c8da516ac16aebc8c75
|
[
"BSD-3-Clause"
] | 1
|
2020-05-25T08:55:19.000Z
|
2020-05-25T08:55:19.000Z
|
"""
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title="Poynter E-Media Tidbits",
... link="http://www.poynter.org/column.asp?id=31",
... description="A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language="en",
... )
>>> feed.add_item(
... title="Hello",
... link="http://www.holovaty.com/test/",
... description="Testing."
... )
>>> with open('test.rss', 'w') as fp:
... feed.write(fp, 'utf-8')
For definitions of the different versions of RSS, see:
http://web.archive.org/web/20110718035220/http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
from __future__ import unicode_literals
import datetime
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import force_text, iri_to_uri
from django.utils import datetime_safe
from django.utils import six
from django.utils.six import StringIO
from django.utils.six.moves.urllib.parse import urlparse
from django.utils.timezone import is_aware
def rfc2822_date(date):
# We can't use strftime() because it produces locale-dependent results, so
# we have to map english month and day names manually
months = ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec',)
days = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
# We do this ourselves to be timezone aware, email.Utils is not tz aware.
dow = days[date.weekday()]
month = months[date.month - 1]
time_str = date.strftime('%s, %%d %s %%Y %%H:%%M:%%S ' % (dow, month))
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
if is_aware(date):
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d%02d' % (hour, minute)
else:
return time_str + '-0000'
def rfc3339_date(date):
# Support datetime objects older than 1900
date = datetime_safe.new_datetime(date)
time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
if six.PY2: # strftime returns a byte string in Python 2
time_str = time_str.decode('utf-8')
if is_aware(date):
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds // 60)
hour, minute = divmod(timezone, 60)
return time_str + '%+03d:%02d' % (hour, minute)
else:
return time_str + 'Z'
def get_tag_uri(url, date):
"""
Creates a TagURI.
See http://web.archive.org/web/20110514113830/http://diveintomark.org/archives/2004/05/28/howto-atom-id
"""
bits = urlparse(url)
d = ''
if date is not None:
d = ',%s' % datetime_safe.new_datetime(date).strftime('%Y-%m-%d')
return 'tag:%s%s:%s/%s' % (bits.hostname, d, bits.path, bits.fragment)
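# Worked example (values are hypothetical, not taken from the Django test suite):
#   get_tag_uri('http://example.com/posts/1#intro', datetime.datetime(2009, 5, 1))
#   -> 'tag:example.com,2009-05-01:/posts/1/intro'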
class SyndicationFeed(object):
"Base class for all syndication feeds. Subclasses should provide write()"
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [force_text(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
self.feed = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'language': to_unicode(language),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'subtitle': to_unicode(subtitle),
'categories': categories or (),
'feed_url': iri_to_uri(feed_url),
'feed_copyright': to_unicode(feed_copyright),
'id': feed_guid or link,
'ttl': ttl,
}
self.feed.update(kwargs)
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, unique_id_is_permalink=None, enclosure=None,
categories=(), item_copyright=None, ttl=None, updateddate=None, **kwargs):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate and updateddate, which are datetime.datetime
objects, and enclosure, which is an instance of the Enclosure class.
"""
to_unicode = lambda s: force_text(s, strings_only=True)
if categories:
categories = [to_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_text(ttl)
item = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'updateddate': updateddate,
'comments': to_unicode(comments),
'unique_id': to_unicode(unique_id),
'unique_id_is_permalink': unique_id_is_permalink,
'enclosure': enclosure,
'categories': categories or (),
'item_copyright': to_unicode(item_copyright),
'ttl': ttl,
}
item.update(kwargs)
self.items.append(item)
def num_items(self):
return len(self.items)
def root_attributes(self):
"""
Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write().
"""
return {}
def add_root_elements(self, handler):
"""
Add elements in the root (i.e. feed/channel) element. Called
from write().
"""
pass
def item_attributes(self, item):
"""
Return extra attributes to place on each item (i.e. item/entry) element.
"""
return {}
def add_item_elements(self, handler, item):
"""
Add elements on each item (i.e. item/entry) element.
"""
pass
def write(self, outfile, encoding):
"""
Outputs the feed in the given encoding to outfile, which is a file-like
object. Subclasses should override this.
"""
raise NotImplementedError('subclasses of SyndicationFeed must provide a write() method')
def writeString(self, encoding):
"""
Returns the feed in the given encoding as a string.
"""
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
"""
Returns the latest item's pubdate or updateddate. If no items
have either of these attributes this returns the current date/time.
"""
latest_date = None
date_keys = ('updateddate', 'pubdate')
for item in self.items:
for date_key in date_keys:
item_date = item.get(date_key)
if item_date:
if latest_date is None or item_date > latest_date:
latest_date = item_date
return latest_date or datetime.datetime.now()
class Enclosure(object):
"Represents an RSS enclosure"
def __init__(self, url, length, mime_type):
"All args are expected to be Python Unicode objects"
self.length, self.mime_type = length, mime_type
self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
mime_type = 'application/rss+xml; charset=utf-8'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement("rss", self.rss_attributes())
handler.startElement("channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement("rss")
def rss_attributes(self):
return {"version": self._version,
"xmlns:atom": "http://www.w3.org/2005/Atom"}
def write_items(self, handler):
for item in self.items:
handler.startElement('item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("item")
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", self.feed['link'])
handler.addQuickElement("description", self.feed['description'])
if self.feed['feed_url'] is not None:
handler.addQuickElement("atom:link", None,
{"rel": "self", "href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement("language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement("category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("copyright", self.feed['feed_copyright'])
handler.addQuickElement("lastBuildDate", rfc2822_date(self.latest_post_date()))
if self.feed['ttl'] is not None:
handler.addQuickElement("ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement("channel")
class RssUserland091Feed(RssFeed):
_version = "0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = "2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", item['link'])
if item['description'] is not None:
handler.addQuickElement("description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement("author", "%s (%s)" %
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement("author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement("dc:creator", item["author_name"],
{"xmlns:dc": "http://purl.org/dc/elements/1.1/"})
if item['pubdate'] is not None:
handler.addQuickElement("pubDate", rfc2822_date(item['pubdate']))
if item['comments'] is not None:
handler.addQuickElement("comments", item['comments'])
if item['unique_id'] is not None:
guid_attrs = {}
if isinstance(item.get('unique_id_is_permalink'), bool):
guid_attrs['isPermaLink'] = str(
item['unique_id_is_permalink']).lower()
handler.addQuickElement("guid", item['unique_id'], guid_attrs)
if item['ttl'] is not None:
handler.addQuickElement("ttl", item['ttl'])
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("enclosure", '',
{"url": item['enclosure'].url, "length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
mime_type = 'application/atom+xml; charset=utf-8'
ns = "http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement('feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement("feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {"xmlns": self.ns, "xml:lang": self.feed['language']}
else:
return {"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement("title", self.feed['title'])
handler.addQuickElement("link", "", {"rel": "alternate", "href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement("link", "", {"rel": "self", "href": self.feed['feed_url']})
handler.addQuickElement("id", self.feed['id'])
handler.addQuickElement("updated", rfc3339_date(self.latest_post_date()))
if self.feed['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement("email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement("uri", self.feed['author_link'])
handler.endElement("author")
if self.feed['subtitle'] is not None:
handler.addQuickElement("subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement("category", "", {"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement("rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement("entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement("entry")
def add_item_elements(self, handler, item):
handler.addQuickElement("title", item['title'])
handler.addQuickElement("link", "", {"href": item['link'], "rel": "alternate"})
if item['pubdate'] is not None:
handler.addQuickElement('published', rfc3339_date(item['pubdate']))
if item['updateddate'] is not None:
handler.addQuickElement('updated', rfc3339_date(item['updateddate']))
# Author information.
if item['author_name'] is not None:
handler.startElement("author", {})
handler.addQuickElement("name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement("email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement("uri", item['author_link'])
handler.endElement("author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement("id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement("summary", item['description'], {"type": "html"})
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement("link", '',
{"rel": "enclosure",
"href": item['enclosure'].url,
"length": item['enclosure'].length,
"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement("category", "", {"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement("rights", item['item_copyright'])
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
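# Illustrative sketch (not part of Django): the module docstring shows the RSS
# flavor; the Atom classes are driven the same way. All values below are
# placeholders.
def _example_atom_feed():
    feed = Atom1Feed(
        title="Example feed",
        link="http://example.com/",
        description="An illustrative Atom feed.",
        language="en",
    )
    feed.add_item(
        title="First entry",
        link="http://example.com/entries/1/",
        description="Body of the first entry.",
        pubdate=datetime.datetime(2015, 1, 1, 12, 0, 0),
    )
    return feed.writeString('utf-8')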
| 39.501205
| 107
| 0.61264
|
5eb6ab8a4d9630137f645dbb48cd5f46cb276d6f
| 2,324
|
py
|
Python
|
examples/torch/trpo_pendulum_ray_sampler.py
|
thanhkaist/garage
|
1d840df357282a675b8fce839bb0e5f72a8abba9
|
[
"MIT"
] | 7
|
2022-02-01T03:02:24.000Z
|
2022-02-10T12:54:05.000Z
|
examples/torch/trpo_pendulum_ray_sampler.py
|
thanhkaist/garage
|
1d840df357282a675b8fce839bb0e5f72a8abba9
|
[
"MIT"
] | null | null | null |
examples/torch/trpo_pendulum_ray_sampler.py
|
thanhkaist/garage
|
1d840df357282a675b8fce839bb0e5f72a8abba9
|
[
"MIT"
] | 2
|
2022-02-03T03:33:25.000Z
|
2022-02-10T12:54:07.000Z
|
#!/usr/bin/env python3
"""This is an example to train a task with TRPO algorithm (PyTorch).
Uses Ray sampler instead of OnPolicyVectorizedSampler.
Here it runs InvertedDoublePendulum-v2 environment with 100 iterations.
"""
import numpy as np
import ray
import torch
from garage import wrap_experiment
from garage.envs import GarageEnv
from garage.experiment import deterministic, LocalRunner
from garage.sampler import RaySampler
from garage.torch.algos import TRPO
from garage.torch.policies import GaussianMLPPolicy
from garage.torch.value_functions import GaussianMLPValueFunction
@wrap_experiment(snapshot_mode='none')
def trpo_pendulum_ray_sampler(ctxt=None, seed=1):
"""Set up environment and algorithm and run the task.
Args:
ctxt (garage.experiment.ExperimentContext): The experiment
configuration used by LocalRunner to create the snapshotter.
seed (int): Used to seed the random number generator to produce
determinism.
"""
# Since this is an example, we are running ray in a reduced state.
# One can comment this line out in order to run ray at full capacity
ray.init(memory=52428800,
object_store_memory=78643200,
ignore_reinit_error=True,
log_to_driver=False,
include_webui=False)
deterministic.set_seed(seed)
env = GarageEnv(env_name='InvertedDoublePendulum-v2')
runner = LocalRunner(ctxt)
policy = GaussianMLPPolicy(env.spec,
hidden_sizes=[32, 32],
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
value_function = GaussianMLPValueFunction(env_spec=env.spec,
hidden_sizes=(32, 32),
hidden_nonlinearity=torch.tanh,
output_nonlinearity=None)
algo = TRPO(env_spec=env.spec,
policy=policy,
value_function=value_function,
max_path_length=100,
discount=0.99,
center_adv=False)
runner.setup(algo, env, sampler_cls=RaySampler)
runner.train(n_epochs=100, batch_size=1024)
s = np.random.randint(0, 1000)
trpo_pendulum_ray_sampler(seed=s)
| 35.212121
| 77
| 0.654905
|
a6f61c5c223a5c3c8e6af8bf7f15539d0a89ff1c
| 1,825
|
py
|
Python
|
scripts/bse.py
|
reckbo/ppl
|
916d96188a43bbc5915020edfa12f14895b5f66c
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/bse.py
|
reckbo/ppl
|
916d96188a43bbc5915020edfa12f14895b5f66c
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/bse.py
|
reckbo/ppl
|
916d96188a43bbc5915020edfa12f14895b5f66c
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import operator
from util import logfmt
from plumbum import local, cli, FG
from plumbum.cmd import unu
import logging
logger = logging.getLogger()
logging.basicConfig(level=logging.DEBUG, format=logfmt(__file__))
def get_grad_dirs(hdr):
return [map(float, line.split(b'=')[1].split())
for line in hdr.splitlines()
if b'DWMRI_gradient' in line]
def get_bval(hdr):
for line in hdr.splitlines():
if b'b-value' in line:
return float(line.split(b'=')[1])
def get_b0_index(hdr):
bval = get_bval(hdr)
bvals = [norm(gdir)*bval for gdir in get_grad_dirs(hdr)]
idx, min_bval = min(enumerate(bvals), key=operator.itemgetter(1))
logger.info("Found B0 of " + str(min_bval) + " at index " + str(idx))
return idx
def norm(vector):
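    # Note: this returns the *squared* magnitude; that is sufficient here because it
    # is only used to rank gradient directions when locating the smallest b-value.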
return sum([v**2 for v in vector])
class App(cli.Application):
DESCRIPTION="Extracts the baseline (b0) from a nrrd DWI. Assumes \
the diffusion volumes are indexed by the last axis."
dwimask = cli.SwitchAttr(['-m','--mask'], cli.ExistingFile, help='DWI mask' ,mandatory=False)
dwi = cli.SwitchAttr(['-i','--infile'], cli.ExistingFile, help='DWI nrrd image',mandatory=True)
out = cli.SwitchAttr(['-o', '--out'], help='Extracted B0 nrrd image', mandatory=True)
def main(self):
hdr = unu("head", self.dwi)[:-1]
idx = get_b0_index(hdr)
slicecmd = unu["slice", "-a", "3", "-p", str(idx) ,"-i", self.dwi]
gzipcmd = unu["save", "-e", "gzip", "-f", "nrrd", "-o", self.out]
if self.dwimask:
maskcmd = unu["3op", "ifelse", "-w", "1", self.dwimask, "-", "0"]
(slicecmd | maskcmd | gzipcmd) & FG
else:
(slicecmd | gzipcmd) & FG
if __name__ == '__main__':
App.run()
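# Example invocation (file names are placeholders):
#   ./bse.py -i dwi.nrrd -m dwimask.nrrd -o dwi-b0.nrrd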
| 33.796296
| 99
| 0.62137
|
bb75657b3bc73a7bff61c3ea4edfdb7c3c976aa7
| 2,785
|
py
|
Python
|
scripts/identify_neuron_classes/deprecated/identify_2nd3rd4thOrder_sensory_synapseThres.py
|
mwinding/connectome_analysis
|
dbc747290891805863c9481921d8080dc2043d21
|
[
"MIT"
] | 1
|
2021-06-10T05:48:16.000Z
|
2021-06-10T05:48:16.000Z
|
identify_neuron_classes/deprecated/identify_2nd3rd4thOrder_sensory_synapseThres.py
|
mwinding/connectome_tools
|
0392f6b1e924194299ea7760d8386eb01f3371a3
|
[
"MIT"
] | 2
|
2022-01-21T11:48:45.000Z
|
2022-01-21T11:48:45.000Z
|
scripts/identify_neuron_classes/deprecated/identify_2nd3rd4thOrder_sensory_synapseThres.py
|
mwinding/connectome_analysis
|
dbc747290891805863c9481921d8080dc2043d21
|
[
"MIT"
] | 1
|
2022-02-02T15:39:52.000Z
|
2022-02-02T15:39:52.000Z
|
#%%
import os
try:
os.chdir('/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/')
print(os.getcwd())
except:
pass
#%%
import sys
sys.path.append("/Volumes/GoogleDrive/My Drive/python_code/connectome_tools/")
import pandas as pd
import numpy as np
import connectome_tools.process_matrix as promat
import math
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import pymaid
from pymaid_creds import url, name, password, token
# convert pair-sorted brain/sensories matrix to binary matrix based on synapse threshold
matrix_ad = pd.read_csv('data/axon-dendrite.csv', header=0, index_col=0)
matrix_dd = pd.read_csv('data/dendrite-dendrite.csv', header=0, index_col=0)
matrix_aa = pd.read_csv('data/axon-axon.csv', header=0, index_col=0)
matrix_da = pd.read_csv('data/dendrite-axon.csv', header=0, index_col=0)
matrix = matrix_ad + matrix_dd + matrix_aa + matrix_da
# the columns are string by default and the indices int; now both are int
matrix_ad.columns = pd.to_numeric(matrix_ad.columns)
matrix_dd.columns = pd.to_numeric(matrix_dd.columns)
matrix_aa.columns = pd.to_numeric(matrix_aa.columns)
matrix_da.columns = pd.to_numeric(matrix_da.columns)
matrix.columns = pd.to_numeric(matrix.columns)
# import pair list CSV, manually generated
pairs = pd.read_csv('data/pairs-2020-05-08.csv', header = 0)
paired = pairs.values.flatten()
# %%
rm = pymaid.CatmaidInstance(url, token, name, password)
# pull sensory annotations and then pull associated skids
sensories = pymaid.get_annotated('mw brain inputs')
# %%
testinput = pymaid.get_skids_by_annotation("mw ORN 2nd_order PN")
# identify pairs in a-d graph
def get_paired_skids(skid, pairList):
# returns paired skids in array [left, right]; can input either left or right skid of a pair to identify
if(skid in pairList["leftid"].values):
pair_right = pairList["rightid"][pairList["leftid"]==skid].iloc[0]
pair_left = skid
if(skid in pairList["rightid"].values):
pair_left = pairList["leftid"][pairList["rightid"]==skid].iloc[0]
pair_right = skid
if((skid in pairList["leftid"].values) == False and (skid in pairList["rightid"].values) == False):
print("skid %i is not in paired list" % (skid))
return(0)
return([pair_left, pair_right])
def extract_pairs_from_list(skids, pairList):
pairs = []
for i in skids:
if(int(i) in pairList["leftid"].values):
pair = get_paired_skids(int(i), pairList)
pairs.append({'leftid': pair[0], 'rightid': pair[1]})
pairs = pd.DataFrame(pairs)
return(pairs)
testpairs = extract_pairs_from_list(testinput, pairs)
# look for downstream pairs in a-d graph
# identify LNs and descending in downstream pairs
# repeat
# %%
| 28.71134
| 104
| 0.721724
|
5f5fbb2c39e4950cc695eef60a8f1e98685126b3
| 2,521
|
py
|
Python
|
examples/dfp/v201805/proposal_line_item_service/update_proposal_line_items.py
|
christineyi3898/googleads-python-lib
|
cd707dc897b93cf1bbb19355f7424e7834e7fb55
|
[
"Apache-2.0"
] | 1
|
2019-10-21T04:10:22.000Z
|
2019-10-21T04:10:22.000Z
|
examples/dfp/v201805/proposal_line_item_service/update_proposal_line_items.py
|
christineyi3898/googleads-python-lib
|
cd707dc897b93cf1bbb19355f7424e7834e7fb55
|
[
"Apache-2.0"
] | null | null | null |
examples/dfp/v201805/proposal_line_item_service/update_proposal_line_items.py
|
christineyi3898/googleads-python-lib
|
cd707dc897b93cf1bbb19355f7424e7834e7fb55
|
[
"Apache-2.0"
] | 1
|
2019-10-21T04:10:51.000Z
|
2019-10-21T04:10:51.000Z
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates a proposal line item's notes.
To determine which proposal line items exist,
run get_all_proposal_line_items.py.
"""
# Import appropriate modules from the client library.
from googleads import dfp
# Set id of the proposal line item to update.
PROPOSAL_LINE_ITEM_ID = 'INSERT_PROPOSAL_LINE_ITEM_ID_HERE'
def main(client, proposal_line_item_id):
# Initialize appropriate service.
proposal_line_item_service = client.GetService(
'ProposalLineItemService', version='v201805')
# Create statement to select a proposal line item.
statement = (dfp.StatementBuilder()
.Where('id = :id')
.WithBindVariable('id', long(proposal_line_item_id))
.Limit(1))
# Get proposal line items by statement.
response = proposal_line_item_service.getProposalLineItemsByStatement(
statement.ToStatement())
if 'results' in response and len(response['results']):
    # Update the proposal line item's notes field.
proposal_line_item = response['results'][0]
proposal_line_item['internalNotes'] = 'Proposal line is ready to submit.'
# Update proposal line items remotely.
proposal_line_items = proposal_line_item_service.updateProposalLineItems(
[proposal_line_item])
# Display results.
if proposal_line_items:
for proposal_line_item in proposal_line_items:
print ('Line item with id "%s", belonging to proposal id "%s" and,'
' named "%s" was updated.' % (
proposal_line_item['id'], proposal_line_item['proposalId'],
proposal_line_item['name']))
else:
print 'No proposal line items were updated.'
else:
print 'No proposal line items found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, PROPOSAL_LINE_ITEM_ID)
| 34.534247
| 78
| 0.718366
|
7cd0482c75d7157df218071a2e22ce2904d094b6
| 5,130
|
py
|
Python
|
youtube_dl/extractor/appletrailers.py
|
Logmytech/youtube-dl-QT
|
1497297719a95c4f70fbfa32e0fa4e38cdd475dc
|
[
"MIT"
] | null | null | null |
youtube_dl/extractor/appletrailers.py
|
Logmytech/youtube-dl-QT
|
1497297719a95c4f70fbfa32e0fa4e38cdd475dc
|
[
"MIT"
] | null | null | null |
youtube_dl/extractor/appletrailers.py
|
Logmytech/youtube-dl-QT
|
1497297719a95c4f70fbfa32e0fa4e38cdd475dc
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
int_or_none,
)
class AppleTrailersIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?trailers\.apple\.com/trailers/(?P<company>[^/]+)/(?P<movie>[^/]+)'
_TEST = {
"url": "http://trailers.apple.com/trailers/wb/manofsteel/",
"playlist": [
{
"md5": "d97a8e575432dbcb81b7c3acb741f8a8",
"info_dict": {
"id": "manofsteel-trailer4",
"ext": "mov",
"duration": 111,
"title": "Trailer 4",
"upload_date": "20130523",
"uploader_id": "wb",
},
},
{
"md5": "b8017b7131b721fb4e8d6f49e1df908c",
"info_dict": {
"id": "manofsteel-trailer3",
"ext": "mov",
"duration": 182,
"title": "Trailer 3",
"upload_date": "20130417",
"uploader_id": "wb",
},
},
{
"md5": "d0f1e1150989b9924679b441f3404d48",
"info_dict": {
"id": "manofsteel-trailer",
"ext": "mov",
"duration": 148,
"title": "Trailer",
"upload_date": "20121212",
"uploader_id": "wb",
},
},
{
"md5": "5fe08795b943eb2e757fa95cb6def1cb",
"info_dict": {
"id": "manofsteel-teaser",
"ext": "mov",
"duration": 93,
"title": "Teaser",
"upload_date": "20120721",
"uploader_id": "wb",
},
},
]
}
_JSON_RE = r'iTunes.playURL\((.*?)\);'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
movie = mobj.group('movie')
uploader_id = mobj.group('company')
playlist_url = compat_urlparse.urljoin(url, 'includes/playlists/itunes.inc')
def fix_html(s):
s = re.sub(r'(?s)<script[^<]*?>.*?</script>', '', s)
s = re.sub(r'<img ([^<]*?)>', r'<img \1/>', s)
            # The ' in the onClick attributes is not escaped, so pages such as
            # http://trailers.apple.com/trailers/wb/gravity/ could not be parsed otherwise
def _clean_json(m):
return 'iTunes.playURL(%s);' % m.group(1).replace('\'', ''')
s = re.sub(self._JSON_RE, _clean_json, s)
s = '<html>%s</html>' % s
return s
doc = self._download_xml(playlist_url, movie, transform_source=fix_html)
playlist = []
for li in doc.findall('./div/ul/li'):
on_click = li.find('.//a').attrib['onClick']
trailer_info_json = self._search_regex(self._JSON_RE,
on_click, 'trailer info')
trailer_info = json.loads(trailer_info_json)
title = trailer_info['title']
video_id = movie + '-' + re.sub(r'[^a-zA-Z0-9]', '', title).lower()
thumbnail = li.find('.//img').attrib['src']
upload_date = trailer_info['posted'].replace('-', '')
runtime = trailer_info['runtime']
m = re.search(r'(?P<minutes>[0-9]+):(?P<seconds>[0-9]{1,2})', runtime)
duration = None
if m:
duration = 60 * int(m.group('minutes')) + int(m.group('seconds'))
first_url = trailer_info['url']
trailer_id = first_url.split('/')[-1].rpartition('_')[0].lower()
settings_json_url = compat_urlparse.urljoin(url, 'includes/settings/%s.json' % trailer_id)
settings = self._download_json(settings_json_url, trailer_id, 'Downloading settings json')
formats = []
for format in settings['metadata']['sizes']:
# The src is a file pointing to the real video file
format_url = re.sub(r'_(\d*p.mov)', r'_h\1', format['src'])
formats.append({
'url': format_url,
'format': format['type'],
'width': int_or_none(format['width']),
'height': int_or_none(format['height']),
})
self._sort_formats(formats)
playlist.append({
'_type': 'video',
'id': video_id,
'title': title,
'formats': formats,
'duration': duration,
'thumbnail': thumbnail,
'upload_date': upload_date,
'uploader_id': uploader_id,
'user_agent': 'QuickTime compatible (youtube-dl)',
})
return {
'_type': 'playlist',
'id': movie,
'entries': playlist,
}
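# Usage note (illustrative): extractors like this one are not called directly; the
# youtube-dl CLI picks them from the URL, e.g.:
#   youtube-dl "http://trailers.apple.com/trailers/wb/manofsteel/"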
| 36.642857
| 104
| 0.45809
|
39371555d4c38577d65a0b21653aa060a6bfceb8
| 1,514
|
py
|
Python
|
setup.py
|
brighthive/jdx-client-api-python
|
ed94c578a6c9a5e9aadf8764439c22783ac1d9d5
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
brighthive/jdx-client-api-python
|
ed94c578a6c9a5e9aadf8764439c22783ac1d9d5
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
brighthive/jdx-client-api-python
|
ed94c578a6c9a5e9aadf8764439c22783ac1d9d5
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
JDX reference application API
This is a collection of schemas and endpoints for the various JDX, Concentric Sky facing REST endpoints, the schemas define an API contract of sorts between the request and response expectations of the JDX reference application. This API is to be mutually developed by Concentric Sky and BrightHive. # noqa: E501
The version of the OpenAPI document: 0.0.17
Contact: engineering@brighthive.io
Generated by: https://openapi-generator.tech
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "openapi-client"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="JDX reference application API",
author_email="engineering@brighthive.io",
url="",
keywords=["OpenAPI", "OpenAPI-Generator", "JDX reference application API"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
This is a collection of schemas and endpoints for the various JDX, Concentric Sky facing REST endpoints, the schemas define an API contract of sorts between the request and response expectations of the JDX reference application. This API is to be mutually developed by Concentric Sky and BrightHive. # noqa: E501
"""
)
| 36.926829
| 317
| 0.737781
|
7d73ee69c02b22c4deb234b6897e5132430f6142
| 1,551
|
py
|
Python
|
setup.py
|
Asmodius/sanic-scheduler
|
68c0329789dd2d8c8c9a82d33b4864c67ca94006
|
[
"MIT"
] | 7
|
2018-11-12T03:35:38.000Z
|
2021-01-13T04:30:11.000Z
|
setup.py
|
Asmodius/sanic-scheduler
|
68c0329789dd2d8c8c9a82d33b4864c67ca94006
|
[
"MIT"
] | 1
|
2018-12-19T10:07:57.000Z
|
2019-10-21T13:46:19.000Z
|
setup.py
|
Asmodius/sanic-scheduler
|
68c0329789dd2d8c8c9a82d33b4864c67ca94006
|
[
"MIT"
] | 2
|
2019-10-17T10:39:52.000Z
|
2019-12-03T10:57:32.000Z
|
import os
import sys
from setuptools import setup
def read(file_name):
return open(os.path.join(os.path.dirname(__file__), file_name)).read()
meta = {}
exec(read('sanic_scheduler/__meta__.py'), meta)
if sys.argv[-1] == 'publish':
os.system("rm dist/*.gz dist/*.whl")
os.system("git tag -a %s -m 'v%s'" % (meta['__version__'], meta['__version__']))
os.system("python setup.py sdist upload")
os.system("python setup.py bdist_wheel upload")
os.system("git push --tags")
sys.exit()
setup(
name=meta['__title__'],
version=meta['__version__'],
url=meta['__url__'],
license=meta['__license__'],
author=meta['__author__'],
author_email=meta['__email__'],
description=meta['__summary__'],
long_description=read('README.md'),
long_description_content_type='text/markdown',
platforms='all',
packages=['sanic_scheduler'],
# install_requires=['sanic'],
# tests_require=['pytest'],
# test_suite="tests.get_tests",
keywords='sanic schedule',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
)
| 29.826923
| 84
| 0.633785
|
3163ef859493bb996b1de8d13b84996c07045b22
| 22,484
|
py
|
Python
|
djangoProject1/venv/Lib/site-packages/owlready2/driver.py
|
meddhafer97/Risk-management-khnowledge-based-system
|
aba86734801a9e0313071e2c9931295e0da08ed0
|
[
"MIT"
] | null | null | null |
djangoProject1/venv/Lib/site-packages/owlready2/driver.py
|
meddhafer97/Risk-management-khnowledge-based-system
|
aba86734801a9e0313071e2c9931295e0da08ed0
|
[
"MIT"
] | null | null | null |
djangoProject1/venv/Lib/site-packages/owlready2/driver.py
|
meddhafer97/Risk-management-khnowledge-based-system
|
aba86734801a9e0313071e2c9931295e0da08ed0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Owlready2
# Copyright (C) 2017-2019 Jean-Baptiste LAMY
# LIMICS (Laboratoire d'informatique médicale et d'ingénierie des connaissances en santé), UMR_S 1142
# University Paris 13, Sorbonne paris-Cité, Bobigny, France
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from functools import lru_cache
import owlready2
from owlready2.base import *
from owlready2.base import _universal_datatype_2_abbrev
owlready2_optimized = None
try: import owlready2_optimized
except ImportError:
try:
# handle the case when owlready was installed with `setup.py develop`
    # (this resulted in a slightly different package structure)
from owlready2 import owlready2_optimized
except ImportError: pass
if not owlready2_optimized:
print("* Owlready2 * Warning: optimized Cython parser module 'owlready2_optimized' is not available, defaulting to slower Python implementation", file = sys.stderr)
INT_DATATYPES = { "http://www.w3.org/2001/XMLSchema#integer", "http://www.w3.org/2001/XMLSchema#byte", "http://www.w3.org/2001/XMLSchema#short", "http://www.w3.org/2001/XMLSchema#int", "http://www.w3.org/2001/XMLSchema#long", "http://www.w3.org/2001/XMLSchema#unsignedByte", "http://www.w3.org/2001/XMLSchema#unsignedShort", "http://www.w3.org/2001/XMLSchema#unsignedInt", "http://www.w3.org/2001/XMLSchema#unsignedLong", "http://www.w3.org/2001/XMLSchema#negativeInteger", "http://www.w3.org/2001/XMLSchema#nonNegativeInteger", "http://www.w3.org/2001/XMLSchema#positiveInteger" }
FLOAT_DATATYPES = { "http://www.w3.org/2001/XMLSchema#decimal", "http://www.w3.org/2001/XMLSchema#double", "http://www.w3.org/2001/XMLSchema#float", "http://www.w3.org/2002/07/owl#real" }
class BaseGraph(object):
_SUPPORT_CLONING = False
#READ_METHODS = ["_refactor", "_new_numbered_iri", "_abbreviate", "_unabbreviate",
# "get_triple_sp", "_get_data_triple_triple_sp", "get_triple_po", "_get_obj_triples_transitive_sp", "_get_obj_triples_transitive_po", "_get_obj_triples_transitive_sym", "_get_obj_triples_transitive_sp_indirect", "get_triples", "_get_data_triple_triples", "get_triples_s", "get_triples_sp", "_get_data_triple_triples_sp", "get_triples_po", "get_pred", "get_quads", "_get_triple_data_triples_sp", "_get_triple_data_triple_sp", "_get_triples_sp", "has_triple", "_has_data_triple_triple", "_del_triple", "_del_data_triple"]
#WRITE_METHODS = ["_add_triple", "_set_triple", "_add_data_triple", "_set_data_triple"]
BASE_METHODS = ["_refactor", "_new_numbered_iri", "_abbreviate", "_unabbreviate",
"_get_obj_triples_cspo_cspo", "_get_obj_triples_spo_spo", "_get_obj_triples_sp_co", "_get_obj_triples_s_po",
"_get_obj_triples_po_s", "_get_obj_triples_sp_o", "_get_obj_triple_sp_o", "_get_obj_triple_po_s", "_has_obj_triple_spo", "_del_obj_triple_raw_spo",
"_get_obj_triples_spi_o", "_get_obj_triples_pio_s",
"_get_data_triples_spod_spod", "_get_data_triples_sp_od", "_get_data_triple_sp_od", "_get_data_triples_s_pod", "_has_data_triple_spod", "_del_data_triple_raw_spod",
"_get_triples_spod_spod", "_get_triples_sp_od", "_get_triple_sp_od", "_get_triples_s_pod", "_get_triples_s_p", "_get_obj_triples_o_p",
"_get_obj_triples_transitive_sp", "_get_obj_triples_transitive_po", "_get_obj_triples_transitive_sym", "_get_obj_triples_transitive_sp_indirect"]
WORLD_METHODS = [] # "get_equivs_s_o"
ONTO_METHODS = ["_add_obj_triple_raw_spo", "_set_obj_triple_raw_spo", "_add_data_triple_raw_spod", "_set_data_triple_raw_spod"]
def sub_graph(self, user_context): return self.__class__(self, user_context)
def context_2_user_context(self, context): raise NotImplementedError
def parse(self, f): raise NotImplementedError
def save(self, f, format = "pretty-xml", filter = None): raise NotImplementedError
def _abbreviate (self, iri, create_if_missing = True): return iri
def _unabbreviate(self, iri): return iri
def _get_obj_triples_transitive_sp(self, s, p, already = None):
if already is None: already = set()
else:
if s in already: return already
already.add(s)
for o in self._get_obj_triples_sp_o(s, p):
self._get_obj_triples_transitive_sp(o, p, already)
return already
def _get_obj_triples_transitive_po(self, p, o, already = None):
if already is None: already = set()
else:
if o in already: return already
already.add(o)
for s in self._get_obj_triples_po_s(p, o):
self._get_obj_triples_transitive_po(p, s, already)
return already
def _get_obj_triples_transitive_sym(self, s, p, already = None):
if already is None: already = set()
else:
if s in already: return already
already.add(s)
for s2 in self._get_obj_triples_po_s(p, s): self._get_obj_triples_transitive_sym(s2, p, already)
for s2 in self._get_obj_triples_sp_o(s, p): self._get_obj_triples_transitive_sym(s2, p, already)
return already
def _get_obj_triples_transitive_sp_indirect(self, s, predicates_inverses, already = None):
if already is None: already = set()
else:
if s in already: return already
already.add(s)
for (predicate, inverse) in predicates_inverses:
for o in self._get_obj_triples_sp_o(s, predicate): self._get_obj_triples_transitive_sp_indirect(o, predicates_inverses, already)
if inverse:
for o in self._get_obj_triples_po_s(inverse, s): self._get_obj_triples_transitive_sp_indirect(o, predicates_inverses, already)
return already
def dump(self, format = "ntriples", file = None):
import io
s = io.BytesIO()
self.save(s, format)
print(s.getvalue().decode("utf8"), file = file)
def __bool__(self): return True # To avoid that "if graph:" call __len__()
class BaseMainGraph(BaseGraph):
def parse(self, f): raise NotImplementedError
def save(self, f, format = "rdfxml", **kargs): _save(f, format, self, **kargs)
class BaseSubGraph(BaseGraph):
def __init__(self, parent, onto):
self.parent = parent
self.onto = onto
def parse(self, f, format = None, delete_existing_triples = True, default_base = ""):
format = format or _guess_format(f)
if format == "ntriples":
objs, datas, on_prepare_obj, on_prepare_data, insert_objs, insert_datas, new_blank, _abbreviate, on_finish = self.create_parse_func(getattr(f, "name", ""), delete_existing_triples)
try:
current_line = 0
if owlready2_optimized:
owlready2_optimized.parse_ntriples(f, objs, datas, insert_objs, insert_datas, _abbreviate, new_blank, default_base)
else:
          splitter = re.compile(r"\s")  # raw string avoids the invalid-escape warning for \s
bn_src_2_sql = {}
line = f.readline().decode("utf8")
while line:
current_line += 1
if (not line.startswith("#")) and (not line.startswith("\n")):
if not line.endswith("\n"): line = "%s\n" % line
s,p,o = splitter.split(line[:-3], 2)
if s.startswith("<"): s = s[1:-1]
elif s.startswith("_"):
bn = bn_src_2_sql.get(s)
if bn is None: bn = bn_src_2_sql[s] = new_blank()
s = bn
p = p[1:-1]
if o.startswith("<"): on_prepare_obj(s, p, o[1:-1])
elif o.startswith("_"):
bn = bn_src_2_sql.get(o)
if bn is None: bn = bn_src_2_sql[o] = new_blank()
on_prepare_obj(s, p, bn)
elif o.startswith('"'):
o, d = o.rsplit('"', 1)
if d.startswith("^"):
d = d[3:-1]
if d in INT_DATATYPES: o = int (o[1:])
elif d in FLOAT_DATATYPES: o = float(o[1:])
else: o = o[1:].encode("raw-unicode-escape").decode("unicode-escape")
else:
o = o[1:].encode("raw-unicode-escape").decode("unicode-escape")
on_prepare_data(s, p, o, d)
line = f.readline().decode("utf8")
onto_base_iri = on_finish()
except Exception as e:
if len(self) == 0:
self._add_obj_triple_raw_spo(self.onto.storid, rdf_type, owl_ontology)
if current_line:
raise OwlReadyOntologyParsingError("NTriples parsing error (or unrecognized file format) in %s, line %s." % (getattr(f, "name", getattr(f, "url", "???")), current_line)) from e
else:
raise OwlReadyOntologyParsingError("NTriples parsing error (or unrecognized file format) in %s." % getattr(f, "name", getattr(f, "url", "???"))) from e
elif format == "rdfxml":
objs, datas, on_prepare_obj, on_prepare_data, insert_objs, insert_datas, new_blank, _abbreviate, on_finish = self.create_parse_func(getattr(f, "name", ""), delete_existing_triples)
try:
if owlready2_optimized:
owlready2_optimized.parse_rdfxml(f, objs, datas, insert_objs, insert_datas, _abbreviate, new_blank, default_base)
else:
import owlready2.rdfxml_2_ntriples
owlready2.rdfxml_2_ntriples.parse(f, on_prepare_obj, on_prepare_data, new_blank, default_base)
onto_base_iri = on_finish()
except OwlReadyOntologyParsingError as e:
if len(self) == 0: self._add_obj_triple_raw_spo(self.onto.storid, rdf_type, owl_ontology)
raise e
elif format == "owlxml":
objs, datas, on_prepare_obj, on_prepare_data, insert_objs, insert_datas, new_blank, _abbreviate, on_finish = self.create_parse_func(getattr(f, "name", ""), delete_existing_triples)
try:
if owlready2_optimized:
owlready2_optimized.parse_owlxml(f, objs, datas, insert_objs, insert_datas, _abbreviate, new_blank, default_base)
else:
import owlready2.owlxml_2_ntriples
owlready2.owlxml_2_ntriples.parse(f, on_prepare_obj, on_prepare_data, new_blank, default_base)
onto_base_iri = on_finish()
except OwlReadyOntologyParsingError as e:
if len(self) == 0: self._add_obj_triple_raw_spo(self.onto.storid, rdf_type, owl_ontology)
raise e
else:
raise ValueError("Unsupported format %s." % format)
return onto_base_iri
def save(self, f, format = "rdfxml", commit = False, **kargs):
if commit: self.parent.commit()
_save(f, format, self, **kargs)
def _guess_format(f):
if f.seekable():
s = f.read(1000)
f.seek(0)
else:
s = f.peek(1000).lstrip()
if isinstance(s, str): s = s.encode("utf8")
if s.startswith(b"\xef\xbb\xbf"): s = s[3:] # Ignore byte-order mask
if not s.lstrip().startswith(b"<"): return "ntriples"
if s[s.find(b"\n") -1] == b".": return "ntriples"
if s.split(b"\n", 1)[0].endswith(b"."): return "ntriples"
if (b"<!DOCTYPE Ontology" in s) or (b"<!DOCTYPE owl:Ontology" in s) or (b"<Ontology xmlns=" in s): return "owlxml"
return "rdfxml"
def _save(f, format, graph, filter = None):
if format == "ntriples":
_unabbreviate = lru_cache(None)(graph._unabbreviate)
for s,p,o,d in graph._iter_triples():
if filter and callable(filter) and not filter(graph, s, p, o, d): continue
if s < 0: s = "_:%s" % (-s)
else: s = "<%s>" % _unabbreviate(s)
p = "<%s>" % _unabbreviate(p)
if d is None:
if o < 0: o = "_:%s" % (-o)
else: o = "<%s>" % _unabbreviate(o)
else:
if isinstance(o, str): o = o.replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n')
if isinstance(d, str) and d.startswith("@"): o = '"%s"%s' % (o, d)
elif d == 0: o = '"%s"' % o
else: o = '"%s"^^<%s>' % (o, _unabbreviate(d)) # Unabbreviate datatype's iri
f.write(("%s %s %s .\n" % (s, p, o)).encode("utf8"))
elif format == "nquads":
_unabbreviate = lru_cache(None)(graph._unabbreviate)
c_2_iri = { c : iri for c, iri in graph._iter_ontology_iri() }
for c,s,p,o,d in graph._iter_triples(True):
if filter and callable(filter) and not filter(graph, s, p, o, d, c): continue
if s < 0: s = "_:%s" % (-s)
else: s = "<%s>" % _unabbreviate(s)
p = "<%s>" % _unabbreviate(p)
if d is None:
if o < 0: o = "_:%s" % (-o)
else: o = "<%s>" % _unabbreviate(o)
else:
if isinstance(o, str): o = o.replace('\\', '\\\\').replace('"', '\\"').replace('\n', '\\n')
if isinstance(d, str) and d.startswith("@"): o = '"%s"%s' % (o, d)
elif d == 0: o = '"%s"' % o
else: o = '"%s"^^<%s>' % (o, _unabbreviate(d)) # Unabbreviate datatype's iri
f.write(("<%s> %s %s %s .\n" % (c_2_iri[c], s, p, o)).encode("utf8"))
elif format == "rdfxml":
@lru_cache(None)
def _unabbreviate(storid):
r = graph._unabbreviate(storid).replace("&", "&")
if r.startswith(base_iri):
if base_iri.endswith("/"): return r[len(base_iri) :]
else: return r[len(base_iri) - 1 :]
return r
base_iri = graph._iter_ontology_iri(graph.c)
xmlns = {
"http://www.w3.org/1999/02/22-rdf-syntax-ns#" : "rdf:",
"http://www.w3.org/2001/XMLSchema#" : "xsd:",
"http://www.w3.org/2000/01/rdf-schema#" : "rdfs:",
"http://www.w3.org/2002/07/owl#" : "owl:",
}
if isinstance(base_iri, str):
if base_iri.endswith("/"):
xmlns[base_iri] = ""
else:
xmlns[base_iri[:-1]] = ""
xmlns[base_iri ] = "#"
else:
base_iri = " " # Non-null, non-URL
xmlns_abbbrevs = set(xmlns.values())
@lru_cache(None)
def abbrev(storid):
x = graph._unabbreviate(storid).replace("&", "&")
splitat = max(x.rfind("/"), x.rfind("#"), x.rfind(":"))
if splitat == -1: return x
left = x[:splitat + 1]
xmln = xmlns.get(left)
if not xmln:
splitted = left[:-1].rsplit("/", 1)
if len(splitted) == 2:
xmln0 = left[:-1].rsplit("/", 1)[1][:4].replace("#", "").replace(":", "")
else:
xmln0 = left[:4].replace("#", "").replace(":", "")
if not xmln0[0] in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ": xmln0 = "x_" + xmln0
xmln = xmln0 + ":"
i = 2
while xmln in xmlns_abbbrevs: xmln = "%s%s:" % (xmln0, i) ; i += 1
        xmlns[left] = xmln
xmlns_abbbrevs.add(xmln)
return xmln + x[splitat + 1:]
lines = []
liness = {}
for type in [
"owl:Ontology",
"owl:ObjectProperty",
"owl:DatatypeProperty",
"owl:AnnotationProperty",
"owl:AllDisjointProperties",
"owl:Class",
"owl:AllDisjointClasses",
"owl:NamedIndividual",
"owl:AllDifferent",
"", ]:
liness[type] = l = []
lines.append(l)
bn_2_inner_list = defaultdict(list)
inner_lists_used = set()
tags_with_list = {
"owl:intersectionOf",
"owl:unionOf",
"owl:members",
"owl:distinctMembers",
"owl:oneOf",
}
bad_types = {
"rdf:Description",
"owl:FunctionalProperty",
"owl:InverseFunctionalProperty",
"owl:TransitiveProperty",
"owl:SymmetricProperty",
"owl:ReflexiveProperty",
"owl:IrreflexiveProperty",
"owl:NamedIndividual",
}
def parse_list(bn):
has_literal = False
r = []
while bn and (bn != rdf_nil):
inner_lists_used.add(id(bn_2_inner_list[bn]))
#first = graph._get_obj_triple_sp_o(bn, rdf_first)
first, d = graph._get_triple_sp_od(bn, rdf_first)
if not ((d is None) and (first == rdf_nil)):
if not d is None: has_literal = True
r.append((first, d))
bn = graph._get_obj_triple_sp_o(bn, rdf_rest)
return has_literal, r
def purge():
nonlocal s_lines, current_s, type
if current_s < 0:
l = bn_2_inner_list[current_s]
current_s = 0
else:
#l = liness.get(type) or l = lines[-1]
l = liness.get(type)
if l is None: l = lines[-1]
if s_lines:
if current_s < 0:
l.append("""<%s rdf:nodeID="node%s">""" % (type, -current_s))
elif current_s > 0:
current_s = _unabbreviate(current_s)
l.append("""<%s rdf:about="%s">""" % (type, current_s))
else:
l.append("""<%s>""" % type)
l.extend(s_lines)
s_lines = []
l.append("""</%s>""" % type)
else:
if current_s < 0:
l.append("""<%s rdf:nodeID="node%s"/>""" % (type, -current_s))
elif current_s > 0:
current_s = _unabbreviate(current_s)
l.append("""<%s rdf:about="%s"/>""" % (type, current_s))
else:
l.append("""<%s/>""" % type)
if current_s: l.append("")
type = "rdf:Description"
s_lines = []
current_s = ""
for s,p,o,d in graph._iter_triples(False, True):
if filter and callable(filter) and not filter(graph, s, p, o, d): continue
if s != current_s:
if current_s: purge()
current_s = s
type = "rdf:Description"
if (p == rdf_type) and (type == "rdf:Description") and (not o < 0):
t = abbrev(o)
if not t in bad_types:
type = t
if type.startswith("#"): type = type[1:]
continue
p = abbrev(p)
if p.startswith("#"): p = p[1:]
if not d is None:
      if isinstance(o, str): o = o.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
if isinstance(d, str) and d.startswith("@"): s_lines.append(""" <%s xml:lang="%s">%s</%s>""" % (p, d[1:], o, p))
elif d: s_lines.append(""" <%s rdf:datatype="%s">%s</%s>""" % (p, _unabbreviate(d), o, p))
else: s_lines.append(""" <%s>%s</%s>""" % (p, o, p))
elif o < 0:
if p in tags_with_list:
list_has_literal, list_elements = parse_list(o)
if list_has_literal:
s_lines.append(""" <%s>""" % p)
def do_i(l, i):
s_lines.append(""" <rdf:Description>""")
o, d = l[i]
if d is None:
s_lines.append(""" <rdf:first><rdf:Description rdf:about="%s"/></rdf:first>""" % o)
else:
if isinstance(d, str) and d.startswith("@"): s_lines.append(""" <rdf:first xml:lang="%s">%s</rdf:first>""" % (d[1:], o))
elif d: s_lines.append(""" <rdf:first rdf:datatype="%s">%s</rdf:first>""" % (_unabbreviate(d), o))
else: s_lines.append(""" <rdf:first>%s</rdf:first>""" % o)
if i < len(l) - 1:
s_lines.append(""" <rdf:rest>""")
do_i(l, i + 1)
s_lines.append(""" </rdf:rest>""")
else:
s_lines.append(""" <rdf:rest rdf:resource="http://www.w3.org/1999/02/22-rdf-syntax-ns#nil"/>""")
s_lines.append(""" </rdf:Description>""")
if list_elements: do_i(list_elements, 0)
else:
s_lines.append(""" <%s rdf:parseType="Collection">""" % p)
for i, d in list_elements:
if i < 0:
l = bn_2_inner_list[i]
inner_lists_used.add(id(l))
s_lines.append(l)
elif isinstance(i, int):
i = _unabbreviate(i)
s_lines.append(""" <rdf:Description rdf:about="%s"/>""" % i)
else:
l = bn_2_inner_list[o]
inner_lists_used.add(id(l))
s_lines.append(""" <%s>""" % p)
s_lines.append(l)
s_lines.append(""" </%s>""" % p)
else:
o = _unabbreviate(o)
s_lines.append(""" <%s rdf:resource="%s"/>""" % (p, o))
purge()
if len(bn_2_inner_list) != len(inner_lists_used):
lines.append([])
for l in bn_2_inner_list.values():
if not id(l) in inner_lists_used:
lines[-1].extend(l)
lines[-1].append("")
decls = []
for iri, abbrev in xmlns.items():
if abbrev == "": decls.append('xml:base="%s"' % iri)
elif abbrev == "#": decls.append('xmlns="%s"' % iri)
else: decls.append('xmlns:%s="%s"' % (abbrev[:-1], iri))
if base_iri.endswith("/"):
decls.append('xmlns="%s"' % base_iri)
def flatten(l):
if not l: return
c = [l]
p = [0]
while c:
v = c[-1][p[-1]]
deep = len(c)
if p[-1] + 1 < len(c[-1]):
p[-1] += 1
else:
del c[-1]
del p[-1]
if isinstance(v, list):
if v:
c.append(v)
p.append(0)
else:
yield "%s%s" % (" " * (deep - 1), v)
f.write(b"""<?xml version="1.0"?>\n""")
f.write(("""<rdf:RDF %s>\n\n""" % "\n ".join(decls)).encode("utf8"))
f.write( """\n""".join(flatten(sum(lines, []))).encode("utf8"))
f.write(b"""\n\n</rdf:RDF>\n""")
| 40.88
| 583
| 0.568582
|
d219d17c660ac000f7d5d6dcef19731b6c61372a
| 1,349
|
py
|
Python
|
mythril/support/loader.py
|
yrashk/mythril
|
0cea8f562726da468ab4761ff1ff3746ab0d747a
|
[
"MIT"
] | null | null | null |
mythril/support/loader.py
|
yrashk/mythril
|
0cea8f562726da468ab4761ff1ff3746ab0d747a
|
[
"MIT"
] | null | null | null |
mythril/support/loader.py
|
yrashk/mythril
|
0cea8f562726da468ab4761ff1ff3746ab0d747a
|
[
"MIT"
] | 1
|
2018-08-27T02:38:05.000Z
|
2018-08-27T02:38:05.000Z
|
from mythril.disassembler.disassembly import Disassembly
import logging
import re
class DynLoader:
def __init__(self, eth):
self.eth = eth
self.storage_cache = {}
    def read_storage(self, contract_address, index):
        try:
            contract_ref = self.storage_cache[contract_address]
            data = contract_ref[index]
        except KeyError:
            # Dict lookups raise KeyError whether the contract or only the slot
            # is missing; create the per-contract cache lazily so previously
            # cached slots for this contract are not discarded.
            contract_ref = self.storage_cache.setdefault(contract_address, {})
            data = self.eth.eth_getStorageAt(contract_address, position=index, block='latest')
            contract_ref[index] = data
        return data
def dynld(self, contract_address, dependency_address):
logging.info("Dynld at contract " + contract_address + ": " + dependency_address)
m = re.match(r'^(0x[0-9a-fA-F]{40})$', dependency_address)
if m:
dependency_address = m.group(1)
else:
return None
logging.info("Dependency address: " + dependency_address)
code = self.eth.eth_getCode(dependency_address)
if code == "0x":
return None
else:
return Disassembly(code)
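# Illustrative sketch added for documentation (not part of the original module):
# DynLoader only needs an object exposing eth_getStorageAt() and eth_getCode(),
# such as the JSON-RPC client mythril configures elsewhere. The client handle,
# addresses and slot index below are hypothetical placeholders.
def _example_dynloader_usage(eth_client):
    loader = DynLoader(eth_client)
    slot0 = loader.read_storage("0x" + "11" * 20, 0)          # memoized per contract and slot
    disassembly = loader.dynld("0x" + "11" * 20, "0x" + "22" * 20)
    return slot0, disassembly                                  # disassembly is None if no code is deployed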
| 24.981481
| 94
| 0.62639
|
7eeea297bedfd7f471f23c0ce004e83a149e5aea
| 16,724
|
py
|
Python
|
ludwig/modules/metric_modules.py
|
lnxpy/ludwig
|
5efd17f76522df91efbfe761e082b3b256fe7952
|
[
"Apache-2.0"
] | null | null | null |
ludwig/modules/metric_modules.py
|
lnxpy/ludwig
|
5efd17f76522df91efbfe761e082b3b256fe7952
|
[
"Apache-2.0"
] | null | null | null |
ludwig/modules/metric_modules.py
|
lnxpy/ludwig
|
5efd17f76522df91efbfe761e082b3b256fe7952
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.metrics import \
MeanAbsoluteError as MeanAbsoluteErrorMetric
from tensorflow.python.keras.metrics import \
MeanSquaredError as MeanSquaredErrorMetric
from ludwig.constants import *
from ludwig.constants import PREDICTIONS
from ludwig.modules.loss_modules import (BWCEWLoss, SequenceLoss,
SigmoidCrossEntropyLoss,
SoftmaxCrossEntropyLoss)
from ludwig.utils.tf_utils import sequence_length_2D, to_sparse
metrics = {ACCURACY, TOKEN_ACCURACY, HITS_AT_K, R2, JACCARD, EDIT_DISTANCE,
MEAN_SQUARED_ERROR, MEAN_ABSOLUTE_ERROR,
PERPLEXITY}
max_metrics = {ACCURACY, TOKEN_ACCURACY, HITS_AT_K, R2, JACCARD}
min_metrics = {EDIT_DISTANCE, MEAN_SQUARED_ERROR, MEAN_ABSOLUTE_ERROR, LOSS,
PERPLEXITY}
class R2Score(tf.keras.metrics.Metric):
def __init__(self, name='r2_score'):
super(R2Score, self).__init__(name=name)
self.sum_y = self.add_weight(
'sum_y', initializer='zeros',
dtype=tf.float32
)
self.sum_y_squared = self.add_weight(
'sum_y_squared', initializer='zeros',
dtype=tf.float32
)
self.sum_y_hat = self.add_weight(
'sum_y_hat', initializer='zeros',
dtype=tf.float32
)
self.sum_y_hat_squared = self.add_weight(
'sum_y_hat_squared', initializer='zeros',
dtype=tf.float32
)
self.sum_y_y_hat = self.add_weight(
'sum_y_y_hat', initializer='zeros',
dtype=tf.float32
)
self.N = self.add_weight(
'N', initializer='zeros',
dtype=tf.float32
)
def update_state(self, y, y_hat):
y = tf.cast(y, dtype=tf.float32)
y_hat = tf.cast(y_hat, dtype=tf.float32)
self.sum_y.assign_add(tf.reduce_sum(y))
self.sum_y_squared.assign_add(tf.reduce_sum(y ** 2))
self.sum_y_hat.assign_add(tf.reduce_sum(y_hat))
self.sum_y_hat_squared.assign_add(tf.reduce_sum(y_hat ** 2))
self.sum_y_y_hat.assign_add(tf.reduce_sum(y * y_hat))
self.N.assign_add(y.shape[0])
def result(self):
y_bar = self.sum_y / self.N
tot_ss = self.sum_y_squared - 2.0 * y_bar * self.sum_y \
+ self.N * y_bar ** 2
res_ss = self.sum_y_squared - 2.0 * self.sum_y_y_hat \
+ self.sum_y_hat_squared
return 1.0 - res_ss / tot_ss
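# Illustrative sketch added for documentation (not part of the original module):
# R2Score accumulates sufficient statistics, so it can be updated batch by batch
# and queried once at the end. The tensors below are toy eager-mode data, not a
# Ludwig fixture.
def _example_r2_score():
    metric = R2Score()
    metric.update_state(tf.constant([1.0, 2.0]), tf.constant([1.1, 1.9]))
    metric.update_state(tf.constant([3.0, 4.0]), tf.constant([2.9, 4.2]))
    return metric.result()  # close to 1.0 for these near-perfect predictions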
class ErrorScore(tf.keras.metrics.Metric):
def __init__(self, name='error_score'):
super(ErrorScore, self).__init__(name=name)
self.sum_error = self.add_weight(
'sum_error', initializer='zeros',
dtype=tf.float32
)
self.N = self.add_weight(
'N', initializer='zeros',
dtype=tf.float32
)
def update_state(self, y, y_hat):
y = tf.cast(y, tf.float32)
y_hat = tf.cast(y_hat, tf.float32)
self.sum_error.assign_add(tf.reduce_sum(y - y_hat))
self.N.assign_add(y.shape[0])
def result(self):
return self.sum_error / self.N
class BWCEWLMetric(tf.keras.metrics.Metric):
# Binary Weighted Cross Entropy Weighted Logits Score Metric
# See for additional info:
# https://www.tensorflow.org/api_docs/python/tf/keras/metrics/Metric
def __init__(
self,
positive_class_weight=1,
robust_lambda=0,
confidence_penalty=0,
name='binary_cross_entropy_weighted_loss_metric'
):
super(BWCEWLMetric, self).__init__(name=name)
self.bwcew_loss_function = BWCEWLoss(
positive_class_weight=positive_class_weight,
robust_lambda=robust_lambda,
confidence_penalty=confidence_penalty
)
self.sum_loss = self.add_weight(
'sum_loss', initializer='zeros',
dtype=tf.float32
)
self.N = self.add_weight(
'N', initializer='zeros',
dtype=tf.float32
)
def update_state(self, y, y_hat):
loss = self.bwcew_loss_function(y, y_hat)
self.sum_loss.assign_add(loss)
self.N.assign_add(1)
def result(self):
return self.sum_loss / self.N
class SoftmaxCrossEntropyMetric(tf.keras.metrics.Mean):
def __init__(
self,
num_classes=0,
feature_loss=None,
name='softmax_cross_entropy_metric'
):
super(SoftmaxCrossEntropyMetric, self).__init__(name=name)
self.softmax_cross_entropy_function = SoftmaxCrossEntropyLoss(
num_classes=num_classes,
feature_loss=feature_loss
)
def update_state(self, y, y_hat):
super().update_state(self.softmax_cross_entropy_function(y, y_hat))
class SigmoidCrossEntropyMetric(tf.keras.metrics.Mean):
def __init__(
self,
feature_loss=None,
name='sigmoid_cross_entropy_metric'
):
super(SigmoidCrossEntropyMetric, self).__init__(name=name)
self.sigmoid_cross_entropy_function = SigmoidCrossEntropyLoss()
def update_state(self, y, y_hat):
super().update_state(self.sigmoid_cross_entropy_function(y, y_hat))
class SequenceLossMetric(tf.keras.metrics.Mean):
def __init__(self, name=None):
super(SequenceLossMetric, self).__init__(name=name)
self.loss_function = SequenceLoss(from_logits=False)
def update_state(self, y, y_hat):
loss = self.loss_function(y, y_hat)
super().update_state(loss)
class SequenceLastAccuracyMetric(tf.keras.metrics.Accuracy):
"""
Sequence accuracy based on last token in the sequence
"""
def __init__(self, name=None):
super(SequenceLastAccuracyMetric, self).__init__(name=name)
def update_state(self, y_true, y_pred, sample_weight=None):
y_true = tf.cast(y_true, dtype=tf.int64)
targets_sequence_length = sequence_length_2D(y_true)
last_targets = tf.gather_nd(
y_true,
tf.stack(
[tf.range(tf.shape(y_true)[0]),
tf.maximum(
targets_sequence_length - 1,
0
)],
axis=1
)
)
super().update_state(last_targets, y_pred, sample_weight=sample_weight)
class PerplexityMetric(tf.keras.metrics.Mean):
def __init__(self, name=None):
super(PerplexityMetric, self).__init__(name=name)
self.loss_function = SequenceLoss(from_logits=False)
def update_state(self, y_true, y_pred, sample_weight=None):
loss = self.loss_function(y_true, y_pred)
super().update_state(loss)
def result(self):
mean = super().result()
return np.exp(mean)
class EditDistanceMetric(tf.keras.metrics.Mean):
def __init__(self, name=None):
super(EditDistanceMetric, self).__init__(name=name)
def update_state(self, y_true, y_pred):
# y_true: shape [batch_size, sequence_size]
# y_pred: shape [batch_size, sequence_size]
prediction_dtype = y_pred.dtype
prediction_sequence_length = sequence_length_2D(y_pred)
y_true_tensor = tf.cast(y_true, dtype=prediction_dtype)
target_sequence_length = sequence_length_2D(y_true_tensor)
edit_distance_val, _ = edit_distance(
y_true_tensor,
target_sequence_length,
y_pred,
prediction_sequence_length
)
super().update_state(edit_distance_val)
class TokenAccuracyMetric(tf.keras.metrics.Mean):
def __init__(self, name=None):
super(TokenAccuracyMetric, self).__init__(name=name)
def update_state(self, y_true, y_pred):
# y_true: shape [batch_size, sequence_size]
# y_pred: shape [batch_size, sequence_size]
prediction_dtype = y_pred.dtype
y_true_tensor = tf.cast(y_true, dtype=prediction_dtype)
target_sequence_length = sequence_length_2D(y_true_tensor)
masked_corrected_preds = masked_corrected_predictions(
y_true_tensor,
y_pred,
target_sequence_length
)
super().update_state(masked_corrected_preds)
class SequenceAccuracyMetric(tf.keras.metrics.Mean):
def __init__(self, name=None):
super(SequenceAccuracyMetric, self).__init__(name=name)
def update_state(self, y_true, y_pred):
# y_true: shape [batch_size, sequence_size]
# y_pred: shape [batch_size, sequence_size]
prediction_dtype = y_pred.dtype
y_true_tensor = tf.cast(y_true, dtype=prediction_dtype)
target_sequence_length = sequence_length_2D(y_true_tensor)
masked_sequence_corrected_preds = \
masked_sequence_corrected_predictions(
y_true_tensor, y_pred, target_sequence_length
)
super().update_state(masked_sequence_corrected_preds)
class CategoryAccuracy(tf.keras.metrics.Accuracy):
def __init__(self, name=None):
super(CategoryAccuracy, self).__init__(name=name)
def update_state(self, y_true, y_pred, sample_weight=None):
# make sure y_true is tf.int64
super().update_state(
tf.cast(y_true, dtype=tf.int64),
y_pred,
sample_weight=sample_weight
)
class HitsAtKMetric(tf.keras.metrics.SparseTopKCategoricalAccuracy):
def __init__(self, k=3, name=None):
super(HitsAtKMetric, self).__init__(k=k, name=name)
def update_state(self, y_true, y_pred, sample_weight=None):
super().update_state(
y_true,
y_pred[LOGITS],
sample_weight=sample_weight
)
class MAEMetric(MeanAbsoluteErrorMetric):
def __init__(self, **kwargs):
super(MAEMetric, self).__init__(**kwargs)
def update_state(self, y_true, y_pred, sample_weight=None):
super().update_state(
y_true, y_pred[PREDICTIONS], sample_weight=sample_weight
)
class MSEMetric(MeanSquaredErrorMetric):
def __init__(self, **kwargs):
super(MSEMetric, self).__init__(**kwargs)
def update_state(self, y_true, y_pred, sample_weight=None):
super().update_state(
y_true, y_pred[PREDICTIONS], sample_weight=sample_weight
)
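# Illustrative sketch added for documentation (not part of the original module):
# HitsAtKMetric, MAEMetric and MSEMetric above receive the per-feature
# prediction dictionary and index it with LOGITS / PREDICTIONS. The tensors
# below are toy data, not a Ludwig fixture.
def _example_mse_metric():
    metric = MSEMetric()
    metric.update_state(tf.constant([1.0, 2.0]), {PREDICTIONS: tf.constant([1.5, 2.5])})
    return metric.result()  # 0.25, the mean of the squared errors (0.5 ** 2)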
def get_improved_fun(metric):
if metric in min_metrics:
return lambda x, y: x < y
else:
return lambda x, y: x > y
def get_initial_validation_value(metric):
if metric in min_metrics:
return float('inf')
else:
return float('-inf')
def get_best_function(metric):
if metric in min_metrics:
return min
else:
return max
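# Illustrative sketch added for documentation (not part of the original module):
# the three helpers above encode whether a metric improves by increasing or by
# decreasing, which is how validation tracking decides whether to keep a new
# value. The epoch values below are made up.
def _example_track_best_loss():
    is_improved = get_improved_fun(LOSS)           # loss improves when it decreases
    best = get_initial_validation_value(LOSS)      # float('inf')
    for value in (0.9, 0.7, 0.8):
        if is_improved(value, best):
            best = value
    return best                                    # 0.7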
def accuracy(targets, predictions, output_feature_name):
correct_predictions = tf.equal(predictions, targets,
name='correct_predictions_{}'.format(
output_feature_name))
accuracy = tf.reduce_mean(
tf.cast(correct_predictions, tf.float32),
name='accuracy_{}'.format(output_feature_name))
return accuracy, correct_predictions
def masked_corrected_predictions(
targets,
predictions,
targets_sequence_lengths
):
truncated_preds = predictions[:, :targets.shape[1]]
paddings = tf.stack([
[0, 0],
[0, tf.shape(targets)[1] - tf.shape(truncated_preds)[1]]
])
padded_truncated_preds = tf.pad(truncated_preds, paddings, name='ptp')
correct_preds = tf.equal(padded_truncated_preds, targets)
mask = tf.sequence_mask(targets_sequence_lengths,
maxlen=correct_preds.shape[1],
dtype=tf.int32)
_, masked_correct_preds = tf.dynamic_partition(correct_preds, mask, 2)
masked_correct_preds = tf.cast(masked_correct_preds, dtype=tf.float32)
return masked_correct_preds
def masked_sequence_corrected_predictions(
targets,
predictions,
targets_sequence_lengths
):
truncated_preds = predictions[:, :targets.shape[1]]
paddings = tf.stack([
[0, 0],
[0, tf.shape(targets)[1] - tf.shape(truncated_preds)[1]]
])
padded_truncated_preds = tf.pad(truncated_preds,
paddings,
name='ptp')
correct_preds = tf.equal(padded_truncated_preds, targets)
mask = tf.sequence_mask(targets_sequence_lengths,
maxlen=correct_preds.shape[1],
dtype=tf.int32)
one_masked_correct_prediction = \
1.0 - tf.cast(mask, tf.float32) + (
tf.cast(mask, tf.float32) * tf.cast(correct_preds,
tf.float32)
)
sequence_correct_preds = tf.reduce_prod(
one_masked_correct_prediction, axis=-1
)
return sequence_correct_preds
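# Illustrative sketch added for documentation (not part of the original module):
# masked_corrected_predictions() scores only the first `targets_sequence_lengths`
# tokens of each row, while masked_sequence_corrected_predictions() requires all
# unmasked tokens of a row to match. The tensors below are toy eager-mode data.
def _example_masked_accuracy():
    targets = tf.constant([[1, 2, 0], [3, 4, 5]], dtype=tf.int64)
    preds = tf.constant([[1, 9, 0], [3, 4, 5]], dtype=tf.int64)
    lengths = tf.constant([2, 3], dtype=tf.int32)
    token_level = masked_corrected_predictions(targets, preds, lengths)
    sequence_level = masked_sequence_corrected_predictions(targets, preds, lengths)
    return token_level, sequence_level  # [1., 0., 1., 1., 1.] and [0., 1.]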
def hits_at_k(targets, predictions_logits, top_k, output_feature_name):
with tf.device('/cpu:0'):
hits_at_k = tf.nn.in_top_k(predictions_logits, targets, top_k,
name='hits_at_k_{}'.format(
output_feature_name))
mean_hits_at_k = tf.reduce_mean(tf.cast(hits_at_k, tf.float32),
name='mean_hits_at_k_{}'.format(
output_feature_name))
return hits_at_k, mean_hits_at_k
def edit_distance(targets, target_seq_length, predictions_sequence,
predictions_seq_length):
predicts = to_sparse(predictions_sequence,
predictions_seq_length,
tf.shape(predictions_sequence)[1])
labels = to_sparse(targets,
target_seq_length,
tf.shape(targets)[1])
edit_distance = tf.edit_distance(predicts, labels)
mean_edit_distance = tf.reduce_mean(edit_distance)
return edit_distance, mean_edit_distance
def perplexity(cross_entropy_loss):
    # This seems weird but is correct:
    # we are returning the cross entropy loss as it will later be summed,
    # divided by the size of the dataset and finally exponentiated,
    # because perplexity has an avg_exp aggregation strategy
# in the output config in SequenceOutputFeature.
# This implies that in Model update_output_stats_batch()
# the values read from the perplexity node will be summed
# and in Model update_output_stats() they will be divided
# by the set size first and exponentiated.
return cross_entropy_loss
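# Illustrative sketch added for documentation (not part of the original module):
# the avg_exp aggregation described above amounts to exponentiating the mean of
# the accumulated cross-entropy values. The batch losses below are made up.
def _example_avg_exp_aggregation():
    batch_losses = [2.1, 1.9, 2.0]
    return np.exp(sum(batch_losses) / len(batch_losses))  # exp(2.0), roughly 7.39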
def error(targets, predictions, output_feature_name):
# return tf.get_variable('error_{}'.format(output_feature_name), initializer=tf.subtract(targets, predictions))
return tf.subtract(targets, predictions,
name='error_{}'.format(output_feature_name))
def absolute_error(targets, predictions, output_feature_name):
# error = tf.get_variable('error_{}'.format(output_feature_name), initializer=tf.subtract(targets, predictions))
error = tf.subtract(targets, predictions)
return tf.abs(error, name='absolute_error_{}'.format(output_feature_name))
def squared_error(targets, predictions, output_feature_name):
# error = tf.get_variable('error_{}'.format(output_feature_name), initializer=tf.subtract(targets, predictions))
error = tf.subtract(targets, predictions)
return tf.pow(error, 2,
name='squared_error_{}'.format(output_feature_name))
def r2(targets, predictions, output_feature_name):
y_bar = tf.reduce_mean(targets)
tot_ss = tf.reduce_sum(tf.pow(targets - y_bar, 2))
res_ss = tf.reduce_sum(tf.pow(targets - predictions, 2))
r2 = tf.subtract(1., res_ss / tot_ss,
name='r2_{}'.format(output_feature_name))
return r2
| 34.340862
| 116
| 0.642251
|
bc8dec915b2a0f836e501455704016f4b1e4eff1
| 14,197
|
py
|
Python
|
research/differential_privacy/multiple_teachers/input.py
|
daquexian/models
|
fad6075359b852b9c0a4c6f1b068790d44a6441a
|
[
"Apache-2.0"
] | 310
|
2017-01-23T15:04:12.000Z
|
2022-01-11T05:50:41.000Z
|
research/differential_privacy/multiple_teachers/input.py
|
wzy1510300a28/models
|
42a3da72313b8814ef0ced8f425af90b57313b9f
|
[
"Apache-2.0"
] | 12
|
2017-05-23T17:42:59.000Z
|
2020-01-10T05:16:22.000Z
|
research/differential_privacy/multiple_teachers/input.py
|
wzy1510300a28/models
|
42a3da72313b8814ef0ced8f425af90b57313b9f
|
[
"Apache-2.0"
] | 73
|
2017-01-24T13:57:45.000Z
|
2021-09-29T18:52:57.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cPickle
import gzip
import math
import numpy as np
import os
from scipy.io import loadmat as loadmat
from six.moves import urllib
import sys
import tarfile
import tensorflow as tf
FLAGS = tf.flags.FLAGS
def create_dir_if_needed(dest_directory):
"""
  Create directory if it doesn't exist
:param dest_directory:
:return: True if everything went well
"""
if not tf.gfile.IsDirectory(dest_directory):
tf.gfile.MakeDirs(dest_directory)
return True
def maybe_download(file_urls, directory):
"""
  Download a set of files into a temporary local folder
  :param file_urls: list of file URLs to download
  :param directory: the directory where to download
  :return: a list of filepaths corresponding to the files given as input
"""
# Create directory if doesn't exist
assert create_dir_if_needed(directory)
# This list will include all URLS of the local copy of downloaded files
result = []
# For each file of the dataset
for file_url in file_urls:
# Extract filename
filename = file_url.split('/')[-1]
# If downloading from GitHub, remove suffix ?raw=True from local filename
if filename.endswith("?raw=true"):
filename = filename[:-9]
# Deduce local file url
#filepath = os.path.join(directory, filename)
filepath = directory + '/' + filename
# Add to result list
result.append(filepath)
# Test if file already exists
if not tf.gfile.Exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(file_url, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
return result
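# Illustrative sketch added for documentation (not part of the original file):
# maybe_download() returns the local paths in the same order as the URLs and
# skips files that already exist. The destination directory below is a
# hypothetical placeholder; the URL is one of those used further down.
def _example_maybe_download():
  urls = ['http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz']
  return maybe_download(urls, '/tmp/example_data_dir')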
def image_whitening(data):
"""
Subtracts mean of image and divides by adjusted standard variance (for
stability). Operations are per image but performed for the entire array.
  :param data: 4D array (ID, Height, Width, Channel)
  :return: 4D array (ID, Height, Width, Channel)
"""
assert len(np.shape(data)) == 4
# Compute number of pixels in image
nb_pixels = np.shape(data)[1] * np.shape(data)[2] * np.shape(data)[3]
# Subtract mean
mean = np.mean(data, axis=(1,2,3))
ones = np.ones(np.shape(data)[1:4], dtype=np.float32)
for i in xrange(len(data)):
data[i, :, :, :] -= mean[i] * ones
# Compute adjusted standard variance
adj_std_var = np.maximum(np.ones(len(data), dtype=np.float32) / math.sqrt(nb_pixels), np.std(data, axis=(1,2,3))) #NOLINT(long-line)
# Divide image
for i in xrange(len(data)):
data[i, :, :, :] = data[i, :, :, :] / adj_std_var[i]
print(np.shape(data))
return data
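# Illustrative sketch added for documentation (not part of the original file):
# image_whitening() rescales every image in place to roughly zero mean and unit
# variance. The batch below is random toy data shaped like a small CIFAR batch.
def _example_image_whitening():
  batch = np.random.rand(2, 32, 32, 3).astype(np.float32)
  whitened = image_whitening(batch)
  return np.mean(whitened[0]), np.std(whitened[0])  # approximately 0.0 and 1.0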
def extract_svhn(local_url):
"""
Extract a MATLAB matrix into two numpy arrays with data and labels
:param local_url:
:return:
"""
with tf.gfile.Open(local_url, mode='r') as file_obj:
# Load MATLAB matrix using scipy IO
dict = loadmat(file_obj)
# Extract each dictionary (one for data, one for labels)
data, labels = dict["X"], dict["y"]
# Set np type
data = np.asarray(data, dtype=np.float32)
labels = np.asarray(labels, dtype=np.int32)
# Transpose data to match TF model input format
data = data.transpose(3, 0, 1, 2)
# Fix the SVHN labels which label 0s as 10s
labels[labels == 10] = 0
# Fix label dimensions
labels = labels.reshape(len(labels))
return data, labels
def unpickle_cifar_dic(file):
"""
Helper function: unpickles a dictionary (used for loading CIFAR)
:param file: filename of the pickle
:return: tuple of (images, labels)
"""
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict['data'], dict['labels']
def extract_cifar10(local_url, data_dir):
"""
Extracts the CIFAR-10 dataset and return numpy arrays with the different sets
:param local_url: where the tar.gz archive is located locally
:param data_dir: where to extract the archive's file
:return: a tuple (train data, train labels, test data, test labels)
"""
# These numpy dumps can be reloaded to avoid performing the pre-processing
# if they exist in the working directory.
# Changing the order of this list will ruin the indices below.
preprocessed_files = ['/cifar10_train.npy',
'/cifar10_train_labels.npy',
'/cifar10_test.npy',
'/cifar10_test_labels.npy']
all_preprocessed = True
for file in preprocessed_files:
if not tf.gfile.Exists(data_dir + file):
all_preprocessed = False
break
if all_preprocessed:
# Reload pre-processed training data from numpy dumps
with tf.gfile.Open(data_dir + preprocessed_files[0], mode='r') as file_obj:
train_data = np.load(file_obj)
with tf.gfile.Open(data_dir + preprocessed_files[1], mode='r') as file_obj:
train_labels = np.load(file_obj)
# Reload pre-processed testing data from numpy dumps
with tf.gfile.Open(data_dir + preprocessed_files[2], mode='r') as file_obj:
test_data = np.load(file_obj)
with tf.gfile.Open(data_dir + preprocessed_files[3], mode='r') as file_obj:
test_labels = np.load(file_obj)
else:
# Do everything from scratch
# Define lists of all files we should extract
train_files = ["data_batch_" + str(i) for i in xrange(1,6)]
test_file = ["test_batch"]
cifar10_files = train_files + test_file
# Check if all files have already been extracted
need_to_unpack = False
for file in cifar10_files:
if not tf.gfile.Exists(file):
need_to_unpack = True
break
# We have to unpack the archive
if need_to_unpack:
tarfile.open(local_url, 'r:gz').extractall(data_dir)
# Load training images and labels
images = []
labels = []
for file in train_files:
# Construct filename
filename = data_dir + "/cifar-10-batches-py/" + file
# Unpickle dictionary and extract images and labels
images_tmp, labels_tmp = unpickle_cifar_dic(filename)
# Append to lists
images.append(images_tmp)
labels.append(labels_tmp)
# Convert to numpy arrays and reshape in the expected format
train_data = np.asarray(images, dtype=np.float32).reshape((50000,3,32,32))
train_data = np.swapaxes(train_data, 1, 3)
train_labels = np.asarray(labels, dtype=np.int32).reshape(50000)
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[0], train_data)
np.save(data_dir + preprocessed_files[1], train_labels)
# Construct filename for test file
filename = data_dir + "/cifar-10-batches-py/" + test_file[0]
# Load test images and labels
test_data, test_images = unpickle_cifar_dic(filename)
# Convert to numpy arrays and reshape in the expected format
test_data = np.asarray(test_data,dtype=np.float32).reshape((10000,3,32,32))
test_data = np.swapaxes(test_data, 1, 3)
test_labels = np.asarray(test_images, dtype=np.int32).reshape(10000)
# Save so we don't have to do this again
np.save(data_dir + preprocessed_files[2], test_data)
np.save(data_dir + preprocessed_files[3], test_labels)
return train_data, train_labels, test_data, test_labels
def extract_mnist_data(filename, num_images, image_size, pixel_depth):
"""
Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
# if not os.path.exists(file):
if not tf.gfile.Exists(filename+".npy"):
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(image_size * image_size * num_images)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
data = (data - (pixel_depth / 2.0)) / pixel_depth
data = data.reshape(num_images, image_size, image_size, 1)
np.save(filename, data)
return data
else:
with tf.gfile.Open(filename+".npy", mode='r') as file_obj:
return np.load(file_obj)
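# Illustrative note added for documentation (not part of the original file):
# with pixel_depth = 255 the rescaling above maps raw byte values into
# [-0.5, 0.5], e.g. (0 - 255 / 2.0) / 255 = -0.5 and (255 - 255 / 2.0) / 255 = 0.5.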
def extract_mnist_labels(filename, num_images):
"""
Extract the labels into a vector of int64 label IDs.
"""
# if not os.path.exists(file):
if not tf.gfile.Exists(filename+".npy"):
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int32)
np.save(filename, labels)
return labels
else:
with tf.gfile.Open(filename+".npy", mode='r') as file_obj:
return np.load(file_obj)
def ld_svhn(extended=False, test_only=False):
"""
Load the original SVHN data
:param extended: include extended training data in the returned array
:param test_only: disables loading of both train and extra -> large speed up
:return: tuple of arrays which depend on the parameters
"""
# Define files to be downloaded
# WARNING: changing the order of this list will break indices (cf. below)
file_urls = ['http://ufldl.stanford.edu/housenumbers/train_32x32.mat',
'http://ufldl.stanford.edu/housenumbers/test_32x32.mat',
'http://ufldl.stanford.edu/housenumbers/extra_32x32.mat']
# Maybe download data and retrieve local storage urls
local_urls = maybe_download(file_urls, FLAGS.data_dir)
# Extra Train, Test, and Extended Train data
if not test_only:
# Load and applying whitening to train data
train_data, train_labels = extract_svhn(local_urls[0])
train_data = image_whitening(train_data)
# Load and applying whitening to extended train data
ext_data, ext_labels = extract_svhn(local_urls[2])
ext_data = image_whitening(ext_data)
# Load and applying whitening to test data
test_data, test_labels = extract_svhn(local_urls[1])
test_data = image_whitening(test_data)
if test_only:
return test_data, test_labels
else:
if extended:
# Stack train data with the extended training data
train_data = np.vstack((train_data, ext_data))
train_labels = np.hstack((train_labels, ext_labels))
return train_data, train_labels, test_data, test_labels
else:
# Return training and extended training data separately
return train_data,train_labels, test_data,test_labels, ext_data,ext_labels
def ld_cifar10(test_only=False):
"""
Load the original CIFAR10 data
:param test_only: disables loading of both train and extra -> large speed up
:return: tuple of arrays which depend on the parameters
"""
# Define files to be downloaded
file_urls = ['https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz']
# Maybe download data and retrieve local storage urls
local_urls = maybe_download(file_urls, FLAGS.data_dir)
# Extract archives and return different sets
dataset = extract_cifar10(local_urls[0], FLAGS.data_dir)
# Unpack tuple
train_data, train_labels, test_data, test_labels = dataset
# Apply whitening to input data
train_data = image_whitening(train_data)
test_data = image_whitening(test_data)
if test_only:
return test_data, test_labels
else:
return train_data, train_labels, test_data, test_labels
def ld_mnist(test_only=False):
"""
Load the MNIST dataset
:param test_only: disables loading of both train and extra -> large speed up
:return: tuple of arrays which depend on the parameters
"""
# Define files to be downloaded
# WARNING: changing the order of this list will break indices (cf. below)
file_urls = ['http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
]
# Maybe download data and retrieve local storage urls
local_urls = maybe_download(file_urls, FLAGS.data_dir)
# Extract it into np arrays.
train_data = extract_mnist_data(local_urls[0], 60000, 28, 1)
train_labels = extract_mnist_labels(local_urls[1], 60000)
test_data = extract_mnist_data(local_urls[2], 10000, 28, 1)
test_labels = extract_mnist_labels(local_urls[3], 10000)
if test_only:
return test_data, test_labels
else:
return train_data, train_labels, test_data, test_labels
def partition_dataset(data, labels, nb_teachers, teacher_id):
"""
Simple partitioning algorithm that returns the right portion of the data
needed by a given teacher out of a certain nb of teachers
:param data: input data to be partitioned
:param labels: output data to be partitioned
:param nb_teachers: number of teachers in the ensemble (affects size of each
partition)
:param teacher_id: id of partition to retrieve
:return:
"""
# Sanity check
assert len(data) == len(labels)
assert int(teacher_id) < int(nb_teachers)
# This will floor the possible number of batches
batch_len = int(len(data) / nb_teachers)
# Compute start, end indices of partition
start = teacher_id * batch_len
end = (teacher_id+1) * batch_len
# Slice partition off
partition_data = data[start:end]
partition_labels = labels[start:end]
return partition_data, partition_labels
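# Illustrative sketch added for documentation (not part of the original file):
# partition_dataset() hands teacher i the i-th contiguous slice, flooring the
# partition size so every teacher receives the same number of examples. The
# arrays below are toy data.
def _example_partition():
  data = np.arange(10).reshape(10, 1)
  labels = np.arange(10)
  # batch_len = 10 // 3 = 3, so teacher 1 receives rows 3, 4 and 5
  return partition_dataset(data, labels, nb_teachers=3, teacher_id=1)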
| 33.483491
| 134
| 0.700007
|
7fc6f3d4b636c25ca81e89df2af542051c764def
| 54,986
|
py
|
Python
|
rapid7vmconsole/models/submission.py
|
pdeardorff-r7/vm-console-client-python
|
4bee83aa4db2b328ba6894cebac55743f922ce5a
|
[
"MIT"
] | null | null | null |
rapid7vmconsole/models/submission.py
|
pdeardorff-r7/vm-console-client-python
|
4bee83aa4db2b328ba6894cebac55743f922ce5a
|
[
"MIT"
] | null | null | null |
rapid7vmconsole/models/submission.py
|
pdeardorff-r7/vm-console-client-python
|
4bee83aa4db2b328ba6894cebac55743f922ce5a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. | Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. 
| `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. #### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is like` ` not like` | | `container-status` | `is` ` is not` | | `containers` | `are` | | `criticality-tag` | `is` ` is not` ` is greater than` ` is less than` ` is applied` ` is not applied` | | `custom-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `cve` | `is` ` is not` ` contains` ` does not contain` | | `cvss-access-complexity` | `is` ` is not` | | `cvss-authentication-required` | `is` ` is not` | | `cvss-access-vector` | `is` ` is not` | | `cvss-availability-impact` | `is` ` is not` | | `cvss-confidentiality-impact` | `is` ` is not` | | `cvss-integrity-impact` | `is` ` is not` | | `cvss-v3-confidentiality-impact` | `is` ` is not` | | `cvss-v3-integrity-impact` | `is` ` is not` | | `cvss-v3-availability-impact` | `is` ` is not` | | `cvss-v3-attack-vector` | `is` ` is not` | | `cvss-v3-attack-complexity` | `is` ` is not` | | `cvss-v3-user-interaction` | `is` ` is not` | | `cvss-v3-privileges-required` | `is` ` is not` | | `host-name` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is empty` ` is not empty` ` is like` ` not like` | | `host-type` | `in` ` not in` | | `ip-address` | `is` ` is not` ` in range` ` not in range` ` is like` ` not like` | | `ip-address-type` | `in` ` not in` | | `last-scan-date` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `location-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `mobile-device-last-sync-time` | `is-within-the-last` ` is earlier than` | | `open-ports` | `is` ` is not` ` in range` | | `operating-system` | `contains` ` does not contain` ` is empty` ` is not empty` | | `owner-tag` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` ` is applied` ` is not applied` | | `pci-compliance` | `is` | | `risk-score` | `is` ` is not` ` in range` ` greater than` ` less than` | | `service-name` | `contains` ` does not contain` | | `site-id` | `in` ` not in` | | `software` | `contains` ` does not contain` | | `vAsset-cluster` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | 
`vAsset-datacenter` | `is` ` is not` | | `vAsset-host-name` | `is` ` is not` ` contains` ` does not contain` ` starts with` | | `vAsset-power-state` | `in` ` not in` | | `vAsset-resource-pool-path` | `contains` ` does not contain` | | `vulnerability-assessed` | `is-on-or-before` ` is on or after` ` is between` ` is earlier than` ` is within the last` | | `vulnerability-category` | `is` ` is not` ` starts with` ` ends with` ` contains` ` does not contain` | | `vulnerability-cvss-v3-score` | `is` ` is not` | | `vulnerability-cvss-score` | `is` ` is not` ` in range` ` is greater than` ` is less than` | | `vulnerability-exposures` | `includes` ` does not include` | | `vulnerability-title` | `contains` ` does not contain` ` is` ` is not` ` starts with` ` ends with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|-----------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `numeric` | `numeric` | | `is-earlier-than` | `numeric` | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `numeric` | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: support@rapid7.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from rapid7vmconsole.models.link import Link # noqa: F401,E501
class Submission(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'comment': 'str',
'date': 'str',
'links': 'list[Link]',
'name': 'str',
'reason': 'str',
'user': 'int'
}
attribute_map = {
'comment': 'comment',
'date': 'date',
'links': 'links',
'name': 'name',
'reason': 'reason',
'user': 'user'
}
def __init__(self, comment=None, date=None, links=None, name=None, reason=None, user=None): # noqa: E501
"""Submission - a model defined in Swagger""" # noqa: E501
self._comment = None
self._date = None
self._links = None
self._name = None
self._reason = None
self._user = None
self.discriminator = None
if comment is not None:
self.comment = comment
if date is not None:
self.date = date
if links is not None:
self.links = links
if name is not None:
self.name = name
if reason is not None:
self.reason = reason
if user is not None:
self.user = user
@property
def comment(self):
"""Gets the comment of this Submission. # noqa: E501
A comment from the submitter as to why the exception was submitted. # noqa: E501
:return: The comment of this Submission. # noqa: E501
:rtype: str
"""
return self._comment
@comment.setter
def comment(self, comment):
"""Sets the comment of this Submission.
A comment from the submitter as to why the exception was submitted. # noqa: E501
:param comment: The comment of this Submission. # noqa: E501
:type: str
"""
self._comment = comment
@property
def date(self):
"""Gets the date of this Submission. # noqa: E501
The date and time the vulnerability exception was submitted. # noqa: E501
:return: The date of this Submission. # noqa: E501
:rtype: str
"""
return self._date
@date.setter
def date(self, date):
"""Sets the date of this Submission.
The date and time the vulnerability exception was submitted. # noqa: E501
:param date: The date of this Submission. # noqa: E501
:type: str
"""
self._date = date
@property
def links(self):
"""Gets the links of this Submission. # noqa: E501
:return: The links of this Submission. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this Submission.
:param links: The links of this Submission. # noqa: E501
:type: list[Link]
"""
self._links = links
@property
def name(self):
"""Gets the name of this Submission. # noqa: E501
The login name of the user that submitted the vulnerability exception. # noqa: E501
:return: The name of this Submission. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Submission.
The login name of the user that submitted the vulnerability exception. # noqa: E501
:param name: The name of this Submission. # noqa: E501
:type: str
"""
self._name = name
@property
def reason(self):
"""Gets the reason of this Submission. # noqa: E501
The reason the vulnerability exception was submitted. One of: `\"False Positive\"`, `\"Compensating Control\"`, `\"Acceptable Use\"`, `\"Acceptable Risk\"`, `\"Other\"` # noqa: E501
:return: The reason of this Submission. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this Submission.
The reason the vulnerability exception was submitted. One of: `\"False Positive\"`, `\"Compensating Control\"`, `\"Acceptable Use\"`, `\"Acceptable Risk\"`, `\"Other\"` # noqa: E501
:param reason: The reason of this Submission. # noqa: E501
:type: str
"""
self._reason = reason
@property
def user(self):
"""Gets the user of this Submission. # noqa: E501
The identifier of the user that submitted the vulnerability exception. # noqa: E501
:return: The user of this Submission. # noqa: E501
:rtype: int
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this Submission.
The identifier of the user that submitted the vulnerability exception. # noqa: E501
:param user: The user of this Submission. # noqa: E501
:type: int
"""
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Submission):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
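# Illustrative usage sketch (not part of the generated client): the attribute
# names mirror swagger_types above and the reason string is one of the values
# enumerated in the property docstrings; the concrete values are made up.
if __name__ == "__main__":  # pragma: no cover
    _example = Submission(
        comment="Mitigated by host-based firewall rules",
        reason="Compensating Control",
        user=42,
    )
    print(_example.to_dict())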
| 215.631373
| 48045
| 0.495872
|
4df505d10a1bb2fe4bb5d026260be26a9766ad29
| 8,544
|
py
|
Python
|
raiden/tests/unit/test_wal.py
|
ezdac/raiden
|
d7504996e6738b55d5a9dcf9a36ef66797f6f326
|
[
"MIT"
] | null | null | null |
raiden/tests/unit/test_wal.py
|
ezdac/raiden
|
d7504996e6738b55d5a9dcf9a36ef66797f6f326
|
[
"MIT"
] | 60
|
2021-06-14T08:32:39.000Z
|
2022-03-31T23:45:35.000Z
|
raiden/tests/unit/test_wal.py
|
ezdac/raiden
|
d7504996e6738b55d5a9dcf9a36ef66797f6f326
|
[
"MIT"
] | null | null | null |
import os
import random
import sqlite3
from dataclasses import dataclass, field
from datetime import datetime
import pytest
import ulid
from raiden.constants import RAIDEN_DB_VERSION
from raiden.exceptions import InvalidDBData
from raiden.storage.serialization import JSONSerializer
from raiden.storage.sqlite import (
HIGH_STATECHANGE_ULID,
LOW_STATECHANGE_ULID,
RANGE_ALL_STATE_CHANGES,
SerializedSQLiteStorage,
StateChangeID,
)
from raiden.storage.utils import TimestampedEvent
from raiden.storage.wal import WriteAheadLog, restore_state
from raiden.tests.utils.factories import (
make_address,
make_block_hash,
make_canonical_identifier,
make_locksroot,
make_token_network_registry_address,
make_transaction_hash,
)
from raiden.transfer.architecture import State, StateChange, TransitionResult
from raiden.transfer.events import EventPaymentSentFailed
from raiden.transfer.state_change import Block, ContractReceiveChannelBatchUnlock
from raiden.utils.typing import BlockGasLimit, BlockNumber, Callable, List, TokenAmount
class Empty(State):
pass
def state_transition_noop(state, state_change): # pylint: disable=unused-argument
return TransitionResult(Empty(), list())
@dataclass
class AccState(State):
state_changes: List[Block] = field(default_factory=list)
def state_transtion_acc(state, state_change):
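    # Test transition function: record every dispatched state change on the state
    # so tests can assert the replayed order after a restore.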
state = state
state.state_changes.append(state_change)
return TransitionResult(state, list())
def new_wal(state_transition: Callable, state: State = None) -> WriteAheadLog:
serializer = JSONSerializer()
state = state or Empty()
storage = SerializedSQLiteStorage(":memory:", serializer)
storage.write_first_state_snapshot(state)
return WriteAheadLog(state, storage, state_transition)
def dispatch(wal: WriteAheadLog, state_changes: List[StateChange]):
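    # Test helper: apply the given state changes to the WAL in a single atomic dispatch.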
with wal.process_state_change_atomically() as dispatcher:
for state_change in state_changes:
dispatcher.dispatch(state_change)
def test_initial_state_snapshotting():
serializer = JSONSerializer()
state = Empty()
storage = SerializedSQLiteStorage(":memory:", serializer)
assert not storage.database.has_snapshot()
assert not storage.get_snapshot_before_state_change(LOW_STATECHANGE_ULID)
storage.write_first_state_snapshot(state)
assert storage.database.has_snapshot()
assert storage.get_snapshot_before_state_change(LOW_STATECHANGE_ULID)
def test_connect_to_corrupt_db(tmpdir):
serializer = JSONSerializer
dbpath = os.path.join(tmpdir, "log.db")
with open(dbpath, "wb") as f:
f.write(os.urandom(256))
with pytest.raises(InvalidDBData):
SerializedSQLiteStorage(dbpath, serializer)
def test_wal_has_version():
wal = new_wal(state_transition_noop)
assert wal.version == RAIDEN_DB_VERSION
# Let's make sure that nobody makes a setter for this attribute
with pytest.raises(AttributeError):
wal.version = 5
def test_write_read_log() -> None:
wal = new_wal(state_transition_noop)
block_number = BlockNumber(1337)
block_hash = make_block_hash()
block = Block(block_number=block_number, gas_limit=BlockGasLimit(1), block_hash=block_hash)
unlocked_amount = TokenAmount(10)
returned_amount = TokenAmount(5)
participant = make_address()
partner = make_address()
locksroot = make_locksroot()
contract_receive_unlock = ContractReceiveChannelBatchUnlock(
transaction_hash=make_transaction_hash(),
canonical_identifier=make_canonical_identifier(token_network_address=make_address()),
receiver=participant,
sender=partner,
locksroot=locksroot,
unlocked_amount=unlocked_amount,
returned_tokens=returned_amount,
block_number=block_number,
block_hash=block_hash,
)
state_changes1 = wal.storage.get_statechanges_by_range(RANGE_ALL_STATE_CHANGES)
count1 = len(state_changes1)
dispatch(wal, [block])
state_changes2 = wal.storage.get_statechanges_by_range(RANGE_ALL_STATE_CHANGES)
count2 = len(state_changes2)
assert count1 + 1 == count2
dispatch(wal, [contract_receive_unlock])
state_changes3 = wal.storage.get_statechanges_by_range(RANGE_ALL_STATE_CHANGES)
count3 = len(state_changes3)
assert count2 + 1 == count3
result1, result2 = state_changes3[-2:]
assert isinstance(result1, Block)
assert result1.block_number == block_number
assert isinstance(result2, ContractReceiveChannelBatchUnlock)
assert result2.receiver == participant
assert result2.sender == partner
assert result2.locksroot == locksroot
assert result2.unlocked_amount == unlocked_amount
assert result2.returned_tokens == returned_amount
# Make sure state snapshot can only go for corresponding state change ids
with pytest.raises(sqlite3.IntegrityError):
wal.storage.write_state_snapshot(State(), StateChangeID(ulid.new()), 1)
def test_timestamped_event():
event = EventPaymentSentFailed(
make_token_network_registry_address(), make_address(), 1, make_address(), "whatever"
)
log_time = datetime.fromisoformat("2018-09-07T20:02:35.000")
timestamped = TimestampedEvent(event, log_time)
assert timestamped.log_time == log_time
assert isinstance(timestamped.event, EventPaymentSentFailed)
assert timestamped.reason == timestamped.event.reason == "whatever"
assert timestamped.identifier == timestamped.event.identifier == 1
def test_write_read_events():
wal = new_wal(state_transition_noop)
event = EventPaymentSentFailed(
make_token_network_registry_address(), make_address(), 1, make_address(), "whatever"
)
with pytest.raises(sqlite3.IntegrityError):
unexisting_state_change_id = random.getrandbits(16 * 8).to_bytes(16, "big")
wal.storage.write_events([(unexisting_state_change_id, event)])
previous_events = wal.storage.get_events_with_timestamps()
state_change_ids = wal.storage.write_state_changes([StateChange()])
wal.storage.write_events([(state_change_ids[0], event)])
new_events = wal.storage.get_events_with_timestamps()
assert len(previous_events) + 1 == len(new_events)
latest_event = new_events[-1]
assert isinstance(latest_event, TimestampedEvent)
assert isinstance(latest_event.event, EventPaymentSentFailed)
def test_restore_without_snapshot():
wal = new_wal(state_transition_noop, AccState())
block1 = Block(block_number=5, gas_limit=1, block_hash=make_transaction_hash())
dispatch(wal, [block1])
block2 = Block(block_number=7, gas_limit=1, block_hash=make_transaction_hash())
dispatch(wal, [block2])
block3 = Block(block_number=8, gas_limit=1, block_hash=make_transaction_hash())
dispatch(wal, [block3])
aggregate = restore_state(
transition_function=state_transtion_acc,
storage=wal.storage,
state_change_identifier=HIGH_STATECHANGE_ULID,
node_address=make_address(),
)
assert aggregate.state_changes == [block1, block2, block3]
def test_restore_without_snapshot_in_batches():
wal = new_wal(state_transition_noop, AccState())
block1 = Block(block_number=5, gas_limit=1, block_hash=make_transaction_hash())
block2 = Block(block_number=7, gas_limit=1, block_hash=make_transaction_hash())
block3 = Block(block_number=8, gas_limit=1, block_hash=make_transaction_hash())
dispatch(wal, [block1, block2, block3])
aggregate = restore_state(
transition_function=state_transtion_acc,
storage=wal.storage,
state_change_identifier=HIGH_STATECHANGE_ULID,
node_address=make_address(),
)
assert aggregate.state_changes == [block1, block2, block3]
def test_get_snapshot_before_state_change() -> None:
wal = new_wal(state_transtion_acc, AccState())
block1 = Block(
block_number=BlockNumber(5), gas_limit=BlockGasLimit(1), block_hash=make_block_hash()
)
dispatch(wal, [block1])
wal.snapshot(1)
block2 = Block(
block_number=BlockNumber(7), gas_limit=BlockGasLimit(1), block_hash=make_block_hash()
)
dispatch(wal, [block2])
wal.snapshot(2)
block3 = Block(
block_number=BlockNumber(8), gas_limit=BlockGasLimit(1), block_hash=make_block_hash()
)
dispatch(wal, [block3])
wal.snapshot(3)
snapshot = wal.storage.get_snapshot_before_state_change(HIGH_STATECHANGE_ULID)
assert snapshot and snapshot.data == AccState([block1, block2, block3])
| 33.375
| 95
| 0.750585
|
02fa3b7b19b31e82979d0b053a8f7788849dcee0
| 2,522
|
py
|
Python
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/web_infrastructure/ansible_tower/tower_job_cancel.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 17
|
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
ansible/ansible/modules/web_infrastructure/ansible_tower/tower_job_cancel.py
|
SergeyCherepanov/ansible
|
875711cd2fd6b783c812241c2ed7a954bf6f670f
|
[
"MIT"
] | 9
|
2017-06-25T03:31:52.000Z
|
2021-05-17T23:43:12.000Z
|
ansible/ansible/modules/web_infrastructure/ansible_tower/tower_job_cancel.py
|
SergeyCherepanov/ansible
|
875711cd2fd6b783c812241c2ed7a954bf6f670f
|
[
"MIT"
] | 3
|
2018-05-26T21:31:22.000Z
|
2019-09-28T17:00:45.000Z
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_job_cancel
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: Cancel an Ansible Tower Job.
description:
- Cancel Ansible Tower jobs. See
U(https://www.ansible.com/tower) for an overview.
options:
job_id:
description:
- ID of the job to cancel
required: True
fail_if_not_running:
description:
- Fail loudly if the I(job_id) does not reference a running job.
default: False
type: bool
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Cancel job
tower_job_cancel:
job_id: job.id
'''
RETURN = '''
id:
description: job id requesting to cancel
returned: success
type: int
sample: 94
status:
description: status of the cancel request
returned: success
type: str
sample: canceled
'''
from ansible.module_utils.ansible_tower import TowerModule, tower_auth_config, tower_check_mode
try:
import tower_cli
import tower_cli.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
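# NOTE: the ImportError above is swallowed on purpose; the shared TowerModule
# helper is expected to report the missing tower_cli dependency at runtime
# (an assumption about the common ansible_tower module_utils, not verified here).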
def main():
argument_spec = dict(
job_id=dict(type='int', required=True),
fail_if_not_running=dict(type='bool', default=False),
)
module = TowerModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
job_id = module.params.get('job_id')
json_output = {}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
job = tower_cli.get_resource('job')
params = module.params.copy()
try:
result = job.cancel(job_id, **params)
json_output['id'] = job_id
except (exc.ConnectionError, exc.BadRequest, exc.TowerCLIError, exc.AuthError) as excinfo:
module.fail_json(msg='Unable to cancel job_id/{0}: {1}'.format(job_id, excinfo), changed=False)
json_output['changed'] = result['changed']
json_output['status'] = result['status']
module.exit_json(**json_output)
if __name__ == '__main__':
main()
| 24.72549
| 107
| 0.664552
|
9775abbd4cb9b4b4efa9e0053771c6c695702301
| 1,023
|
py
|
Python
|
neurst_pt/layers/__init__.py
|
ishine/neurst
|
2ba322393fcfed4261b33f4a657e12bbe321baaa
|
[
"Apache-2.0"
] | 208
|
2020-11-12T03:56:41.000Z
|
2022-03-27T07:01:27.000Z
|
neurst_pt/layers/__init__.py
|
ishine/neurst
|
2ba322393fcfed4261b33f4a657e12bbe321baaa
|
[
"Apache-2.0"
] | 16
|
2021-02-20T07:57:03.000Z
|
2022-01-27T07:36:31.000Z
|
neurst_pt/layers/__init__.py
|
ishine/neurst
|
2ba322393fcfed4261b33f4a657e12bbe321baaa
|
[
"Apache-2.0"
] | 33
|
2020-11-12T04:44:50.000Z
|
2022-03-23T09:22:29.000Z
|
import torch.nn as nn
from neurst.utils.registry import setup_registry
from neurst_pt.layers.attentions.multi_head_attention import MultiHeadAttention, MultiHeadSelfAttention
from neurst_pt.layers.common_layers import PrePostProcessingWrapper, TransformerFFN
build_base_layer, register_base_layer = setup_registry("base_layer", base_class=nn.Module,
verbose_creation=False, backend="pt")
register_base_layer(MultiHeadSelfAttention)
register_base_layer(MultiHeadAttention)
register_base_layer(TransformerFFN)
def build_transformer_component(layer_args,
norm_shape,
dropout_rate,
pre_norm=True,
epsilon=1e-6):
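    # Instantiate the registered base layer (e.g. MultiHeadSelfAttention,
    # MultiHeadAttention or TransformerFFN) and wrap it with the shared
    # pre/post processing (layer normalization and dropout).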
base_layer = build_base_layer(layer_args)
return PrePostProcessingWrapper(
layer=base_layer,
norm_shape=norm_shape,
dropout_rate=dropout_rate,
epsilon=epsilon,
pre_norm=pre_norm)
| 37.888889
| 103
| 0.672532
|
be607bfadcd233e30f6625146a16312c522c6f77
| 13,105
|
py
|
Python
|
maya/plug-ins/caramel.py
|
cedricB/circeCharacterWorksTools
|
cf7d793239c291a8a8aec5c60ede1250415581d9
|
[
"MIT"
] | 34
|
2015-03-13T08:40:02.000Z
|
2022-03-31T12:30:48.000Z
|
maya/plug-ins/caramel.py
|
cedricB/circeCharacterWorksTools
|
cf7d793239c291a8a8aec5c60ede1250415581d9
|
[
"MIT"
] | null | null | null |
maya/plug-ins/caramel.py
|
cedricB/circeCharacterWorksTools
|
cf7d793239c291a8a8aec5c60ede1250415581d9
|
[
"MIT"
] | 9
|
2015-03-13T08:40:04.000Z
|
2020-11-06T09:15:45.000Z
|
'''
########################################################################
# #
# caramel.py #
# #
# Email: cedricbazillou@gmail.com #
# blog: http://circecharacterworks.wordpress.com/ #
########################################################################
L I C E N S E:
Copyright (c) 2014 Cedric BAZILLOU All rights reserved.
Permission is hereby granted
-to modify the file
-distribute
-share
-do derivative work
The above copyright notice and this permission notice shall be included in all copies of the Software
and is subject to the following conditions:
    - The user uses the same type of license
- credit the original author
- does not claim patent nor copyright from the original work
P U R P O S E:
Trigger an array of pose space helper/blendshape
I N S T A L L A T I O N:
Copy the "caramel.py" to your Maya plugins directory
Windows: Program Files\Autodesk\MayaXXXX\bin\plug-ins\
or better in your maya user directory:
    %MAYA_APP_DIR%\%mayaNumber%\scripts\plug-ins\( create one if it does not exist )
'''
import math, sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
kPluginNodeName = "caramel"
kPluginNodeId = OpenMaya.MTypeId(0xF1C473)
kPluginVersion = "1.286"
kPluginAuthor = "Bazillou2012"
outFn = OpenMaya.MFnNurbsSurface()
surfDataFn = OpenMaya.MFnNurbsSurfaceData()
class caramel(OpenMayaMPx.MPxNode):
def __init__(self):
OpenMayaMPx.MPxNode.__init__(self)
def check_curve_surface_plugs(self,argList):
actionData = argList[0]
inputNurbsConnected = False
if actionData.isNull() == False :
inputNurbsConnected = True
return inputNurbsConnected
def compute_knotData(self,input_Hdle,size_Value ):
knotMatrixList = []
knotSmoothList = OpenMaya.MDoubleArray()
hdleSizeList = []
aimMatB = 0
knotCount = input_Hdle.elementCount()
for k in range(knotCount):
knotMatrix_value = input_Hdle.inputValue().asMatrix()
knotMatrixList.append(knotMatrix_value )
if k != knotCount-1:
input_Hdle.next()
return knotMatrixList
def computKnotList (self,degreeN ,vertLen):
        #degree N with M spans
#The number of knots required for a curve is M + 2N - 1
path_knots = OpenMaya.MDoubleArray()
spansM = float(vertLen-degreeN)
ispans = vertLen-degreeN
for k in range(degreeN-1):
path_knots.append(0.0)
for k in range(ispans +1) :
path_knots.append(k)
for k in range(degreeN-1):
path_knots.append(spansM)
return path_knots
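        # Example (illustrative): degreeN=3 with vertLen=7 CVs gives spansM=4.0 and
        # the knot list [0, 0, 0, 1, 2, 3, 4, 4, 4] -- M + 2N - 1 = 9 knots.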
def compute(self,Plug,Data):
if Plug == self.output or Plug == self.outputCurve or Plug == self.profil:
#Layout necessary output / input handle to gather data
input_Hdle = Data.inputArrayValue(self.input)
knotCount = input_Hdle.elementCount()
if knotCount > 0:
output_handle = Data.outputValue(self.output)
size_Value = Data.inputValue(self.size).asDouble()
width_Value = Data.inputValue(self.width).asDouble()
outputCurveDegree_Value = Data.inputValue(self.curveDegree).asInt()
orientHandle_Value = Data.inputValue(self.orientHandle).asBool()
knotMatrixList = self.compute_knotData(input_Hdle,size_Value )
neutralPoint = OpenMaya.MPoint(0,0,0)
PointA = OpenMaya.MPoint(0,0,width_Value)
PointB = OpenMaya.MPoint(0,0,-width_Value)
pointListA = OpenMaya.MPointArray()
pointListB = OpenMaya.MPointArray()
if len(knotMatrixList) > 1:
pointListA.append(PointA*knotMatrixList[0])
pointListB.append(PointB*knotMatrixList[0])
if len(knotMatrixList) > 2:
for k in range(1,len(knotMatrixList)):
pointListA.append(PointA*knotMatrixList[k])
pointListB.append(PointB*knotMatrixList[k])
knotList = self.computKnotList(outputCurveDegree_Value,pointListA.length())
newOutputObj = surfDataFn.create()
uKnotSequences = OpenMaya.MDoubleArray()
vKnotSequences = OpenMaya.MDoubleArray()
uKnotSequences.append(0.0)
uKnotSequences.append(1.0)
controlVertices = OpenMaya.MPointArray()
for k in range(pointListB.length()):
controlVertices.append(pointListB[k])
for k in range(pointListA.length()):
controlVertices.append(pointListA[k])
if Plug == self.output :
for k in range(knotList.length()):
vKnotSequences.append(knotList[k])
outFn.create ( controlVertices, uKnotSequences,vKnotSequences, 1, outputCurveDegree_Value,
OpenMaya.MFnNurbsSurface.kOpen , OpenMaya.MFnNurbsSurface.kOpen ,True, newOutputObj)
output_handle.setMObject(newOutputObj)
output_handle.setClean()
if Plug == self.outputCurve:
output_Handle = Data.outputValue(self.outputCurve)
outputCurveFn = OpenMaya.MFnNurbsCurve()
crvDatStock = OpenMaya.MFnNurbsCurveData()
crbOBJ = crvDatStock.create()
outputCurveFn = OpenMaya.MFnNurbsCurve()
cv_pointList = OpenMaya.MPointArray(pointListA.length())
for k in range(pointListA.length()):
cv_pointList.set(pointListA[k] + (pointListB[k] - pointListA[k])*0.5, k)
outputCurveFn.create( cv_pointList , knotList, outputCurveDegree_Value,OpenMaya.MFnNurbsCurve.kOpen, False, False, crbOBJ )
output_Handle.setMObject(crbOBJ)
output_Handle.setClean()
#------------------------------------------------------------------------------------------------------
if Plug == self.profil:
aimMat = OpenMaya.MMatrix()
if orientHandle_Value == True:
neutralPnt = OpenMaya.MPoint()
pointA = neutralPnt*knotMatrixList[0]
pointB = neutralPnt*knotMatrixList[1]
offsetVecB = pointB*knotMatrixList[0].inverse() - pointA*knotMatrixList[0].inverse()
theTargetVector = offsetVecB.normal()
referenceVector = OpenMaya.MVector(1,0,0)
aimQuaternion = referenceVector.rotateTo(theTargetVector)
neutralQuat = OpenMaya.MQuaternion()
aimMat = aimQuaternion.asMatrix() * knotMatrixList[0]
output_Handle = Data.outputValue(self.profil)
outputCurveFn = OpenMaya.MFnNurbsCurve()
crvDatStock = OpenMaya.MFnNurbsCurveData()
crbOBJ = crvDatStock.create()
outputCurveFn = OpenMaya.MFnNurbsCurve()
cv_pointList = OpenMaya.MPointArray()
cv_pointList.append(OpenMaya.MPoint(0,0,width_Value*-0.5)*aimMat )
cv_pointList.append(OpenMaya.MPoint(0,0,width_Value*0.5)*aimMat )
outputCurveFn.createWithEditPoints(cv_pointList , 1,OpenMaya.MFnNurbsCurve.kOpen, False, False, True, crbOBJ )
output_Handle.setMObject(crbOBJ)
output_Handle.setClean()
else:
return
def nodeCreator():
return OpenMayaMPx.asMPxPtr(caramel())
def nodeInitializer():
typed_Attr = OpenMaya.MFnTypedAttribute()
nAttr = OpenMaya.MFnNumericAttribute()
cAttr = OpenMaya.MFnCompoundAttribute()
matAttr = OpenMaya.MFnMatrixAttribute()
#---------------------------------------------------------------------------- Input Attributes
caramel.orientHandle = nAttr.create( "orientHandle", "hDle", OpenMaya.MFnNumericData.kBoolean,0)
nAttr.setWritable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setKeyable(1)
nAttr.setHidden(0)
caramel.addAttribute(caramel.orientHandle)
caramel.input = matAttr.create("input", "in",OpenMaya.MFnMatrixAttribute.kDouble)
matAttr.setArray(1)
matAttr.setStorable(0)
matAttr.setKeyable(0)
matAttr.setHidden(1)
caramel.addAttribute(caramel.input)
caramel.width = nAttr.create("width","wdt", OpenMaya.MFnNumericData.kDouble,0.2)
nAttr.setWritable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setKeyable(1)
nAttr.setHidden(0)
nAttr.setMin(0.001)
nAttr.setSoftMax(20.0)
caramel.addAttribute(caramel.width)
caramel.curveDegree = nAttr.create("curveDegree","cDg", OpenMaya.MFnNumericData.kInt,2)
nAttr.setWritable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setKeyable(1)
nAttr.setHidden(0)
nAttr.setMin(1)
nAttr.setMax(3)
caramel.addAttribute(caramel.curveDegree)
caramel.size = nAttr.create("size","sz", OpenMaya.MFnNumericData.kDouble,1.0)
nAttr.setWritable(1)
nAttr.setStorable(1)
nAttr.setReadable(1)
nAttr.setKeyable(1)
nAttr.setHidden(0)
nAttr.setMin(0.001)
nAttr.setSoftMax(2.0)
caramel.addAttribute(caramel.size)
#---------------------------------------------------------------------------- Output Attributes
caramel.output = typed_Attr.create( "output", "out", OpenMaya.MFnData.kNurbsSurface)
typed_Attr.setStorable(0)
typed_Attr.setKeyable(0)
typed_Attr.setHidden(True)
caramel.addAttribute(caramel.output)
caramel.attributeAffects( caramel.input , caramel.output )
caramel.attributeAffects( caramel.width , caramel.output )
caramel.attributeAffects( caramel.size , caramel.output )
caramel.attributeAffects( caramel.curveDegree , caramel.output )
caramel.attributeAffects( caramel.orientHandle , caramel.output )
#---------------------------------------------------------------------------- Output Attributes
caramel.outputCurve = typed_Attr.create( "outputCurve", "outCrv", OpenMaya.MFnData.kNurbsCurve)
typed_Attr.setStorable(0)
typed_Attr.setKeyable(0)
typed_Attr.setHidden(True)
caramel.addAttribute(caramel.outputCurve)
caramel.attributeAffects( caramel.input , caramel.outputCurve )
caramel.attributeAffects( caramel.width , caramel.outputCurve )
caramel.attributeAffects( caramel.size , caramel.outputCurve )
caramel.attributeAffects( caramel.curveDegree , caramel.outputCurve )
caramel.attributeAffects( caramel.orientHandle , caramel.outputCurve )
caramel.profil = typed_Attr.create( "profil", "prf", OpenMaya.MFnNurbsCurveData.kNurbsCurve )
typed_Attr.setStorable(0)
typed_Attr.setKeyable(0)
typed_Attr.setHidden(True)
caramel.addAttribute( caramel.profil )
caramel.attributeAffects( caramel.input , caramel.profil )
caramel.attributeAffects( caramel.width , caramel.profil )
caramel.attributeAffects( caramel.size , caramel.profil )
caramel.attributeAffects( caramel.curveDegree , caramel.profil )
caramel.attributeAffects( caramel.orientHandle , caramel.profil )
def initializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject, kPluginAuthor , kPluginVersion , "Any")
try:
mplugin.registerNode( kPluginNodeName, kPluginNodeId, nodeCreator, nodeInitializer, OpenMayaMPx.MPxNode.kDependNode)
except:
sys.stderr.write( "Failed to register node: %s" % kPluginNodeName ); raise
def uninitializePlugin(mobject):
mplugin = OpenMayaMPx.MFnPlugin(mobject)
try:
mplugin.deregisterNode( kPluginNodeId )
except:
sys.stderr.write( "Failed to deregister node: %s" % kPluginNodeName ); raise
| 43.538206
| 144
| 0.570011
|
c8a54a18accd2969ff404dcaadad6fc485edb7ce
| 12,962
|
py
|
Python
|
run_projector.py
|
johndpope/GenEdi
|
9ff8e2f1984aa04c7f634be388db8bb85ee57b35
|
[
"MIT"
] | 4
|
2020-12-26T20:03:47.000Z
|
2022-02-11T12:04:50.000Z
|
run_projector.py
|
GreenLimeSia/Generating-and-Editing
|
45fbe1d72bf11b7df9ec22489b6b6b1c503a87a2
|
[
"MIT"
] | 3
|
2020-12-26T21:02:00.000Z
|
2021-05-27T12:47:08.000Z
|
run_projector.py
|
GreenLimeSia/Generating-and-Editing
|
45fbe1d72bf11b7df9ec22489b6b6b1c503a87a2
|
[
"MIT"
] | 2
|
2020-12-26T20:03:52.000Z
|
2021-08-24T02:17:53.000Z
|
import os
import argparse
import numpy as np
import torch
import warnings
import stylegan2
from stylegan2 import utils
# ----------------------------------------------------------------------------
_description = """StyleGAN2 projector.
Run 'python %(prog)s <subcommand> --help' for subcommand help."""
# ----------------------------------------------------------------------------
_examples = """examples:
# Train a network or convert a pretrained one.
# Example of converting pretrained ffhq model:
python run_convert_from_tf --download ffhq-config-f --output G.pth D.pth Gs.pth
# Project generated images
python %(prog)s project_generated_images --network=Gs.pth --seeds=0,1,5
# Project real images
python %(prog)s project_real_images --network=Gs.pth --data-dir=path/to/image_folder
"""
# ----------------------------------------------------------------------------
def _add_shared_arguments(parser):
parser.add_argument(
'--network',
help='Network file path',
required=True,
metavar='FILE'
)
parser.add_argument(
'--num_steps',
type=int,
help='Number of steps to use for projection. ' + \
'Default: %(default)s',
default=1000,
metavar='VALUE'
)
parser.add_argument(
'--batch_size',
help='Batch size. Default: %(default)s',
type=int,
default=1,
metavar='VALUE'
)
parser.add_argument(
'--label',
help='Label to use for dlatent statistics gathering ' + \
'(should be integer index of class). Default: no label.',
type=int,
default=None,
metavar='CLASS_INDEX'
)
parser.add_argument(
'--initial_learning_rate',
help='Initial learning rate of projection. Default: %(default)s',
default=0.1,
type=float,
metavar='VALUE'
)
parser.add_argument(
'--initial_noise_factor',
help='Initial noise factor of projection. Default: %(default)s',
default=0.05,
type=float,
metavar='VALUE'
)
parser.add_argument(
'--lr_rampdown_length',
help='Learning rate rampdown length for projection. ' + \
'Should be in range [0, 1]. Default: %(default)s',
default=0.25,
type=float,
metavar='VALUE'
)
parser.add_argument(
'--lr_rampup_length',
help='Learning rate rampup length for projection. ' + \
'Should be in range [0, 1]. Default: %(default)s',
default=0.05,
type=float,
metavar='VALUE'
)
parser.add_argument(
'--noise_ramp_length',
        help='Noise ramp length for projection. ' + \
'Should be in range [0, 1]. Default: %(default)s',
default=0.75,
type=float,
metavar='VALUE'
)
parser.add_argument(
'--regularize_noise_weight',
help='The weight for noise regularization. Default: %(default)s',
default=1e5,
type=float,
metavar='VALUE'
)
parser.add_argument(
'--output',
help='Root directory for run results. Default: %(default)s',
type=str,
default='./results',
metavar='DIR'
)
parser.add_argument(
'--num_snapshots',
help='Number of snapshots. Default: %(default)s',
type=int,
default=5,
metavar='VALUE'
)
parser.add_argument(
'--pixel_min',
        help='Minimum of the value range of pixels in generated images. ' + \
'Default: %(default)s',
default=-1,
type=float,
metavar='VALUE'
)
parser.add_argument(
'--pixel_max',
help='Maximum of the value range of pixels in generated images. ' + \
'Default: %(default)s',
default=1,
type=float,
metavar='VALUE'
)
parser.add_argument(
'--gpu',
help='CUDA device indices (given as separate ' + \
'values if multiple, i.e. "--gpu 0 1"). Default: Use CPU',
type=int,
default=[],
nargs='*',
metavar='INDEX'
)
# ----------------------------------------------------------------------------
def get_arg_parser():
parser = argparse.ArgumentParser(
description=_description,
epilog=_examples,
formatter_class=argparse.RawDescriptionHelpFormatter
)
range_desc = 'NOTE: This is a single argument, where list ' + \
'elements are separated by "," and ranges are defined as "a-b". ' + \
'Only integers are allowed.'
subparsers = parser.add_subparsers(help='Sub-commands', dest='command')
project_generated_images_parser = subparsers.add_parser(
'project_generated_images', help='Project generated images')
project_generated_images_parser.add_argument(
'--seeds',
help='List of random seeds for generating images. ' + \
'Default: 66,230,389,1518. ' + range_desc,
type=utils.range_type,
default=[66, 230, 389, 1518],
metavar='RANGE'
)
project_generated_images_parser.add_argument(
'--truncation_psi',
help='Truncation psi. Default: %(default)s',
type=float,
default=1.0,
metavar='VALUE'
)
_add_shared_arguments(project_generated_images_parser)
project_real_images_parser = subparsers.add_parser(
'project_real_images', help='Project real images')
project_real_images_parser.add_argument(
'--data_dir',
help='Dataset root directory',
type=str,
required=True,
metavar='DIR'
)
project_real_images_parser.add_argument(
'--seed',
help='When there are more images available than ' + \
             'the number that is going to be projected, this ' + \
'seed is used for picking samples. Default: %(default)s',
type=int,
default=1234,
metavar='VALUE'
)
project_real_images_parser.add_argument(
'--num_images',
type=int,
help='Number of images to project. Default: %(default)s',
default=3,
metavar='VALUE'
)
_add_shared_arguments(project_real_images_parser)
return parser
# ----------------------------------------------------------------------------
def project_images(G, images, name_prefix, args):
device = torch.device(args.gpu[0] if args.gpu else 'cpu')
if device.index is not None:
torch.cuda.set_device(device.index)
if len(args.gpu) > 1:
warnings.warn(
'Multi GPU is not available for projection. ' + \
'Using device {}'.format(device)
)
G = utils.unwrap_module(G).to(device)
lpips_model = stylegan2.external_models.lpips.LPIPS_VGG16(
pixel_min=args.pixel_min, pixel_max=args.pixel_max)
proj = stylegan2.project.Projector(
G=G,
dlatent_avg_samples=10000,
dlatent_avg_label=args.label,
dlatent_device=device,
dlatent_batch_size=1024,
lpips_model=lpips_model,
lpips_size=256
)
for i in range(0, len(images), args.batch_size):
target = images[i: i + args.batch_size]
proj.start(
target=target,
num_steps=args.num_steps,
initial_learning_rate=args.initial_learning_rate,
initial_noise_factor=args.initial_noise_factor,
lr_rampdown_length=args.lr_rampdown_length,
lr_rampup_length=args.lr_rampup_length,
noise_ramp_length=args.noise_ramp_length,
regularize_noise_weight=args.regularize_noise_weight,
verbose=True,
verbose_prefix='Projecting image(s) {}/{}'.format(
                i + len(target), len(images))
)
snapshot_steps = set(
args.num_steps - np.linspace(
0, args.num_steps, args.num_snapshots, endpoint=False, dtype=int))
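        # e.g. num_steps=1000, num_snapshots=5 -> snapshot_steps == {200, 400, 600, 800, 1000}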
for k, image in enumerate(
utils.tensor_to_PIL(target, pixel_min=args.pixel_min, pixel_max=args.pixel_max)):
image.save(os.path.join(args.output, name_prefix[i + k] + 'target.png'))
for j in range(args.num_steps):
proj.step()
if j in snapshot_steps:
generated = utils.tensor_to_PIL(
proj.generate(), pixel_min=args.pixel_min, pixel_max=args.pixel_max)
for k, image in enumerate(generated):
image.save(os.path.join(
args.output, name_prefix[i + k] + 'step%04d.png' % (j + 1)))
# ----------------------------------------------------------------------------
def project_generated_images(G, args):
latent_size, label_size = G.latent_size, G.label_size
device = torch.device(args.gpu[0] if args.gpu else 'cpu')
if device.index is not None:
torch.cuda.set_device(device.index)
G.to(device)
if len(args.gpu) > 1:
warnings.warn(
'Noise can not be randomized based on the seed ' + \
'when using more than 1 GPU device. Noise will ' + \
'now be randomized from default random state.'
)
G.random_noise()
G = torch.nn.DataParallel(G, device_ids=args.gpu)
else:
noise_reference = G.static_noise()
def get_batch(seeds):
latents = []
labels = []
if len(args.gpu) <= 1:
noise_tensors = [[] for _ in noise_reference]
for seed in seeds:
rnd = np.random.RandomState(seed)
latents.append(torch.from_numpy(rnd.randn(latent_size)))
if len(args.gpu) <= 1:
for i, ref in enumerate(noise_reference):
noise_tensors[i].append(
torch.from_numpy(rnd.randn(*ref.size()[1:])))
if label_size:
labels.append(torch.tensor([rnd.randint(0, label_size)]))
latents = torch.stack(latents, dim=0).to(device=device, dtype=torch.float32)
if labels:
labels = torch.cat(labels, dim=0).to(device=device, dtype=torch.int64)
else:
labels = None
if len(args.gpu) <= 1:
noise_tensors = [
torch.stack(noise, dim=0).to(device=device, dtype=torch.float32)
for noise in noise_tensors
]
else:
noise_tensors = None
return latents, labels, noise_tensors
images = []
progress = utils.ProgressWriter(len(args.seeds))
progress.write('Generating images...', step=False)
for i in range(0, len(args.seeds), args.batch_size):
latents, labels, noise_tensors = get_batch(args.seeds[i: i + args.batch_size])
if noise_tensors is not None:
G.static_noise(noise_tensors=noise_tensors)
with torch.no_grad():
images.append(G(latents, labels=labels))
progress.step()
images = torch.cat(images, dim=0)
progress.write('Done!', step=False)
progress.close()
name_prefix = ['seed%04d-' % seed for seed in args.seeds]
project_images(G, images, name_prefix, args)
# ----------------------------------------------------------------------------
def project_real_images(G, args):
device = torch.device(args.gpu[0] if args.gpu else 'cpu')
print('Loading images from "%s"...' % args.data_dir)
dataset = utils.ImageFolder(
args.data_dir, pixel_min=args.pixel_min, pixel_max=args.pixel_max)
rnd = np.random.RandomState(args.seed)
indices = rnd.choice(
len(dataset), size=min(args.num_images, len(dataset)), replace=False)
images = []
for i in indices:
data = dataset[i]
if isinstance(data, (tuple, list)):
data = data[0]
images.append(data)
images = torch.stack(images).to(device)
name_prefix = ['image%04d-' % i for i in indices]
print('Done!')
project_images(G, images, name_prefix, args)
# ----------------------------------------------------------------------------
def main():
args = get_arg_parser().parse_args()
assert args.command, 'Missing subcommand.'
assert os.path.isdir(args.output) or not os.path.splitext(args.output)[-1], \
'--output argument should specify a directory, not a file.'
if not os.path.exists(args.output):
os.makedirs(args.output)
G = stylegan2.models.load(args.network)
assert isinstance(G, stylegan2.models.Generator), 'Model type has to be ' + \
'stylegan2.models.Generator. Found {}.'.format(type(G))
if args.command == 'project_generated_images':
project_generated_images(G, args)
elif args.command == 'project_real_images':
project_real_images(G, args)
else:
        raise TypeError('Unknown command {}'.format(args.command))
if __name__ == '__main__':
main()
| 31.926108
| 109
| 0.570051
|
49716194cf75b8f81619576046120682fdff9f49
| 1,907
|
py
|
Python
|
scripts/transfer_datasets.py
|
thsshz/keras-retinanet
|
55d353c5abac95214e38e921ebe72d77c2c21a19
|
[
"Apache-2.0"
] | null | null | null |
scripts/transfer_datasets.py
|
thsshz/keras-retinanet
|
55d353c5abac95214e38e921ebe72d77c2c21a19
|
[
"Apache-2.0"
] | null | null | null |
scripts/transfer_datasets.py
|
thsshz/keras-retinanet
|
55d353c5abac95214e38e921ebe72d77c2c21a19
|
[
"Apache-2.0"
] | null | null | null |
import csv
def transfer_annotations(input_name, output_name, pre_dir):
input_file = open(input_name, 'r')
input_lines = input_file.readlines()
output_lines = []
for input_line in input_lines:
input_line = input_line.rstrip()
annotations = input_line.split(' ')
image_name = pre_dir + annotations[0]
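        # Fields after the image name come in groups of five: label, x, y, width,
        # height; the width/height entries are converted to absolute x2/y2 below.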
for k in range(len(annotations)):
if k == 0:
continue
t = (k - 1) % 5
if t == 0:
label = annotations[k]
elif t == 1:
x1 = annotations[k]
elif t == 2:
y1 = annotations[k]
elif t == 3:
x2 = str(int(x1) + int(annotations[k]))
elif t == 4:
y2 = str(int(y1) + int(annotations[k]))
output_lines.append((image_name, x1, y1, x2, y2, label))
if k == 0:
output_lines.append((image_name, '', '', '', '', ''))
input_file.close()
with open(output_name, 'w', newline='') as csv_file:
csv_writer = csv.writer(csv_file)
for output_line in output_lines:
csv_writer.writerow(output_line)
def transfer_classes(output_name):
output_lines = [('1', 0), ('2', 1)]
with open(output_name, 'w', newline='') as csv_file:
csv_writer = csv.writer(csv_file)
for output_line in output_lines:
csv_writer.writerow(output_line)
def main():
transfer_annotations("../pedestrian_detection_trainval/train_annotations.txt", "../pedestrian_detection_trainval/train_annotations.csv", "train/")
transfer_classes("../pedestrian_detection_trainval/train_classes.csv")
transfer_annotations("../pedestrian_detection_trainval/val.txt", "../pedestrian_detection_trainval/val.csv", "val/")
transfer_classes("../pedestrian_detection_trainval/val_classes.csv")
if __name__ == '__main__':
main()
| 35.981132
| 150
| 0.597273
|
8a3226bbcac8409c631f3c2c320d4b3898af8a92
| 3,342
|
py
|
Python
|
iriusrisk-python-client-lib/iriusrisk_python_client_lib/models/unassing_users_group_request_body.py
|
iriusrisk/iriusrisk-python-client-lib
|
4912706cd1e5c0bc555dbc7da02fb64cbeab3b18
|
[
"Apache-2.0"
] | null | null | null |
iriusrisk-python-client-lib/iriusrisk_python_client_lib/models/unassing_users_group_request_body.py
|
iriusrisk/iriusrisk-python-client-lib
|
4912706cd1e5c0bc555dbc7da02fb64cbeab3b18
|
[
"Apache-2.0"
] | null | null | null |
iriusrisk-python-client-lib/iriusrisk_python_client_lib/models/unassing_users_group_request_body.py
|
iriusrisk/iriusrisk-python-client-lib
|
4912706cd1e5c0bc555dbc7da02fb64cbeab3b18
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
IriusRisk API
Products API # noqa: E501
OpenAPI spec version: 1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class UnassingUsersGroupRequestBody(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'usernames': 'list[str]'
}
attribute_map = {
'usernames': 'usernames'
}
def __init__(self, usernames=None): # noqa: E501
"""UnassingUsersGroupRequestBody - a model defined in Swagger""" # noqa: E501
self._usernames = None
self.discriminator = None
if usernames is not None:
self.usernames = usernames
@property
def usernames(self):
"""Gets the usernames of this UnassingUsersGroupRequestBody. # noqa: E501
List of usernames of users to be unassigned from a group # noqa: E501
:return: The usernames of this UnassingUsersGroupRequestBody. # noqa: E501
:rtype: list[str]
"""
return self._usernames
@usernames.setter
def usernames(self, usernames):
"""Sets the usernames of this UnassingUsersGroupRequestBody.
List of usernames of users to be unassigned from a group # noqa: E501
:param usernames: The usernames of this UnassingUsersGroupRequestBody. # noqa: E501
:type: list[str]
"""
self._usernames = usernames
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(UnassingUsersGroupRequestBody, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UnassingUsersGroupRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.322034
| 92
| 0.577199
|
fbb0c5d6125f2ede5f1e035be8a59d6f8cb42b81
| 2,334
|
py
|
Python
|
src/dependencys/comtypes/comtypes/test/test_outparam.py
|
Advik-B/Virus
|
51bc71b9e0ce4953053d43fc88477ab29957b2c3
|
[
"MIT"
] | 1
|
2021-11-24T07:32:20.000Z
|
2021-11-24T07:32:20.000Z
|
src/dependencys/comtypes/comtypes/test/test_outparam.py
|
Advik-B/Virus
|
51bc71b9e0ce4953053d43fc88477ab29957b2c3
|
[
"MIT"
] | null | null | null |
src/dependencys/comtypes/comtypes/test/test_outparam.py
|
Advik-B/Virus
|
51bc71b9e0ce4953053d43fc88477ab29957b2c3
|
[
"MIT"
] | null | null | null |
import sys
from ctypes import *
import unittest
import comtypes.test
comtypes.test.requires("devel")
from comtypes import BSTR, IUnknown, GUID, COMMETHOD, HRESULT
if sys.version_info >= (3, 0):
text_type = str
else:
text_type = unicode
class IMalloc(IUnknown):
_iid_ = GUID("{00000002-0000-0000-C000-000000000046}")
_methods_ = [
COMMETHOD([], c_void_p, "Alloc", ([], c_ulong, "cb")),
COMMETHOD([], c_void_p, "Realloc", ([], c_void_p, "pv"), ([], c_ulong, "cb")),
COMMETHOD([], None, "Free", ([], c_void_p, "py")),
COMMETHOD([], c_ulong, "GetSize", ([], c_void_p, "pv")),
COMMETHOD([], c_int, "DidAlloc", ([], c_void_p, "pv")),
COMMETHOD([], None, "HeapMinimize"), # 25
]
malloc = POINTER(IMalloc)()
oledll.ole32.CoGetMalloc(1, byref(malloc))
assert bool(malloc)
def from_outparm(self):
if not self:
return None
result = wstring_at(self)
if not malloc.DidAlloc(self):
raise ValueError("memory was NOT allocated by CoTaskMemAlloc")
windll.ole32.CoTaskMemFree(self)
return result
c_wchar_p.__ctypes_from_outparam__ = from_outparm
def comstring(text, typ=c_wchar_p):
text = text_type(text)
size = (len(text) + 1) * sizeof(c_wchar)
mem = windll.ole32.CoTaskMemAlloc(size)
print("malloc'd 0x%x, %d bytes" % (mem, size))
ptr = cast(mem, typ)
memmove(mem, text, size)
return ptr
class Test(unittest.TestCase):
def test_c_char(self):
## ptr = c_wchar_p("abc")
## self.failUnlessEqual(ptr.__ctypes_from_outparam__(),
## "abc")
## p = BSTR("foo bar spam")
x = comstring("Hello, World")
y = comstring("foo bar")
z = comstring("spam, spam, and spam")
## (x.__ctypes_from_outparam__(), x.__ctypes_from_outparam__())
print((x.__ctypes_from_outparam__(), None)) # x.__ctypes_from_outparam__())
## print comstring("Hello, World", c_wchar_p).__ctypes_from_outparam__()
## print comstring("Hello, World", c_wchar_p).__ctypes_from_outparam__()
## print comstring("Hello, World", c_wchar_p).__ctypes_from_outparam__()
## print comstring("Hello, World", c_wchar_p).__ctypes_from_outparam__()
if __name__ == "__main__":
unittest.main()
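A small, Windows-only sketch of the mechanism this test exercises: ctypes calls __ctypes_from_outparam__ on [out] parameter values, so the patch above lets a CoTaskMemAlloc'd LPWSTR be copied into a Python string and freed in one step. The helper names below (make_com_string, take_ownership) are illustrative, not comtypes APIs:

import sys

if sys.platform == "win32":
    from ctypes import (c_void_p, c_wchar, c_wchar_p, cast,
                        create_unicode_buffer, memmove, sizeof, windll,
                        wstring_at)

    # Declare the return type so the pointer is not truncated on 64-bit builds.
    windll.ole32.CoTaskMemAlloc.restype = c_void_p

    def make_com_string(text):
        # Illustrative helper: copy text into CoTaskMemAlloc'd memory, the way a
        # COM server would hand back an [out] LPWSTR the caller must free.
        size = (len(text) + 1) * sizeof(c_wchar)
        mem = windll.ole32.CoTaskMemAlloc(size)
        memmove(mem, create_unicode_buffer(text), size)
        return cast(mem, c_wchar_p)

    def take_ownership(ptr):
        # Copy the string out and release the COM allocation -- the same job the
        # patched __ctypes_from_outparam__ above performs for real [out] params.
        if not ptr:
            return None
        result = wstring_at(ptr)
        windll.ole32.CoTaskMemFree(ptr)
        return result

    assert take_ownership(make_com_string("Hello, World")) == "Hello, World"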
avg_line_length: 29.175 | max_line_length: 86 | alphanum_fraction: 0.622965
hexsha: 2aad92209b7dce39de8e5e85dd3ac99c2132eb4d | size: 3379 | ext: py | lang: Python
max_stars_repo_path: SoftLayer/CLI/virt/create_options.py | max_stars_repo_name: SLsthompson/softlayer-python | max_stars_repo_head_hexsha: 6dd6159f3e335e8d0e50dd3f2c8fa8cd62211a06
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: SoftLayer/CLI/virt/create_options.py | max_issues_repo_name: SLsthompson/softlayer-python | max_issues_repo_head_hexsha: 6dd6159f3e335e8d0e50dd3f2c8fa8cd62211a06
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: SoftLayer/CLI/virt/create_options.py | max_forks_repo_name: SLsthompson/softlayer-python | max_forks_repo_head_hexsha: 6dd6159f3e335e8d0e50dd3f2c8fa8cd62211a06
max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
"""Virtual server order options."""
# :license: MIT, see LICENSE for more details.
import os
import os.path
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
@click.command()
@environment.pass_env
def cli(env):
"""Virtual server order options."""
vsi = SoftLayer.VSManager(env.client)
result = vsi.get_create_options()
table = formatting.KeyValueTable(['name', 'value'])
table.align['name'] = 'r'
table.align['value'] = 'l'
# Datacenters
datacenters = [dc['template']['datacenter']['name']
for dc in result['datacenters']]
table.add_row(['datacenter',
formatting.listing(datacenters, separator='\n')])
# CPUs
standard_cpu = [x for x in result['processors']
if not x['template'].get(
'dedicatedAccountHostOnlyFlag', False)]
ded_cpu = [x for x in result['processors']
if x['template'].get('dedicatedAccountHostOnlyFlag',
False)]
def add_cpus_row(cpu_options, name):
"""Add CPU rows to the table."""
cpus = []
for cpu_option in cpu_options:
cpus.append(str(cpu_option['template']['startCpus']))
table.add_row(['cpus (%s)' % name,
formatting.listing(cpus, separator=',')])
add_cpus_row(ded_cpu, 'private')
add_cpus_row(standard_cpu, 'standard')
# Memory
memory = [str(m['template']['maxMemory']) for m in result['memory']]
table.add_row(['memory',
formatting.listing(memory, separator=',')])
# Operating Systems
op_sys = [o['template']['operatingSystemReferenceCode'] for o in
result['operatingSystems']]
op_sys = sorted(op_sys)
os_summary = set()
for operating_system in op_sys:
os_summary.add(operating_system[0:operating_system.find('_')])
for summary in sorted(os_summary):
table.add_row([
'os (%s)' % summary,
os.linesep.join(sorted([x for x in op_sys
if x[0:len(summary)] == summary]))
])
# Disk
local_disks = [x for x in result['blockDevices']
if x['template'].get('localDiskFlag', False)]
san_disks = [x for x in result['blockDevices']
if not x['template'].get('localDiskFlag', False)]
def add_block_rows(disks, name):
"""Add block rows to the table."""
simple = {}
for disk in disks:
block = disk['template']['blockDevices'][0]
bid = block['device']
if bid not in simple:
simple[bid] = []
simple[bid].append(str(block['diskImage']['capacity']))
for label in sorted(simple):
table.add_row(['%s disk(%s)' % (name, label),
formatting.listing(simple[label],
separator=',')])
add_block_rows(local_disks, 'local')
add_block_rows(san_disks, 'san')
# Network
speeds = []
for comp in result['networkComponents']:
speed = comp['template']['networkComponents'][0]['maxSpeed']
speeds.append(str(speed))
speeds = sorted(speeds)
table.add_row(['nic', formatting.listing(speeds, separator=',')])
env.fout(table)
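A minimal sketch of driving the same manager API outside the click command, assuming SoftLayer credentials are available to create_client_from_env() (for example via SL_USERNAME/SL_API_KEY or a ~/.softlayer config):

import SoftLayer

client = SoftLayer.create_client_from_env()
vsi = SoftLayer.VSManager(client)
options = vsi.get_create_options()

# Same traversal the command above performs for its "datacenter" row.
datacenters = [dc['template']['datacenter']['name']
               for dc in options['datacenters']]
print(sorted(datacenters))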
avg_line_length: 30.169643 | max_line_length: 72 | alphanum_fraction: 0.571175