Dataset schema (one record per source file):

| column | type | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64, nullable | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string, nullable | length 24 |
| max_stars_repo_stars_event_max_datetime | string, nullable | length 24 |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64, nullable | 1 to 116k |
| max_issues_repo_issues_event_min_datetime | string, nullable | length 24 |
| max_issues_repo_issues_event_max_datetime | string, nullable | length 24 |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64, nullable | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string, nullable | length 24 |
| max_forks_repo_forks_event_max_datetime | string, nullable | length 24 |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
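A minimal, hedged sketch of loading a dump with this schema via the Hugging Face `datasets` library is shown below; the dataset path is a hypothetical placeholder, not the identifier of this particular dump.

```python
# Hedged sketch: load a dataset with the schema above and inspect one record.
# "org/code-dump" is a placeholder path, not a real dataset identifier.
from datasets import load_dataset

ds = load_dataset("org/code-dump", split="train")
row = ds[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
print(row["content"][:200])  # first 200 characters of the source file
```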
hexsha: b70ca4490939d9e8825c9cbd533fc6d9aa4185a5 | size: 1,847 | ext: py | lang: Python
path: app/models.py | repo: michael-huber2772/portfolio-dashboard @ cec332529c977cd0b12fdeaaf6d13f2642837dbc | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null (all event datetimes: null)
content:
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2019 - present AppSeed.us
"""
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from datetime import datetime
# Create your models here.
class Customer(models.Model):
customer_name = models.CharField(max_length=250)
def __str__(self):
return self.customer_name
class Machine(models.Model):
weight = models.IntegerField()
min_profit_per_hour = models.DecimalField(max_digits=10, decimal_places=5)
class MTag(models.Model):
name = models.CharField(max_length=250, null=False)
def __str__(self):
return self.name
class RawMaterial(models.Model):
name = models.CharField(max_length=250, null=False)
tag = models.ManyToManyField(MTag)
def __str__(self):
return self.name
class Product(models.Model):
product_code = models.CharField(max_length=250)
product_description = models.TextField()
customer = models.ManyToManyField(Customer)
machine = models.ManyToManyField(Machine)
raw_material = models.ManyToManyField(RawMaterial)
def __str__(self):
return self.product_code
class ProductPrice(models.Model):
FLAG_CHOICES = (
('Y', 'Y'),
('N', 'N')
)
product = models.ForeignKey(Product, null=True, on_delete=models.SET_NULL)
price = models.DecimalField(max_digits=10, decimal_places=2)
min_order_quantity = models.IntegerField(null=False)
start_date = models.DateTimeField(default=timezone.now, null=True)
end_date = models.DateTimeField(default=datetime(2999, 12, 31, 18, 00), null=True)
current_flag = models.CharField(max_length=1, choices=FLAG_CHOICES)
version = models.IntegerField()
def __str__(self):
return f"{self.product} ({self.min_order_quantity}) {self.current_flag}"
avg_line_length: 29.31746 | max_line_length: 86 | alphanum_fraction: 0.715755
hexsha: b9fd6383feb785cc430a07f6bc1e0a9b74bd0b62 | size: 3,770 | ext: py | lang: Python
path: tests/test_customisation.py | repo: Pijuli/django-jazzmin @ e3f9d45183d58f78bf4c6793969490631a84681d | licenses: ["MIT"]
max_stars_count: 2 (2022-03-13T09:07:11.000Z to 2022-03-17T11:50:05.000Z) | max_issues_count: null | max_forks_count: null
content:
import pytest
from bs4 import BeautifulSoup
from django.urls import reverse
from jazzmin.settings import CHANGEFORM_TEMPLATES
from jazzmin.templatetags.jazzmin import get_sections
from .test_app.library.books.admin import BookAdmin
from .test_app.library.factories import BookFactory, UserFactory
@pytest.mark.django_db
def test_update_site_logo(admin_client, custom_jazzmin_settings):
"""
We can add a site logo, and it renders out
"""
url = reverse("admin:index")
custom_jazzmin_settings["site_logo"] = "books/img/logo.png"
response = admin_client.get(url)
soup = BeautifulSoup(response.content, "html.parser")
assert soup.find("a", class_="brand-link").find("img")["src"] == "/static/books/img/logo.png"
@pytest.mark.django_db
@pytest.mark.parametrize("config_value,template", [(k, v) for k, v in CHANGEFORM_TEMPLATES.items()])
def test_changeform_templates(config_value, template, admin_client, custom_jazzmin_settings):
"""
All changeform config values use the correct templates
"""
custom_jazzmin_settings["changeform_format"] = config_value
book = BookFactory()
url = reverse("admin:books_book_change", args=(book.pk,))
response = admin_client.get(url)
templates_used = [t.name for t in response.templates]
assert template in templates_used
@pytest.mark.django_db
def test_changeform_template_override(admin_client, custom_jazzmin_settings):
"""
We can set a global template, and override it per model
"""
custom_jazzmin_settings.update(
{"changeform_format": "vertical_tabs", "changeform_format_overrides": {"books.book": "carousel"}}
)
user = UserFactory()
book = BookFactory()
books_url = reverse("admin:books_book_change", args=(book.pk,))
users_url = reverse("admin:auth_user_change", args=(user.pk,))
response = admin_client.get(books_url)
templates_used = [t.name for t in response.templates]
assert CHANGEFORM_TEMPLATES["carousel"] in templates_used
response = admin_client.get(users_url)
templates_used = [t.name for t in response.templates]
assert CHANGEFORM_TEMPLATES["vertical_tabs"] in templates_used
@pytest.mark.django_db
def test_changeform_template_default(admin_client, custom_jazzmin_settings):
"""
The horizontal_tabs template is used by default
"""
assert custom_jazzmin_settings["changeform_format"] == "horizontal_tabs"
book = BookFactory()
books_url = reverse("admin:books_book_change", args=(book.pk,))
response = admin_client.get(books_url)
templates_used = [t.name for t in response.templates]
assert CHANGEFORM_TEMPLATES["horizontal_tabs"] in templates_used
@pytest.mark.django_db
def test_changeform_single(admin_client, monkeypatch):
"""
The single template is used when the modeladmin has no fieldsets, or inlines
"""
book = BookFactory()
books_url = reverse("admin:books_book_change", args=(book.pk,))
monkeypatch.setattr(BookAdmin, "fieldsets", None)
monkeypatch.setattr(BookAdmin, "inlines", [])
response = admin_client.get(books_url)
templates_used = [t.name for t in response.templates]
assert CHANGEFORM_TEMPLATES["single"] in templates_used
@pytest.mark.django_db
@pytest.mark.parametrize("order", [("Book loans", "general", "other"), ("other", "Book loans", "general")])
def test_changeform_section_ordering(change_form_context, order):
"""
We respect the given order for sections
"""
admin_form = change_form_context["adminform"]
inline_formsets = change_form_context["inline_admin_formsets"]
admin_form.model_admin.jazzmin_section_order = order
fieldsets = get_sections(admin_form, inline_formsets)
assert tuple(x.name for x in fieldsets) == order
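# --- Editor's hedged sketch (not part of the original test module) ---
# A minimal JAZZMIN_SETTINGS fragment touching only the keys these tests
# exercise (site_logo, changeform_format, changeform_format_overrides);
# the values are illustrative.
EXAMPLE_JAZZMIN_SETTINGS = {
    "site_logo": "books/img/logo.png",
    "changeform_format": "vertical_tabs",
    "changeform_format_overrides": {"books.book": "carousel"},
}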
avg_line_length: 33.660714 | max_line_length: 107 | alphanum_fraction: 0.738727
hexsha: 66cc0c6831fa2ceb97c505ddb25d6fb1c1186d73 | size: 2,411 | ext: py | lang: Python
path: misc/logistic_loss.py | repo: lnsndn/neural-word-search @ cae82593a546590ff272b6062004d66dd2497337 | licenses: ["MIT"]
max_stars_count: 5 (2018-12-15T00:02:05.000Z to 2021-07-06T14:40:32.000Z) | max_issues_count: 4 (2019-07-31T15:43:41.000Z to 2022-03-09T09:57:48.000Z) | max_forks_count: 7 (2019-01-04T10:50:20.000Z to 2022-01-18T18:03:01.000Z)
content:
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 19 22:59:42 2017
@author: tomas
"""
import torch
from torch.autograd import Variable
"""
One-vs-all logistic loss; each example has a single positive class.
On the forward pass we take:
- input: Tensor of shape (N, C) giving scores for C classes for each
of N examples.
- target: LongTensor of shape (N) giving labels for each of the N
examples; each element is an integer in the range [0, C] with the
interpretation that target[i] = 0 means that input[i] is a negative
example for all classes; if target[i] = c > 0 then input[i] is a positive
example for class c and a negative example for all other classes.
This amounts to evaluating the binary logistic loss for each element of the
(N, C) array of scores. For an element x = scores[{i, j}], its binary label
is y = 1 if target[i] = j and y = 0 otherwise. The binary logistic loss is
given by:
loss(x, y) = log(1 + exp(-x)) if y == 1
log(1 + exp(-x)) + x if y == 0
You can derive this as KL(target, predicted) where target and predicted are
distributions over two classes (positive and negative), the target
distribution is
P(pos) = y
P(neg) = 1 - y
and the predicted distribution is
P(pos) = 1 / (1 + exp(-x))
P(neg) = exp(-x) / (1 + exp(-x))
To improve numeric stability, we make use of the fact that for all a,
log(1 + exp(-x)) = log(exp(a) + exp(a - x)) - a
In practice we choose a = min(0, x) to make sure that all exponents
are negative; this way we won't have overflow resulting in inf, but
we may have underflow resulting in 0 which is preferable.
"""
class LogisticLoss(torch.nn.Module):
def forward(self, input, target):
"""
Inputs:
- input: N tensor of class scores
- target: N LongTensor giving ground-truth for elements of inputs;
each element should be an integer in the range [0, C];
if target[i] == 0 then input[i] should be negative for all classes.
"""
gpu = input.is_cuda
zeros = Variable(torch.zeros(input.size()))
if gpu:
    zeros = zeros.cuda()
a = torch.min(input, zeros)
log_den = torch.log(torch.exp(a) + torch.exp(a - input)) - a
mask = torch.eq(target, 0)
losses = log_den + input * mask.float()
output = torch.mean(losses)
return output
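# --- Editor's hedged sketch (not part of the original file) ---
# Minimal smoke test on 1-D scores; target == 0 marks a negative example,
# matching the convention described in the docstrings above.
if __name__ == '__main__':
    scores = Variable(torch.FloatTensor([2.0, -1.0, 0.5]))
    labels = Variable(torch.LongTensor([1, 0, 0]))
    print(LogisticLoss()(scores, labels))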
avg_line_length: 32.581081 | max_line_length: 87 | alphanum_fraction: 0.639983
hexsha: f841f717f85dd20c47ca64385945d00be8e5a9bb | size: 797 | ext: py | lang: Python
path: python/sync.py | repo: SubProto/raybot @ a955fc497010c37d2c0f7d1c4975ae1cf8125cee | licenses: ["MIT"]
max_stars_count: null | max_issues_count: 2 (2016-09-02T04:08:16.000Z to 2016-09-02T04:08:53.000Z) | max_forks_count: null
content:
#!/usr/bin/python
import serial
import time
import sys
if len(sys.argv) != 4:
print "Usage: %s <motor code> <run time> <serial port>" % sys.argv[0]
sys.exit()
def getResponse():
s = ser.readline()
print "RECV: "
print s
if "NMI:" in s:
print "NMI signal received"
sys.exit()
if "IRQ:" in s:
print "IRQ signal received"
s = ser.readline()
print "RECV: "
print s
ser = serial.Serial(sys.argv[3], 115200, timeout=5)
getResponse()
ser.write(b"WD02200\n")
getResponse()
ser.write(b"WD02000\n")
getResponse()
ser.write(b"WD022%s\n" % sys.argv[1])
getResponse()
for x in range(int(sys.argv[2]) * 2):
ser.write(b"WD020%s\n" % sys.argv[1])
getResponse()
time.sleep(0.5)
ser.write(b"WD02200\n")
getResponse()
ser.close()
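# --- Editor's note (not part of the original script) ---
# Example invocation, matching the usage string above (Python 2):
#   python sync.py 01 10 /dev/ttyUSB0
# i.e. motor code 01, run for 10 seconds (the loop writes roughly twice per
# second), on serial port /dev/ttyUSB0. The motor code and port shown here are
# illustrative only.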
avg_line_length: 18.113636 | max_line_length: 70 | alphanum_fraction: 0.611041
hexsha: 450d3658eeca6cd0d9a86505b66932f4c4cf5622 | size: 618 | ext: py | lang: Python
path: tests/project/gaming/migrations/0003_optional_profile_player.py | repo: DoctorJohn/django-admin-anchors @ 5e2d317e42e46b6a6556c1dc2f9984787ab9b945 | licenses: ["MIT"]
max_stars_count: 4 (2021-04-02T02:51:37.000Z to 2022-02-01T19:06:20.000Z) | max_issues_count: null | max_forks_count: null
content:
# Generated by Django 3.1.7 on 2021-04-08 12:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("gaming", "0002_optional_team_captain"),
]
operations = [
migrations.AlterField(
model_name="profile",
name="player",
field=models.OneToOneField(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="profile",
to="gaming.player",
),
),
]
avg_line_length: 23.769231 | max_line_length: 60 | alphanum_fraction: 0.553398
hexsha: 6248489509fe46349a20beaece8f367bc284c443 | size: 71,861 | ext: py | lang: Python
path: tensorflow_federated/python/core/impl/transformations.py | repo: jpgard/federated-1 @ 1d619208e3bf3b702beb97e64bb955de24320c55 | licenses: ["Apache-2.0"]
max_stars_count: 1 (2019-10-10T06:19:38.000Z to 2019-10-10T06:19:38.000Z) | max_issues_count: null | max_forks_count: 2 (2019-10-10T06:19:41.000Z to 2021-01-28T03:06:55.000Z)
content:
# Lint as: python3
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of transformations that can be applied to a computation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import six
from six.moves import range
from six.moves import zip
from tensorflow_federated.python.common_libs import anonymous_tuple
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.core.api import computation_types
from tensorflow_federated.python.core.impl import compiled_computation_transforms
from tensorflow_federated.python.core.impl import type_utils
from tensorflow_federated.python.core.impl.compiler import building_block_analysis
from tensorflow_federated.python.core.impl.compiler import building_block_factory
from tensorflow_federated.python.core.impl.compiler import building_blocks
from tensorflow_federated.python.core.impl.compiler import intrinsic_defs
from tensorflow_federated.python.core.impl.compiler import transformation_utils
from tensorflow_federated.python.core.impl.compiler import tree_analysis
def _apply_transforms(comp, transforms):
"""Applies all `transforms` in a single walk of `comp`.
This function is private for a reason; TFF does not intend to expose the
capability to chain arbitrary transformations in this way, since the
application of one transformation may cause the resulting AST to violate the
assumptions of another. This function should be used quite selectively and
applied with careful consideration in order to avoid such subtle issues.
Args:
comp: An instance of `building_blocks.ComputationBuildingBlock` to transform
with all elements of `transforms`.
transforms: An instance of `transformation_utils.TransformSpec` or iterable
thereof, the transformations to apply to `comp`.
Returns:
A transformed version of `comp`, with all transformations in `transforms`
applied.
Raises:
TypeError: If the types don't match.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
if isinstance(transforms, transformation_utils.TransformSpec):
transforms = [transforms]
else:
for transform in transforms:
py_typecheck.check_type(transform, transformation_utils.TransformSpec)
def _transform(comp):
modified = False
for transform in transforms:
comp, transform_modified = transform.transform(comp)
modified = modified or transform_modified
return comp, modified
return transformation_utils.transform_postorder(comp, _transform)
def remove_lambdas_and_blocks(comp):
"""Removes any called lambdas and blocks from `comp`.
This function will rename all the variables in `comp` in a single walk of the
AST, then replace called lambdas with blocks in another walk, since this
transformation interacts with scope in delicate ways. It will chain inlining
the blocks and collapsing the selection-from-tuple pattern together into a
final pass.
Args:
comp: Instance of `building_blocks.ComputationBuildingBlock` from which we
want to remove called lambdas and blocks.
Returns:
A transformed version of `comp` which has no called lambdas or blocks, and
no extraneous selections from tuples.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
comp, _ = uniquify_reference_names(comp)
comp, _ = replace_called_lambda_with_block(comp)
block_inliner = InlineBlock(comp)
selection_replacer = ReplaceSelectionFromTuple()
transforms = [block_inliner, selection_replacer]
symbol_tree = transformation_utils.SymbolTree(
transformation_utils.ReferenceCounter)
def _transform_fn(comp, symbol_tree):
"""Transform function chaining inlining and collapsing selections.
This function is inlined here as opposed to factored out and parameterized
by the transforms to apply, due to the delicacy of chaining transformations
which rely on state. These transformations should be safe if they appear
first in the list of transforms, but due to the difficulty of reasoning
about the invariants the transforms can rely on in this setting, there is
no function exposed which hoists out the internal logic.
Args:
comp: Instance of `building_blocks.ComputationBuildingBlock` we wish to
check for inlining and collapsing of selections.
symbol_tree: Instance of `building_blocks.SymbolTree` defining the
bindings available to `comp`.
Returns:
A transformed version of `comp`.
"""
modified = False
for transform in transforms:
if transform.global_transform:
comp, transform_modified = transform.transform(comp, symbol_tree)
else:
comp, transform_modified = transform.transform(comp)
modified = modified or transform_modified
return comp, modified
return transformation_utils.transform_postorder_with_symbol_bindings(
comp, _transform_fn, symbol_tree)
class ExtractComputation(transformation_utils.TransformSpec):
"""Extracts a computation if a variable it depends on is not bound.
This transforms a computation which matches the `predicate` or is a Block, and
replaces the computation with a LET
construct if a variable it depends on is not bound by the current scope. Both
the `parameter_name` of a `building_blocks.Lambda` and the name of
any variable defined by a `building_blocks.Block` can affect the
scope in which a reference in computation is bound.
NOTE: This function extracts `computation_building_block.Block` because block
variables can restrict the scope in which computations are bound.
"""
def __init__(self, comp, predicate):
"""Constructs a new instance.
Args:
comp: The computation building block in which to perform the extractions.
The names of lambda parameters and block variables in `comp` must be
unique.
predicate: A function that takes a single computation building block as an
argument and returns `True` if the computation should be extracted and
`False` otherwise.
Raises:
TypeError: If types do not match.
ValueError: If `comp` contains variables with non-unique names.
"""
super(ExtractComputation, self).__init__()
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
tree_analysis.check_has_unique_names(comp)
self._name_generator = building_block_factory.unique_name_generator(comp)
self._predicate = predicate
self._unbound_references = get_map_of_unbound_references(comp)
def _contains_unbound_reference(self, comp, names):
"""Returns `True` if `comp` contains unbound references to `names`.
This function will update the non-local `_unbound_references` captured from
the parent context if `comp` is not contained in that collection. This can
happen when new computations are created and added to the AST.
Args:
comp: The computation building block to test.
names: A Python string or a list, tuple, or set of Python strings.
"""
if isinstance(names, six.string_types):
names = (names,)
if comp not in self._unbound_references:
references = get_map_of_unbound_references(comp)
self._unbound_references.update(references)
return any(n in self._unbound_references[comp] for n in names)
def _passes_test_or_block(self, comp):
"""Returns `True` if `comp` matches the `predicate` or is a block."""
return self._predicate(comp) or isinstance(comp, building_blocks.Block)
def should_transform(self, comp):
"""Returns `True` if `comp` should be transformed.
The following `_extract_from_*` methods all depend on being invoked
after `should_transform` evaluates to `True` for a given `comp`. Because of
this, certain assumptions are made:
* transformation functions will transform a given `comp`
* block variables are guaranteed to not be empty
Args:
comp: The computation building block in which to test.
"""
if isinstance(comp, building_blocks.Block):
return (self._passes_test_or_block(comp.result) or
any(isinstance(e, building_blocks.Block) for _, e in comp.locals))
elif isinstance(comp, building_blocks.Call):
return (self._passes_test_or_block(comp.function) or
self._passes_test_or_block(comp.argument))
elif isinstance(comp, building_blocks.Lambda):
if self._predicate(comp.result):
return True
if isinstance(comp.result, building_blocks.Block):
for index, (_, variable) in enumerate(comp.result.locals):
names = [n for n, _ in comp.result.locals[:index]]
if (not self._contains_unbound_reference(variable,
comp.parameter_name) and
not self._contains_unbound_reference(variable, names)):
return True
elif isinstance(comp, building_blocks.Selection):
return self._passes_test_or_block(comp.source)
elif isinstance(comp, building_blocks.Tuple):
return any(self._passes_test_or_block(e) for e in comp)
return False
def _extract_from_block(self, comp):
"""Returns a new computation with all intrinsics extracted."""
if self._predicate(comp.result):
name = six.next(self._name_generator)
variables = comp.locals
variables.append((name, comp.result))
result = building_blocks.Reference(name, comp.result.type_signature)
elif isinstance(comp.result, building_blocks.Block):
variables = comp.locals + comp.result.locals
result = comp.result.result
else:
variables = comp.locals
result = comp.result
def _remove_blocks_from_variables(variables):
new_variables = []
for name, variable in variables:
if isinstance(variable, building_blocks.Block):
new_variables.extend(variable.locals)
new_variables.append((name, variable.result))
else:
new_variables.append((name, variable))
return new_variables
variables = _remove_blocks_from_variables(variables)
return building_blocks.Block(variables, result)
def _extract_from_call(self, comp):
"""Returns a new computation with all intrinsics extracted."""
variables = []
if self._predicate(comp.function):
name = six.next(self._name_generator)
variables.append((name, comp.function))
function = building_blocks.Reference(name, comp.function.type_signature)
elif isinstance(comp.function, building_blocks.Block):
block = comp.function
variables.extend(block.locals)
function = block.result
else:
function = comp.function
if comp.argument is not None:
if self._predicate(comp.argument):
name = six.next(self._name_generator)
variables.append((name, comp.argument))
argument = building_blocks.Reference(name, comp.argument.type_signature)
elif isinstance(comp.argument, building_blocks.Block):
block = comp.argument
variables.extend(block.locals)
argument = block.result
else:
argument = comp.argument
else:
argument = None
call = building_blocks.Call(function, argument)
block = building_blocks.Block(variables, call)
return self._extract_from_block(block)
def _extract_from_lambda(self, comp):
"""Returns a new computation with all intrinsics extracted."""
if self._predicate(comp.result):
name = six.next(self._name_generator)
variables = [(name, comp.result)]
result = building_blocks.Reference(name, comp.result.type_signature)
if not self._contains_unbound_reference(comp.result, comp.parameter_name):
fn = building_blocks.Lambda(comp.parameter_name, comp.parameter_type,
result)
block = building_blocks.Block(variables, fn)
return self._extract_from_block(block)
else:
block = building_blocks.Block(variables, result)
block = self._extract_from_block(block)
return building_blocks.Lambda(comp.parameter_name, comp.parameter_type,
block)
else:
block = comp.result
extracted_variables = []
retained_variables = []
for name, variable in block.locals:
names = [n for n, _ in retained_variables]
if (not self._contains_unbound_reference(variable, comp.parameter_name)
and not self._contains_unbound_reference(variable, names)):
extracted_variables.append((name, variable))
else:
retained_variables.append((name, variable))
if retained_variables:
result = building_blocks.Block(retained_variables, block.result)
else:
result = block.result
fn = building_blocks.Lambda(comp.parameter_name, comp.parameter_type,
result)
block = building_blocks.Block(extracted_variables, fn)
return self._extract_from_block(block)
def _extract_from_selection(self, comp):
"""Returns a new computation with all intrinsics extracted."""
if self._predicate(comp.source):
name = six.next(self._name_generator)
variables = [(name, comp.source)]
source = building_blocks.Reference(name, comp.source.type_signature)
else:
block = comp.source
variables = block.locals
source = block.result
selection = building_blocks.Selection(
source, name=comp.name, index=comp.index)
block = building_blocks.Block(variables, selection)
return self._extract_from_block(block)
def _extract_from_tuple(self, comp):
"""Returns a new computation with all intrinsics extracted."""
variables = []
elements = []
for name, element in anonymous_tuple.iter_elements(comp):
if self._passes_test_or_block(element):
variable_name = six.next(self._name_generator)
variables.append((variable_name, element))
ref = building_blocks.Reference(variable_name, element.type_signature)
elements.append((name, ref))
else:
elements.append((name, element))
tup = building_blocks.Tuple(elements)
block = building_blocks.Block(variables, tup)
return self._extract_from_block(block)
def transform(self, comp):
"""Returns a new transformed computation or `comp`."""
if not self.should_transform(comp):
return comp, False
if isinstance(comp, building_blocks.Block):
comp = self._extract_from_block(comp)
elif isinstance(comp, building_blocks.Call):
comp = self._extract_from_call(comp)
elif isinstance(comp, building_blocks.Lambda):
comp = self._extract_from_lambda(comp)
elif isinstance(comp, building_blocks.Selection):
comp = self._extract_from_selection(comp)
elif isinstance(comp, building_blocks.Tuple):
comp = self._extract_from_tuple(comp)
return comp, True
def extract_computations(comp):
"""Extracts computations to the scope which binds a variable it depends on.
NOTE: If a computation does not contain a variable that is bound by a
computation in `comp` it will be extracted to the root.
Args:
comp: The computation building block in which to perform the transformation.
Returns:
A new computation with the transformation applied or the original `comp`.
"""
def _predicate(comp):
return not isinstance(comp, building_blocks.Reference)
return _apply_transforms(comp, ExtractComputation(comp, _predicate))
def extract_intrinsics(comp):
"""Extracts intrinsics to the scope which binds a variable it depends on.
NOTE: If an intrinsic does not contain a variable that is bound by a
computation in `comp` it will be extracted to the root.
Args:
comp: The computation building block in which to perform the transformation.
Returns:
A new computation with the transformation applied or the original `comp`.
"""
def _predicate(comp):
return building_block_analysis.is_called_intrinsic(comp)
return _apply_transforms(comp, ExtractComputation(comp, _predicate))
class InlineBlock(transformation_utils.TransformSpec):
"""Inlines the block variables in `comp` whitelisted by `variable_names`.
Each invocation of the `transform` method checks for presence of a
block-bound `building_blocks.Reference`, and inlines this
reference with its appropriate value.
"""
def __init__(self, comp, variable_names=None):
"""Initializes the block inliner.
Checks that `comp` has unique names, and that `variable_names` is an
iterable of string types.
Args:
comp: The top-level computation to inline.
variable_names: The variable names to inline. If `None`, inlines all
variables.
Raises:
ValueError: If `comp` contains variables with non-unique names.
TypeError: If `variable_names` is a non-`list`, `set` or `tuple`, or
contains anything other than strings.
"""
super(InlineBlock, self).__init__(global_transform=True)
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
tree_analysis.check_has_unique_names(comp)
if variable_names is not None:
py_typecheck.check_type(variable_names, (list, tuple, set))
for name in variable_names:
py_typecheck.check_type(name, six.string_types)
self._variable_names = variable_names
def _should_inline_variable(self, name):
return self._variable_names is None or name in self._variable_names
def should_transform(self, comp):
return ((isinstance(comp, building_blocks.Reference) and
self._should_inline_variable(comp.name)) or
(isinstance(comp, building_blocks.Block) and any(
self._should_inline_variable(name) for name, _ in comp.locals)))
def transform(self, comp, symbol_tree):
if not self.should_transform(comp):
return comp, False
if isinstance(comp, building_blocks.Reference):
try:
value = symbol_tree.get_payload_with_name(comp.name).value
except NameError:
# This reference is unbound
value = None
# This identifies a variable bound by a Block as opposed to a Lambda.
if value is not None:
return value, True
return comp, False
elif isinstance(comp, building_blocks.Block):
variables = [(name, value)
for name, value in comp.locals
if not self._should_inline_variable(name)]
if not variables:
comp = comp.result
else:
comp = building_blocks.Block(variables, comp.result)
return comp, True
return comp, False
def inline_block_locals(comp, variable_names=None):
"""Inlines the block variables in `comp` whitelisted by `variable_names`."""
symbol_tree = transformation_utils.SymbolTree(
transformation_utils.ReferenceCounter)
transform_spec = InlineBlock(comp, variable_names)
return transformation_utils.transform_postorder_with_symbol_bindings(
comp, transform_spec.transform, symbol_tree)
class MergeChainedBlocks(transformation_utils.TransformSpec):
r"""Merges chained blocks into one block.
Looks for occurrences of the following pattern:
Block
/ \
[...] Block
/ \
[...] Comp(x)
And merges them to
Block
/ \
[...] Comp(x)
Preserving the relative ordering of any locals declarations, which preserves
scoping rules.
Notice that because TFF Block constructs bind their variables in sequence, it
is completely safe to concatenate the locals lists in this implementation.
"""
def should_transform(self, comp):
"""Returns `True` if `comp` is a block and its result is a block."""
return (isinstance(comp, building_blocks.Block) and
isinstance(comp.result, building_blocks.Block))
def transform(self, comp):
"""Returns a new transformed computation or `comp`."""
if not self.should_transform(comp):
return comp, False
comp = building_blocks.Block(comp.locals + comp.result.locals,
comp.result.result)
return comp, True
def merge_chained_blocks(comp):
"""Merges chained blocks into one block."""
return _apply_transforms(comp, MergeChainedBlocks())
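# Editor's hedged illustration (comments only, not part of the original module):
# in terms of the constructors used in this file, MergeChainedBlocks rewrites
#   Block([('a', x)], Block([('b', y)], z))
# into
#   Block([('a', x), ('b', y)], z)
# which keeps the left-to-right binding order of the locals and therefore the
# scoping described in the class docstring above.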
class MergeChainedFederatedMapsOrApplys(transformation_utils.TransformSpec):
r"""Merges chained federated maps or federated apply into one structure.
This transform matches the following pattern, and replaces the following
computation containing two federated map intrinsics:
Call
/ \
Intrinsic Tuple
|
[Comp(x), Call]
/ \
Intrinsic Tuple
|
[Comp(y), Comp(z)]
intrinsic(<x, intrinsic(<y, z>)>)
with the following computation containing one federated map or apply
intrinsic:
Call
/ \
Intrinsic Tuple
|
[Block, Comp(z)]
/ \
[fn=Tuple] Lambda(arg)
| \
[Comp(y), Comp(x)] Call
/ \
Sel(1) Call
/ / \
Ref(fn) Sel(0) Ref(arg)
/
Ref(fn)
intrinsic(<(let fn=<y, x> in (arg -> fn[1](fn[0](arg)))), z>)
The functional computations `x` and `y`, and the argument `z` are retained;
the other computations are replaced.
"""
def __init__(self, comp):
"""Constructs a new instance.
Args:
comp: The computation building block in which to perform the merges.
Raises:
TypeError: If types do not match.
"""
super(MergeChainedFederatedMapsOrApplys, self).__init__()
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
self._name_generator = building_block_factory.unique_name_generator(comp)
def should_transform(self, comp):
"""Returns `True` if `comp` is a chained federated map."""
if building_block_analysis.is_called_intrinsic(comp, (
intrinsic_defs.FEDERATED_APPLY.uri,
intrinsic_defs.FEDERATED_MAP.uri,
)):
outer_arg = comp.argument[1]
if building_block_analysis.is_called_intrinsic(outer_arg,
comp.function.uri):
return True
return False
def _create_block_to_chained_calls(self, comps):
r"""Constructs a transformed block computation from `comps`.
Block
/ \
[fn=Tuple] Lambda(arg)
| \
[Comp(y), Comp(x)] Call
/ \
Sel(1) Call
/ / \
Ref(fn) Sel(0) Ref(arg)
/
Ref(fn)
(let fn=<y, x> in (arg -> fn[1](fn[0](arg))))
Args:
comps: A Python list of computations.
Returns:
A `building_blocks.Block`.
"""
functions = building_blocks.Tuple(comps)
functions_name = six.next(self._name_generator)
functions_ref = building_blocks.Reference(functions_name,
functions.type_signature)
arg_name = six.next(self._name_generator)
arg_type = comps[0].type_signature.parameter
arg_ref = building_blocks.Reference(arg_name, arg_type)
arg = arg_ref
for index, _ in enumerate(comps):
fn_sel = building_blocks.Selection(functions_ref, index=index)
call = building_blocks.Call(fn_sel, arg)
arg = call
fn = building_blocks.Lambda(arg_ref.name, arg_ref.type_signature, call)
return building_blocks.Block(((functions_ref.name, functions),), fn)
def transform(self, comp):
"""Returns a new transformed computation or `comp`."""
if not self.should_transform(comp):
return comp, False
block = self._create_block_to_chained_calls((
comp.argument[1].argument[0],
comp.argument[0],
))
arg = building_blocks.Tuple([
block,
comp.argument[1].argument[1],
])
intrinsic_type = computation_types.FunctionType(
arg.type_signature, comp.function.type_signature.result)
intrinsic = building_blocks.Intrinsic(comp.function.uri, intrinsic_type)
comp = building_blocks.Call(intrinsic, arg)
return comp, True
def merge_chained_federated_maps_or_applys(comp):
"""Merges chained federated maps or federated apply into one structure."""
return _apply_transforms(comp, MergeChainedFederatedMapsOrApplys(comp))
class MergeTupleIntrinsics(transformation_utils.TransformSpec):
r"""Merges a tuple of called intrinsics into one called intrinsic.
This transform matches the following pattern, and replaces the following
computation containing a tuple of called intrinsics all representing the same
operation:
Tuple
|
[Call, Call, ...]
/ \ / \
Intrinsic Tuple Intrinsic Tuple
| |
[Comp(f1), Comp(v1), ...] [Comp(f2), Comp(v2), ...]
<Intrinsic(<f1, v1>), Intrinsic(<f2, v2>)>
with the following computation containing one called intrinsic:
federated_unzip(Call)
/ \
Intrinsic Tuple
|
[Block, federated_zip(Tuple), ...]
/ \ |
[fn=Tuple] Lambda(arg) [Comp(v1), Comp(v2), ...]
| \
[Comp(f1), Comp(f2), ...] Tuple
|
[Call, Call, ...]
/ \ / \
Sel(0) Sel(0) Sel(1) Sel(1)
/ / / /
Ref(fn) Ref(arg) Ref(fn) Ref(arg)
Intrinsic(<
(let fn=<f1, f2> in (arg -> <fn[0](arg[0]), fn[1](arg[1])>)),
<v1, v2>,
>)
The functional computations `f1`, `f2`, etc..., and the computations `v1`,
`v2`, etc... are retained; the other computations are replaced.
NOTE: This is just an example of what this transformation would look like when
applied to a tuple of federated maps. The components `f1`, `f2`, `v1`, and
`v2` and the number of those components are not important.
This transformation is implemented to match the following intrinsics:
* intrinsic_defs.FEDERATED_AGGREGATE.uri
* intrinsic_defs.FEDERATED_APPLY.uri
* intrinsic_defs.FEDERATED_BROADCAST.uri
* intrinsic_defs.FEDERATED_MAP.uri
"""
def __init__(self, comp, uri):
"""Constructs a new instance.
Args:
comp: The computation building block in which to perform the merges.
uri: The URI of the intrinsic to merge.
Raises:
TypeError: If types do not match.
ValueError: If the `uri` has an unexpected value.
"""
super(MergeTupleIntrinsics, self).__init__()
py_typecheck.check_type(uri, six.string_types)
self._name_generator = building_block_factory.unique_name_generator(comp)
expected_uri = (
intrinsic_defs.FEDERATED_AGGREGATE.uri,
intrinsic_defs.FEDERATED_APPLY.uri,
intrinsic_defs.FEDERATED_BROADCAST.uri,
intrinsic_defs.FEDERATED_MAP.uri,
)
if uri not in expected_uri:
raise ValueError(
'The value of `uri` is expected to be one of {}, found {}'.format(
expected_uri, uri))
self._uri = uri
def should_transform(self, comp):
return (isinstance(comp, building_blocks.Tuple) and comp and
building_block_analysis.is_called_intrinsic(comp[0], self._uri) and
all(
building_block_analysis.is_called_intrinsic(
element, comp[0].function.uri) for element in comp))
def _transform_args_with_type(self, comps, type_signature):
"""Transforms a Python `list` of computations.
Given a computation containing `n` called intrinsics with `m` arguments,
this function takes a Python `list` of computations `comps` containing the
`m`-th argument from each computation `n` and creates a new computation
representing the `m`-th argument that should be passed to the called
intrinsic of the transformed computation.
Args:
comps: A Python list of computations.
type_signature: The type to use when determining how to transform the
computations.
Returns:
A `building_blocks.Block`.
"""
if isinstance(type_signature, computation_types.FederatedType):
return self._transform_args_with_federated_types(comps, type_signature)
elif isinstance(type_signature, computation_types.FunctionType):
return self._transform_args_with_functional_types(comps, type_signature)
elif isinstance(type_signature, computation_types.AbstractType):
return self._transform_args_with_abstract_types(comps, type_signature)
else:
raise TypeError(
'Expected a FederatedType, FunctionType, or AbstractType, '
'found: {}'.format(type(type_signature)))
def _transform_args_with_abstract_types(self, comps, type_signature):
r"""Transforms a Python `list` of computations with abstract types.
Tuple
|
[Comp, Comp, ...]
Args:
comps: A Python list of computations.
type_signature: The type to use when determining how to transform the
computations.
Returns:
A `building_blocks.Tuple`.
"""
del type_signature # Unused
return building_blocks.Tuple(comps)
def _transform_args_with_federated_types(self, comps, type_signature):
r"""Transforms a Python `list` of computations with federated types.
federated_zip(Tuple)
|
[Comp, Comp, ...]
Args:
comps: A Python list of computations.
type_signature: The type to use when determining how to transform the
computations.
Returns:
A `building_blocks.Block`.
"""
del type_signature # Unused
values = building_blocks.Tuple(comps)
return building_block_factory.create_federated_zip(values)
def _transform_args_with_functional_types(self, comps, type_signature):
r"""Transforms a Python `list` of computations with functional types.
Block
/ \
[fn=Tuple] Lambda(arg)
| \
[Comp(f1), Comp(f2), ...] Tuple
|
[Call, Call, ...]
/ \ / \
Sel(0) Sel(0) Sel(1) Sel(1)
| | | |
Ref(fn) Ref(arg) Ref(fn) Ref(arg)
Args:
comps: a Python list of computations.
type_signature: The type to use when determining how to transform the
computations.
Returns:
A `building_blocks.Block`.
"""
functions = building_blocks.Tuple(comps)
fn_name = six.next(self._name_generator)
fn_ref = building_blocks.Reference(fn_name, functions.type_signature)
if isinstance(type_signature.parameter, computation_types.NamedTupleType):
arg_type = [[] for _ in range(len(type_signature.parameter))]
for functional_comp in comps:
named_type_signatures = anonymous_tuple.to_elements(
functional_comp.type_signature.parameter)
for index, (_, concrete_type) in enumerate(named_type_signatures):
arg_type[index].append(concrete_type)
else:
arg_type = [e.type_signature.parameter for e in comps]
arg_name = six.next(self._name_generator)
arg_ref = building_blocks.Reference(arg_name, arg_type)
if isinstance(type_signature.parameter, computation_types.NamedTupleType):
arg = building_block_factory.create_zip(arg_ref)
else:
arg = arg_ref
elements = []
for index, functional_comp in enumerate(comps):
sel_fn = building_blocks.Selection(fn_ref, index=index)
sel_arg = building_blocks.Selection(arg, index=index)
call = building_blocks.Call(sel_fn, sel_arg)
elements.append(call)
calls = building_blocks.Tuple(elements)
result = building_blocks.Lambda(arg_ref.name, arg_ref.type_signature, calls)
return building_blocks.Block(((fn_ref.name, functions),), result)
def _transform_args(self, comp, type_signature):
"""Transforms the arguments from `comp`.
Given a computation containing a tuple of intrinsics that can be merged,
this function creates arguments that should be passed to the call of the
transformed computation.
Args:
comp: The computation building block in which to perform the transform.
type_signature: The type to use when determining how to transform the
computations.
Returns:
A `building_blocks.ComputationBuildingBlock` representing the
transformed arguments from `comp`.
"""
if isinstance(type_signature, computation_types.NamedTupleType):
comps = [[] for _ in range(len(type_signature))]
for _, call in anonymous_tuple.iter_elements(comp):
for index, arg in enumerate(call.argument):
comps[index].append(arg)
transformed_args = []
for args, arg_type in zip(comps, type_signature):
transformed_arg = self._transform_args_with_type(args, arg_type)
transformed_args.append(transformed_arg)
return building_blocks.Tuple(transformed_args)
else:
args = []
for _, call in anonymous_tuple.iter_elements(comp):
args.append(call.argument)
return self._transform_args_with_type(args, type_signature)
def transform(self, comp):
"""Returns a new transformed computation or `comp`."""
if not self.should_transform(comp):
return comp, False
intrinsic_def = intrinsic_defs.uri_to_intrinsic_def(self._uri)
arg = self._transform_args(comp, intrinsic_def.type_signature.parameter)
named_comps = anonymous_tuple.to_elements(comp)
parameter_type = computation_types.to_type(arg.type_signature)
type_signature = [call.type_signature.member for _, call in named_comps]
result_type = computation_types.FederatedType(
type_signature, intrinsic_def.type_signature.result.placement,
intrinsic_def.type_signature.result.all_equal)
intrinsic_type = computation_types.FunctionType(parameter_type, result_type)
intrinsic = building_blocks.Intrinsic(self._uri, intrinsic_type)
call = building_blocks.Call(intrinsic, arg)
tup = building_block_factory.create_federated_unzip(call)
names = [name for name, _ in named_comps]
transformed_comp = building_block_factory.create_named_tuple(tup, names)
return transformed_comp, True
def merge_tuple_intrinsics(comp, uri):
r"""Merges tuples of called intrinsics into one called intrinsic."""
return _apply_transforms(comp, MergeTupleIntrinsics(comp, uri))
def remove_duplicate_computations(comp):
r"""Removes duplicated computations from `comp`.
This transform traverses `comp` postorder and removes duplicated computation
building blocks from `comp`. Additionally, Block variables whose value is a
Reference, and References that point to other References, are removed.
Args:
comp: The computation building block in which to perform the removals.
Returns:
A new computation with the transformation applied or the original `comp`.
Raises:
TypeError: If types do not match.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
tree_analysis.check_has_unique_names(comp)
def _should_transform(comp):
"""Returns `True` if `comp` should be transformed."""
return (isinstance(comp, building_blocks.Block) or
isinstance(comp, building_blocks.Reference))
def _transform(comp, symbol_tree):
"""Returns a new transformed computation or `comp`."""
if not _should_transform(comp):
return comp, False
if isinstance(comp, building_blocks.Block):
variables = []
for name, value in comp.locals:
symbol_tree.walk_down_one_variable_binding()
payload = symbol_tree.get_payload_with_name(name)
if (not payload.removed and
not isinstance(value, building_blocks.Reference)):
variables.append((name, value))
if not variables:
comp = comp.result
else:
comp = building_blocks.Block(variables, comp.result)
return comp, True
elif isinstance(comp, building_blocks.Reference):
value = symbol_tree.get_payload_with_name(comp.name).value
if value is None:
return comp, False
while isinstance(value, building_blocks.Reference):
new_value = symbol_tree.get_payload_with_name(value.name).value
if new_value is None:
comp = building_blocks.Reference(value.name, value.type_signature)
return comp, True
else:
value = new_value
payloads_with_value = symbol_tree.get_all_payloads_with_value(
value, _computations_equal)
if payloads_with_value:
highest_payload = payloads_with_value[-1]
lower_payloads = payloads_with_value[:-1]
for payload in lower_payloads:
symbol_tree.update_payload_with_name(payload.name)
comp = building_blocks.Reference(highest_payload.name,
highest_payload.value.type_signature)
return comp, True
return comp, False
class TrackRemovedReferences(transformation_utils.BoundVariableTracker):
"""transformation_utils.SymbolTree node for removing References in ASTs."""
def __init__(self, name, value):
super(TrackRemovedReferences, self).__init__(name, value)
self._removed = False
@property
def removed(self):
return self._removed
def update(self, value):
self._removed = True
def __str__(self):
return 'Name: {}; value: {}; removed: {}'.format(self.name, self.value,
self.removed)
symbol_tree = transformation_utils.SymbolTree(TrackRemovedReferences)
return transformation_utils.transform_postorder_with_symbol_bindings(
comp, _transform, symbol_tree)
def remove_mapped_or_applied_identity(comp):
r"""Removes all the mapped or applied identity functions in `comp`.
This transform traverses `comp` postorder, matches the following pattern, and
removes all the mapped or applied identity functions by replacing the
following computation:
Call
/ \
Intrinsic Tuple
|
[Lambda(x), Comp(y)]
\
Ref(x)
Intrinsic(<(x -> x), y>)
with its argument:
Comp(y)
y
Args:
comp: The computation building block in which to perform the removals.
Returns:
A new computation with the transformation applied or the original `comp`.
Raises:
TypeError: If types do not match.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
def _should_transform(comp):
"""Returns `True` if `comp` is a mapped or applied identity function."""
if (isinstance(comp, building_blocks.Call) and
isinstance(comp.function, building_blocks.Intrinsic) and
comp.function.uri in (
intrinsic_defs.FEDERATED_MAP.uri,
intrinsic_defs.FEDERATED_MAP_ALL_EQUAL.uri,
intrinsic_defs.FEDERATED_APPLY.uri,
intrinsic_defs.SEQUENCE_MAP.uri,
)):
called_function = comp.argument[0]
return building_block_analysis.is_identity_function(called_function)
return False
def _transform(comp):
if not _should_transform(comp):
return comp, False
transformed_comp = comp.argument[1]
return transformed_comp, True
return transformation_utils.transform_postorder(comp, _transform)
class ReplaceCalledLambdaWithBlock(transformation_utils.TransformSpec):
r"""Replaces all the called lambdas in `comp` with a block.
This transform replaces the following computation containing a called lambda:
Call
/ \
Lambda(x) Comp(y)
\
Comp(z)
(x -> z)(y)
with the following computation containing a block:
Block
/ \
[x=Comp(y)] Comp(z)
let x=y in z
"""
def should_transform(self, comp):
return (isinstance(comp, building_blocks.Call) and
isinstance(comp.function, building_blocks.Lambda))
def transform(self, comp):
if not self.should_transform(comp):
return comp, False
transformed_comp = building_blocks.Block(
[(comp.function.parameter_name, comp.argument)], comp.function.result)
return transformed_comp, True
def replace_called_lambda_with_block(comp):
"""Replaces all the called lambdas in `comp` with a block."""
return _apply_transforms(comp, ReplaceCalledLambdaWithBlock())
class ReplaceSelectionFromTuple(transformation_utils.TransformSpec):
r"""Replaces any selection from a tuple with the underlying tuple element.
Invocations of `transform` replace any occurrences of:
Selection
\
Tuple
|
[Comp, Comp, ...]
with the appropriate Comp, as determined by the `index` or `name` of the
`Selection`.
"""
def should_transform(self, comp):
return (isinstance(comp, building_blocks.Selection) and
isinstance(comp.source, building_blocks.Tuple))
def _get_index_from_name(self, selection_name, tuple_type_signature):
named_type_signatures = anonymous_tuple.to_elements(tuple_type_signature)
return [x[0] for x in named_type_signatures].index(selection_name)
def transform(self, comp):
if not self.should_transform(comp):
return comp, False
if comp.name is not None:
index = self._get_index_from_name(comp.name, comp.source.type_signature)
else:
index = comp.index
return comp.source[index], True
def replace_selection_from_tuple_with_element(comp):
"""Replaces any selection from a tuple with the underlying tuple element."""
return _apply_transforms(comp, ReplaceSelectionFromTuple())
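# Editor's hedged illustration (comments only, not part of the original module):
# with the constructors used above, Selection(Tuple([a, b]), index=1) collapses
# to `b`. A named selection is first resolved to an index against the tuple's
# type signature (see _get_index_from_name) and then collapses the same way.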
def uniquify_compiled_computation_names(comp):
"""Replaces all the compiled computations names in `comp` with unique names.
This transform traverses `comp` postorder and replaces the name of all the
compiled computations found in `comp` with a unique name.
Args:
comp: The computation building block in which to perform the replacements.
Returns:
A new computation with the transformation applied or the original `comp`.
Raises:
TypeError: If types do not match.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
name_generator = building_block_factory.unique_name_generator(None, prefix='')
def _should_transform(comp):
return isinstance(comp, building_blocks.CompiledComputation)
def _transform(comp):
if not _should_transform(comp):
return comp, False
transformed_comp = building_blocks.CompiledComputation(
comp.proto, six.next(name_generator))
return transformed_comp, True
return transformation_utils.transform_postorder(comp, _transform)
def uniquify_reference_names(comp):
"""Replaces all the bound reference names in `comp` with unique names.
Notice that `uniquify_reference_names` simply leaves alone any reference
which is unbound under `comp`.
Args:
comp: The computation building block in which to perform the replacements.
Returns:
A transformed version of `comp` inside of which all variable names
are guaranteed to be unique.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
name_generator = building_block_factory.unique_name_generator(None)
class _RenameNode(transformation_utils.BoundVariableTracker):
"""transformation_utils.SymbolTree node for renaming References in ASTs."""
def __init__(self, name, value):
super(_RenameNode, self).__init__(name, value)
py_typecheck.check_type(name, str)
self.new_name = six.next(name_generator)
def __str__(self):
return 'Value: {}, name: {}, new_name: {}'.format(self.value, self.name,
self.new_name)
def _transform(comp, context_tree):
"""Renames References in `comp` to unique names."""
if isinstance(comp, building_blocks.Reference):
try:
new_name = context_tree.get_payload_with_name(comp.name).new_name
return building_blocks.Reference(new_name, comp.type_signature,
comp.context), True
except NameError:
return comp, False
elif isinstance(comp, building_blocks.Block):
new_locals = []
for name, val in comp.locals:
context_tree.walk_down_one_variable_binding()
new_name = context_tree.get_payload_with_name(name).new_name
new_locals.append((new_name, val))
return building_blocks.Block(new_locals, comp.result), True
elif isinstance(comp, building_blocks.Lambda):
context_tree.walk_down_one_variable_binding()
new_name = context_tree.get_payload_with_name(
comp.parameter_name).new_name
return building_blocks.Lambda(new_name, comp.parameter_type,
comp.result), True
return comp, False
symbol_tree = transformation_utils.SymbolTree(_RenameNode)
return transformation_utils.transform_postorder_with_symbol_bindings(
comp, _transform, symbol_tree)
class TFParser(object):
"""Callable taking subset of TFF AST constructs to CompiledComputations.
When this function is applied via `transformation_utils.transform_postorder`
to a TFF AST node satisfying its assumptions, the tree under this node will
be reduced to a single instance of
`building_blocks.CompiledComputation` representing the same
logic.
Notice that this function is designed to be applied to what is essentially
a subtree of a larger TFF AST; once the processing on a single device has
been aligned at the AST level, and placement separated from the logic of
this processing, we should be left with a function wrapped via
`federated_map` or `federated_apply` to a federated argument. It is this
function which we need to reduce to TensorFlow, and it is to the root
node of this function which we are looking to apply `TFParser`. Because of
this, we assume that there is a lambda expression at the top of the AST
we are looking to parse, as well as the rest of the assumptions below.
1. All called lambdas have been converted to blocks.
2. All blocks have been inlined; that is, there are no block/LET constructs
remaining.
3. All compiled computations are called.
4. No compiled computations have been partially called; we believe this
should be handled correctly today but we haven't reasoned explicitly about
this possibility.
5. The only leaf nodes present under `comp` are compiled computations and
references to the argument of the top-level lambda which we are hoping to
replace with a compiled computation. Further, every leaf node which is a
reference has as its parent a `building_blocks.Call`, whose
associated function is a TF graph. This prevents us from needing to
deal with arbitrary nesting of references and TF graphs, and significantly
clarifies the reasoning. This can be accomplished by "decorating" the
appropriate leaves with called identity TF graphs, the construction of
which is provided by a utility module.
6. There is only a single lambda binding any references present in the AST,
and it is placed at the root of the AST to which we apply `TFParser`.
7. There are no intrinsics present in the AST.
"""
# TODO(b/133328350): Allow for this to take in multiple selections from a
# single argument.
def __init__(self):
"""Populates the parser library with mutually exclusive options."""
self._parse_library = [
compiled_computation_transforms.SelectionFromCalledTensorFlowBlock(),
compiled_computation_transforms.LambdaWrappingGraph(),
compiled_computation_transforms.TupleCalledGraphs(),
compiled_computation_transforms.CalledCompositionOfTensorFlowBlocks(),
compiled_computation_transforms.CalledGraphOnReplicatedArg(),
]
def __call__(self, comp):
"""Transforms `comp` by checking all elements of the parser library.
This function is roughly performing intermediate-code generation, taking
TFF and generating TF. Calling this function is essentially checking the
stack and selecting a semantic action based on its contents, and *only one*
of these actions should be selected for a given computation.
Notice that since the parser library contains mutually exclusive options,
it is safe to return early.
Args:
comp: The `building_blocks.ComputationBuildingBlock` to check for
possibility of reduction according to the parsing library.
Returns:
A tuple whose first element is a possibly transformed version of `comp`,
and whose second is a Boolean indicating whether or not `comp` was
transformed. This is in conforming to the conventions of
`transformation_utils.transform_postorder`.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
for option in self._parse_library:
if option.should_transform(comp):
return option.transform(comp)
return comp, False
def insert_called_tf_identity_at_leaves(comp):
r"""Inserts an identity TF graph called on References under `comp`.
For ease of reasoning about and proving completeness of TFF-to-TF
translation capabilities, we will maintain the invariant that
we constantly pass up the AST instances of the pattern:
Call
/ \
CompiledComputation Reference
Any block of TFF reducible to TensorFlow must have a functional type
signature without nested functions, and therefore we may assume there is
a single Reference in the code we are parsing to TF. We continually push logic
into the compiled computation as we make our way up the AST, preserving the
pattern above; when we hit the lambda that binds this reference, we simply
unwrap the call.
To perform this process, we must begin with this pattern; otherwise there
may be some arbitrary TFF constructs present between any occurrences of TF
and the arguments to which they are applied, e.g. arbitrary selections from
and nesting of tuples containing references.
`insert_called_tf_identity_at_leaves` ensures that the pattern above is
present at the leaves of any portion of the TFF AST which is destined to be
reduced to TF.
We detect such a destiny by checking for the existence of a
`building_blocks.Lambda` whose parameter and result type
can both be bound into TensorFlow. This pattern is enforced here as
parameter validation on `comp`.
Args:
comp: Instance of `building_blocks.Lambda` whose AST we will traverse,
replacing appropriate instances of `building_blocks.Reference` with graphs
representing the identity function of the appropriate type called on the
same reference. `comp` must declare a parameter and result type which are
both able to be stamped in to a TensorFlow graph.
Returns:
A possibly modified version of `comp`, where any references now have a
parent of type `building_blocks.Call` with function an instance
of `building_blocks.CompiledComputation`.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
if isinstance(comp, building_blocks.CompiledComputation):
return comp, False
if not (isinstance(comp, building_blocks.Lambda) and
type_utils.is_tensorflow_compatible_type(comp.result.type_signature)
and type_utils.is_tensorflow_compatible_type(comp.parameter_type)):
raise ValueError(
'`insert_called_tf_identity_at_leaves` should only be '
'called on instances of '
'`building_blocks.Lambda` whose parameter '
'and result types can both be stamped into TensorFlow '
'graphs. You have called it on a {} of type signature {}.'.format(
comp.compact_representation(), comp.type_signature))
def _should_decorate(comp):
return (isinstance(comp, building_blocks.Reference) and
type_utils.is_tensorflow_compatible_type(comp.type_signature))
def _decorate(comp):
identity_function = building_block_factory.create_compiled_identity(
comp.type_signature)
return building_blocks.Call(identity_function, comp)
def _decorate_if_reference_without_graph(comp):
"""Decorates references under `comp` if necessary."""
if (isinstance(comp, building_blocks.Tuple) and
any(_should_decorate(x) for x in comp)):
elems = []
for x in anonymous_tuple.iter_elements(comp):
if _should_decorate(x[1]):
elems.append((x[0], _decorate(x[1])))
else:
elems.append((x[0], x[1]))
return building_blocks.Tuple(elems), True
elif (isinstance(comp, building_blocks.Call) and
not isinstance(comp.function, building_blocks.CompiledComputation) and
_should_decorate(comp.argument)):
arg = _decorate(comp.argument)
return building_blocks.Call(comp.function, arg), True
elif (isinstance(comp, building_blocks.Selection) and
_should_decorate(comp.source)):
return building_blocks.Selection(
_decorate(comp.source), name=comp.name, index=comp.index), True
elif (isinstance(comp, building_blocks.Lambda) and
_should_decorate(comp.result)):
return building_blocks.Lambda(comp.parameter_name, comp.parameter_type,
_decorate(comp.result)), True
elif isinstance(comp, building_blocks.Block) and (
any(_should_decorate(x[1]) for x in comp.locals) or
_should_decorate(comp.result)):
new_locals = []
for x in comp.locals:
if _should_decorate(x[1]):
new_locals.append((x[0], _decorate(x[1])))
else:
new_locals.append((x[0], x[1]))
new_result = comp.result
if _should_decorate(comp.result):
new_result = _decorate(comp.result)
return building_blocks.Block(new_locals, new_result), True
return comp, False
return transformation_utils.transform_postorder(
comp, _decorate_if_reference_without_graph)
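# --- Illustrative sketch (added for exposition; not part of the original module) ---
# Given a lambda whose result is a bare reference to its parameter, decoration
# rewrites the leaf so it matches the Call(CompiledComputation, Reference) pattern
# described above. `tensor_type` is any TFF tensor type and is purely illustrative.
def _example_decorate_leaves(tensor_type):
  """Hypothetical example of the before/after shape of the AST."""
  fn = building_blocks.Lambda(
      'x', tensor_type, building_blocks.Reference('x', tensor_type))
  decorated, modified = insert_called_tf_identity_at_leaves(fn)
  # `decorated` is now roughly (x -> comp(x)), where comp is a compiled identity.
  return decorated, modified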
def unwrap_placement(comp):
"""Strips `comp`'s placement, returning a single call to map, apply or value.
For this purpose it is necessary to assume that all processing under `comp`
is happening at a single placement.
The other assumptions on inputs of `unwrap_placement` are enumerated as
follows:
1. There is at most one unbound reference under `comp`, which is of federated
type.
2. The only intrinsics present here are apply or map, zip,
and federated_value_at_*.
3. The type signature of `comp` is federated.
4. There are no instances of `building_blocks.Data` of federated
type under `comp`; how these would be handled by a function such as this
is not entirely clear.
Under these conditions, `unwrap_placement` will produce a single call to
federated_map, federated_apply or federated_value, depending on the placement
and type signature of `comp`. Other than this single map or apply, no
intrinsics will remain under `comp`.
Args:
comp: Instance of `building_blocks.ComputationBuildingBlock` satisfying the
assumptions above.
Returns:
A modified version of `comp`, whose root is a single called
intrinsic, and containing no other intrinsics. Equivalent
to `comp`.
Raises:
TypeError: If the lone unbound reference under `comp` is not of federated
type, `comp` itself is not of federated type, or `comp` is not a building
block.
ValueError: If we encounter a placement other than the one declared by
`comp.type_signature`, an intrinsic not present in the whitelist above, or
`comp` contains more than one unbound reference.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(comp.type_signature, computation_types.FederatedType)
single_placement = comp.type_signature.placement
tree_analysis.check_has_single_placement(comp, single_placement)
name_generator = building_block_factory.unique_name_generator(comp)
all_unbound_references = get_map_of_unbound_references(comp)
root_unbound_references = all_unbound_references[comp]
if len(root_unbound_references) > 1:
raise ValueError(
'`unwrap_placement` can only handle computations with at most a single '
'unbound reference; you have passed in the computation {} with {} '
'unbound references.'.format(comp.compact_representation(),
len(root_unbound_references)))
if len(root_unbound_references) == 1:
unbound_reference_name = root_unbound_references.pop()
else:
unbound_reference_name = None
def _rename_unbound_variable(comp, unbound_variable_name):
"""Reads info about the unbound variable, and renames it uniquely.
The unique rename is simply to preserve uniqueness of names if this
property is present in the argument to `unwrap_placement`, since we will
eventually be binding a new reference of non-federated type in place
of this federated unbound reference.
Args:
comp: Instance of `building_blocks.ComputationBuildingBlock` with at most
a single unbound reference.
unbound_variable_name: The name of the lone unbound variable present under
`comp`.
Returns:
A tuple, whose first element is a possibly transformed version of `comp`
with its unbound variable renamed to a name which is globally unique
within `comp`, and its second element a tuple containing the new name
given to the unbound reference, and the type of this unbound reference.
"""
unbound_reference_name_and_type_pair = [(None, None)]
class _UnboundVariableIdentifier(transformation_utils.BoundVariableTracker):
"""transformation_utils.SymbolTree node for tracking unbound variables."""
def __init__(self, name, value):
super(_UnboundVariableIdentifier, self).__init__(name, value)
self.unbound = False
def __str__(self):
return ''
def update(self, x):
del x # Unused
self.unbound = True
symbol_tree = transformation_utils.SymbolTree(_UnboundVariableIdentifier)
symbol_tree.ingest_variable_binding(unbound_variable_name, None)
symbol_tree.update_payload_with_name(unbound_variable_name)
def _should_transform(comp, symbol_tree):
return (isinstance(comp, building_blocks.Reference) and
comp.name == unbound_variable_name and
symbol_tree.get_payload_with_name(comp.name).unbound)
def _rename_unbound_variable(comp, symbol_tree):
"""Updates the nonlocal tracker, and renames the unbound variable."""
if not _should_transform(comp, symbol_tree):
return comp, False
if unbound_reference_name_and_type_pair[0][1] is None:
name = six.next(name_generator)
unbound_reference_name_and_type_pair[0] = (name, comp.type_signature)
else:
name = unbound_reference_name_and_type_pair[0][0]
return building_blocks.Reference(name, comp.type_signature), True
renamed_comp, _ = transformation_utils.transform_postorder_with_symbol_bindings(
comp, _rename_unbound_variable, symbol_tree)
return renamed_comp, unbound_reference_name_and_type_pair[0]
def _remove_placement(comp):
"""Unwraps placement from `comp`.
`_remove_placement` embodies the main transform logic in
`unwrap_placement`, performing a pure AST transformation to replace
any nodes of federated type with equivalent non-federated versions.
Whether or not it is safe to do this is left to `unwrap_placement` to
handle.
One note on the implementation: the four cases in the internal `_transform`
switch correspond exactly to the building blocks which explicitly take type
signatures as arguments to their constructors.
Args:
comp: Instance of `building_blocks.ComputationBuildingBlock` from which we
wish to remove placement.
Returns:
A transformed version of comp with its placements removed.
Raises:
NotImplementedError: In case a node of type
`building_blocks.Data` is encountered in the AST, as
handling of data objects is not yet implemented in TFF and so it is
unclear what this function should do in that case.
"""
def _remove_placement_from_type(type_spec):
if isinstance(type_spec, computation_types.FederatedType):
return type_spec.member, True
else:
return type_spec, False
def _remove_reference_placement(comp):
"""Unwraps placement from references and updates unbound reference info."""
new_type, _ = type_utils.transform_type_postorder(
comp.type_signature, _remove_placement_from_type)
return building_blocks.Reference(comp.name, new_type)
def _replace_intrinsics_with_functions(comp):
"""Helper to remove intrinsics from the AST."""
if (comp.uri == intrinsic_defs.FEDERATED_ZIP_AT_SERVER.uri or
comp.uri == intrinsic_defs.FEDERATED_ZIP_AT_CLIENTS.uri or
comp.uri == intrinsic_defs.FEDERATED_VALUE_AT_SERVER.uri or
comp.uri == intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri):
arg_name = six.next(name_generator)
arg_type = comp.type_signature.result.member
val = building_blocks.Reference(arg_name, arg_type)
lam = building_blocks.Lambda(arg_name, arg_type, val)
return lam
elif comp.uri not in (intrinsic_defs.FEDERATED_MAP.uri,
intrinsic_defs.FEDERATED_MAP_ALL_EQUAL.uri,
intrinsic_defs.FEDERATED_APPLY.uri):
raise ValueError('Disallowed intrinsic: {}'.format(comp))
arg_name = six.next(name_generator)
tuple_ref = building_blocks.Reference(arg_name, [
comp.type_signature.parameter[0],
comp.type_signature.parameter[1].member,
])
fn = building_blocks.Selection(tuple_ref, index=0)
arg = building_blocks.Selection(tuple_ref, index=1)
called_fn = building_blocks.Call(fn, arg)
return building_blocks.Lambda(arg_name, tuple_ref.type_signature,
called_fn)
def _remove_lambda_placement(comp):
"""Removes placement from Lambda's parameter."""
new_parameter_type, _ = type_utils.transform_type_postorder(
comp.parameter_type, _remove_placement_from_type)
return building_blocks.Lambda(comp.parameter_name, new_parameter_type,
comp.result)
def _transform(comp):
"""Dispatches to helpers above."""
if isinstance(comp, building_blocks.Reference):
return _remove_reference_placement(comp), True
elif isinstance(comp, building_blocks.Intrinsic):
return _replace_intrinsics_with_functions(comp), True
elif isinstance(comp, building_blocks.Lambda):
return _remove_lambda_placement(comp), True
elif (isinstance(comp, building_blocks.Data) and
isinstance(comp.type_signature, computation_types.FederatedType)):
# TODO(b/135126947): Design and implement Data constructs.
raise NotImplementedError
return comp, False
return transformation_utils.transform_postorder(comp, _transform)
if unbound_reference_name is None:
unbound_variable_renamed = comp
unbound_reference_name = None
unbound_reference_type = None
else:
(unbound_variable_renamed,
unbound_reference_info) = _rename_unbound_variable(comp,
unbound_reference_name)
(new_reference_name, unbound_reference_type) = unbound_reference_info
if not isinstance(unbound_reference_type, computation_types.FederatedType):
raise TypeError('The lone unbound reference is not of federated type; '
'this is disallowed. '
'The unbound type is {}'.format(unbound_reference_type))
placement_removed, _ = _remove_placement(unbound_variable_renamed)
if unbound_reference_name is None:
return building_block_factory.create_federated_value(
placement_removed, single_placement), True
ref_to_fed_arg = building_blocks.Reference(unbound_reference_name,
unbound_reference_type)
lambda_wrapping_placement_removal = building_blocks.Lambda(
new_reference_name, unbound_reference_type.member, placement_removed)
called_intrinsic = building_block_factory.create_federated_map_or_apply(
lambda_wrapping_placement_removal, ref_to_fed_arg)
return called_intrinsic, True
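# --- Illustrative sketch (added for exposition; not part of the original module) ---
# A hypothetical driver: `comp` is assumed to be a federated computation meeting
# the assumptions documented above (single placement, at most one unbound
# reference, only the whitelisted intrinsics).
def _example_unwrap_placement(comp):
  """Shows the expected shape of the result of `unwrap_placement` (sketch only)."""
  unwrapped, modified = unwrap_placement(comp)
  # `unwrapped` is a single call to federated_map, federated_apply or a
  # federated_value intrinsic; no other intrinsics remain beneath it.
  return unwrapped, modified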
def get_map_of_unbound_references(comp):
"""Gets a Python `dict` of the unbound references in `comp`.
Computations that are equal will have the same collections of unbound
references, so it is safe to use `comp` as the key for this `dict` even though
a given computation may appear in many positions in the AST.
Args:
comp: The computation building block to parse.
Returns:
A Python `dict` of elements where keys are the computations in `comp` and
values are a Python `set` of the names of the unbound references in the
subtree of that computation.
"""
py_typecheck.check_type(comp, building_blocks.ComputationBuildingBlock)
references = {}
def _update(comp):
"""Updates the Python dict of references."""
if isinstance(comp, building_blocks.Reference):
references[comp] = set((comp.name,))
elif isinstance(comp, building_blocks.Block):
references[comp] = set()
names = []
for name, variable in comp.locals:
elements = references[variable]
references[comp].update([e for e in elements if e not in names])
names.append(name)
elements = references[comp.result]
references[comp].update([e for e in elements if e not in names])
elif isinstance(comp, building_blocks.Call):
elements = references[comp.function]
if comp.argument is not None:
elements.update(references[comp.argument])
references[comp] = elements
elif isinstance(comp, building_blocks.Lambda):
elements = references[comp.result]
references[comp] = set([e for e in elements if e != comp.parameter_name])
elif isinstance(comp, building_blocks.Selection):
references[comp] = references[comp.source]
elif isinstance(comp, building_blocks.Tuple):
elements = [references[e] for e in comp]
references[comp] = set(itertools.chain.from_iterable(elements))
else:
references[comp] = set()
return comp, False
transformation_utils.transform_postorder(comp, _update)
return references
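# --- Illustrative sketch (added for exposition; not part of the original module) ---
# The returned dict maps every sub-computation to the set of reference names left
# unbound in its subtree. The helper name and `tensor_type` argument below are
# hypothetical, added only for illustration.
def _example_unbound_references(tensor_type):
  """Hypothetical example: the lambda (x -> y) leaves only 'y' unbound."""
  ref_y = building_blocks.Reference('y', tensor_type)
  fn = building_blocks.Lambda('x', tensor_type, ref_y)
  unbound = get_map_of_unbound_references(fn)
  # The root lambda binds 'x', so only 'y' remains unbound at the root.
  assert unbound[fn] == set(['y'])
  return unbound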
def _computations_equal(comp_1, comp_2):
"""Returns `True` if the computations are equal.
Passing objects other than instances of
`building_blocks.ComputationBuildingBlock` raises a `TypeError`, as noted
below. Structurally equivalent computations with different variable
names are not considered to be equal.
NOTE: This function could be quite expensive if you do not call
`extract_computations` first. Extracting all computations reduces the equality
of two computations in most cases to an identity check. One notable exception
to this is `CompiledComputation` for which equality is delegated to the proto
object.
Args:
comp_1: A `building_blocks.ComputationBuildingBlock` to test.
comp_2: A `building_blocks.ComputationBuildingBlock` to test.
Raises:
TypeError: If `comp_1` or `comp_2` is not an instance of
`building_blocks.ComputationBuildingBlock`.
NotImplementedError: If `comp_1` and `comp_2` are an unexpected subclass of
`building_blocks.ComputationBuildingBlock`.
"""
py_typecheck.check_type(comp_1, building_blocks.ComputationBuildingBlock)
py_typecheck.check_type(comp_2, building_blocks.ComputationBuildingBlock)
if comp_1 is comp_2:
return True
# The unidiomatic typecheck is intentional: for the purposes of equality this
# function requires that the types are identical and that a subclass will not
# be equal to its base class.
if type(comp_1) != type(comp_2): # pylint: disable=unidiomatic-typecheck
return False
if comp_1.type_signature != comp_2.type_signature:
return False
if isinstance(comp_1, building_blocks.Block):
if not _computations_equal(comp_1.result, comp_2.result):
return False
if len(comp_1.locals) != len(comp_2.locals):
return False
for (name_1, value_1), (name_2, value_2) in zip(comp_1.locals,
comp_2.locals):
if name_1 != name_2 or not _computations_equal(value_1, value_2):
return False
return True
elif isinstance(comp_1, building_blocks.Call):
return (_computations_equal(comp_1.function, comp_2.function) and
(comp_1.argument is None and comp_2.argument is None or
_computations_equal(comp_1.argument, comp_2.argument)))
elif isinstance(comp_1, building_blocks.CompiledComputation):
return comp_1.proto == comp_2.proto
elif isinstance(comp_1, building_blocks.Data):
return comp_1.uri == comp_2.uri
elif isinstance(comp_1, building_blocks.Intrinsic):
return comp_1.uri == comp_2.uri
elif isinstance(comp_1, building_blocks.Lambda):
return (comp_1.parameter_name == comp_2.parameter_name and
comp_1.parameter_type == comp_2.parameter_type and
_computations_equal(comp_1.result, comp_2.result))
elif isinstance(comp_1, building_blocks.Placement):
return comp_1.uri == comp_2.uri
elif isinstance(comp_1, building_blocks.Reference):
return comp_1.name == comp_2.name
elif isinstance(comp_1, building_blocks.Selection):
return (_computations_equal(comp_1.source, comp_2.source) and
comp_1.name == comp_2.name and comp_1.index == comp_2.index)
elif isinstance(comp_1, building_blocks.Tuple):
# The element names are checked as part of the `type_signature`.
if len(comp_1) != len(comp_2):
return False
for element_1, element_2 in zip(comp_1, comp_2):
if not _computations_equal(element_1, element_2):
return False
return True
raise NotImplementedError('Unexpected type found: {}.'.format(type(comp_1)))
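# --- Illustrative sketch (added for exposition; not part of the original module) ---
# Equality here is purely structural and name-sensitive: two references are equal
# only if both their names and their type signatures match. The helper name and
# `tensor_type` argument below are hypothetical, added only for illustration.
def _example_structural_equality(tensor_type):
  """Hypothetical example of name-sensitive structural equality."""
  ref_a = building_blocks.Reference('a', tensor_type)
  same = building_blocks.Reference('a', tensor_type)
  other = building_blocks.Reference('b', tensor_type)
  assert _computations_equal(ref_a, same)
  assert not _computations_equal(ref_a, other)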
| 40.056299
| 84
| 0.698794
|
63ebb3707b33ed2b5a293abbc63eaee09278fd47
| 22,781
|
py
|
Python
|
tests/test_repo.py
|
onecommons/giterop
|
9d9c6730ac5bce63f26dd1fd1e151006bc8230dd
|
[
"MIT"
] | null | null | null |
tests/test_repo.py
|
onecommons/giterop
|
9d9c6730ac5bce63f26dd1fd1e151006bc8230dd
|
[
"MIT"
] | null | null | null |
tests/test_repo.py
|
onecommons/giterop
|
9d9c6730ac5bce63f26dd1fd1e151006bc8230dd
|
[
"MIT"
] | null | null | null |
import unittest
import os
import traceback
import six
from click.testing import CliRunner
from unfurl.__main__ import cli, _latestJobs
from unfurl.localenv import LocalEnv
from unfurl.repo import (
split_git_url,
is_url_or_git_path,
RepoView,
normalize_git_url,
GitRepo,
)
from git import Repo
from unfurl.configurator import Configurator, Status
from toscaparser.common.exception import URLException
import unfurl.configurators # python2.7 workaround
import unfurl.configurators.shell
import unfurl.yamlmanifest # python2.7 workaround
def createUnrelatedRepo(gitDir):
os.makedirs(gitDir)
repo = Repo.init(gitDir)
filename = "README"
filepath = os.path.join(gitDir, filename)
with open(filepath, "w") as f:
f.write("""just another git repository""")
repo.index.add([filename])
repo.index.commit("Initial Commit")
return repo
installDirs = [
("terraform", "0.13.6", "bin"),
("gcloud", "313.0.0", "bin"),
("helm", "3.3.4", "bin"),
]
def makeAsdfFixtures(base):
# make install dirs so we can pretend we already downloaded these
for subdir in installDirs:
path = os.path.join(base, "plugins", subdir[0])
if not os.path.exists(path):
os.makedirs(path)
path = os.path.join(base, "installs", *subdir)
if not os.path.exists(path):
os.makedirs(path)
class AConfigurator(Configurator):
def run(self, task):
assert self.can_run(task)
yield task.done(True, Status.ok)
manifestContent = """\
apiVersion: unfurl/v1alpha1
kind: Ensemble
+include:
file: ensemble-template.yaml
repository: spec
spec:
service_template:
topology_template:
node_templates:
my_server:
type: tosca.nodes.Compute
interfaces:
Standard:
create: A
status: {}
"""
awsTestManifest = """\
apiVersion: unfurl/v1alpha1
kind: Ensemble
+include:
file: ensemble-template.yaml
repository: spec
environment:
variables:
AWS_ACCESS_KEY_ID: mockAWS_ACCESS_KEY_ID
connections:
aws_test: primary_provider
changes: [] # set this so we save changes here instead of the job changelog files
spec:
service_template:
topology_template:
node_templates:
testNode:
type: tosca.nodes.Root
interfaces:
Install:
operations:
check:
implementation:
className: unfurl.configurators.TemplateConfigurator
inputs:
# test that the aws connection (defined in the home manifest and renamed in the context to aws_test)
# set its AWS_ACCESS_KEY_ID to the environment variable set in the current context
resultTemplate: |
- name: SELF
attributes:
# all connections available to the OPERATION_HOST as a dictionary
access_key: {{ "$connections::aws_test::AWS_ACCESS_KEY_ID" | eval }}
# the current connections between the OPERATION_HOST and the target or the target's HOSTs
access_key2: {{ "$connections::*::AWS_ACCESS_KEY_ID?" | eval }}
access_key3: {{ "$connections::AWSAccount::AWS_ACCESS_KEY_ID" | eval }}
"""
class GitRepoTest(unittest.TestCase):
"""
test that .gitignore and local/unfurl.yaml are created
test that init cmd committed the project config and related files
"""
def test_init_in_existing_repo(self):
runner = CliRunner()
with runner.isolated_filesystem():
repoDir = "./arepo"
repo = createUnrelatedRepo(repoDir)
os.chdir(repoDir)
# override home so as to avoid interfering with other tests
result = runner.invoke(
cli,
[
"--home",
"../unfurl_home",
"init",
"--existing",
"--mono",
"deploy_dir",
],
)
# uncomment this to see output:
# print("result.output", result.exit_code, result.output)
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
self.assertEqual(result.exit_code, 0, result)
expectedCommittedFiles = {
"unfurl.yaml",
".unfurl-local-template.yaml",
"ensemble-template.yaml",
".gitignore",
".gitattributes",
}
expectedFiles = expectedCommittedFiles | {
"local",
"ensemble",
"secrets",
".secrets",
}
self.assertEqual(set(os.listdir("deploy_dir")), expectedFiles)
files = set(_path for (_path, _stage) in repo.index.entries)
expectedCommittedFiles.add("ensemble/ensemble.yaml")
expectedCommittedFiles.add(".secrets/secrets.yaml")
expected = {"deploy_dir/" + f for f in expectedCommittedFiles}
expected.add("README") # the original file in the repo
self.assertEqual(files, expected)
# for n in expectedFiles:
# with open("deploy_dir/" + n) as f:
# print(n)
# print(f.read())
with open("deploy_dir/ensemble/ensemble.yaml", "w") as f:
f.write(manifestContent)
result = runner.invoke(
cli,
[
"--home",
"../unfurl_home",
"git",
"--dir",
"deploy_dir",
"commit",
"-m",
"update manifest",
"deploy_dir/ensemble/ensemble.yaml",
],
)
# uncomment this to see output:
# print("commit result.output", result.exit_code, result.output)
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
# "-vvv",
args = [
"--home",
"../unfurl_home",
"deploy",
"deploy_dir",
"--jobexitcode",
"degraded",
]
result = runner.invoke(cli, args)
# print("result.output", result.exit_code, result.output)
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
self.assertEqual(result.exit_code, 0, result)
def test_split_repos(self):
"""
test that the init cli command sets git repos correctly in "polyrepo" mode.
"""
self.maxDiff = None
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(cli, ["--home", "", "init"])
# uncomment this to see output:
# print("result.output", result.exit_code, result.output)
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
self.assertEqual(result.exit_code, 0, result)
result = runner.invoke(cli, ["--home", "", "git", "ls-files"])
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
self.assertEqual(result.exit_code, 0, result)
output = u"""\
*** Running 'git ls-files' in '.'
.gitattributes
.gitignore
.secrets/secrets.yaml
.unfurl-local-template.yaml
ensemble-template.yaml
unfurl.yaml
*** Running 'git ls-files' in './ensemble'
.gitattributes
.gitignore
ensemble.yaml
"""
self.assertEqual(
output.strip(), result.output.strip(), result.output.strip()
)
with open(".git/info/exclude") as f:
contents = f.read()
self.assertIn("ensemble", contents)
result = runner.invoke(cli, ["--home", "", "deploy", "--commit"])
# uncomment this to see output:
# print("result.output", result.exit_code, result.output)
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
self.assertEqual(result.exit_code, 0, result)
def test_home_manifest(self):
"""
test that we can connect to an AWS account
"""
runner = CliRunner()
with runner.isolated_filesystem():
# override home so as to avoid interfering with other tests
result = runner.invoke(
cli,
[
"--home",
"./unfurl_home",
"init",
"--mono",
"--skeleton=aws",
],
)
# uncomment this to see output:
# print("result.output", result.exit_code, result.output)
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
assert os.path.isdir("./unfurl_home"), "home project not created"
assert os.path.isfile(
"./unfurl_home/unfurl.yaml"
), "home unfurl.yaml not created"
with open("ensemble/ensemble.yaml", "w") as f:
f.write(awsTestManifest)
result = runner.invoke(
cli,
[
"--home",
"./unfurl_home",
"git",
"commit",
"-m",
"update manifest",
"ensemble/ensemble.yaml",
],
)
# uncomment this to see output:
# print("commit result.output", result.exit_code, result.output)
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
args = [
# "-vvv",
"--home",
"./unfurl_home",
"check",
"--dirty=ok",
"--commit",
"--jobexitcode",
"degraded",
]
result = runner.invoke(cli, args)
# print("result.output", result.exit_code, result.output)
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
self.assertEqual(result.exit_code, 0, result)
assert _latestJobs
job = _latestJobs[-1]
attrs = job.rootResource.find_resource("testNode").attributes
access_key = attrs["access_key"]
self.assertEqual(access_key, "mockAWS_ACCESS_KEY_ID")
access_key2 = attrs["access_key2"]
self.assertEqual(access_key2, "mockAWS_ACCESS_KEY_ID", access_key2)
access_key3 = attrs["access_key3"]
self.assertEqual(access_key3, "mockAWS_ACCESS_KEY_ID", access_key3)
# check that these are the only recorded changes
expected = {
"::testNode": {
"access_key": "mockAWS_ACCESS_KEY_ID",
"access_key2": "mockAWS_ACCESS_KEY_ID",
"access_key3": "mockAWS_ACCESS_KEY_ID",
}
}
changes = job.manifest.manifest.config["changes"][0]["changes"]
self.assertEqual(expected, changes, changes)
# changeLogPath = (
# "ensemble/" + job.manifest.manifest.config["lastJob"]["changes"]
# )
# with open(changeLogPath) as f:
# print(f.read())
# tasks = list(job.workDone.values())
# print("task", tasks[0].summary())
# print("job", job.stats(), job.getOutputs())
# self.assertEqual(job.status.name, "ok")
# self.assertEqual(job.stats()["ok"], 1)
# self.assertEqual(job.getOutputs()["aOutput"], "set")
def test_repo_urls(self):
urls = {
"foo/file": None,
"git@github.com:onecommons/unfurl_site.git": (
"git@github.com:onecommons/unfurl_site.git",
"",
"",
),
"git-local://e67559c0bc47e8ed2afb11819fa55ecc29a87c97:spec/unfurl": (
"git-local://e67559c0bc47e8ed2afb11819fa55ecc29a87c97:spec",
"unfurl",
"",
),
"/home/foo/file": None,
"/home/foo/repo.git": ("/home/foo/repo.git", "", ""),
"/home/foo/repo.git#branch:unfurl": (
"/home/foo/repo.git",
"unfurl",
"branch",
),
"https://github.com/onecommons/": (
"https://github.com/onecommons/",
"",
"",
),
"foo/repo.git": ("foo/repo.git", "", ""),
"https://github.com/onecommons/base.git#branch:unfurl": (
"https://github.com/onecommons/base.git",
"unfurl",
"branch",
),
"file:foo/file": None,
"foo/repo.git#branch:unfurl": ("foo/repo.git", "unfurl", "branch"),
"https://github.com/onecommons/base.git": (
"https://github.com/onecommons/base.git",
"",
"",
),
"https://github.com/onecommons/base.git#ref": (
"https://github.com/onecommons/base.git",
"",
"ref",
),
"git@github.com:onecommons/unfurl_site.git#rev:unfurl": (
"git@github.com:onecommons/unfurl_site.git",
"unfurl",
"rev",
),
"file:foo/repo.git#branch:unfurl": (
"file:foo/repo.git",
"unfurl",
"branch",
),
"file:foo/repo.git": ("file:foo/repo.git", "", ""),
}
for url, expected in urls.items():
if expected:
isurl = expected[0] != "foo/repo.git"
assert is_url_or_git_path(url), url
self.assertEqual(split_git_url(url), expected)
else:
isurl = url[0] == "/" or ":" in url
assert not is_url_or_git_path(url), url
if isurl:
# relative urls aren't allowed here, skip those
rv = RepoView(dict(name="", url=url), None)
self.assertEqual(normalize_git_url(rv.url), normalize_git_url(url))
else:
self.assertRaises(URLException, RepoView, dict(name="", url=url), None)
def test_home_template(self):
# test creating and deploying the home template
runner = CliRunner()
with runner.isolated_filesystem():
# override home so as to avoid interfering with other tests
result = runner.invoke(cli, ["--home", "./unfurl_home", "home", "--init"])
# uncomment this to see output:
# print("result.output", result.exit_code, result.output)
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
assert not os.path.exists("./unfurl_home/.tool_versions")
makeAsdfFixtures("test_asdf")
os.environ["ASDF_DATA_DIR"] = os.path.abspath("test_asdf")
args = [
# "-vvv",
"deploy",
"./unfurl_home",
"--jobexitcode",
"degraded",
]
result = runner.invoke(cli, args)
# print("result.output", result.exit_code, result.output)
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
self.assertEqual(result.exit_code, 0, result)
# test that invoking an ensemble will cause the project it is in to be registered in the home project
# we use this ensemble because it is in a repository with a non-local origin set:
project = os.path.join(
os.path.dirname(__file__), "examples/testimport-ensemble.yaml"
)
# set starttime to suppress job logging to file
result = runner.invoke(
cli, ["--home", "./unfurl_home", "plan", "--starttime=1", project]
)
# print("result.output", result.exit_code, result.output)
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
self.assertEqual(result.exit_code, 0, result)
# XXX replace above command with deploying an ensemble with an implementation artifact
# that will trigger asdf to be installed so we can re-enable testing it:
# assert os.path.exists("unfurl_home/.tool-versions")
# assert LocalEnv("unfurl_home").get_manifest()
# paths = os.environ["PATH"].split(os.pathsep)
# assert len(paths) >= len(installDirs)
# for dirs, path in zip(installDirs, paths):
# self.assertIn(os.sep.join(dirs), path)
# assert added to projects
basedir = os.path.dirname(os.path.dirname(__file__))
repo = GitRepo(Repo(basedir))
gitUrl = normalize_git_url(repo.url) + "#:tests/examples"
# assert added to projects
# travis-ci does a shallow clone so it doesn't have the initial revision
initial = repo.get_initial_revision()
with open("./unfurl_home/unfurl.yaml") as f:
contents = f.read()
# print(contents)
for line in [
"examples:",
"url: " + gitUrl,
"initial: " + initial,
]:
self.assertIn(line, contents, line)
# assert added to localRepositories
with open("./unfurl_home/local/unfurl.yaml") as f:
contents = f.read()
# print(contents)
for line in [
"url: " + normalize_git_url(gitUrl),
"initial: " + initial,
]:
self.assertIn(line, contents)
self.assertNotIn("origin:", contents)
externalProjectManifest = """
apiVersion: unfurl/v1alpha1
kind: Ensemble
environment:
external:
test:
manifest:
file: testimport-ensemble.yaml
project: examples
spec:
service_template:
topology_template:
node_templates:
testNode:
type: tosca.nodes.Root
properties:
externalEnsemble:
eval:
external: test
"""
with open("externalproject.yaml", "w") as f:
f.write(externalProjectManifest)
result = runner.invoke(
cli, ["--home", "./unfurl_home", "plan", "externalproject.yaml"]
)
# print("result.output", result.exit_code, result.output)
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
self.assertEqual(result.exit_code, 0, result)
# assert that we loaded the external ensemble from the test "examples" project
# and we're able to reference its outputs
assert _latestJobs
testNode = _latestJobs[-1].rootResource.find_resource("testNode")
assert testNode and testNode.attributes["externalEnsemble"]
externalEnsemble = testNode.attributes["externalEnsemble"]
assert "aOutput" in externalEnsemble.attributes["outputs"]
# make sure we loaded it from the source (not a local checkout)
assert externalEnsemble.base_dir.startswith(os.path.dirname(__file__))
def test_remote_git_repo(self):
runner = CliRunner()
with runner.isolated_filesystem():
result = runner.invoke(cli, ["--home", "./unfurl_home", "init", "--mono"])
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
with open("ensemble/ensemble.yaml", "w") as f:
f.write(repoManifestContent)
ensemble = LocalEnv().get_manifest()
# Updated origin/master to a319ac1914862b8ded469d3b53f9e72c65ba4b7f
path = "base-payments"
self.assertEqual(
os.path.abspath(path),
ensemble.rootResource.find_resource("my_server").attributes[
"repo_path"
],
)
assert os.path.isdir(os.path.join(path, ".git"))
def test_submodules(self):
runner = CliRunner()
with runner.isolated_filesystem():
# override home so as to avoid interfering with other tests
result = runner.invoke(
cli, ["--home", "./unfurl_home", "init", "test", "--submodule"]
)
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
self.assertEqual(result.exit_code, 0, result)
result = runner.invoke(cli, ["clone", "test", "cloned"])
assert not result.exception, "\n".join(
traceback.format_exception(*result.exc_info)
)
self.assertEqual(result.exit_code, 0, result)
assert os.path.isfile("cloned/ensemble/.git") and not os.path.isdir(
"cloned/ensemble/.git"
)
assert not os.path.exists("cloned/ensemble1"), result.output
repoManifestContent = """\
apiVersion: unfurl/v1alpha1
kind: Ensemble
spec:
instances:
my_server:
template: my_server
service_template:
repositories:
remote-git-repo:
# use a remote git repository that is fast to download but big enough to test the fetching progress output
url: https://github.com/onecommons/base-payments.git
topology_template:
node_templates:
my_server:
type: tosca.nodes.Compute
properties:
repo_path:
eval:
get_dir: remote-git-repo
"""
| 36.743548
| 120
| 0.528379
|
c6405819725448cbb280780109423b9946bfb9d5
| 3,525
|
py
|
Python
|
Defensordelpueblo_bot.py
|
Joel1844/bot_twitter_telegram
|
e2bfa0f6a5decea210c00b9ee3c4a9026461d0f2
|
[
"MIT"
] | null | null | null |
Defensordelpueblo_bot.py
|
Joel1844/bot_twitter_telegram
|
e2bfa0f6a5decea210c00b9ee3c4a9026461d0f2
|
[
"MIT"
] | null | null | null |
Defensordelpueblo_bot.py
|
Joel1844/bot_twitter_telegram
|
e2bfa0f6a5decea210c00b9ee3c4a9026461d0f2
|
[
"MIT"
] | null | null | null |
import logging
from telegram import Update, ForceReply
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters, CallbackContext
import twint
from os import remove
from os import path
import datetime
import pandas as pd
import nest_asyncio
import time
nest_asyncio.apply()
# Enable logging
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
)
logger = logging.getLogger(__name__)
# Define a few command handlers. These usually take the two arguments update and
# context.
def start(update: Update, context: CallbackContext) -> None:
"""Send a message when the command /start is issued."""
user = update.effective_user
update.message.reply_markdown_v2(
fr'Hi {user.mention_markdown_v2()}\!',
reply_markup=ForceReply(selective=True),
)
def help_command(update: Update, context: CallbackContext) -> None:
"""Send a message when the command /help is issued."""
update.message.reply_text('Help!')
def echo(update: Update, context: CallbackContext) -> None:
"""Echo the user message."""
update.message.reply_text(update.message.text)
# returns yesterday's date as a string
def getYesterday():
today = datetime.date.today()
oneday = datetime.timedelta(days=1)
yesterday = today-oneday
return str(yesterday)
def tweet(update:Update, context:CallbackContext):
input = update.message.text.split(" ")
if not input[0].lower() == "tweet":
return
palabras_claves = []
palabras_claves.append(input[1])
#palabras_claves = ["defensorrd","vulnerable","defensor","derecho","pueblo","violacion","motin","carcel","educacion","medioambiente","salud","reclamo","queja","cuidadania","pobreza","usuarios","Trabajo","discriminacion","despido","fundamental","peulloa"]
c = twint.Config()
c.Since = getYesterday()
c.Lang = "es"
c.Near = "Republica Dominicana"
c.Limit = 100
c.Store_csv = True
c.Output = "filename.csv"
for palabras in palabras_claves:
c.Search = palabras
twint.run.Search(c)
data = pd.read_csv('filename.csv')
tweet_list = zip(list(data['link']),list(data['tweet']))
for link, tweet in tweet_list:
update.message.reply_text(f'{link} \n {tweet}')
if path.exists('filename.csv'):
remove('filename.csv')
def main() -> None:
try:
"""Start the bot."""
# Create the Updater and pass it your bot's token.
updater = Updater("2009767025:AAHeClxGi3tpiX55ki6PIQVTquLBUq1Vu1s")
# Get the dispatcher to register handlers
dispatcher = updater.dispatcher
# on different commands - answer in Telegram
dispatcher.add_handler(CommandHandler("start", start))
dispatcher.add_handler(CommandHandler("help", help_command))
dispatcher.add_handler(CommandHandler("tweet", tweet))
dispatcher.add_handler(CommandHandler("echo", echo))
# on non command i.e message - echo the message on Telegram
dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, tweet))
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
except Exception as e:
time.sleep(5)
main()
if __name__ == '__main__':
main()
| 32.638889
| 258
| 0.674043
|
a835c7806b661727b7cb4a53534adae8f5f30deb
| 14,582
|
py
|
Python
|
examples/structured_data/deep_neural_decision_forests.py
|
IMvision12/keras-io
|
44997b0610db078e1109d0dbca58db8319dbc744
|
[
"Apache-2.0"
] | null | null | null |
examples/structured_data/deep_neural_decision_forests.py
|
IMvision12/keras-io
|
44997b0610db078e1109d0dbca58db8319dbc744
|
[
"Apache-2.0"
] | null | null | null |
examples/structured_data/deep_neural_decision_forests.py
|
IMvision12/keras-io
|
44997b0610db078e1109d0dbca58db8319dbc744
|
[
"Apache-2.0"
] | 1
|
2022-01-21T11:34:34.000Z
|
2022-01-21T11:34:34.000Z
|
"""
Title: Classification with Neural Decision Forests
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2021/01/15
Last modified: 2021/01/15
Description: How to train differentiable decision trees for end-to-end learning in deep neural networks.
"""
"""
## Introduction
This example provides an implementation of the
[Deep Neural Decision Forest](https://ieeexplore.ieee.org/document/7410529)
model introduced by P. Kontschieder et al. for structured data classification.
It demonstrates how to build a stochastic and differentiable decision tree model,
train it end-to-end, and unify decision trees with deep representation learning.
## The dataset
This example uses the
[United States Census Income Dataset](https://archive.ics.uci.edu/ml/datasets/census+income)
provided by the
[UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php).
The task is binary classification
to predict whether a person is likely to be making over USD 50,000 a year.
The dataset includes 48,842 instances with 14 input features (such as age, work class, education, occupation, and so on): 6 numerical features
and 8 categorical features.
"""
"""
## Setup
"""
import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow import keras
from tensorflow.keras import layers
import math
"""
## Prepare the data
"""
CSV_HEADER = [
"age",
"workclass",
"fnlwgt",
"education",
"education_num",
"marital_status",
"occupation",
"relationship",
"race",
"gender",
"capital_gain",
"capital_loss",
"hours_per_week",
"native_country",
"income_bracket",
]
train_data_url = (
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data"
)
train_data = pd.read_csv(train_data_url, header=None, names=CSV_HEADER)
test_data_url = (
"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test"
)
test_data = pd.read_csv(test_data_url, header=None, names=CSV_HEADER)
print(f"Train dataset shape: {train_data.shape}")
print(f"Test dataset shape: {test_data.shape}")
"""
Remove the first record (because it is not a valid data example) and a trailing
'dot' in the class labels.
"""
test_data = test_data[1:]
test_data.income_bracket = test_data.income_bracket.apply(
lambda value: value.replace(".", "")
)
"""
We store the training and test data splits locally as CSV files.
"""
train_data_file = "train_data.csv"
test_data_file = "test_data.csv"
train_data.to_csv(train_data_file, index=False, header=False)
test_data.to_csv(test_data_file, index=False, header=False)
"""
## Define dataset metadata
Here, we define the metadata of the dataset that will be useful for reading, parsing,
and encoding the input features.
"""
# A list of the numerical feature names.
NUMERIC_FEATURE_NAMES = [
"age",
"education_num",
"capital_gain",
"capital_loss",
"hours_per_week",
]
# A dictionary of the categorical features and their vocabulary.
CATEGORICAL_FEATURES_WITH_VOCABULARY = {
"workclass": sorted(list(train_data["workclass"].unique())),
"education": sorted(list(train_data["education"].unique())),
"marital_status": sorted(list(train_data["marital_status"].unique())),
"occupation": sorted(list(train_data["occupation"].unique())),
"relationship": sorted(list(train_data["relationship"].unique())),
"race": sorted(list(train_data["race"].unique())),
"gender": sorted(list(train_data["gender"].unique())),
"native_country": sorted(list(train_data["native_country"].unique())),
}
# A list of the columns to ignore from the dataset.
IGNORE_COLUMN_NAMES = ["fnlwgt"]
# A list of the categorical feature names.
CATEGORICAL_FEATURE_NAMES = list(CATEGORICAL_FEATURES_WITH_VOCABULARY.keys())
# A list of all the input features.
FEATURE_NAMES = NUMERIC_FEATURE_NAMES + CATEGORICAL_FEATURE_NAMES
# A list of column default values for each feature.
COLUMN_DEFAULTS = [
[0.0] if feature_name in NUMERIC_FEATURE_NAMES + IGNORE_COLUMN_NAMES else ["NA"]
for feature_name in CSV_HEADER
]
# The name of the target feature.
TARGET_FEATURE_NAME = "income_bracket"
# A list of the labels of the target features.
TARGET_LABELS = [" <=50K", " >50K"]
"""
## Create `tf.data.Dataset` objects for training and validation
We create an input function to read and parse the file, and convert features and labels
into a [`tf.data.Dataset`](https://www.tensorflow.org/guide/datasets)
for training and validation. We also preprocess the input by mapping the target label
to an index.
"""
from tensorflow.keras.layers import StringLookup
target_label_lookup = StringLookup(
vocabulary=TARGET_LABELS, mask_token=None, num_oov_indices=0
)
def get_dataset_from_csv(csv_file_path, shuffle=False, batch_size=128):
dataset = tf.data.experimental.make_csv_dataset(
csv_file_path,
batch_size=batch_size,
column_names=CSV_HEADER,
column_defaults=COLUMN_DEFAULTS,
label_name=TARGET_FEATURE_NAME,
num_epochs=1,
header=False,
na_value="?",
shuffle=shuffle,
).map(lambda features, target: (features, target_label_lookup(target)))
return dataset.cache()
"""
## Create model inputs
"""
def create_model_inputs():
inputs = {}
for feature_name in FEATURE_NAMES:
if feature_name in NUMERIC_FEATURE_NAMES:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype=tf.float32
)
else:
inputs[feature_name] = layers.Input(
name=feature_name, shape=(), dtype=tf.string
)
return inputs
"""
## Encode input features
"""
def encode_inputs(inputs):
encoded_features = []
for feature_name in inputs:
if feature_name in CATEGORICAL_FEATURE_NAMES:
vocabulary = CATEGORICAL_FEATURES_WITH_VOCABULARY[feature_name]
# Create a lookup to convert string values to integer indices.
# Since we are not using a mask token, nor expecting any out of vocabulary
# (oov) token, we set mask_token to None and num_oov_indices to 0.
lookup = StringLookup(
vocabulary=vocabulary, mask_token=None, num_oov_indices=0
)
# Convert the string input values into integer indices.
value_index = lookup(inputs[feature_name])
embedding_dims = int(math.sqrt(lookup.vocabulary_size()))
# Create an embedding layer with the specified dimensions.
embedding = layers.Embedding(
input_dim=lookup.vocabulary_size(), output_dim=embedding_dims
)
# Convert the index values to embedding representations.
encoded_feature = embedding(value_index)
else:
# Use the numerical features as-is.
encoded_feature = inputs[feature_name]
if inputs[feature_name].shape[-1] is None:
encoded_feature = tf.expand_dims(encoded_feature, -1)
encoded_features.append(encoded_feature)
encoded_features = layers.concatenate(encoded_features)
return encoded_features
"""
## Deep Neural Decision Tree
A neural decision tree model has two sets of weights to learn. The first set is `pi`,
which represents the probability distribution of the classes in the tree leaves.
The second set is the weights of the routing layer `decision_fn`, which represents the probability
of going to each leaf. The forward pass of the model works as follows:
1. The model expects input `features` as a single vector encoding all the features of an instance
in the batch. This vector can be generated from a Convolutional Neural Network (CNN) applied to images
or dense transformations applied to structured data features.
2. The model first applies a `used_features_mask` to randomly select a subset of input features to use.
3. Then, the model computes the probabilities (`mu`) for the input instances to reach the tree leaves
by iteratively performing a *stochastic* routing throughout the tree levels.
4. Finally, the probabilities of reaching the leaves are combined with the class probabilities at the
leaves to produce the final `outputs`.
"""
class NeuralDecisionTree(keras.Model):
def __init__(self, depth, num_features, used_features_rate, num_classes):
super(NeuralDecisionTree, self).__init__()
self.depth = depth
self.num_leaves = 2**depth
self.num_classes = num_classes
# Create a mask for the randomly selected features.
num_used_features = int(num_features * used_features_rate)
one_hot = np.eye(num_features)
sampled_feature_indicies = np.random.choice(
np.arange(num_features), num_used_features, replace=False
)
self.used_features_mask = one_hot[sampled_feature_indicies]
# Initialize the weights of the classes in leaves.
self.pi = tf.Variable(
initial_value=tf.random_normal_initializer()(
shape=[self.num_leaves, self.num_classes]
),
dtype="float32",
trainable=True,
)
# Initialize the stochastic routing layer.
self.decision_fn = layers.Dense(
units=self.num_leaves, activation="sigmoid", name="decision"
)
def call(self, features):
batch_size = tf.shape(features)[0]
# Apply the feature mask to the input features.
features = tf.matmul(
features, self.used_features_mask, transpose_b=True
) # [batch_size, num_used_features]
# Compute the routing probabilities.
decisions = tf.expand_dims(
self.decision_fn(features), axis=2
) # [batch_size, num_leaves, 1]
# Concatenate the routing probabilities with their complements.
decisions = layers.concatenate(
[decisions, 1 - decisions], axis=2
) # [batch_size, num_leaves, 2]
mu = tf.ones([batch_size, 1, 1])
begin_idx = 1
end_idx = 2
# Traverse the tree in breadth-first order.
for level in range(self.depth):
mu = tf.reshape(mu, [batch_size, -1, 1]) # [batch_size, 2 ** level, 1]
mu = tf.tile(mu, (1, 1, 2)) # [batch_size, 2 ** level, 2]
level_decisions = decisions[
:, begin_idx:end_idx, :
] # [batch_size, 2 ** level, 2]
mu = mu * level_decisions # [batch_size, 2**level, 2]
begin_idx = end_idx
end_idx = begin_idx + 2 ** (level + 1)
mu = tf.reshape(mu, [batch_size, self.num_leaves]) # [batch_size, num_leaves]
probabilities = keras.activations.softmax(self.pi) # [num_leaves, num_classes]
outputs = tf.matmul(mu, probabilities) # [batch_size, num_classes]
return outputs
"""
## Deep Neural Decision Forest
The neural decision forest model consists of a set of neural decision trees that are
trained simultaneously. The output of the forest model is the average of the outputs of its trees.
"""
class NeuralDecisionForest(keras.Model):
def __init__(self, num_trees, depth, num_features, used_features_rate, num_classes):
super(NeuralDecisionForest, self).__init__()
self.ensemble = []
# Initialize the ensemble by adding NeuralDecisionTree instances.
# Each tree will have its own randomly selected input features to use.
for _ in range(num_trees):
self.ensemble.append(
NeuralDecisionTree(depth, num_features, used_features_rate, num_classes)
)
def call(self, inputs):
# Initialize the outputs: a [batch_size, num_classes] matrix of zeros.
batch_size = tf.shape(inputs)[0]
outputs = tf.zeros([batch_size, num_classes])
# Aggregate the outputs of trees in the ensemble.
for tree in self.ensemble:
outputs += tree(inputs)
# Divide the outputs by the ensemble size to get the average.
outputs /= len(self.ensemble)
return outputs
"""
Finally, let's set up the code that will train and evaluate the model.
"""
learning_rate = 0.01
batch_size = 265
num_epochs = 10
hidden_units = [64, 64]
def run_experiment(model):
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=learning_rate),
loss=keras.losses.SparseCategoricalCrossentropy(),
metrics=[keras.metrics.SparseCategoricalAccuracy()],
)
print("Start training the model...")
train_dataset = get_dataset_from_csv(
train_data_file, shuffle=True, batch_size=batch_size
)
model.fit(train_dataset, epochs=num_epochs)
print("Model training finished")
print("Evaluating the model on the test data...")
test_dataset = get_dataset_from_csv(test_data_file, batch_size=batch_size)
_, accuracy = model.evaluate(test_dataset)
print(f"Test accuracy: {round(accuracy * 100, 2)}%")
"""
## Experiment 1: train a decision tree model
In this experiment, we train a single neural decision tree model
where we use all input features.
"""
num_trees = 10
depth = 10
used_features_rate = 1.0
num_classes = len(TARGET_LABELS)
def create_tree_model():
inputs = create_model_inputs()
features = encode_inputs(inputs)
features = layers.BatchNormalization()(features)
num_features = features.shape[1]
tree = NeuralDecisionTree(depth, num_features, used_features_rate, num_classes)
outputs = tree(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
tree_model = create_tree_model()
run_experiment(tree_model)
"""
## Experiment 2: train a forest model
In this experiment, we train a neural decision forest with `num_trees` trees
where each tree uses a randomly selected 50% of the input features. You can control the number
of features to be used in each tree by setting the `used_features_rate` variable.
In addition, we set the depth to 5, instead of 10 as in the previous experiment.
"""
num_trees = 25
depth = 5
used_features_rate = 0.5
def create_forest_model():
inputs = create_model_inputs()
features = encode_inputs(inputs)
features = layers.BatchNormalization()(features)
num_features = features.shape[1]
forest_model = NeuralDecisionForest(
num_trees, depth, num_features, used_features_rate, num_classes
)
outputs = forest_model(features)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
forest_model = create_forest_model()
run_experiment(forest_model)
| 33.292237
| 142
| 0.699287
|
6267664955e6b1624cea7fb7bd68d04ba13065f2
| 1,053
|
py
|
Python
|
Source/FaceRecognition/Domain/Connection.py
|
robertkarol/ReDe-Multiagent-Face-Recognition-System
|
df17cebecc51b2fafb01e07a9bb68e9e4e04163a
|
[
"MIT"
] | null | null | null |
Source/FaceRecognition/Domain/Connection.py
|
robertkarol/ReDe-Multiagent-Face-Recognition-System
|
df17cebecc51b2fafb01e07a9bb68e9e4e04163a
|
[
"MIT"
] | 7
|
2020-04-24T08:22:20.000Z
|
2021-05-21T16:11:52.000Z
|
Source/FaceRecognition/Domain/Connection.py
|
robertkarol/ReDe-Multiagent-Face-Recognition-System
|
df17cebecc51b2fafb01e07a9bb68e9e4e04163a
|
[
"MIT"
] | 1
|
2020-04-26T15:05:07.000Z
|
2020-04-26T15:05:07.000Z
|
class Connection:
def __init__(self, conn_id, reader_stream, writer_stream, byte_order='big'):
self.__conn_id = conn_id
self.__reader_stream = reader_stream
self.__writer_stream = writer_stream
self.__byte_order = byte_order
@property
def connection_id(self):
return self.__conn_id
@property
def reader_stream(self):
return self.__reader_stream
@property
def writer_stream(self):
return self.__writer_stream
async def read_data(self):
data_len = int.from_bytes(await self.__reader_stream.read(4), byteorder=self.__byte_order)
if data_len == 0:
data = None
else:
data = await self.__reader_stream.readexactly(data_len)
return data
async def write_data(self, data) -> None:
self.__writer_stream.write(len(data).to_bytes(4, self.__byte_order))
self.__writer_stream.write(data)
await self.__writer_stream.drain()
def close(self) -> None:
self.__writer_stream.close()
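# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# The class implements simple length-prefixed framing: each message is preceded by
# its byte length encoded as a 4-byte (big-endian by default) integer. The host,
# port, connection id and coroutine name below are assumptions made purely for
# illustration.
async def _example_client():
    import asyncio  # local import so the sketch stays self-contained

    reader, writer = await asyncio.open_connection("127.0.0.1", 8888)
    conn = Connection("client-1", reader, writer)
    await conn.write_data(b"hello")   # sends b'\x00\x00\x00\x05' followed by b'hello'
    reply = await conn.read_data()    # reads the 4-byte length, then that many bytes
    conn.close()
    return reply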
| 30.085714
| 98
| 0.662868
|
4fd6ad005849ac2b841b28f1322899f3ce794655
| 3,385
|
py
|
Python
|
src/poetry/publishing/publisher.py
|
anthonymichaelclark/poetry
|
eb27f816f643c905eb8e8086691454d4490e3dfe
|
[
"MIT"
] | null | null | null |
src/poetry/publishing/publisher.py
|
anthonymichaelclark/poetry
|
eb27f816f643c905eb8e8086691454d4490e3dfe
|
[
"MIT"
] | 1
|
2022-02-22T05:52:32.000Z
|
2022-02-22T05:52:32.000Z
|
src/poetry/publishing/publisher.py
|
daobook/poetry
|
1732d4c14d97de8da4d25e9f4714bcd47c63329f
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from poetry.publishing.uploader import Uploader
from poetry.utils.authenticator import Authenticator
from poetry.utils.helpers import get_cert
from poetry.utils.helpers import get_client_cert
if TYPE_CHECKING:
from pathlib import Path
from cleo.io import BufferedIO
from cleo.io import ConsoleIO
from poetry.poetry import Poetry
logger = logging.getLogger(__name__)
class Publisher:
"""
Registers and publishes packages to remote repositories.
"""
def __init__(self, poetry: Poetry, io: BufferedIO | ConsoleIO) -> None:
self._poetry = poetry
self._package = poetry.package
self._io = io
self._uploader = Uploader(poetry, io)
self._authenticator = Authenticator(poetry.config, self._io)
@property
def files(self) -> list[Path]:
return self._uploader.files
def publish(
self,
repository_name: str | None,
username: str | None,
password: str | None,
cert: Path | None = None,
client_cert: Path | None = None,
dry_run: bool = False,
skip_existing: bool = False,
) -> None:
if not repository_name:
url = "https://upload.pypi.org/legacy/"
repository_name = "pypi"
else:
# Retrieving config information
url = self._poetry.config.get(f"repositories.{repository_name}.url")
if url is None:
raise RuntimeError(f"Repository {repository_name} is not defined")
if not (username and password):
# Check if we have a token first
token = self._authenticator.get_pypi_token(repository_name)
if token:
logger.debug(f"Found an API token for {repository_name}.")
username = "__token__"
password = token
else:
auth = self._authenticator.get_http_auth(repository_name)
if auth:
logger.debug(
f"Found authentication information for {repository_name}."
)
username = auth["username"]
password = auth["password"]
resolved_client_cert = client_cert or get_client_cert(
self._poetry.config, repository_name
)
# Requesting missing credentials but only if there is not a client cert defined.
if not resolved_client_cert:
if username is None:
username = self._io.ask("Username:")
# skip password input if no username is provided, assume unauthenticated
if username and password is None:
password = self._io.ask_hidden("Password:")
self._uploader.auth(username, password)
if repository_name == "pypi":
repository_name = "PyPI"
self._io.write_line(
f"Publishing <c1>{self._package.pretty_name}</c1>"
f" (<c2>{self._package.pretty_version}</c2>) to"
f" <info>{repository_name}</info>"
)
self._uploader.upload(
url,
cert=cert or get_cert(self._poetry.config, repository_name),
client_cert=resolved_client_cert,
dry_run=dry_run,
skip_existing=skip_existing,
)
| 32.548077
| 88
| 0.605318
|
c658098f59007107ed0093d954a6b16cd5ab6344
| 3,126
|
py
|
Python
|
detectron2/data/transforms/albumentations/external.py
|
KUASWoodyLIN/detectron2
|
9a444e03ec830fc306ff85e44072956e5122fa4c
|
[
"Apache-2.0"
] | null | null | null |
detectron2/data/transforms/albumentations/external.py
|
KUASWoodyLIN/detectron2
|
9a444e03ec830fc306ff85e44072956e5122fa4c
|
[
"Apache-2.0"
] | null | null | null |
detectron2/data/transforms/albumentations/external.py
|
KUASWoodyLIN/detectron2
|
9a444e03ec830fc306ff85e44072956e5122fa4c
|
[
"Apache-2.0"
] | 1
|
2021-12-28T02:26:08.000Z
|
2021-12-28T02:26:08.000Z
|
import numpy as np
from fvcore.transforms.transform import Transform, NoOpTransform
from ..augmentation import Augmentation
class AlbumentationsTransform(Transform):
def __init__(self, aug):
self.aug = aug
self.params = aug.get_params()
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
return coords
def apply_image(self, image):
self.params = self.prepare_param(image)
return self.aug.apply(image, **self.params)
def apply_box(self, box: np.ndarray) -> np.ndarray:
try:
return np.array(self.aug.apply_to_bboxes(box.tolist(), **self.params))
except AttributeError:
return box
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
try:
return self.aug.apply_to_mask(segmentation, **self.params)
except AttributeError:
return segmentation
def prepare_param(self, image):
params = self.aug.get_params()
if self.aug.targets_as_params:
targets_as_params = {"image": image}
params_dependent_on_targets = self.aug.get_params_dependent_on_targets(targets_as_params)
params.update(params_dependent_on_targets)
params = self.aug.update_params(params, **{"image": image})
return params
class AlbumentationsWrapper(Augmentation):
"""
    Wrap an augmentor from the albumentations library: https://github.com/albu/albumentations.
Image, Bounding Box and Segmentation are supported.
Example:
.. code-block:: python
import albumentations as A
from detectron2.data import transforms as T
from detectron2.data.transforms.albumentations import AlbumentationsWrapper
augs = T.AugmentationList([
AlbumentationsWrapper(A.RandomCrop(width=256, height=256)),
AlbumentationsWrapper(A.HorizontalFlip(p=1)),
AlbumentationsWrapper(A.RandomBrightnessContrast(p=1)),
]) # type: T.Augmentation
# Transform XYXY_ABS -> XYXY_REL
h, w, _ = IMAGE.shape
bbox = np.array(BBOX_XYXY) / [w, h, w, h]
# Define the augmentation input ("image" required, others optional):
input = T.AugInput(IMAGE, boxes=bbox, sem_seg=IMAGE_MASK)
# Apply the augmentation:
transform = augs(input)
image_transformed = input.image # new image
sem_seg_transformed = input.sem_seg # new semantic segmentation
bbox_transformed = input.boxes # new bounding boxes
# Transform XYXY_REL -> XYXY_ABS
h, w, _ = image_transformed.shape
bbox_transformed = bbox_transformed * [w, h, w, h]
"""
def __init__(self, augmentor):
"""
Args:
augmentor (albumentations.BasicTransform):
"""
        # super().__init__() intentionally skipped - on Python >= 3.7 the base class needs no RNG setup here
self._aug = augmentor
def get_transform(self, image):
do = self._rand_range() < self._aug.p
if do:
return AlbumentationsTransform(self._aug)
else:
return NoOpTransform()
| 35.522727
| 101
| 0.649712
|
d19abe45e4c63771d74b64d0ef61090ea6a418f3
| 5,026
|
py
|
Python
|
blog/models.py
|
demostheneslld/wevebeeneverywhere
|
6fc6bee50099a0eb0dfde0818d227548ea954b81
|
[
"MIT"
] | null | null | null |
blog/models.py
|
demostheneslld/wevebeeneverywhere
|
6fc6bee50099a0eb0dfde0818d227548ea954b81
|
[
"MIT"
] | 5
|
2021-03-30T13:46:02.000Z
|
2022-03-12T01:03:44.000Z
|
blog/models.py
|
demostheneslld/wevebeeneverywhere
|
6fc6bee50099a0eb0dfde0818d227548ea954b81
|
[
"MIT"
] | null | null | null |
"""
Definition of models.
"""
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils import timezone
# USER RELATED
from django.contrib.auth.models import User
from blog.email_helper import send_email
User._meta.get_field('email')._unique = True
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
bio = models.TextField(max_length=500, blank=True)
greeting = models.CharField(max_length=30, default='Hi')
default_participants = models.CharField(max_length=120, blank=True)
map_icon_color = models.CharField(max_length=40, default='red')
is_subscribed_to_emails = models.BooleanField(default=False)
@receiver(post_save, sender=User)
# pylint: disable=unused-argument
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
# pylint: disable=unused-argument
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
# BLOG RELATED
class BlogPost(models.Model):
SCORE_CHOICES = [('1', 'Terrible!'), ('2', 'Meh'), ('3', 'Okay'), ('4', 'Good'),
('5', 'Great!'), ('6', 'AMAZING'), ('7', 'All Time Favorite')]
author = models.ForeignKey(User, on_delete=models.CASCADE)
event_date = models.DateField(default=timezone.now)
publish_date = models.DateTimeField(default=None, blank=True, null=True)
email_sent = models.BooleanField(default=False)
participants = models.CharField(max_length=400, blank=True, null=True)
loc_name = models.CharField(max_length=100, default='Name, CityOrState')
lat = models.DecimalField(
max_digits=10, decimal_places=4, blank=True, null=True)
lng = models.DecimalField(
max_digits=10, decimal_places=4, blank=True, null=True)
title = models.CharField(max_length=250, blank=True, null=True)
subtitle = models.CharField(max_length=400, blank=True, null=True)
content = models.TextField(blank=True, null=True)
    score = models.CharField(
        max_length=15, choices=SCORE_CHOICES, default='4', null=True)
def strict_validate(self):
errors = []
if not self.participants:
errors.append('Participants is required')
if not self.loc_name:
errors.append('Location Name is required')
if not self.title:
errors.append('Title is required')
if not self.subtitle:
errors.append('Subtitle is required')
if not self.content:
errors.append('You must write something in the post content')
return errors
def publish(self):
self.publish_date = timezone.now()
self.save()
def unpublish(self):
self.publish_date = None
self.save()
def send_email(self):
subscribed_users = Profile.objects.filter(is_subscribed_to_emails=True)
subject = f'''New Post: {self.title}!'''
body = f'''
<p>We have a new story!<br/>
Check it out :)</p>
<p>
<b>{self.title}</b><br/>
<i>{self.subtitle}</i><br/>
<a href='https://wevebeeneverywhere.com/stories?id={self.id}'>Click here to read the story!</a>
</p>
<p>Thanks for reading,<br/>
Nathan and Molly</p>
<p>Want to change how you receive these emails?<br/>
You can <a href='https://wevebeeneverywhere.com/accounts/profile'>update your preferences</a> or <a href='https://wevebeeneverywhere.com/accounts/unsubscribe'>unsubscribe from this list</a>.</p>
'''
for subscribed_user in subscribed_users:
recipient = subscribed_user.user.email
print('Sending email to ' + recipient)
send_email(recipient, subject, body)
self.email_sent = True
self.save()
return len(subscribed_users)
def comment_count(self):
obj_pk = self.id
return PostInteraction.objects.filter(type='comment', post_id=obj_pk).count()
def like_count(self):
obj_pk = self.id
return PostInteraction.objects.filter(type='like', post_id=obj_pk).count()
def get_comments(self):
obj_pk = self.id
return PostInteraction.objects.filter(type='comment', post_id=obj_pk).order_by('-interaction_date')
class MediaItem(models.Model):
MEDIA_TYPES = [
('picture', 'picture'),
('video', 'video')
]
ALLOWED_FILE_EXTENSIONS = [
('jpg', 'png'),
('png', 'jpg'),
('png', 'jpeg'),
]
post_id = models.ForeignKey(BlogPost, on_delete=models.CASCADE)
media_type = models.CharField(
max_length=10, choices=MEDIA_TYPES, default=1)
caption = models.CharField(max_length=250)
file_name = models.UUIDField()
file_extension = models.CharField(
max_length=10, choices=ALLOWED_FILE_EXTENSIONS, default=1)
source_url = models.CharField(max_length=400)
source_image_file = models.ImageField(upload_to='post_uploads')
class PostInteraction(models.Model):
type = models.CharField(max_length=100)
user_id = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
post_id = models.ForeignKey(BlogPost, on_delete=models.CASCADE)
interaction_date = models.DateTimeField(default=timezone.now)
content = models.TextField()
| 34.902778
| 194
| 0.717668
|
0364724ab4aa1530c8318387ad8f42061e7ae9dd
| 2,539
|
py
|
Python
|
customTools/customLayout.py
|
wqslucifer/MLTools
|
103925f4f7cc4c149bd72d4adf51eb93a95cd1fc
|
[
"Apache-2.0"
] | null | null | null |
customTools/customLayout.py
|
wqslucifer/MLTools
|
103925f4f7cc4c149bd72d4adf51eb93a95cd1fc
|
[
"Apache-2.0"
] | null | null | null |
customTools/customLayout.py
|
wqslucifer/MLTools
|
103925f4f7cc4c149bd72d4adf51eb93a95cd1fc
|
[
"Apache-2.0"
] | null | null | null |
from PyQt5.QtCore import QPoint, QRect, QSize, Qt
from PyQt5.QtWidgets import QLayout, QSizePolicy
class FlowLayout(QLayout):
def __init__(self, parent=None, margin=0, spacing=-1):
super(FlowLayout, self).__init__(parent)
if parent is not None:
self.setContentsMargins(margin, margin, margin, margin)
self.setSpacing(spacing)
self.itemList = []
def __del__(self):
item = self.takeAt(0)
while item:
item = self.takeAt(0)
def addItem(self, item):
self.itemList.append(item)
def count(self):
return len(self.itemList)
def itemAt(self, index):
        if 0 <= index < len(self.itemList):
return self.itemList[index]
return None
def takeAt(self, index):
        if 0 <= index < len(self.itemList):
return self.itemList.pop(index)
return None
def expandingDirections(self):
return Qt.Orientations(Qt.Orientation(0))
def hasHeightForWidth(self):
return True
def heightForWidth(self, width):
height = self.doLayout(QRect(0, 0, width, 0), True)
return height
def setGeometry(self, rect):
super(FlowLayout, self).setGeometry(rect)
self.doLayout(rect, False)
def sizeHint(self):
return self.minimumSize()
def minimumSize(self):
size = QSize()
for item in self.itemList:
size = size.expandedTo(item.minimumSize())
margin, _, _, _ = self.getContentsMargins()
size += QSize(2 * margin, 2 * margin)
return size
def doLayout(self, rect, testOnly):
x = rect.x()
y = rect.y()
lineHeight = 0
for item in self.itemList:
wid = item.widget()
spaceX = self.spacing() + wid.style().layoutSpacing(QSizePolicy.PushButton, QSizePolicy.PushButton, Qt.Horizontal)
spaceY = self.spacing() + wid.style().layoutSpacing(QSizePolicy.PushButton, QSizePolicy.PushButton, Qt.Vertical)
nextX = x + item.sizeHint().width() + spaceX
if nextX - spaceX > rect.right() and lineHeight > 0:
x = rect.x()
y = y + lineHeight + spaceY
nextX = x + item.sizeHint().width() + spaceX
lineHeight = 0
if not testOnly:
item.setGeometry(QRect(QPoint(x, y), item.sizeHint()))
x = nextX
lineHeight = max(lineHeight, item.sizeHint().height())
return y + lineHeight - rect.y()
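# Hedged usage sketch (not part of the original module): attach FlowLayout to a
# widget and let it wrap buttons onto new rows as the window narrows. The
# widget and button labels below are illustrative only.
if __name__ == "__main__":
    import sys
    from PyQt5.QtWidgets import QApplication, QPushButton, QWidget

    app = QApplication(sys.argv)
    window = QWidget()
    flow = FlowLayout(window, margin=5, spacing=5)
    for label in ("Short", "A longer button", "Medium", "Tiny"):
        # QLayout.addWidget() wraps each button in an item and calls addItem().
        flow.addWidget(QPushButton(label))
    window.show()
    sys.exit(app.exec_())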
| 30.22619
| 126
| 0.587239
|
edaf9907d73fbf56563a7b73dbf0180a9a82c494
| 866
|
py
|
Python
|
app/core/admin.py
|
rikkhanna/dj-recipe-app-api
|
626f9762666aae1f243be0035a66be5dc0986a51
|
[
"MIT"
] | null | null | null |
app/core/admin.py
|
rikkhanna/dj-recipe-app-api
|
626f9762666aae1f243be0035a66be5dc0986a51
|
[
"MIT"
] | null | null | null |
app/core/admin.py
|
rikkhanna/dj-recipe-app-api
|
626f9762666aae1f243be0035a66be5dc0986a51
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext as _
from core import models
class UserAdmin(BaseUserAdmin):
ordering = ["id"]
list_display = ["email", "name"]
# define sections for our fieldsets in our change and create page
fieldsets = (
(None, {"fields": ("email", "password")}),
(_("Personal Info"), {"fields": ("name",)}),
(
_("Permissions"),
{"fields": ("is_active", "is_staff", "is_superuser")}
),
(_("Important dates"), {"fields": ("last_login",)}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2')
}),
)
admin.site.register(models.User, UserAdmin)
| 27.935484
| 69
| 0.583141
|
6f9c5a3471822f696865dae90e037e0da7ff030e
| 7,038
|
py
|
Python
|
textencodefunc.py
|
therealproprogrammer/Text-Encoding-Kit-DEPRECATED-
|
ea4626573e35685e79ec3b3226563b5c7146d418
|
[
"MIT"
] | null | null | null |
textencodefunc.py
|
therealproprogrammer/Text-Encoding-Kit-DEPRECATED-
|
ea4626573e35685e79ec3b3226563b5c7146d418
|
[
"MIT"
] | 1
|
2017-06-25T03:19:49.000Z
|
2017-06-25T03:19:49.000Z
|
textencodefunc.py
|
therealproprogrammer/Text-Encoding-Kit
|
ea4626573e35685e79ec3b3226563b5c7146d418
|
[
"MIT"
] | null | null | null |
def look_up_word_value(words):
"""
---------------------------------------------------------------------
DESCRIPTION
Translates the word (string) array into a floating-point value array.
---------------------------------------------------------------------
PARAMETERS
words (string array): The array of words to convert into a floating-
point value array.
---------------------------------------------------------------------
"""
the_dictionary = {}
word_num = 0
the_list_of_words = open("C:/YourShortListOfWords.txt", "r")
the_text_within = the_list_of_words.read()
for line in the_text_within.split('\n'):
# print(line+":"+str(word_num))
the_dictionary[line] = word_num
word_num = word_num + 1
looked_up_array = []
for word in words:
looked_up_array.append(int(the_dictionary[word]))
# print(looked_up_array)
real_looked_up_array = []
for word_val in looked_up_array:
real_looked_up_array.append(word_val / 10000)
return real_looked_up_array
def look_up_word_for_value(word_values):
"""
---------------------------------------------------------------------
DESCRIPTION
Translates the floating-point value array into a word (string) array.
---------------------------------------------------------------------
PARAMETERS
    word_values (floating-point value array): The array of floating-point
values to convert into a word (string) array.
---------------------------------------------------------------------
"""
word_list_here = []
the_list_of_words_here = open("C:/YourShortListOfWords.txt", "r")
the_word_list_within = the_list_of_words_here.read()
for line in the_word_list_within.split('\n'):
word_list_here.append(line)
output_word_list_here = []
for word_value in word_values:
output_word_list_here.append(word_list_here[int(word_value * 10000)])
return output_word_list_here
def is_valid_word_array(words_to_check):
"""
---------------------------------------------------------------------
DESCRIPTION
Checks if the words in the word (string) array are part of the
dictionary.
---------------------------------------------------------------------
PARAMETERS
words_to_check (string array): The array of words to check for in the
dictionary.
---------------------------------------------------------------------
"""
valid = True
try:
look_up_word_value(words_to_check)
except:
valid = False
return valid
def add_word_to_dictionary(word_to_add):
"""
---------------------------------------------------------------------
DESCRIPTION
Adds a word to the dictionary file, if it does not already exist.
---------------------------------------------------------------------
PARAMETERS
word_to_add (string): The word to add to the dictionary.
---------------------------------------------------------------------
"""
list_of_exist_words = open("C:/YourShortListOfWords.txt", "r")
existing_words = list_of_exist_words.read()
not_taken = True
for ExistLine in existing_words.split('\n'):
if ExistLine.lower() == word_to_add:
not_taken = False
if not_taken:
ready_to_add = open("C:/YourShortListOfWords.txt", "a")
ready_to_add.write("\n" + word_to_add.lower())
def pad_word_array(word_array_to_pad, input_size):
"""
---------------------------------------------------------------------
DESCRIPTION
Pads the word array with ^ to reshape it to the network's input size,
or trims it if necessary. Otherwise, leaves it unchanged.
---------------------------------------------------------------------
PARAMETERS
word_array_to_pad (string array): The word array to pad.
input_size (integer): The input size the neural network expects.
---------------------------------------------------------------------
"""
if len(word_array_to_pad) > input_size:
return word_array_to_pad[0:input_size]
elif len(word_array_to_pad) == input_size:
return word_array_to_pad
elif len(word_array_to_pad) < input_size:
padded_word_array = word_array_to_pad
for PadChar in range(input_size - len(word_array_to_pad)):
padded_word_array.append("^")
return padded_word_array
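# Quick illustrative check of pad_word_array (example values are arbitrary):
# shorter arrays are padded with "^" and longer arrays are trimmed to input_size.
assert pad_word_array(["code", "pet"], 4) == ["code", "pet", "^", "^"]
assert pad_word_array(["a", "b", "c", "d", "e"], 3) == ["a", "b", "c"]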
def easy_convert_sentence_to_values(sentence_array, input_size):
"""
---------------------------------------------------------------------
DESCRIPTION
Converts the array of sentences to an array of word value arrays. If
necessary, they might be padded.
---------------------------------------------------------------------
PARAMETERS
sentence_array (string array): The sentence array to convert.
input_size (integer): The input size the neural network expects.
---------------------------------------------------------------------
"""
arr_of_token_wrd_arrs = []
    # Tokenizes each sentence and stores the padded word arrays in arr_of_token_wrd_arrs
import nltk
for SentenceToTokenize in sentence_array:
arr_of_token_wrd_arrs.append(pad_word_array(nltk.word_tokenize(SentenceToTokenize), input_size))
# Checks the validity of arr_of_token_wrd_arrs, extending the dictionary if necessary
for WordArray in arr_of_token_wrd_arrs:
for Word in WordArray:
if is_valid_word_array([Word]):
print(Word + " is a valid word.")
else:
add_word_to_dictionary(Word)
# Converts arr_of_token_wrd_arrs to an array of word value arrays
arr_of_wrd_val_arrs = []
for WordArrayToConvert in arr_of_token_wrd_arrs:
arr_of_wrd_val_arrs.append(look_up_word_value(WordArrayToConvert))
return arr_of_wrd_val_arrs
'''
#Keras Example Below
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
import numpy as np
#The idea here is to output only one of the inputs (remove redundancy).
#For some reason, the outputs I got had similar values (so the outputs started with the same letter) when the dictionary file
#contained a long list of alphabetically-arranged words.
#I would appreciate it if anyone can help fix this bug.
#Here is the input data
X = np.array(easy_convert_sentence_to_values(["code code","program program","pet pet"],9))
#Here is the output data
y = np.array(easy_convert_sentence_to_values(["code","program","pet"],1))
model = Sequential()
model.add(Dense(8, input_dim=9))
model.add(Activation('tanh'))
model.add(Dense(6))
model.add(Activation('sigmoid'))
model.add(Dense(1))
model.add(Activation('sigmoid'))
sgd = SGD(lr=0.1)
model.compile(loss='binary_crossentropy', optimizer=sgd)
model.fit(X, y, batch_size=1, nb_epoch=100)
print(model.predict_proba(X))
for whatever in model.predict_proba(X).tolist():
for theThing in whatever:
        print(look_up_word_for_value([round(theThing,1000)]))
'''
| 35.014925
| 125
| 0.573743
|
083ba8dc35d6e6fa463e65c08cc32e58c6abf152
| 12,183
|
py
|
Python
|
mozi/train_object.py
|
hycis/Mozi
|
7f2eccbe3169c10d231e07edf8bc650039fa4eb2
|
[
"MIT"
] | 122
|
2015-07-24T09:29:06.000Z
|
2022-02-22T02:51:00.000Z
|
mozi/train_object.py
|
hycis/Mozi
|
7f2eccbe3169c10d231e07edf8bc650039fa4eb2
|
[
"MIT"
] | 4
|
2015-07-27T04:37:11.000Z
|
2020-04-04T08:05:00.000Z
|
mozi/train_object.py
|
hycis/Mozi
|
7f2eccbe3169c10d231e07edf8bc650039fa4eb2
|
[
"MIT"
] | 27
|
2015-07-24T12:59:35.000Z
|
2020-04-14T00:21:43.000Z
|
import theano
import theano.tensor as T
floatX = theano.config.floatX
import numpy as np
import time, datetime
import sys
import logging
internal_logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
from mozi.log import Log
from mozi.utils.theano_utils import shared_zeros
from mozi.utils.train_object_utils import split_list, generate_shared_list, merge_lists, \
get_shared_values, is_shared_var, merge_var
from mozi.utils.check_memory import get_mem_usage
from mozi.utils.progbar import Progbar
class TrainObject():
def __init__(self, model, dataset, train_cost, valid_cost, learning_method, stop_criteria, log=None, verbose=True):
self.model = model
self.dataset = dataset
self.train_cost = train_cost
self.valid_cost = valid_cost
self.learning_method = learning_method
self.stop_criteria = stop_criteria
self.log = log
self.verbose = verbose
if self.log is None:
# use default Log setting
self.log = Log(logger=internal_logger)
elif self.log.save_to_database:
self.log.print_records()
self.log.info('\n')
def setup(self):
self.log.info( '..begin setting up train object')
#===================[ build params and deltas list ]==================#
params = []
deltas = []
for i, layer in enumerate(self.model.layers):
layer_name = "{}_{}".format(layer.__class__.__name__, i)
if hasattr(layer, 'params'):
for param in layer.params:
# checked that the param to be updated is shared variable
if is_shared_var(param):
param.name = str(i) + '_' + str(param.name)
param.name += '_' + layer.__class__.__name__
params += [param]
deltas += [shared_zeros(shape=param.shape.eval())]
#=====================[ training params updates ]=====================#
self.log.info("..update params: " + str(params))
train_y_pred, train_layers_stats = self.model.train_fprop(self.model.input_var)
train_cost = self.train_cost(self.model.output_var, train_y_pred).astype(floatX)
gparams = T.grad(train_cost, params)
train_updates = self.learning_method.update(deltas, params, gparams)
#=================[ append updates from each layer ]==================#
for i, layer in enumerate(self.model.layers):
layer_name = "{}_{}".format(layer.__class__.__name__, i)
if hasattr(layer, 'updates') and len(layer.updates) > 0:
self.log.info("..{}: has shared variable updates".format(layer_name))
train_updates += layer.updates
#----[ append updates of stats from each layer to train updates ]-----#
self.train_stats_names, train_stats_vars = split_list(train_layers_stats)
train_stats_vars = [var.astype(floatX) for var in train_stats_vars]
self.train_stats_shared = generate_shared_list(train_stats_vars)
train_stats_updates = merge_lists(self.train_stats_shared, train_stats_vars)
if self.verbose:
train_updates += train_stats_updates
#-------------------------[ train functions ]-------------------------#
self.log.info('..begin compiling functions')
self.training = theano.function(inputs=merge_var(self.model.input_var, self.model.output_var),
outputs=train_cost,
updates=train_updates,
on_unused_input='warn',
allow_input_downcast=True)
self.log.info('..training function compiled')
#=============================[ testing ]=============================#
test_y_pred, test_layers_stats = self.model.test_fprop(self.model.input_var)
#-----[ append updates of stats from each layer to test updates ]-----#
self.test_stats_names, test_stats_vars = split_list(test_layers_stats)
test_stats_vars = [var.astype(floatX) for var in test_stats_vars]
self.test_stats_shared = generate_shared_list(test_stats_vars)
test_stats_updates = []
if self.verbose:
test_stats_updates = merge_lists(self.test_stats_shared, test_stats_vars)
#-------------------------[ test functions ]--------------------------#
test_stopping_error = self.valid_cost(self.model.output_var, test_y_pred).astype(floatX)
test_cost = self.train_cost(self.model.output_var, test_y_pred).astype(floatX)
self.testing = theano.function(inputs=merge_var(self.model.input_var, self.model.output_var),
outputs=(test_stopping_error, test_cost),
updates=test_stats_updates,
on_unused_input='warn',
allow_input_downcast=True)
self.log.info('..testing function compiled')
def run(self):
        best_valid_error = float('inf')
        valid_error = float('inf')
        train_cost = float('inf')
        valid_cost = float('inf')
train_stats_values = []
valid_stats_values = []
epoch = 0
error_dcr = 0
self.best_epoch_last_update = 0
        self.best_valid_last_update = float('inf')
train_stats_names = ['train_' + name for name in self.train_stats_names]
valid_stats_names = ['valid_' + name for name in self.test_stats_names]
job_start = time.time()
while (self.continue_learning(epoch, error_dcr, best_valid_error)):
if epoch > 0:
self.log.info("best_epoch_last_update: %d"%self.best_epoch_last_update)
self.log.info("valid_error_decrease: %f"%error_dcr)
self.log.info("best_valid_last_update: %f"%self.best_valid_last_update)
self.log.info("========[ End of Epoch ]========\n\n")
epoch += 1
start_time = time.time()
num_train_examples = 0
total_train_cost = 0.
train_stats_values = np.zeros(len(train_stats_names), dtype=floatX)
num_valid_examples = 0
total_valid_cost = 0.
total_valid_stopping_cost = 0.
valid_stats_values = np.zeros(len(valid_stats_names), dtype=floatX)
blk = 0
for block in self.dataset:
block_time = time.time()
blk += 1
train_set = block.get_train()
valid_set = block.get_valid()
#====================[ Training Progress ]====================#
if train_set.dataset_size > 0:
self.log.info('..training '+ self.dataset.__class__.__name__
+ ' block %s/%s'%(blk, self.dataset.nblocks))
progbar = Progbar(target=train_set.dataset_size)
blk_sz = 0
for idx in train_set:
cost = self.training(*train_set[idx])
total_train_cost += cost * len(idx)
num_train_examples += len(idx)
train_stats_values += len(idx) * get_shared_values(self.train_stats_shared)
blk_sz += len(idx)
progbar.update(blk_sz)
                    print()
#===================[ Validating Progress ]===================#
if valid_set.dataset_size > 0:
self.log.info('..validating ' + self.dataset.__class__.__name__
+ ' block %s/%s'%(blk, self.dataset.nblocks))
progbar = Progbar(target=valid_set.dataset_size)
blk_sz = 0
for idx in valid_set:
stopping_cost, cost = self.testing(*valid_set[idx])
total_valid_cost += cost * len(idx)
total_valid_stopping_cost += stopping_cost * len(idx)
num_valid_examples += len(idx)
valid_stats_values += len(idx) * get_shared_values(self.test_stats_shared)
blk_sz += len(idx)
progbar.update(blk_sz)
                    print()
self.log.info('block time: %0.2fs'%(time.time()-block_time))
self.log.info(get_mem_usage())
#-------[ Update train best cost and error values ]-------#
if num_train_examples > 0:
train_cost = total_train_cost / num_train_examples
train_stats_values /= num_train_examples
#-------[ Update valid best cost and error values ]-------#
if num_valid_examples > 0:
valid_error = total_valid_stopping_cost / num_valid_examples
valid_cost = total_valid_cost / num_valid_examples
valid_stats_values /= num_valid_examples
if valid_error < best_valid_error:
best_valid_error = valid_error
self.log.info('..best validation error so far')
if self.log.save_model:
self.log._save_model(self.model)
self.log.info('..model saved')
if valid_error < self.best_valid_last_update:
error_dcr = self.best_valid_last_update - valid_error
else:
error_dcr = 0
#==============[ save to database, save epoch error]==============#
if self.log.save_to_database:
self.log._save_to_database(epoch, train_cost, valid_error, best_valid_error)
self.log.info('..sent to database: %s:%s' % (self.log.save_to_database['name'],
self.log.experiment_name))
if self.log.save_epoch_error:
self.log._save_epoch_error(epoch, train_cost, valid_cost, valid_error)
self.log.info('..epoch error saved')
end_time = time.time()
#=====================[ log outputs to file ]=====================#
merged_train = merge_lists(train_stats_names, train_stats_values)
merged_valid = merge_lists(valid_stats_names, valid_stats_values)
outputs = [('epoch', epoch),
('runtime(s)', int(end_time-start_time)),
                       ('train_' + self.train_cost.__name__, train_cost),
                       ('valid_' + self.train_cost.__name__, valid_cost),
                       ('valid_' + self.valid_cost.__name__, valid_error),
                       ('best_valid_' + self.valid_cost.__name__, best_valid_error)]
outputs += merged_train + merged_valid
self.log._log_outputs(outputs)
job_end = time.time()
self.log.info('Job Completed on %s'%time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime(job_end)))
ttl_time = int(job_end - job_start)
dt = datetime.timedelta(seconds=ttl_time)
self.log.info('Total Time Taken: %s'%str(dt))
self.log.info("========[ End of Job ]========\n\n")
def continue_learning(self, epoch, error_dcr, best_valid_error):
if epoch > self.stop_criteria['max_epoch']:
return False
elif self.stop_criteria['percent_decrease'] is None or \
self.stop_criteria['epoch_look_back'] is None:
return True
elif np.abs(float(error_dcr) / self.best_valid_last_update) \
>= self.stop_criteria['percent_decrease']:
self.best_valid_last_update = best_valid_error
self.best_epoch_last_update = epoch
return True
elif epoch - self.best_epoch_last_update > \
self.stop_criteria['epoch_look_back']:
return False
else:
return True
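# Hedged sketch (not part of the original module) of the stop_criteria dict that
# continue_learning() above expects: training stops after max_epoch epochs, or
# earlier when the validation error has not improved by at least
# percent_decrease within the last epoch_look_back epochs.
example_stop_criteria = {
    'max_epoch': 100,         # hard cap on the number of training epochs
    'epoch_look_back': 5,     # patience window, in epochs
    'percent_decrease': 0.01  # required relative improvement (here 1%)
}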
| 41.020202
| 119
| 0.554872
|
b5b460fb427176499d24692dd9787ea513b1609f
| 587
|
py
|
Python
|
song_downloader.py
|
pauldardeau/cloud-jukebox
|
3e82e3e9ab9bf5bbdab6bdf65e158638456e05cc
|
[
"BSD-3-Clause"
] | 1
|
2016-11-01T16:01:43.000Z
|
2016-11-01T16:01:43.000Z
|
song_downloader.py
|
pauldardeau/cloud-jukebox
|
3e82e3e9ab9bf5bbdab6bdf65e158638456e05cc
|
[
"BSD-3-Clause"
] | 24
|
2015-11-26T03:37:24.000Z
|
2022-01-12T12:27:21.000Z
|
song_downloader.py
|
pauldardeau/cloud-jukebox
|
3e82e3e9ab9bf5bbdab6bdf65e158638456e05cc
|
[
"BSD-3-Clause"
] | null | null | null |
import threading
class SongDownloader(threading.Thread):
def __init__(self, jb, list_songs):
super(SongDownloader, self).__init__()
self.jukebox = jb
self.list_songs = list_songs
def run(self):
if self.jukebox is not None and self.list_songs is not None:
self.jukebox.batch_download_start()
for song in self.list_songs:
if self.jukebox.exit_requested:
break
else:
self.jukebox.download_song(song)
self.jukebox.batch_download_complete()
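# Hedged usage sketch (not part of the original module): any object exposing the
# attributes used in run() above works as the jukebox, illustrated with a stub.
class _StubJukebox(object):
    exit_requested = False
    def batch_download_start(self):
        print("batch start")
    def download_song(self, song):
        print("downloading %s" % song)
    def batch_download_complete(self):
        print("batch complete")

if __name__ == "__main__":
    downloader = SongDownloader(_StubJukebox(), ["song-a", "song-b"])
    downloader.start()   # executes run() on a background thread
    downloader.join()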
| 30.894737
| 68
| 0.603066
|
fea6917694c97f63ac94a537837d37043a44f60d
| 1,357
|
py
|
Python
|
app/migrations/0006_friendrequest_remotefriend_remotefriendrequest.py
|
olivaC/group-cmput404-project
|
bc5c9429d35f97f07d6550196eab1a1193b7e983
|
[
"MIT"
] | 2
|
2019-02-12T01:02:35.000Z
|
2019-11-05T23:33:16.000Z
|
app/migrations/0006_friendrequest_remotefriend_remotefriendrequest.py
|
olivaC/group-cmput404-project
|
bc5c9429d35f97f07d6550196eab1a1193b7e983
|
[
"MIT"
] | 55
|
2019-02-19T19:16:25.000Z
|
2022-01-13T01:07:50.000Z
|
app/migrations/0006_friendrequest_remotefriend_remotefriendrequest.py
|
olivaC/group-cmput404-project
|
bc5c9429d35f97f07d6550196eab1a1193b7e983
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.5 on 2019-04-03 03:21
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
dependencies = [
('app', '0005_remotecomment'),
]
operations = [
migrations.CreateModel(
name='RemoteFriend',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('friend', models.URLField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='remote_author', to='app.Author')),
],
),
migrations.CreateModel(
name='RemoteFriendRequest',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('author', models.URLField(blank=True, null=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('friend', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='RemoteFriend', to='app.Author')),
('server', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='RemoteFriendServer', to='app.Server')),
],
),
]
| 39.911765
| 143
| 0.616065
|
39450b9cd0aa0ffcf9cc7073dbd98d92ad562c33
| 482
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/parcoords/line/colorbar/_tickvals.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/parcoords/line/colorbar/_tickvals.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/parcoords/line/colorbar/_tickvals.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(
self, plotly_name="tickvals", parent_name="parcoords.line.colorbar", **kwargs
):
super(TickvalsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "data"),
**kwargs
)
| 32.133333
| 85
| 0.649378
|
f15e44b1a08302c42bb4d3af1a815e42372aabd5
| 3,045
|
py
|
Python
|
myspiders/spider_hr/hfbank_spider.py
|
zhouhongf/bank_hr
|
a42e5e18f3ec36b1ec65931415fe476c9690e0a0
|
[
"MIT"
] | 2
|
2021-11-27T06:40:47.000Z
|
2022-01-06T03:12:46.000Z
|
myspiders/spider_hr/hfbank_spider.py
|
zhouhongf/bank_hr
|
a42e5e18f3ec36b1ec65931415fe476c9690e0a0
|
[
"MIT"
] | null | null | null |
myspiders/spider_hr/hfbank_spider.py
|
zhouhongf/bank_hr
|
a42e5e18f3ec36b1ec65931415fe476c9690e0a0
|
[
"MIT"
] | null | null | null |
from myspiders.ruia import JsonField, Item, Spider, Bs4HtmlField, Bs4AttrField, Bs4TextField
from urllib.parse import urlencode, urlparse, urljoin, quote
import re
from constants import BankDict
from config import Target
import os
'''
Ids: "https://applyjob.chinahr.com/apply/job/wish?jobId=5ebca903c7fd7a04d9bcb256&projectId=5cf735b98534db03dab6a00b"
JobId: ""
PublishDate: "2020-05-14 11:13:17"
gzdd: "济南市"
jiezhiriqi: "2020-06-10"
link: "shzp/zwcx/284564.shtml"
npgw: "专业类-高级专员"
title: "员工发展管理岗"
xuqiubm: "人力资源部(党委组织部)"
zpjg: "总行"
zplb: "社会招聘"
'title': '对公客户经理岗(本部)',
'xuqiubm': '郑州分行',
'gzdd': '郑州市',
'JobId': '',
'link': 'shzp/zwcx/284260.shtml',
'npgw': '——',
'jiezhiriqi': '2020-04-23',
'Ids': 'https://applyjob.chinahr.com/apply/job/wish?jobId=5e8544cac5dad405596775f8&projectId=5cf735b98534db03dab6a00b',
'zpjg': '郑州分行',
'PublishDate': '2020-04-08 09:04:34',
'zplb': '社会招聘',
'''
class HfbankItem(Item):
target_item = JsonField(json_select='rows')
bank_name = JsonField(default='恒丰银行')
type_main = JsonField(json_select='zplb')
name = JsonField(json_select='title')
position = JsonField(json_select='npgw')
branch_name = JsonField(json_select='zpjg')
department = JsonField(json_select='xuqiubm')
url = JsonField(json_select='link')
date_publish = JsonField(json_select='PublishDate')
date_close = JsonField(json_select='jiezhiriqi')
place = JsonField(json_select='gzdd')
async def clean_date_close(self, value):
return value + ' 00:00:00'
async def clean_url(self, value):
one = os.path.split(value)[-1]
job_id = one.split('.')[0]
self.results['job_id'] = job_id
if not value.startswith('http'):
value = 'http://career.hfbank.com.cn/' + value
return value
def make_form_data(page_index: int, page_size: int):
form_data = {
'SiteId': '312',
'col': 'title|link|PublishDate|zpjg|zplb|xuqiubm|gzdd|Ids|JobId|npgw|jiezhiriqi',
'catalogId': '11190',
'zpjg': '',
'npgw': '',
'newtime': '',
'jobad_category': '社会招聘',
'jobad_jobcategory': '',
'jobad_workingplace': '',
'search_txt': '',
'pageIndex': page_index,
'pagesize': page_size
}
return form_data
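# Hedged usage sketch: build the POST payloads for the first three result pages,
# exactly as HfbankWorker.form_data does further below.
example_payloads = [make_form_data(page, 10) for page in (1, 2, 3)]
assert example_payloads[0]['pageIndex'] == 1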
# Crawl the first 3 result pages on each run
class HfbankWorker(Spider):
name = 'HfbankWorker'
bank_name = '恒丰银行'
start_urls = ['http://career.hfbank.com.cn/ucms/RecruitmentServlet']
page_size = 10
form_data = [make_form_data(one + 1, 10) for one in range(3)]
headers = {'Referer': 'http://career.hfbank.com.cn/shzp/zwcx/index.shtml'}
async def parse(self, response):
jsondata = await response.json(content_type='text/html')
async for item in HfbankItem.get_json(jsondata=jsondata):
data = item.results
target = Target(bank_name=self.bank_name, url=data['url'], metadata={'data': data})
await self.redis.insert(field=target.id, value=target.do_dump())
def start():
# HfbankWorker.start()
pass
| 30.148515
| 120
| 0.655172
|
ea60d0e3e19f46a64050171bbffa62ed4e48ed32
| 9,639
|
py
|
Python
|
pymatflow/qe/post/scf.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 6
|
2020-03-06T16:13:08.000Z
|
2022-03-09T07:53:34.000Z
|
pymatflow/qe/post/scf.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-10-02T02:23:08.000Z
|
2021-11-08T13:29:37.000Z
|
pymatflow/qe/post/scf.py
|
DeqiTang/pymatflow
|
bd8776feb40ecef0e6704ee898d9f42ded3b0186
|
[
"MIT"
] | 1
|
2021-07-10T16:28:14.000Z
|
2021-07-10T16:28:14.000Z
|
# ==============================================================================
import datetime
import matplotlib.pyplot as plt
from pymatflow.base.atom import Atom
from pymatflow.base.xyz import BaseXyz
class ScfOut:
"""
"""
def __init__(self):
"""
output is the output file of scf run
"""
self.file = None
self.scf_params = {}
self.run_info = {}
def get_info(self, file):
"""
get the general information of scf run from scf run output file
which is now stored in self.lines
"""
self.clean()
self.file = file
with open(self.file, 'r') as fout:
self.lines = fout.readlines()
self.get_scf_params_and_run_info()
def clean(self):
self.file = None
self.scf_params = {}
self.run_info = {}
#
def get_scf_params_and_run_info(self):
"""
self.run_info[]
start_time: the task start time
stop_time: the task stop time
scf_energies: all the energies during the scf procedure
#fermi_energy: fermi energy of the system (if output)
"""
self.run_info["scf_energies"] = []
for i in range(len(self.lines)):
# if it is an empty line continue to next line
if len(self.lines[i].split()) == 0:
continue
if self.lines[i].split()[0] == "Program" and self.lines[i].split()[1] == "PWSCF" and self.lines[i].split()[3] == "starts":
self.run_info["start_time"] = self.lines[i].split("\n")[0]
elif self.lines[i].split()[0] == "This" and self.lines[i].split()[1] == "run" and self.lines[i].split()[3] == "terminated":
self.run_info["stop_time"] = self.lines[i].split("\n")[0]
elif self.lines[i].split()[0] == "Parallel" and self.lines[i].split()[-1] == "processors":
self.run_info["processors"] = int(self.lines[i].split()[-2])
elif self.lines[i].split()[0] == "MPI" and self.lines[i].split()[-1] == "nodes":
self.run_info["nodes"] = int(self.lines[i].split()[-2])
elif self.lines[i].split()[0] == "bravais-lattice" and self.lines[i].split()[1] == "index":
self.scf_params["alat_au"] = float(self.lines[i+1].split()[4])
self.scf_params["nat"] = int(self.lines[i+3].split()[4])
self.scf_params["nelectron"] = float(self.lines[i+5].split()[4])
self.scf_params["n_ks_state"] = int(self.lines[i+6].split("=")[1])
self.scf_params["ecutwfc"] = int(float(self.lines[i+7].split()[3]))
self.scf_params["ecutrho"] = int(float(self.lines[i+8].split()[4]))
self.scf_params["conv_thr"] = float(self.lines[i+9].split()[3])
self.scf_params["mixing_beta"] = float(self.lines[i+10].split()[3])
elif self.lines[i].split()[0] == "crystal" and self.lines[i].split()[1] == "axes:" and self.lines[i].split()[-1] =="alat)":
self.scf_params["cell_a_alat"] = []
self.scf_params["cell_a_alat"].append([float(self.lines[i+1].split()[3]), float(self.lines[i+1].split()[4]), float(self.lines[i+1].split()[5])])
self.scf_params["cell_a_alat"].append([float(self.lines[i+2].split()[3]), float(self.lines[i+2].split()[4]), float(self.lines[i+2].split()[5])])
self.scf_params["cell_a_alat"].append([float(self.lines[i+3].split()[3]), float(self.lines[i+3].split()[4]), float(self.lines[i+3].split()[5])])
elif self.lines[i].split()[0] == "reciprocal" and self.lines[i].split()[1] == "axes:" and self.lines[i].split()[-1] == "pi/alat)": # actually '2 pi/alat'
self.scf_params["cell_b_2pi_alat"] = []
self.scf_params["cell_b_2pi_alat"].append([float(self.lines[i+1].split()[3]), float(self.lines[i+1].split()[4]), float(self.lines[i+1].split()[5])])
self.scf_params["cell_b_2pi_alat"].append([float(self.lines[i+2].split()[3]), float(self.lines[i+2].split()[4]), float(self.lines[i+2].split()[5])])
self.scf_params["cell_b_2pi_alat"].append([float(self.lines[i+3].split()[3]), float(self.lines[i+3].split()[4]), float(self.lines[i+3].split()[5])])
elif self.lines[i].split()[0] == "site" and self.lines[i].split()[-1] == "units)" and self.lines[i].split()[-2] == "(alat":
self.run_info["site_line_number"] = i
elif self.lines[i].split()[0] == "number" and self.lines[i].split()[2] == 'k':
if self.lines[i].split()[5] == "(tetrahedron":
self.scf_params["degauss"] = "tetrahedron method: degauss not needed"
else:
self.scf_params["degauss"] = float(self.lines[i].split()[9])
self.run_info["number-of-k-points"] = int(self.lines[i].split()[4])
elif self.lines[i].split()[0] == "Estimated" and self.lines[i].split()[1] == "max":
self.run_info["ram_per_process"] = self.lines[i].split()[7] + " " + self.lines[i].split()[8]
self.run_info["total_ram"] = self.lines[i+2].split()[5] + " " + self.lines[i+2].split()[6]
elif self.lines[i].split()[0] == "total" and self.lines[i].split()[1] == "energy":
                # the total energy of the last iteration is not printed like the previous scf iterations;
                # it begins with '!    total energy'
self.run_info["scf_energies"].append(float(self.lines[i].split()[3]))
elif self.lines[i].split()[0] == "!" and self.lines[i].split()[5] == "Ry":
self.run_info["scf_final_energy"] = float(self.lines[i].split()[4])
elif self.lines[i].split()[0] == "convergence" and self.lines[i].split()[3] == "achieved":
self.run_info["scf_iterations"] = int(self.lines[i].split()[5])
elif self.lines[i].split()[0] == "the" and self.lines[i].split()[1] == "Fermi":
self.run_info["fermi_energy"] = float(self.lines[i].split()[4])
elif self.lines[i].split()[0] == "Total" and self.lines[i].split()[1] == "force":
self.run_info["total_force"] = float(self.lines[i].split()[3])
elif self.lines[i].split()[0] == "Computing" and self.lines[i].split()[-1] == "pressure":
self.run_info["total_stress_ry_bohr_3"] = []
self.run_info["total_stress_kbar"] = []
self.run_info["pressure"] = float(self.lines[i+2].split()[-1])
self.run_info["total_stress_ry_bohr_3"].append([float(self.lines[i+3].split()[0]), float(self.lines[i+3].split()[1]), float(self.lines[i+3].split()[2])])
self.run_info["total_stress_ry_bohr_3"].append([float(self.lines[i+4].split()[0]), float(self.lines[i+4].split()[1]), float(self.lines[i+4].split()[2])])
self.run_info["total_stress_ry_bohr_3"].append([float(self.lines[i+5].split()[0]), float(self.lines[i+5].split()[1]), float(self.lines[i+5].split()[2])])
self.run_info["total_stress_kbar"].append([float(self.lines[i+3].split()[3]), float(self.lines[i+3].split()[4]), float(self.lines[i+3].split()[5])])
self.run_info["total_stress_kbar"].append([float(self.lines[i+4].split()[3]), float(self.lines[i+4].split()[4]), float(self.lines[i+4].split()[5])])
self.run_info["total_stress_kbar"].append([float(self.lines[i+5].split()[3]), float(self.lines[i+5].split()[4]), float(self.lines[i+5].split()[5])])
        # note: at this point len(self.run_info["scf_energies"]) == self.run_info["scf_iterations"] - 1,
        # because the total energy of the last step is not printed in the same format as the previous
        # scf steps; it is printed as '!    total energy = ', with a leading "!".
        # So we append the final scf step energy to self.run_info["scf_energies"] here.
self.run_info["scf_energies"].append(self.run_info["scf_final_energy"])
# ----------------------------------------------------------------------
# get the xyz structure from information extracted above:
        self.xyz = BaseXyz()
self.xyz.natom = self.scf_params["nat"]
begin = self.run_info["site_line_number"] + 1
        # Warning:
        # there are numeric errors when obtaining atom coordinates from the qe
        # output in units of alat and multiplying by alat and bohr; the acquired
        # atomic coordinates therefore carry small numeric errors compared to the
        # input xyz, so be cautious when using them.
bohr = 0.529177208 # 1 Bohr = 0.529177208 Angstrom
for i in range(self.xyz.natom):
self.xyz.atoms.append(Atom(
self.lines[begin+i].split()[1],
self.scf_params["alat_au"] * bohr * float(self.lines[begin+i].split()[6]),
self.scf_params["alat_au"] * bohr * float(self.lines[begin+i].split()[7]),
self.scf_params["alat_au"] * bohr * float(self.lines[begin+i].split()[8])))
self.xyz.cell = self.scf_params["cell_a_alat"] # now in unit of alat
for i in range(3):
for j in range(3):
                self.xyz.cell[i][j] = self.scf_params["cell_a_alat"][i][j] * self.scf_params["alat_au"] * bohr
# now self.xyz.cell are in unit of Angstrom
# ----------------------------------------------------------------------
# def
# stronger please
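# Hedged, self-contained illustration (numbers are invented) of the unit
# conversion used above: coordinates reported by pw.x in units of alat are
# converted to Angstrom by multiplying with alat (in Bohr) and the
# Bohr-to-Angstrom factor.
BOHR_TO_ANGSTROM = 0.529177208
example_alat_au = 10.2    # example lattice parameter, in Bohr
example_frac = 0.25       # example coordinate, in units of alat
example_x_angstrom = example_alat_au * BOHR_TO_ANGSTROM * example_frac
# example_x_angstrom is about 1.349 Angstrom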
| 66.020548
| 170
| 0.549642
|
62e454a11e29dcc45eb7f4faf46acb2c2dbea70d
| 246
|
py
|
Python
|
netask/serializer.py
|
pearcore/vsite
|
bc38213241c2dfb5939599dc02876ed4716ae6d1
|
[
"Apache-2.0"
] | null | null | null |
netask/serializer.py
|
pearcore/vsite
|
bc38213241c2dfb5939599dc02876ed4716ae6d1
|
[
"Apache-2.0"
] | null | null | null |
netask/serializer.py
|
pearcore/vsite
|
bc38213241c2dfb5939599dc02876ed4716ae6d1
|
[
"Apache-2.0"
] | null | null | null |
from rest_framework import serializers
from netask.models import Config
class ConfigSerializer(serializers.ModelSerializer):
class Meta:
model = Config
        fields = ('id', 'Name', 'CreateDate', 'stResult', 'itResult', 'jsResult')
| 27.333333
| 79
| 0.715447
|
9d6ae926687e3c117301bc3c3c7d453cb80e01c4
| 2,091
|
py
|
Python
|
intake/catalog/tests/test_auth_integration.py
|
raybellwaves/intake
|
8acc70d9adb19344ca15dee948315828b61e87b2
|
[
"BSD-2-Clause"
] | 578
|
2019-02-22T11:45:28.000Z
|
2022-03-31T08:32:22.000Z
|
intake/catalog/tests/test_auth_integration.py
|
raybellwaves/intake
|
8acc70d9adb19344ca15dee948315828b61e87b2
|
[
"BSD-2-Clause"
] | 336
|
2019-02-21T16:24:33.000Z
|
2022-03-30T09:23:53.000Z
|
intake/catalog/tests/test_auth_integration.py
|
raybellwaves/intake
|
8acc70d9adb19344ca15dee948315828b61e87b2
|
[
"BSD-2-Clause"
] | 99
|
2019-02-22T18:31:09.000Z
|
2022-03-22T03:27:54.000Z
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
import os
import os.path
import shutil
import tempfile
import time
import pytest
from intake import open_catalog
from intake.auth.secret import SecretClientAuth
from intake.auth.base import AuthenticationFailure
TMP_DIR = tempfile.mkdtemp()
CONF_DIR = os.path.join(TMP_DIR, 'conf')
os.mkdir(CONF_DIR)
TEST_CATALOG_PATH = [TMP_DIR]
YAML_FILENAME = 'intake_test_catalog.yml'
# Create server configuration using shared-secret Auth
TEST_SERVER_CONF = os.path.join(CONF_DIR, 'config.yaml')
conf = '''
auth:
cls: intake.auth.secret.SecretAuth
kwargs:
secret: test_secret
'''
with open(TEST_SERVER_CONF, 'w') as f:
f.write(conf)
@pytest.fixture
def intake_server_with_auth(intake_server):
fullname = os.path.join(TMP_DIR, YAML_FILENAME)
try:
os.makedirs(os.path.join(TMP_DIR, 'data'))
except:
pass
with open(fullname, 'w') as f:
f.write('''
sources:
example:
description: example1 source plugin
driver: csv
args:
urlpath: "{{ CATALOG_DIR }}/data/example.csv"
''')
csv_name = os.path.join(TMP_DIR, 'data', 'example.csv')
with open(csv_name, 'w') as f:
f.write('a,b,c\n1,2,3\n4,5,6')
time.sleep(2)
yield intake_server
try:
shutil.rmtree(TMP_DIR)
except:
pass
def test_secret_auth(intake_server_with_auth):
auth = SecretClientAuth(secret='test_secret')
catalog = open_catalog(intake_server_with_auth, auth=auth)
entries = list(catalog)
assert entries == ['example']
catalog.example.read()
def test_secret_auth_fail(intake_server_with_auth):
auth = SecretClientAuth(secret='test_wrong_secret')
with pytest.raises(AuthenticationFailure):
list(open_catalog(intake_server_with_auth, auth=auth))
| 24.313953
| 78
| 0.653754
|
e4a806ad4cde136340b16b3dd67113f767af4c9c
| 1,701
|
py
|
Python
|
airbyte-integrations/bases/source-acceptance-test/setup.py
|
ravitejasaidus/airbyte
|
2f7c15a540ad16824ac80f8674e30c63b915a690
|
[
"MIT"
] | 2
|
2021-08-04T03:17:38.000Z
|
2021-11-15T10:16:08.000Z
|
airbyte-integrations/bases/source-acceptance-test/setup.py
|
ravitejasaidus/airbyte
|
2f7c15a540ad16824ac80f8674e30c63b915a690
|
[
"MIT"
] | 52
|
2021-06-11T12:39:05.000Z
|
2022-03-30T04:59:35.000Z
|
airbyte-integrations/bases/source-acceptance-test/setup.py
|
ravitejasaidus/airbyte
|
2f7c15a540ad16824ac80f8674e30c63b915a690
|
[
"MIT"
] | 1
|
2021-08-04T03:25:02.000Z
|
2021-08-04T03:25:02.000Z
|
#
# MIT License
#
# Copyright (c) 2020 Airbyte
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import setuptools
MAIN_REQUIREMENTS = [
"airbyte-cdk~=0.1",
"docker~=4.4",
"PyYAML~=5.4",
"icdiff~=1.9",
"inflection~=0.5",
"pdbpp~=0.10",
"pydantic~=1.6",
"pytest~=6.1",
"pytest-sugar~=0.9",
"pytest-timeout~=1.4",
"pprintpp~=0.4",
]
setuptools.setup(
name="source-acceptance-test",
description="Contains acceptance tests for source connectors.",
author="Airbyte",
author_email="contact@airbyte.io",
url="https://github.com/airbytehq/airbyte",
packages=setuptools.find_packages(),
install_requires=MAIN_REQUIREMENTS,
)
| 33.352941
| 80
| 0.72428
|
48b9578c8f16349293799dfdd13bd1a2e8545bab
| 10,616
|
py
|
Python
|
object_service/views.py
|
ehenneken/object_service
|
8da7be631d5fb962d120d4706ffb5faeeef97757
|
[
"MIT"
] | null | null | null |
object_service/views.py
|
ehenneken/object_service
|
8da7be631d5fb962d120d4706ffb5faeeef97757
|
[
"MIT"
] | 1
|
2017-10-16T19:52:13.000Z
|
2017-10-16T19:52:13.000Z
|
object_service/views.py
|
ehenneken/object_service
|
8da7be631d5fb962d120d4706ffb5faeeef97757
|
[
"MIT"
] | null | null | null |
from flask import current_app, request
from flask_restful import Resource
from flask_discoverer import advertise
from flask import Response
from SIMBAD import get_simbad_data
from SIMBAD import do_position_query
from SIMBAD import parse_position_string
from NED import get_ned_data
from NED import get_NED_refcodes
from utils import get_objects_from_query_string
from utils import translate_query
import time
import timeout_decorator
class IncorrectPositionFormatError(Exception):
pass
class ObjectSearch(Resource):
"""Return object identifiers for a given object string"""
scopes = []
rate_limit = [1000, 60 * 60 * 24]
decorators = [advertise('scopes', 'rate_limit')]
def post(self):
stime = time.time()
# Get the supplied list of identifiers
identifiers = []
input_type = None
# determine whether a source for the data was specified
try:
source = request.json['source'].lower()
except:
source = 'simbad'
# We only deal with SIMBAD or NED as source
if source not in ['simbad','ned']:
current_app.logger.error('Unsupported source for object data specified: %s'%source)
return {"Error": "Unable to get results!",
"Error Info": "Unsupported source for object data specified: %s"%source}, 200
for itype in ['identifiers', 'objects']:
try:
identifiers = request.json[itype]
                identifiers = list(map(str, identifiers))
input_type = itype
except:
pass
if not input_type:
current_app.logger.error('No identifiers and objects were specified for SIMBAD object query')
return {"Error": "Unable to get results!",
"Error Info": "No identifiers/objects found in POST body"}, 200
# We should either have a list of identifiers or a list of object names
if len(identifiers) == 0:
current_app.logger.error('No identifiers or objects were specified for SIMBAD object query')
return {"Error": "Unable to get results!",
"Error Info": "No identifiers/objects found in POST body"}, 200
# We have a known object data source and a list of identifiers. Let's start!
# We have identifiers
if source == 'simbad':
result = get_simbad_data(identifiers, input_type)
else:
if input_type == 'identifiers':
input_type = 'simple'
result = get_ned_data(identifiers, input_type)
if 'Error' in result:
# An error was returned!
err_msg = result['Error Info']
current_app.logger.error('Failed to find data for %s %s query (%s)!'%(source.upper(), input_type,err_msg))
return result
else:
# We have results!
duration = time.time() - stime
current_app.logger.info('Found objects for %s %s in %s user seconds.' % (source.upper(), input_type, duration))
# Now pick the entries in the results that correspond with the original object names
if input_type == 'objects':
# result['data'] = {k: result['data'].get(k.upper()) for k in identifiers}
result['data'] = {k: result['data'].get(k) or result['data'].get(k.upper()) for k in identifiers}
# Send back the results
return result.get('data',{})
class PositionSearch(Resource):
"""Return publication information for a cone search"""
scopes = []
rate_limit = [1000, 60 * 60 * 24]
decorators = [advertise('scopes', 'rate_limit')]
def get(self, pstring):
# The following position strings are supported
# 1. 05 23 34.6 -69 45 22:0 6 (or 05h23m34.6s -69d45m22s:0m6s)
# 2. 05 23 34.6 -69 45 22:0.166666 (or 05h23m34.6s -69d45m22s:0.166666)
# 3. 80.89416667 -69.75611111:0.166666
stime = time.time()
# If we're given a string with qualifiers ('h', etc), convert to one without
current_app.logger.info('Attempting SIMBAD position search: %s'%pstring)
try:
RA, DEC, radius = parse_position_string(pstring)
        except Exception as err:
current_app.logger.error('Position string could not be parsed: %s' % pstring)
return {'Error': 'Unable to get results!',
'Error Info': 'Invalid position string: %s'%pstring}, 200
try:
result = do_position_query(RA, DEC, radius)
except timeout_decorator.timeout_decorator.TimeoutError:
current_app.logger.error('Position query %s timed out' % pstring)
return {'Error': 'Unable to get results!',
'Error Info': 'Position query timed out'}, 200
return result
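# Hedged usage sketch (not part of the original module) of the position string
# formats listed in PositionSearch.get above. It assumes parse_position_string
# returns an (RA, DEC, radius) tuple, which is what the handler relies on;
# meant for manual experimentation, not executed at import time.
if __name__ == "__main__":
    for example_pstring in ("05 23 34.6 -69 45 22:0 6",
                            "05h23m34.6s -69d45m22s:0.166666",
                            "80.89416667 -69.75611111:0.166666"):
        print(parse_position_string(example_pstring))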
class QuerySearch(Resource):
"""Given a Solr query with object names, return a Solr query with SIMBAD identifiers"""
scopes = []
rate_limit = [1000, 60 * 60 * 24]
decorators = [advertise('scopes', 'rate_limit')]
def post(self):
stime = time.time()
# Get the supplied list of identifiers
identifiers = []
query = None
itype = None
name2id = {}
try:
query = request.json['query']
input_type = 'query'
except:
current_app.logger.error('No query was specified for the object search')
return {"Error": "Unable to get results!",
"Error Info": "No identifiers/objects found in POST body"}, 200
# If we get the request from BBB, the value of 'query' is actually an array
if isinstance(query, list):
solr_query = query[0]
else:
solr_query = query
current_app.logger.info('Received object query: %s'%solr_query)
# This query will be split up into two components: a SIMBAD and a NED object query
simbad_query = solr_query.replace('object:','simbid:')
ned_query = solr_query.replace('object:','nedid:')
# Check if an explicit target service was specified
try:
target = request.json['target']
except:
target = 'all'
# If we receive a (Solr) query string, we need to parse out the object names
try:
identifiers = get_objects_from_query_string(solr_query)
        except Exception as err:
current_app.logger.error('Parsing the identifiers out of the query string blew up!')
return {"Error": "Unable to get results!",
"Error Info": "Parsing the identifiers out of the query string blew up! (%s)"%str(err)}, 200
identifiers = [iden for iden in identifiers if iden.lower() not in ['object',':']]
        # How many object names did we find?
id_num = len(identifiers)
# Keep a list with the object names we found
identifiers_orig = identifiers
# If we did not find any object names, there is nothing to do!
if id_num == 0:
return {"Error": "Unable to get results!",
"Error Info": "No identifiers/objects found in Solr object query"}, 200
# Get translations
simbad_query = ''
ned_query = ''
translated_query = ''
if target.lower() in ['simbad', 'all']:
name2simbid = {}
for ident in identifiers:
result = get_simbad_data([ident], 'objects')
if 'Error' in result or 'data' not in result:
# An error was returned!
current_app.logger.error('Failed to find data for SIMBAD object {0}!: {1}'.format(ident, result.get('Error Info','NA')))
name2simbid[ident] = 0
continue
try:
SIMBADid =[e.get('id',0) for e in result['data'].values()][0]
except:
SIMBADid = "0"
name2simbid[ident] = SIMBADid
simbad_query = translate_query(solr_query, identifiers, name2simbid, 'simbid:')
if target.lower() in ['ned', 'all']:
name2nedid = {}
for ident in identifiers:
result = get_ned_data([ident], 'objects')
if 'Error' in result or 'data' not in result:
# An error was returned!
current_app.logger.error('Failed to find data for NED object {0}!: {1}'.format(ident, result.get('Error Info','NA')))
name2nedid[ident] = 0
continue
try:
NEDid =[e.get('id',0) for e in result['data'].values()][0]
except:
NEDid = 0
name2nedid[ident] = str(NEDid)
ned_query = translate_query(solr_query, identifiers, name2nedid, 'nedid:')
if simbad_query and ned_query:
translated_query = '({0}) OR ({1})'.format(simbad_query, ned_query)
elif simbad_query:
translated_query = simbad_query
elif ned_query:
translated_query = ned_query
else:
translated_query = 'simbid:0'
return {'query': translated_query}
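# Illustrative sketch of the translation above (identifier values are
# hypothetical placeholders and the exact output format depends on
# translate_query): an input query such as
#     object:"LMC" AND year:2019
# is translated into a SIMBAD variant and a NED variant and combined as, e.g.,
#     (simbid:3133169 AND year:2019) OR (nedid:576 AND year:2019)
# falling back to 'simbid:0' when neither service returns a translation.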
class ClassicObjectSearch(Resource):
"""Return object NED refcodes for a given object list"""
scopes = []
rate_limit = [1000, 60 * 60 * 24]
decorators = [advertise('scopes', 'rate_limit')]
def post(self):
stime = time.time()
results = {}
# Get the supplied list of identifiers
if not request.json or 'objects' not in request.json:
current_app.logger.error('No objects were provided to Classic Object Search')
return {'Error': 'Unable to get results!',
'Error Info': 'No object names found in POST body'}, 200
results = get_NED_refcodes(request.json)
if "Error" in results:
            current_app.logger.error('Classic Object Search request blew up')
return results, 500
duration = time.time() - stime
current_app.logger.info('Classic Object Search request successfully completed in %s real seconds'%duration)
# what output format?
try:
oformat = request.json['output_format']
except:
oformat = 'json'
# send the results back in the requested format
if oformat == 'json':
return results
else:
output = "\n".join(results['data'])
return Response(output, mimetype='text/plain; charset=us-ascii')
| 44.233333
| 140
| 0.588169
|
7786501a40951867c432fdde497571fc5e43d066
| 1,836
|
py
|
Python
|
build-site.py
|
allisonmargaret/popolwuj
|
7faacab02ae7cee4d4664b96e6d62eb743dcb9ab
|
[
"MIT"
] | 3
|
2019-10-27T08:29:09.000Z
|
2020-07-02T14:27:58.000Z
|
build-site.py
|
allisonmargaret/popolwuj
|
7faacab02ae7cee4d4664b96e6d62eb743dcb9ab
|
[
"MIT"
] | 26
|
2018-09-21T19:18:04.000Z
|
2022-03-01T22:10:33.000Z
|
build-site.py
|
allisonmargaret/popolwuj
|
7faacab02ae7cee4d4664b96e6d62eb743dcb9ab
|
[
"MIT"
] | 4
|
2018-12-17T15:41:13.000Z
|
2020-10-12T23:00:33.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import configparser
from jinja2 import Environment, FileSystemLoader
import glob
# Import config
config = configparser.ConfigParser()
config.read('./config.ini')
# Open template parser
ENV = Environment(loader=FileSystemLoader('./templates'))
# Define assets -- PUT THESE IN A CONFIG
scripts = dict()
styles = dict()
scripts['index'] = [
dict(src="assets/jquery-3.3.1.slim.min.js"),
dict(src="assets/popper.min.js"),
dict(src="assets/bootstrap.min.js")
]
styles['index'] = [
dict(href="assets/bootstrap.min.css"),
dict(href="pw-default.css")
]
scripts['paragraphs'] = [
dict(src="assets/jquery-3.3.1.slim.min.js"),
dict(src="assets/popper.min.js"),
dict(src="assets/bootstrap.min.js"),
dict(src="./xom-paragraphs.js")
]
styles['paragraphs'] = [
dict(href="assets/bootstrap.min.css"),
dict(href="./xom-paragraphs.css")
]
docmap = {
'index.html': {
'page_title': 'Home',
'assets':'index'
},
'xom-paragraphs.html': {
'page_title': 'Paragraphs Version',
'assets': 'paragraphs'
}
}
# Convert all templates to pages
for tfile in glob.glob("./templates/*.html"):
fname = tfile.split('/')[-1]
if fname == 'base.html':
continue
try:
mapkey = docmap[fname]['assets']
except KeyError:
#mapkey = 'default'
print('{} does not have a docmap entry; skipping.'.format(fname))
continue
print(fname, '+', mapkey)
template = ENV.get_template(fname)
data = dict(
site_title = config['DEFAULT']['site_title'],
page_title = docmap[fname]['page_title'],
styles = styles[mapkey],
scripts = scripts[mapkey]
)
html = template.render(**data)
with open(fname, 'w') as pfile:
pfile.write(html)
| 26.608696
| 73
| 0.618192
|
662965ae465e81e40f94e9dfbd0032b1c4adebd2
| 4,117
|
py
|
Python
|
hpOneView/resources/storage/sas_logical_jbods.py
|
doziya/hpeOneView
|
ef9bee2a0e1529e93bd6e8d84eff07fb8533049d
|
[
"MIT"
] | 107
|
2015-02-16T12:40:36.000Z
|
2022-03-09T05:27:58.000Z
|
hpOneView/resources/storage/sas_logical_jbods.py
|
doziya/hpeOneView
|
ef9bee2a0e1529e93bd6e8d84eff07fb8533049d
|
[
"MIT"
] | 148
|
2015-03-17T16:09:39.000Z
|
2020-02-09T16:28:06.000Z
|
hpOneView/resources/storage/sas_logical_jbods.py
|
doziya/hpeOneView
|
ef9bee2a0e1529e93bd6e8d84eff07fb8533049d
|
[
"MIT"
] | 80
|
2015-01-03T22:58:53.000Z
|
2021-04-16T11:37:03.000Z
|
# -*- coding: utf-8 -*-
###
# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from hpOneView.resources.resource import ResourceClient
class SasLogicalJbods(object):
"""
SAS Logical JBODs API client.
Note:
This resource is only available on HPE Synergy
"""
URI = '/rest/sas-logical-jbods'
DRIVES_PATH = '/drives'
def __init__(self, con):
self._connection = con
self._client = ResourceClient(con, self.URI)
def get_all(self, start=0, count=-1, filter='', sort=''):
"""
Gets a paginated collection of SAS logical JBODs based on optional sorting and filtering and constrained by
start and count parameters.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: A list of all SAS logical JBODs.
"""
return self._client.get_all(start=start, count=count, filter=filter, sort=sort)
def get(self, id_or_uri):
"""
Gets the specified SAS logical JBODs resource by ID or by URI.
Args:
id_or_uri: Can be either the SAS logical JBOD ID or the SAS logical JBOD URI.
Returns:
dict: The SAS logical JBOD.
"""
return self._client.get(id_or_uri=id_or_uri)
def get_by(self, field, value):
"""
Gets all SAS Logical JBODs that match the filter.
The search is case-insensitive.
Args:
field: Field name to filter.
value: Value to filter.
Returns:
list: A list of SAS Logical JBODs.
"""
return self._client.get_by(field, value)
def get_drives(self, id_or_uri):
"""
Gets the list of drives allocated to this SAS logical JBOD.
Args:
id_or_uri: Can be either the SAS logical JBOD ID or the SAS logical JBOD URI.
Returns:
list: A list of Drives
"""
uri = self._client.build_uri(id_or_uri=id_or_uri) + self.DRIVES_PATH
return self._client.get(id_or_uri=uri)
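# Minimal usage sketch (illustrative only; the hostname and credentials are
# hypothetical, and the connection helper is assumed to behave as in the
# hpOneView SDK):
#
#   from hpOneView.connection import connection
#   con = connection('oneview.example.com')
#   con.login({'userName': 'administrator', 'password': 'secret'})
#   jbods = SasLogicalJbods(con).get_all(count=10)
#   for jbod in jbods:
#       print(jbod.get('name'), jbod.get('uri'))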
| 35.491379
| 115
| 0.664319
|
b5e6fa66938bea5928d416e5a40342193dcd2f27
| 7,077
|
py
|
Python
|
src/datadog_api_client/v1/model/widget_change_type.py
|
rchenzheng/datadog-api-client-python
|
2e86ac098c6f0c7fdd90ed218224587c0f8eafef
|
[
"Apache-2.0"
] | null | null | null |
src/datadog_api_client/v1/model/widget_change_type.py
|
rchenzheng/datadog-api-client-python
|
2e86ac098c6f0c7fdd90ed218224587c0f8eafef
|
[
"Apache-2.0"
] | null | null | null |
src/datadog_api_client/v1/model/widget_change_type.py
|
rchenzheng/datadog-api-client-python
|
2e86ac098c6f0c7fdd90ed218224587c0f8eafef
|
[
"Apache-2.0"
] | null | null | null |
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
import re # noqa: F401
import sys # noqa: F401
from datadog_api_client.v1.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class WidgetChangeType(ModelSimple):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("value",): {
"ABSOLUTE": "absolute",
"RELATIVE": "relative",
},
}
validations = {}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"value": (str,),
}
@cached_property
def discriminator():
return None
attribute_map = {}
_composed_schemas = None
required_properties = set(
[
"_data_store",
"_check_type",
"_spec_property_naming",
"_path_to_item",
"_configuration",
"_visited_composed_classes",
]
)
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
"""WidgetChangeType - a model defined in OpenAPI
Note that value can be passed either in args or in kwargs, but not in both.
Args:
args[0] (str): Show the absolute or the relative change.., must be one of ["absolute", "relative", ] # noqa: E501
Keyword Args:
value (str): Show the absolute or the relative change.., must be one of ["absolute", "relative", ] # noqa: E501
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
# required up here when default value is not given
_path_to_item = kwargs.pop("_path_to_item", ())
if "value" in kwargs:
value = kwargs.pop("value")
elif args:
args = list(args)
value = args.pop(0)
else:
raise ApiTypeError(
"value is required, but not passed in args or kwargs and doesn't have default",
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
_check_type = kwargs.pop("_check_type", True)
_spec_property_naming = kwargs.pop("_spec_property_naming", False)
_configuration = kwargs.pop("_configuration", None)
_visited_composed_classes = kwargs.pop("_visited_composed_classes", ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments."
% (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.value = value
if kwargs:
raise ApiTypeError(
"Invalid named arguments=%s passed to %s. Remove those invalid named arguments."
% (
kwargs,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
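# Illustrative usage sketch (not part of the generated code): only the two
# allowed values pass the enum validation defined above.
#
#   change_type = WidgetChangeType("absolute")
#   change_type.value          # -> "absolute"
#   WidgetChangeType("delta")  # rejected by the allowed_values check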
| 39.099448
| 126
| 0.569309
|
f9b8c7e14a147aa41094c96274e88669ba4cb42a
| 390
|
py
|
Python
|
foobar/cli.py
|
knowsuchagency/foobar
|
b02bea7e6a9af232175443e6b1512fc531b61f40
|
[
"MIT"
] | null | null | null |
foobar/cli.py
|
knowsuchagency/foobar
|
b02bea7e6a9af232175443e6b1512fc531b61f40
|
[
"MIT"
] | null | null | null |
foobar/cli.py
|
knowsuchagency/foobar
|
b02bea7e6a9af232175443e6b1512fc531b61f40
|
[
"MIT"
] | null | null | null |
"""foobar v0.1.0
just foo
Usage:
foobar [options] <argument>
foobar -h | --help
foobar -V | --version
Options:
-h --help show help and exit
-V --version show version and exit
"""
from docopt import docopt
def main(argv=None):
args = docopt(__doc__, argv=argv, version='0.1.0')
print(args)
if __name__ == "__main__":
main()
| 16.956522
| 54
| 0.574359
|
eede0f9e243fb541a62a837d9b419c466545b264
| 9,186
|
py
|
Python
|
fate_flow/db/db_models.py
|
bukexiusi/FATE
|
5c7e0dc4c108707872d37b871de86c911daaf793
|
[
"Apache-2.0"
] | 32
|
2020-06-12T08:39:58.000Z
|
2022-03-20T06:57:08.000Z
|
fate_flow/db/db_models.py
|
bukexiusi/FATE
|
5c7e0dc4c108707872d37b871de86c911daaf793
|
[
"Apache-2.0"
] | 10
|
2020-11-13T18:55:48.000Z
|
2022-02-10T02:00:12.000Z
|
fate_flow/db/db_models.py
|
bukexiusi/FATE
|
5c7e0dc4c108707872d37b871de86c911daaf793
|
[
"Apache-2.0"
] | 16
|
2020-06-12T06:51:46.000Z
|
2022-03-29T10:23:42.000Z
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import inspect
import os
import sys
import __main__
from peewee import Model, CharField, IntegerField, BigIntegerField, TextField, CompositeKey, BigAutoField
from playhouse.apsw_ext import APSWDatabase
from playhouse.pool import PooledMySQLDatabase
from arch.api.utils import log_utils
from arch.api.utils.core_utils import current_timestamp
from fate_flow.entity.constant_config import WorkMode
from fate_flow.settings import DATABASE, WORK_MODE, stat_logger, USE_LOCAL_DATABASE
from fate_flow.entity.runtime_config import RuntimeConfig
LOGGER = log_utils.getLogger()
def singleton(cls, *args, **kw):
instances = {}
def _singleton():
key = str(cls) + str(os.getpid())
if key not in instances:
instances[key] = cls(*args, **kw)
return instances[key]
return _singleton
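# Illustrative note: because the cache key combines the class with os.getpid(),
# each process holds exactly one instance. A minimal sketch (the Counter class
# is hypothetical, not part of FATE):
#
#   @singleton
#   class Counter(object):
#       def __init__(self):
#           self.value = 0
#
#   a, b = Counter(), Counter()
#   assert a is b  # same object within a single process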
@singleton
class BaseDataBase(object):
def __init__(self):
database_config = DATABASE.copy()
db_name = database_config.pop("name")
if WORK_MODE == WorkMode.STANDALONE:
if USE_LOCAL_DATABASE:
self.database_connection = APSWDatabase('fate_flow_sqlite.db')
RuntimeConfig.init_config(USE_LOCAL_DATABASE=True)
stat_logger.info('init sqlite database on standalone mode successfully')
else:
self.database_connection = PooledMySQLDatabase(db_name, **database_config)
stat_logger.info('init mysql database on standalone mode successfully')
RuntimeConfig.init_config(USE_LOCAL_DATABASE=False)
elif WORK_MODE == WorkMode.CLUSTER:
self.database_connection = PooledMySQLDatabase(db_name, **database_config)
stat_logger.info('init mysql database on cluster mode successfully')
RuntimeConfig.init_config(USE_LOCAL_DATABASE=False)
else:
raise Exception('can not init database')
if __main__.__file__.endswith('fate_flow_server.py') or __main__.__file__.endswith('task_executor.py'):
DB = BaseDataBase().database_connection
else:
# Initialize the database only when the server is started.
DB = None
def close_connection(db_connection):
try:
if db_connection:
db_connection.close()
except Exception as e:
LOGGER.exception(e)
class DataBaseModel(Model):
class Meta:
database = DB
def to_json(self):
return self.__dict__['__data__']
def save(self, *args, **kwargs):
if hasattr(self, "update_date"):
self.update_date = datetime.datetime.now()
if hasattr(self, "update_time"):
self.update_time = current_timestamp()
super(DataBaseModel, self).save(*args, **kwargs)
def init_database_tables():
with DB.connection_context():
members = inspect.getmembers(sys.modules[__name__], inspect.isclass)
table_objs = []
for name, obj in members:
if obj != DataBaseModel and issubclass(obj, DataBaseModel):
table_objs.append(obj)
DB.create_tables(table_objs)
class Queue(DataBaseModel):
f_job_id = CharField(max_length=100)
f_event = CharField(max_length=500)
f_is_waiting = IntegerField(default=1)
class Meta:
db_table = "t_queue"
class Job(DataBaseModel):
f_job_id = CharField(max_length=25)
f_name = CharField(max_length=500, null=True, default='')
f_description = TextField(null=True, default='')
f_tag = CharField(max_length=50, null=True, index=True, default='')
f_role = CharField(max_length=10, index=True)
f_party_id = CharField(max_length=10, index=True)
f_roles = TextField()
f_work_mode = IntegerField()
f_initiator_party_id = CharField(max_length=50, index=True, default=-1)
f_is_initiator = IntegerField(null=True, index=True, default=-1)
f_dsl = TextField()
f_runtime_conf = TextField()
f_train_runtime_conf = TextField(null=True)
f_run_ip = CharField(max_length=100)
f_status = CharField(max_length=50)
f_current_steps = CharField(max_length=500, null=True) # record component id in DSL
f_current_tasks = CharField(max_length=500, null=True) # record task id
f_progress = IntegerField(null=True, default=0)
f_create_time = BigIntegerField()
f_update_time = BigIntegerField(null=True)
f_start_time = BigIntegerField(null=True)
f_end_time = BigIntegerField(null=True)
f_elapsed = BigIntegerField(null=True)
class Meta:
db_table = "t_job"
primary_key = CompositeKey('f_job_id', 'f_role', 'f_party_id')
class Task(DataBaseModel):
f_job_id = CharField(max_length=25)
f_component_name = TextField()
f_task_id = CharField(max_length=100)
f_role = CharField(max_length=10, index=True)
f_party_id = CharField(max_length=10, index=True)
f_operator = CharField(max_length=100, null=True)
f_run_ip = CharField(max_length=100, null=True)
f_run_pid = IntegerField(null=True)
f_status = CharField(max_length=50)
f_create_time = BigIntegerField()
f_update_time = BigIntegerField(null=True)
f_start_time = BigIntegerField(null=True)
f_end_time = BigIntegerField(null=True)
f_elapsed = BigIntegerField(null=True)
class Meta:
db_table = "t_task"
primary_key = CompositeKey('f_job_id', 'f_task_id', 'f_role', 'f_party_id')
class DataView(DataBaseModel):
f_job_id = CharField(max_length=25)
f_role = CharField(max_length=10, index=True)
f_party_id = CharField(max_length=10, index=True)
f_table_name = CharField(max_length=500, null=True)
f_table_namespace = CharField(max_length=500, null=True)
f_component_name = TextField()
f_create_time = BigIntegerField()
f_update_time = BigIntegerField(null=True)
f_table_count_upload = IntegerField(null=True)
f_table_count_actual = IntegerField(null=True)
f_partition = IntegerField(null=True)
f_task_id = CharField(max_length=100)
f_type = CharField(max_length=50, null=True)
f_ttl = IntegerField(default=0)
f_party_model_id = CharField(max_length=100, null=True)
f_model_version = CharField(max_length=100, null=True)
f_size = BigIntegerField(default=0)
f_description = TextField(null=True, default='')
f_tag = CharField(max_length=50, null=True, index=True, default='')
class Meta:
db_table = "t_data_view"
primary_key = CompositeKey('f_job_id', 'f_task_id', 'f_role', 'f_party_id')
class MachineLearningModelMeta(DataBaseModel):
f_id = BigIntegerField(primary_key=True)
f_role = CharField(max_length=10, index=True)
f_party_id = CharField(max_length=10, index=True)
f_roles = TextField()
f_job_id = CharField(max_length=25)
f_model_id = CharField(max_length=100, index=True)
f_model_version = CharField(max_length=100, index=True)
f_size = BigIntegerField(default=0)
f_create_time = BigIntegerField(default=0)
f_update_time = BigIntegerField(default=0)
f_description = TextField(null=True, default='')
f_tag = CharField(max_length=50, null=True, index=True, default='')
class Meta:
db_table = "t_machine_learning_model_meta"
class TrackingMetric(DataBaseModel):
_mapper = {}
@classmethod
def model(cls, table_index=None, date=None):
if not table_index:
table_index = date.strftime(
'%Y%m%d') if date else datetime.datetime.now().strftime(
'%Y%m%d')
class_name = 'TrackingMetric_%s' % table_index
ModelClass = TrackingMetric._mapper.get(class_name, None)
if ModelClass is None:
class Meta:
db_table = '%s_%s' % ('t_tracking_metric', table_index)
attrs = {'__module__': cls.__module__, 'Meta': Meta}
ModelClass = type("%s_%s" % (cls.__name__, table_index), (cls,),
attrs)
TrackingMetric._mapper[class_name] = ModelClass
return ModelClass()
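    # Illustrative note: TrackingMetric.model(date=datetime.datetime(2020, 5, 1))
    # returns an instance of a dynamically created subclass bound to the table
    # "t_tracking_metric_20200501", so metric rows are sharded into one table
    # per day (or per explicit table_index).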
f_id = BigAutoField(primary_key=True)
f_job_id = CharField(max_length=25)
f_component_name = TextField()
f_task_id = CharField(max_length=100)
f_role = CharField(max_length=10, index=True)
f_party_id = CharField(max_length=10, index=True)
f_metric_namespace = CharField(max_length=180, index=True)
f_metric_name = CharField(max_length=180, index=True)
f_key = CharField(max_length=200)
f_value = TextField()
f_type = IntegerField(index=True) # 0 is data, 1 is meta
f_create_time = BigIntegerField()
f_update_time = BigIntegerField(null=True)
| 37.040323
| 105
| 0.698563
|
617e39cb4006eedfa359f6ca06f03334cddb301e
| 12,990
|
py
|
Python
|
airflow/contrib/hooks/gcp_compute_hook.py
|
RSEnergyGroup/incubator-airflow
|
e947c6c034238ede29a6c8f51307458d3e40c1b5
|
[
"Apache-2.0"
] | 4
|
2018-12-14T05:14:02.000Z
|
2022-01-23T15:48:13.000Z
|
airflow/contrib/hooks/gcp_compute_hook.py
|
RSEnergyGroup/incubator-airflow
|
e947c6c034238ede29a6c8f51307458d3e40c1b5
|
[
"Apache-2.0"
] | 4
|
2018-03-20T21:24:26.000Z
|
2020-05-03T04:23:02.000Z
|
airflow/contrib/hooks/gcp_compute_hook.py
|
RSEnergyGroup/incubator-airflow
|
e947c6c034238ede29a6c8f51307458d3e40c1b5
|
[
"Apache-2.0"
] | 6
|
2020-06-09T02:16:58.000Z
|
2021-12-27T15:46:32.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import time
from googleapiclient.discovery import build
from airflow import AirflowException
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
# Number of retries - used by googleapiclient method calls to perform retries
# For requests that are "retriable"
NUM_RETRIES = 5
# Time to sleep between active checks of the operation results
TIME_TO_SLEEP_IN_SECONDS = 1
class GceOperationStatus:
PENDING = "PENDING"
RUNNING = "RUNNING"
DONE = "DONE"
# noinspection PyAbstractClass
class GceHook(GoogleCloudBaseHook):
"""
Hook for Google Compute Engine APIs.
"""
_conn = None
def __init__(self,
api_version,
gcp_conn_id='google_cloud_default',
delegate_to=None):
super(GceHook, self).__init__(gcp_conn_id, delegate_to)
self.api_version = api_version
def get_conn(self):
"""
Retrieves connection to Google Compute Engine.
:return: Google Compute Engine services object
:rtype: dict
"""
if not self._conn:
http_authorized = self._authorize()
self._conn = build('compute', self.api_version,
http=http_authorized, cache_discovery=False)
return self._conn
def start_instance(self, project_id, zone, resource_id):
"""
Starts an existing instance defined by project_id, zone and resource_id.
:param project_id: Google Cloud Platform project ID where the Compute Engine
Instance exists
:type project_id: str
:param zone: Google Cloud Platform zone where the instance exists
:type zone: str
:param resource_id: Name of the Compute Engine instance resource
:type resource_id: str
:return: True if the operation succeeded, raises an error otherwise.
:rtype: bool
"""
response = self.get_conn().instances().start(
project=project_id,
zone=zone,
instance=resource_id
).execute(num_retries=NUM_RETRIES)
try:
operation_name = response["name"]
except KeyError:
raise AirflowException(
"Wrong response '{}' returned - it should contain "
"'name' field".format(response))
return self._wait_for_operation_to_complete(project_id, operation_name, zone)
def stop_instance(self, project_id, zone, resource_id):
"""
Stops an instance defined by project_id, zone and resource_id
:param project_id: Google Cloud Platform project ID where the Compute Engine
Instance exists
:type project_id: str
:param zone: Google Cloud Platform zone where the instance exists
:type zone: str
:param resource_id: Name of the Compute Engine instance resource
:type resource_id: str
:return: True if the operation succeeded, raises an error otherwise.
:rtype: bool
"""
response = self.get_conn().instances().stop(
project=project_id,
zone=zone,
instance=resource_id
).execute(num_retries=NUM_RETRIES)
try:
operation_name = response["name"]
except KeyError:
raise AirflowException(
"Wrong response '{}' returned - it should contain "
"'name' field".format(response))
return self._wait_for_operation_to_complete(project_id, operation_name, zone)
def set_machine_type(self, project_id, zone, resource_id, body):
"""
Sets machine type of an instance defined by project_id, zone and resource_id.
:param project_id: Google Cloud Platform project ID where the Compute Engine
Instance exists
:type project_id: str
:param zone: Google Cloud Platform zone where the instance exists.
:type zone: str
:param resource_id: Name of the Compute Engine instance resource
:type resource_id: str
:param body: Body required by the Compute Engine setMachineType API,
as described in
https://cloud.google.com/compute/docs/reference/rest/v1/instances/setMachineType
:type body: dict
:return: True if the operation succeeded, raises an error otherwise.
:rtype: bool
"""
response = self._execute_set_machine_type(project_id, zone, resource_id, body)
try:
operation_name = response["name"]
except KeyError:
raise AirflowException(
"Wrong response '{}' returned - it should contain "
"'name' field".format(response))
return self._wait_for_operation_to_complete(project_id, operation_name, zone)
def _execute_set_machine_type(self, project_id, zone, resource_id, body):
return self.get_conn().instances().setMachineType(
project=project_id, zone=zone, instance=resource_id, body=body)\
.execute(num_retries=NUM_RETRIES)
def get_instance_template(self, project_id, resource_id):
"""
Retrieves instance template by project_id and resource_id.
:param project_id: Google Cloud Platform project ID where the Compute Engine
Instance template exists
:type project_id: str
:param resource_id: Name of the instance template
:type resource_id: str
:return: Instance template representation as object according to
https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates
:rtype: dict
"""
response = self.get_conn().instanceTemplates().get(
project=project_id,
instanceTemplate=resource_id
).execute(num_retries=NUM_RETRIES)
return response
def insert_instance_template(self, project_id, body, request_id=None):
"""
Inserts instance template using body specified
:param project_id: Google Cloud Platform project ID where the Compute Engine
Instance exists
:type project_id: str
:param body: Instance template representation as object according to
https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates
:type body: dict
:param request_id: Optional, unique request_id that you might add to achieve
full idempotence (for example when client call times out repeating the request
with the same request id will not create a new instance template again)
It should be in UUID format as defined in RFC 4122
:type request_id: str
:return: True if the operation succeeded
:rtype: bool
"""
response = self.get_conn().instanceTemplates().insert(
project=project_id,
body=body,
requestId=request_id
).execute(num_retries=NUM_RETRIES)
try:
operation_name = response["name"]
except KeyError:
raise AirflowException(
"Wrong response '{}' returned - it should contain "
"'name' field".format(response))
return self._wait_for_operation_to_complete(project_id, operation_name)
def get_instance_group_manager(self, project_id, zone, resource_id):
"""
Retrieves Instance Group Manager by project_id, zone and resource_id.
:param project_id: Google Cloud Platform project ID where the Compute Engine
Instance Group Manager exists
:type project_id: str
:param zone: Google Cloud Platform zone where the Instance Group Manager exists
:type zone: str
:param resource_id: Name of the Instance Group Manager
:type resource_id: str
:return: Instance group manager representation as object according to
https://cloud.google.com/compute/docs/reference/rest/beta/instanceGroupManagers
:rtype: dict
"""
response = self.get_conn().instanceGroupManagers().get(
project=project_id,
zone=zone,
instanceGroupManager=resource_id
).execute(num_retries=NUM_RETRIES)
return response
def patch_instance_group_manager(self, project_id, zone, resource_id,
body, request_id=None):
"""
Patches Instance Group Manager with the specified body.
:param project_id: Google Cloud Platform project ID where the Compute Engine
Instance Group Manager exists
:type project_id: str
:param zone: Google Cloud Platform zone where the Instance Group Manager exists
:type zone: str
:param resource_id: Name of the Instance Group Manager
:type resource_id: str
:param body: Instance Group Manager representation as json-merge-patch object
according to
https://cloud.google.com/compute/docs/reference/rest/beta/instanceTemplates/patch
:type body: dict
:param request_id: Optional, unique request_id that you might add to achieve
full idempotence (for example when client call times out repeating the request
with the same request id will not create a new instance template again).
It should be in UUID format as defined in RFC 4122
:type request_id: str
:return: True if the operation succeeded
:rtype: bool
"""
response = self.get_conn().instanceGroupManagers().patch(
project=project_id,
zone=zone,
instanceGroupManager=resource_id,
body=body,
requestId=request_id
).execute(num_retries=NUM_RETRIES)
try:
operation_name = response["name"]
except KeyError:
raise AirflowException(
"Wrong response '{}' returned - it should contain "
"'name' field".format(response))
return self._wait_for_operation_to_complete(project_id, operation_name, zone)
def _wait_for_operation_to_complete(self, project_id, operation_name, zone=None):
"""
Waits for the named operation to complete - checks status of the async call.
:param operation_name: name of the operation
:type operation_name: str
:param zone: optional region of the request (might be None for global operations)
:type zone: str
:return: True if the operation succeeded, raises an error otherwise
:rtype: bool
"""
service = self.get_conn()
while True:
if zone is None:
# noinspection PyTypeChecker
operation_response = self._check_global_operation_status(
service, operation_name, project_id)
else:
# noinspection PyTypeChecker
operation_response = self._check_zone_operation_status(
service, operation_name, project_id, zone)
if operation_response.get("status") == GceOperationStatus.DONE:
error = operation_response.get("error")
if error:
code = operation_response.get("httpErrorStatusCode")
msg = operation_response.get("httpErrorMessage")
# Extracting the errors list as string and trimming square braces
error_msg = str(error.get("errors"))[1:-1]
raise AirflowException("{} {}: ".format(code, msg) + error_msg)
# No meaningful info to return from the response in case of success
return True
time.sleep(TIME_TO_SLEEP_IN_SECONDS)
@staticmethod
def _check_zone_operation_status(service, operation_name, project_id, zone):
return service.zoneOperations().get(
project=project_id, zone=zone, operation=operation_name).execute(
num_retries=NUM_RETRIES)
@staticmethod
def _check_global_operation_status(service, operation_name, project_id):
return service.globalOperations().get(
project=project_id, operation=operation_name).execute(
num_retries=NUM_RETRIES)
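# Minimal usage sketch (illustrative; the connection id, project, zone and
# instance name below are hypothetical):
#
#   hook = GceHook(api_version='v1', gcp_conn_id='google_cloud_default')
#   hook.start_instance(project_id='my-project',
#                       zone='europe-west1-b',
#                       resource_id='my-instance')
#   # polls zoneOperations until the operation is DONE, then returns True
#   # or raises AirflowException on error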
| 42.175325
| 93
| 0.651347
|
b0d43897674e8451499c03eaaf3244e7ac28bda7
| 177
|
py
|
Python
|
src/constants.py
|
rajatgupta310198/BlockChain
|
03884c0e95e6504431da1e935dde28bc99c9f0c3
|
[
"MIT"
] | 2
|
2021-03-15T12:38:18.000Z
|
2021-06-16T17:44:42.000Z
|
src/constants.py
|
rajatgupta310198/BlockChain
|
03884c0e95e6504431da1e935dde28bc99c9f0c3
|
[
"MIT"
] | null | null | null |
src/constants.py
|
rajatgupta310198/BlockChain
|
03884c0e95e6504431da1e935dde28bc99c9f0c3
|
[
"MIT"
] | 1
|
2018-09-28T19:08:58.000Z
|
2018-09-28T19:08:58.000Z
|
"""
Author: Rajat Gupta
Email: rajat15101@iiitnr.edu.in
Time & Date: 11:28 hrs, Sat, 3rd March 2018
"""
MAX_TRANSACTIONS_PER_BLOCK = 5
WARNING_CODE = 100
SUCCESS_CODE = 101
| 13.615385
| 43
| 0.728814
|
ef8840583faac7e077d59f84d8cc46edf5a9580d
| 2,163
|
py
|
Python
|
dse_do_utils/utilities.py
|
Bhaskers-Blu-Org1/dse-decision-optimization-utilities
|
85efa515d1d93c1aa7b4898486bbb56306f8527a
|
[
"Apache-2.0"
] | 2
|
2020-01-16T14:58:34.000Z
|
2021-11-17T21:33:22.000Z
|
dse_do_utils/utilities.py
|
Bhaskers-Blu-Org1/dse-decision-optimization-utilities
|
85efa515d1d93c1aa7b4898486bbb56306f8527a
|
[
"Apache-2.0"
] | 26
|
2019-07-09T23:05:57.000Z
|
2022-03-21T15:46:24.000Z
|
dse_do_utils/utilities.py
|
IBM/dse-decision-optimization-utilities
|
45d315bc8201932ad99504ef65434a5ab5582d25
|
[
"Apache-2.0"
] | 6
|
2019-11-02T16:42:01.000Z
|
2022-03-17T16:43:38.000Z
|
# Copyright IBM All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# General utilities module
# Contains functions
def add_sys_path(new_path):
""" Adds a directory to Python's sys.path
Does not add the directory if it does not exist or if it's already on
sys.path. Returns 1 if OK, -1 if new_path does not exist, 0 if it was
already on sys.path.
Based on: https://www.oreilly.com/library/view/python-cookbook/0596001673/ch04s23.html
Challenge: in order to use this function, we need to import the dse_do_utils package
    and thus we need to add its location to sys.path!
This will work better once we can do a pip install dse-do_utils.
"""
import sys
import os
# Avoid adding nonexistent paths
if not os.path.exists(new_path):
return -1
# Standardize the path. Windows is case-insensitive, so lowercase
# for definiteness.
new_path = os.path.abspath(new_path)
if sys.platform == 'win32':
new_path = new_path.lower( )
# Check against all currently available paths
for x in sys.path:
x = os.path.abspath(x)
if sys.platform == 'win32':
x = x.lower( )
if new_path in (x, x + os.sep):
return 0
sys.path.append(new_path)
return 1
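# Illustrative usage (the path below is hypothetical):
#
#   status = add_sys_path('/home/dsxuser/work/packages')
#   # 1 -> appended, 0 -> already on sys.path, -1 -> path does not exist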
def list_file_hierarchy(startpath: str) -> None:
"""Hierarchically print the contents of the folder tree, starting with the `startpath`.
Usage::
current_dir = os.getcwd()
parent_dir = os.path.abspath(os.path.join(current_dir, os.pardir))
parent_dir_2 = os.path.abspath(os.path.join(parent_dir, os.pardir))
list_file_hierarchy(parent_dir_2) #List tree starting at the grand-parent of the current directory
Args:
startpath (str): Root of the tree
Returns:
None
"""
import os
for root, dirs, files in os.walk(startpath):
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
print('{}{}'.format(subindent, f))
| 30.041667
| 106
| 0.642164
|
349f83e31907067633d44b64457c30805c03869b
| 2,534
|
py
|
Python
|
examples/Redfish/ex28_set_ilo_timezone.py
|
moecloud/ILO4
|
44fbdf5affa3ecbd1c49718f04d336eaea9cae00
|
[
"Apache-2.0"
] | null | null | null |
examples/Redfish/ex28_set_ilo_timezone.py
|
moecloud/ILO4
|
44fbdf5affa3ecbd1c49718f04d336eaea9cae00
|
[
"Apache-2.0"
] | null | null | null |
examples/Redfish/ex28_set_ilo_timezone.py
|
moecloud/ILO4
|
44fbdf5affa3ecbd1c49718f04d336eaea9cae00
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from _redfishobject import RedfishObject
from ilorest.rest.v1_helper import ServerDownOrUnreachableError
def ex28_set_ilo_timezone(redfishobj, olson_timezone):
sys.stdout.write("\nEXAMPLE 28: Set iLO's Timezone\n")
sys.stdout.write("\tNOTE: This only works if iLO is NOT configured to " \
"take time settings from DHCP v4 or v6\n")
instances = redfishobj.search_for_type("HpiLODateTime.")
for instance in instances:
response = redfishobj.redfish_get(instance["@odata.id"])
for timezone in response.dict["TimeZoneList"]:
if timezone["Name"].startswith(olson_timezone):
body = {"TimeZone": {"Index": timezone["Index"]}}
response = redfishobj.redfish_patch(instance["@odata.id"], body)
redfishobj.error_handler(response)
if __name__ == "__main__":
# When running on the server locally use the following commented values
# iLO_https_url = "blobstore://."
# iLO_account = "None"
# iLO_password = "None"
# When running remotely connect using the iLO secured (https://) address,
# iLO account name, and password to send https requests
# iLO_https_url acceptable examples:
# "https://10.0.0.100"
# "https://f250asha.americas.hpqcorp.net"
iLO_https_url = "https://10.0.0.100"
iLO_account = "admin"
iLO_password = "password"
# Create a REDFISH object
try:
REDFISH_OBJ = RedfishObject(iLO_https_url, iLO_account, iLO_password)
    except ServerDownOrUnreachableError as excp:
sys.stderr.write("ERROR: server not reachable or doesn't support " \
"RedFish.\n")
sys.exit()
    except Exception as excp:
raise excp
ex28_set_ilo_timezone(REDFISH_OBJ, "America/Chicago")
| 42.233333
| 81
| 0.658642
|
9cb0db2284dcd0974307d84119e384e0ae93aed7
| 1,343
|
py
|
Python
|
tests/gsl.pkg/vector_partition.py
|
avalentino/pyre
|
7e1f0287eb7eba1c6d1ef385e5160079283ac363
|
[
"BSD-3-Clause"
] | 25
|
2018-04-23T01:45:39.000Z
|
2021-12-10T06:01:23.000Z
|
tests/gsl.pkg/vector_partition.py
|
avalentino/pyre
|
7e1f0287eb7eba1c6d1ef385e5160079283ac363
|
[
"BSD-3-Clause"
] | 53
|
2018-05-31T04:55:00.000Z
|
2021-10-07T21:41:32.000Z
|
tests/gsl.pkg/vector_partition.py
|
avalentino/pyre
|
7e1f0287eb7eba1c6d1ef385e5160079283ac363
|
[
"BSD-3-Clause"
] | 12
|
2018-04-23T22:50:40.000Z
|
2022-02-20T17:27:23.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2021 all rights reserved
#
"""
Exercise the partitioner
"""
def test():
# setup the workload
samplesPerTask = 8
workload = samplesPerTask
# externals
import mpi
import gsl
# initialize
mpi.init()
# get the world communicator
world = mpi.world
# figure out its geometry
rank = world.rank
tasks = world.size
# decide which task is the source
source = 0
# at the source task
if rank == source:
# allocate a vector
θ = gsl.vector(shape=tasks*samplesPerTask)
# initialize it
for task in range(tasks):
for sample in range(samplesPerTask):
offset = task*samplesPerTask + sample
θ[offset] = offset
# print it out
# θ.print(format="{}")
# the other tasks
else:
# have a dummy source vector
θ = None
# make a partition
part = gsl.vector(shape=workload)
part.excerpt(communicator=world, source=source, vector=θ)
# verify that i got the correct part
for index in range(samplesPerTask):
assert part[index] == rank*samplesPerTask + index
# all done
return
# main
if __name__ == "__main__":
# do...
test()
# end of file
| 19.463768
| 61
| 0.594192
|
dc7fccec53b1c02cd8625adae2b88d71b2fc3139
| 911
|
py
|
Python
|
utilities/kubectl_delete_task_pods.py
|
Eo300/react_flask_pdb2pqr
|
2df89d1df8e8d5b14e4364b10e1b63cb654d7653
|
[
"CC0-1.0"
] | 3
|
2019-08-09T20:30:19.000Z
|
2020-06-11T20:55:26.000Z
|
utilities/kubectl_delete_task_pods.py
|
Eo300/react_flask_pdb2pqr
|
2df89d1df8e8d5b14e4364b10e1b63cb654d7653
|
[
"CC0-1.0"
] | 38
|
2019-11-20T01:33:45.000Z
|
2021-11-30T19:40:56.000Z
|
utilities/kubectl_delete_task_pods.py
|
Eo300/react_flask_pdb2pqr
|
2df89d1df8e8d5b14e4364b10e1b63cb654d7653
|
[
"CC0-1.0"
] | 7
|
2019-10-22T18:51:37.000Z
|
2021-04-13T03:17:10.000Z
|
import subprocess
if __name__ == "__main__":
# Get kubectl pods
command = 'kubectl get pods'
p1 = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
# Parse kubectl pods which begin with "task-"
command = 'grep ^task-'
p2 = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stdin=p1.stdout)
output, err = p2.communicate()
# print(output)
# Remove dead pods from cluster
if output:
output = output.decode('utf-8')
for line in output.split('\n'):
spl_line = line.split()
if len(spl_line) > 0:
task_name = spl_line[0]
command = 'kubectl delete pods %s' % spl_line[0]
p3 = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
output, err = p3.communicate()
output = output.decode('utf-8')
print(output.strip())
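# Roughly equivalent shell pipeline (illustrative only):
#   kubectl get pods | grep ^task- | awk '{print $1}' | xargs kubectl delete pods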
| 33.740741
| 83
| 0.581778
|
598ac5e5564653d7ca61c39836f41788b6f10fef
| 1,469
|
py
|
Python
|
server_3_0.py
|
jessejohn01/CSCI446PA2
|
2b905cbb2ae116adffdab9b1848a520084672332
|
[
"Apache-2.0"
] | null | null | null |
server_3_0.py
|
jessejohn01/CSCI446PA2
|
2b905cbb2ae116adffdab9b1848a520084672332
|
[
"Apache-2.0"
] | null | null | null |
server_3_0.py
|
jessejohn01/CSCI446PA2
|
2b905cbb2ae116adffdab9b1848a520084672332
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import rdt_3_0
import time
def makePigLatin(word):
m = len(word)
vowels = "a", "e", "i", "o", "u", "y"
if m<3 or word=="the":
return word
else:
for i in vowels:
if word.find(i) < m and word.find(i) != -1:
m = word.find(i)
if m==0:
return word+"way"
else:
return word[m:]+word[:m]+"ay"
def piglatinize(message):
essagemay = ""
message = message.strip(".")
for word in message.split(' '):
essagemay += " "+makePigLatin(word)
return essagemay.strip()+"."
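# Worked example (illustrative): piglatinize("hello world.") strips the trailing
# period, converts each word with makePigLatin ("hello" -> "ellohay",
# "world" -> "orldway") and returns "ellohay orldway."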
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Pig Latin conversion server.')
parser.add_argument('port', help='Port.', type=int)
args = parser.parse_args()
timeout = 5 #close connection if no new data within 5 seconds
time_of_last_data = time.time()
rdt = rdt_3_0.RDT('server', None, args.port)
while(True):
        #try to receive a message before timeout
msg_S = rdt.rdt_3_0_receive()
if msg_S is None:
if time_of_last_data + timeout < time.time():
break
else:
continue
time_of_last_data = time.time()
#convert and reply
rep_msg_S = piglatinize(msg_S)
print('Converted %s \nto \n%s\n' % (msg_S, rep_msg_S))
rdt.rdt_3_0_send(rep_msg_S)
rdt.disconnect()
| 26.232143
| 81
| 0.552757
|
ea1d48be3b20a677ad2ba2d8d51976420cbb585b
| 706
|
py
|
Python
|
algorithms/HackerRank/level 1/dictionaries/twoStrings.py
|
clemencegoh/Python_Algorithms
|
0e3492447add6af49b185679da83552ff46e76f8
|
[
"MIT"
] | null | null | null |
algorithms/HackerRank/level 1/dictionaries/twoStrings.py
|
clemencegoh/Python_Algorithms
|
0e3492447add6af49b185679da83552ff46e76f8
|
[
"MIT"
] | null | null | null |
algorithms/HackerRank/level 1/dictionaries/twoStrings.py
|
clemencegoh/Python_Algorithms
|
0e3492447add6af49b185679da83552ff46e76f8
|
[
"MIT"
] | null | null | null |
"""
Given two strings, determine if they share a common substring.
A substring may be as small as one character.
For example, the words "a", "and", "art" share the common substring .
The words "be" and "cat" do not share a substring.
:input: "Hello", "World"
:output: "Yes"
"""
class TwoStringsSolution:
def solve(self, s1, s2):
"""
Concept:
Look at character level
"""
store = set([c for c in s1])
for char in s2:
if char in store:
return "Yes"
return "No"
test_cases = [
("hello", "world", "Yes"),
("hi", "world", "No"),
]
sol = TwoStringsSolution()
for t in test_cases:
    assert sol.solve(t[0], t[1]) == t[2]
| 20.764706
| 69
| 0.566572
|
6d98632e0b3301b12acc2568d0e565a5af26ba9d
| 561
|
py
|
Python
|
example/test_sort.py
|
cclauss/PyZ3950
|
a28937455e1b53cebde3d8d6b487e4e7f56c49ca
|
[
"MIT"
] | null | null | null |
example/test_sort.py
|
cclauss/PyZ3950
|
a28937455e1b53cebde3d8d6b487e4e7f56c49ca
|
[
"MIT"
] | null | null | null |
example/test_sort.py
|
cclauss/PyZ3950
|
a28937455e1b53cebde3d8d6b487e4e7f56c49ca
|
[
"MIT"
] | null | null | null |
#!/usr/local/bin/python2.3 -i
from PyZ3950.zoom import *
c = Connection('gondolin.hist.liv.ac.uk', 210)
c.databaseName = 'l5r'
#c = Connection('z3950.copac.ac.uk', 210)
#c.databaseName = 'COPAC'
c.preferredRecordSyntax = 'SUTRS'
q = Query('pqf', '@attr 1=4 "sword"')
q2 = Query('pqf', '@attr 1=3201 foo')
rs = c.search(q)
print(len(rs))
print(rs[0].data)
sk = SortKey()
#sk.sequence = q2
sk.type = "private"
sk.sequence = "/card/name"
sk.relation = "descending"
#sk.sequence = "rank"
#sk.type = "elementSetName"
rs2 = c.sort([rs], [sk])
print(rs2[0].data)
| 20.777778
| 46
| 0.655971
|
ec1eec6b8dfa5ae7d9f499b846e2f959d8d69787
| 2,305
|
py
|
Python
|
extlinks/links/helpers.py
|
suecarmol/externallinks
|
388771924f0e0173237393226cb7549a02ae40e3
|
[
"MIT"
] | 6
|
2019-12-05T13:14:45.000Z
|
2022-03-13T18:22:00.000Z
|
extlinks/links/helpers.py
|
WikipediaLibrary/externallinks
|
6519719a8b01ab121bf77c465c587af3762e99af
|
[
"MIT"
] | 97
|
2019-07-01T14:42:51.000Z
|
2022-03-29T04:09:34.000Z
|
extlinks/links/helpers.py
|
suecarmol/externallinks
|
388771924f0e0173237393226cb7549a02ae40e3
|
[
"MIT"
] | 8
|
2019-12-03T01:52:41.000Z
|
2020-08-19T00:26:46.000Z
|
from urllib.parse import unquote
from .models import URLPattern
def split_url_for_query(url):
"""
Given a URL pattern, split it into two components:
url_optimised: URL and domain name in the el_index format
(https://www.mediawiki.org/wiki/Manual:Externallinks_table#el_index)
url_pattern_end: Anything following the domain name
"""
url = url.strip() # Catch any trailing spaces
# Start after *. if present
if url.startswith("*."):
url = url[2:]
url_start = url.split("/")[0].split(".")[::-1]
url_optimised = ".".join(url_start) + ".%"
if "/" in url:
url_end = "/".join(url.split("/")[1:])
url_pattern_end = "%./" + url_end + "%"
else:
url_pattern_end = "%"
return url_optimised, url_pattern_end
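# Worked example (illustrative): split_url_for_query("*.example.com/path")
# drops the leading "*.", reverses the domain labels and returns
# ("com.example.%", "%./path%"), matching the el_index layout referenced above.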
def link_is_tracked(link):
all_urlpatterns = URLPattern.objects.all()
tracked_links_list = list(all_urlpatterns.values_list("url", flat=True))
proxied_url = False
# If this looks like a TWL proxied URL we're going to need to match
# it against a longer list of strings
if "wikipedialibrary.idm.oclc" in link:
proxied_url = True
proxied_urls = [urlpattern.get_proxied_url for urlpattern in all_urlpatterns]
tracked_links_list.extend(proxied_urls)
# This is a quick check so we can filter the majority of events
# which won't be matching our filters
if any(links in link for links in tracked_links_list):
# Then we do a more detailed check, to make sure this is the
# root URL.
for tracked_link in tracked_links_list:
# If we track apa.org, we don't want to match iaapa.org
# so we make sure the URL is actually pointing at apa.org
url_starts = ["//" + tracked_link, "." + tracked_link]
if proxied_url:
# Proxy URLs may contain //www- not //www.
url_starts.append("-" + tracked_link)
# We want to avoid link additions from e.g. InternetArchive
# where the URL takes the structure
# https://web.archive.org/https://test.com/
protocol_count = link.count("//")
if any(start in link for start in url_starts) and protocol_count < 2:
return True
else:
return False
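# Illustrative behaviour sketch (domains are hypothetical): with "apa.org" as
# the only tracked URLPattern, link_is_tracked("https://www.apa.org/journal")
# is True, "https://iaapa.org/x" is False because neither "//apa.org" nor
# ".apa.org" occurs in it, and "https://web.archive.org/https://apa.org/" is
# rejected by the protocol_count < 2 check.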
| 36.015625
| 85
| 0.632972
|
547130c2b16684d9abac9a3f20d5b03eb2ef25e7
| 757
|
py
|
Python
|
src/tileapp/forms.py
|
MattEding/Tile-Pattern
|
a1e58b9f0600f5a52dd85cd39808b90c0e684e16
|
[
"MIT"
] | null | null | null |
src/tileapp/forms.py
|
MattEding/Tile-Pattern
|
a1e58b9f0600f5a52dd85cd39808b90c0e684e16
|
[
"MIT"
] | null | null | null |
src/tileapp/forms.py
|
MattEding/Tile-Pattern
|
a1e58b9f0600f5a52dd85cd39808b90c0e684e16
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import FloatField, IntegerField, SelectField, StringField, SubmitField
from wtforms.validators import DataRequired, NumberRange, Regexp
from wtforms.widgets import TextArea
class PatternForm(FlaskForm):
pattern = StringField('Pattern', widget=TextArea(),
validators=[DataRequired(), Regexp('^[Oo01l\\/|.\s-]+$')])
fignum = IntegerField('Figure Number', validators=[NumberRange(0, 10)])
cms = 'gnuplot summer autumn winter spring rainbow gray'.split()
colormap = SelectField('Color Palette', choices=[(cm.lower(), cm.title()) for cm in cms])
alpha = FloatField('Transparency', default=1, validators=[NumberRange(0, 1)])
submit = SubmitField('Generate Figure')
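# Minimal usage sketch (illustrative; the app, route, template and
# build_figure helper are hypothetical, not part of this module):
#
#   from flask import render_template
#
#   @app.route('/', methods=['GET', 'POST'])
#   def index():
#       form = PatternForm()
#       if form.validate_on_submit():
#           build_figure(form.pattern.data, form.fignum.data,
#                        form.colormap.data, form.alpha.data)
#       return render_template('index.html', form=form)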
| 50.466667
| 93
| 0.714663
|
46953b271c45f56a4f451242e9a5f20b9023c453
| 571
|
py
|
Python
|
problems/g6_tests/Sudoku9.py
|
cprudhom/pycsp3
|
980927188f4262c9ea48a6534795712f09d731d6
|
[
"MIT"
] | null | null | null |
problems/g6_tests/Sudoku9.py
|
cprudhom/pycsp3
|
980927188f4262c9ea48a6534795712f09d731d6
|
[
"MIT"
] | null | null | null |
problems/g6_tests/Sudoku9.py
|
cprudhom/pycsp3
|
980927188f4262c9ea48a6534795712f09d731d6
|
[
"MIT"
] | null | null | null |
from pycsp3 import *
clues = data.clues # if not 0, clues[i][j] is a value imposed at row i and col j
# x[i][j] is the value in cell at row i and col j.
x = VarArray(size=[9, 9], dom=range(1, 10))
satisfy(
# imposing distinct values on each row and each column
AllDifferent(x, matrix=True),
# imposing distinct values on each block tag(blocks)
[AllDifferent(x[i:i + 3, j:j + 3]) for i in [0, 3, 6] for j in [0, 3, 6]],
# imposing clues tag(clues)
[x[i][j] == clues[i][j] for i in range(9) for j in range(9) if clues and clues[i][j] > 0]
)
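# Illustrative data sketch (values are hypothetical): clues is expected to be a
# 9x9 grid of integers where 0 marks an empty cell, e.g.
#   clues[0] = [0, 0, 3, 0, 2, 0, 6, 0, 0]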
| 31.722222
| 93
| 0.621716
|
9e930755a59a6cc698a97a40cbbd5ca889aff7ea
| 4,781
|
py
|
Python
|
client/pdf_generator.py
|
True-Demon/king-phisher-plugins
|
bafdba6f069b85d7b45d531fdc100d50fbb9628e
|
[
"BSD-3-Clause"
] | null | null | null |
client/pdf_generator.py
|
True-Demon/king-phisher-plugins
|
bafdba6f069b85d7b45d531fdc100d50fbb9628e
|
[
"BSD-3-Clause"
] | null | null | null |
client/pdf_generator.py
|
True-Demon/king-phisher-plugins
|
bafdba6f069b85d7b45d531fdc100d50fbb9628e
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import time
import xml.sax.saxutils as saxutils
import king_phisher.client.gui_utilities as gui_utilities
import king_phisher.client.mailer as mailer
import king_phisher.client.plugins as plugins
import king_phisher.client.widget.extras as extras
import jinja2.exceptions
try:
from reportlab import platypus
from reportlab.lib import styles
from reportlab.lib.enums import TA_JUSTIFY
from reportlab.lib.pagesizes import letter
from reportlab.lib.units import inch
except ImportError:
has_reportlab = False
else:
has_reportlab = True
class Plugin(getattr(plugins, 'ClientPluginMailerAttachment', plugins.ClientPlugin)):
authors = ['Jeremy Schoeneman']
title = 'Generate PDF'
description = """
Generates a PDF file with a link which includes the campaign URL with the
individual message_id required to track individual visits to a website.
Visit https://github.com/y4utj4/pdf_generator for example template files to
use with this plugin.
"""
homepage = 'https://github.com/securestate/king-phisher-plugins'
options = [
plugins.ClientOptionPath(
'logo',
'Image to include into the pdf',
display_name='Logo / Inline Image',
path_type='file-open'
),
plugins.ClientOptionString(
'link_text',
'Text for inserted link',
default='Click here to accept',
display_name='Link Text'
)
]
req_min_version = '1.8.0b0'
req_packages = {
'reportlab': has_reportlab
}
version = '1.1'
def initialize(self):
mailer_tab = self.application.main_tabs['mailer']
self.add_menu_item('Tools > Create PDF Preview', self.make_preview)
return True
def make_preview(self, _):
input_path = self.application.config['mailer.attachment_file']
if not os.path.isfile(input_path) and os.access(input_path, os.R_OK):
gui_utilities.show_dialog_error(
'PDF Build Error',
self.application.get_active_window(),
'An invalid attachment file is specified.'
)
return
dialog = extras.FileChooserDialog('Save Generated PDF File', self.application.get_active_window())
response = dialog.run_quick_save('preview.pdf')
dialog.destroy()
if response is None:
return
output_path = response['target_path']
if not self.process_attachment_file(input_path, output_path):
gui_utilities.show_dialog_error(
'PDF Build Error',
self.application.get_active_window(),
'Failed to create the PDF file.'
)
return
gui_utilities.show_dialog_info(
'PDF Created',
self.application.get_active_window(),
'Successfully created the PDF file.'
)
def process_attachment_file(self, input_path, output_path, target=None):
output_path, _ = os.path.splitext(output_path)
output_path += '.pdf'
pdf_file = platypus.SimpleDocTemplate(
output_path,
pagesize=letter,
rightMargin=72,
leftMargin=72,
topMargin=72,
bottomMargin=18
)
url = self.application.config['mailer.webserver_url']
if target is not None:
url += '?uid=' + target.uid
try:
pdf_template = self.get_template(input_path, url)
pdf_file.multiBuild(pdf_template)
except Exception as err:
self.logger.error('failed to build the pdf document', exc_info=True)
return
self.logger.info('wrote pdf file to: ' + output_path + ('' if target is None else ' with uid: ' + target.uid))
return output_path
def get_template(self, template_file, url):
formatted_time = time.ctime()
company = self.application.config['mailer.company_name']
sender = self.application.config['mailer.source_email_alias']
story = []
click_me = saxutils.escape(self.config['link_text'])
link = '<font color=blue><link href="' + url + '">' + click_me + '</link></font>'
logo_path = self.config['logo']
if logo_path:
img = platypus.Image(logo_path, 2 * inch, inch)
story.append(img)
style_sheet = styles.getSampleStyleSheet()
style_sheet.add(styles.ParagraphStyle(name='Justify', alignment=TA_JUSTIFY))
ptext = '<font size=10>' + formatted_time + '</font>'
story.append(platypus.Spacer(1, 12))
story.append(platypus.Paragraph(ptext, style_sheet['Normal']))
story.append(platypus.Spacer(1, 12))
with open(template_file, 'r') as file_h:
for line in file_h:
story.append(platypus.Paragraph(line, style_sheet['Normal']))
story.append(platypus.Spacer(1, 8))
story.append(platypus.Paragraph(link, style_sheet['Justify']))
story.append(platypus.Spacer(1, 12))
ptext = '<font size=10>Sincerely,</font>'
story.append(platypus.Paragraph(ptext, style_sheet['Normal']))
story.append(platypus.Spacer(1, 12))
ptext = '<font size=10>' + sender + '</font>'
story.append(platypus.Paragraph(ptext, style_sheet['Normal']))
story.append(platypus.Spacer(1, 12))
ptext = '<font size=10>' + company + '</font>'
story.append(platypus.Paragraph(ptext, style_sheet['Normal']))
return story
| 32.304054
| 112
| 0.732692
|
48efd6d8be97709033490c9552817f65bcb64678
| 12,478
|
py
|
Python
|
networkapi/ip/resource/NetworkIPv6AddResource.py
|
JohnVict0r/GloboNetworkAPI
|
e8f5b868cf71cf35df797bb8f31bddcb2158ecf7
|
[
"Apache-2.0"
] | 1
|
2021-03-14T23:25:59.000Z
|
2021-03-14T23:25:59.000Z
|
networkapi/ip/resource/NetworkIPv6AddResource.py
|
leopoldomauricio/GloboNetworkAPI
|
3b5b2e336d9eb53b2c113977bfe466b23a50aa29
|
[
"Apache-2.0"
] | null | null | null |
networkapi/ip/resource/NetworkIPv6AddResource.py
|
leopoldomauricio/GloboNetworkAPI
|
3b5b2e336d9eb53b2c113977bfe466b23a50aa29
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from networkapi.admin_permission import AdminPermission
from networkapi.ambiente.models import ConfigEnvironmentInvalidError
from networkapi.ambiente.models import EnvironmentVip
from networkapi.auth import has_perm
from networkapi.config.models import Configuration
from networkapi.equipamento.models import EquipamentoAmbiente
from networkapi.exception import EnvironmentVipNotFoundError
from networkapi.exception import InvalidValueError
from networkapi.grupo.models import GrupoError
from networkapi.infrastructure.ip_subnet_utils import get_prefix_IPV6
from networkapi.infrastructure.ip_subnet_utils import MAX_IPV6_HOSTS
from networkapi.infrastructure.xml_utils import dumps_networkapi
from networkapi.infrastructure.xml_utils import loads
from networkapi.infrastructure.xml_utils import XMLError
from networkapi.ip.models import IpError
from networkapi.ip.models import IpNotAvailableError
from networkapi.ip.models import Ipv6
from networkapi.ip.models import Ipv6Equipament
from networkapi.ip.models import NetworkIPv6
from networkapi.ip.models import NetworkIPv6AddressNotAvailableError
from networkapi.ip.models import NetworkIPv6Error
from networkapi.ip.models import NetworkIPv6NotFoundError
from networkapi.rest import RestResource
from networkapi.util import is_valid_int_greater_zero_param
from networkapi.vlan.models import NetworkTypeNotFoundError
from networkapi.vlan.models import TipoRede
from networkapi.vlan.models import VlanError
from networkapi.vlan.models import VlanNotFoundError
class NetworkIPv6AddResource(RestResource):
log = logging.getLogger('NetworkIPv6AddResource')
def handle_post(self, request, user, *args, **kwargs):
"""
Treat requests POST to add a network IPv6
URL: network/ipv6/add/
"""
# Commons Validations
# User permission
if not has_perm(user, AdminPermission.VLAN_MANAGEMENT, AdminPermission.WRITE_OPERATION):
self.log.error(
u'User does not have permission to perform the operation.')
return self.not_authorized()
# Business Validations
# Load XML data
xml_map, _ = loads(request.raw_post_data)
# XML data format
networkapi_map = xml_map.get('networkapi')
if networkapi_map is None:
msg = u'There is no value to the networkapi tag of XML request.'
self.log.error(msg)
return self.response_error(3, msg)
vlan_map = networkapi_map.get('vlan')
if vlan_map is None:
msg = u'There is no value to the vlan tag of XML request.'
self.log.error(msg)
return self.response_error(3, msg)
# Get XML data
vlan_id = vlan_map.get('id_vlan')
network_type = vlan_map.get('id_tipo_rede')
environment_vip = vlan_map.get('id_ambiente_vip')
prefix = vlan_map.get('prefix')
# Valid prefix
if not is_valid_int_greater_zero_param(prefix, False) or (prefix and int(prefix) > 128):
self.log.error(u'Parameter prefix is invalid. Value: %s.', prefix)
return self.response_error(269, 'prefix', prefix)
return self.network_ipv6_add(user, vlan_id, network_type, environment_vip, prefix)
def handle_put(self, request, user, *args, **kwargs):
"""
Treat requests PUT to add a network IPv6
URL: network/ipv6/add/
"""
# Commons Validations
# User permission
if not has_perm(user, AdminPermission.VLAN_MANAGEMENT, AdminPermission.WRITE_OPERATION):
self.log.error(
u'User does not have permission to perform the operation.')
return self.not_authorized()
# Business Validations
# Load XML data
xml_map, _ = loads(request.raw_post_data)
# XML data format
networkapi_map = xml_map.get('networkapi')
if networkapi_map is None:
msg = u'There is no value to the networkapi tag of XML request.'
self.log.error(msg)
return self.response_error(3, msg)
vlan_map = networkapi_map.get('vlan')
if vlan_map is None:
msg = u'There is no value to the vlan tag of XML request.'
self.log.error(msg)
return self.response_error(3, msg)
# Get XML data
vlan_id = vlan_map.get('id_vlan')
network_type = vlan_map.get('id_tipo_rede')
environment_vip = vlan_map.get('id_ambiente_vip')
num_hosts = vlan_map.get('num_hosts')
# Valid num_hosts
if not is_valid_int_greater_zero_param(num_hosts) or int(num_hosts) > MAX_IPV6_HOSTS:
self.log.error(
u'Parameter num_hosts is invalid. Value: %s.', num_hosts)
return self.response_error(269, 'num_hosts', num_hosts)
num_hosts = int(num_hosts)
# Get configuration
conf = Configuration.get()
num_hosts += conf.IPv6_MIN + conf.IPv6_MAX
prefix = get_prefix_IPV6(num_hosts)
self.log.info(u'Prefix for %s hosts: %s' % (num_hosts, prefix))
return self.network_ipv6_add(user, vlan_id, network_type, environment_vip, prefix)
def network_ipv6_add(self, user, vlan_id, network_type, environment_vip, prefix=None):
try:
# Valid vlan ID
if not is_valid_int_greater_zero_param(vlan_id):
self.log.error(
u'Parameter id_vlan is invalid. Value: %s.', vlan_id)
raise InvalidValueError(None, 'id_vlan', vlan_id)
# Network Type
# Valid network_type ID
"""
if not is_valid_int_greater_zero_param(network_type):
self.log.error(
u'Parameter id_tipo_rede is invalid. Value: %s.', network_type)
raise InvalidValueError(None, 'id_tipo_rede', network_type)
"""
# Find network_type by ID to check if it exist
net = None
if network_type:
net = TipoRede.get_by_pk(network_type)
# Environment Vip
if environment_vip is not None:
# Valid environment_vip ID
if not is_valid_int_greater_zero_param(environment_vip):
self.log.error(
u'Parameter id_ambiente_vip is invalid. Value: %s.', environment_vip)
raise InvalidValueError(
None, 'id_ambiente_vip', environment_vip)
# Find Environment VIP by ID to check if it exist
evip = EnvironmentVip.get_by_pk(environment_vip)
else:
evip = None
# Business Rules
# New NetworkIPv6
network_ipv6 = NetworkIPv6()
vlan_map = network_ipv6.add_network_ipv6(user, vlan_id, net, evip, prefix)
list_equip_routers_ambient = EquipamentoAmbiente.get_routers_by_environment(
vlan_map['vlan']['id_ambiente'])
if list_equip_routers_ambient:
                # Add the first available IPv6 on all equipment that is
                # configured as a router for the environment related to the
                # network
ipv6 = Ipv6.get_first_available_ip6(
vlan_map['vlan']['id_network'])
ipv6 = str(ipv6).split(':')
ipv6_model = Ipv6()
ipv6_model.block1 = ipv6[0]
ipv6_model.block2 = ipv6[1]
ipv6_model.block3 = ipv6[2]
ipv6_model.block4 = ipv6[3]
ipv6_model.block5 = ipv6[4]
ipv6_model.block6 = ipv6[5]
ipv6_model.block7 = ipv6[6]
ipv6_model.block8 = ipv6[7]
ipv6_model.networkipv6_id = vlan_map['vlan']['id_network']
ipv6_model.save()
                multiple_ips = len(list_equip_routers_ambient) > 1
if vlan_map.get('vlan').get('vxlan'):
logging.debug('vxlan')
for equip in list_equip_routers_ambient:
Ipv6Equipament().create(user, ipv6_model.id, equip.equipamento.id)
if multiple_ips:
router_ip = Ipv6.get_first_available_ip6(vlan_map['vlan']['id_network'], True)
ipv6s = Ipv6()
ipv6s.block1, ipv6s.block2, ipv6s.block3, ipv6s.block4, ipv6s.block5, \
ipv6s.block6, ipv6s.block7, ipv6s.block8 = str(router_ip).split(':')
ipv6s.networkipv6_id = vlan_map['vlan']['id_network']
                        ipv6s.descricao = "IPv6 allocated for debug"
ipv6s.save(user)
Ipv6Equipament().create(user,
ipv6s.id,
list_equip_routers_ambient[0].equipamento.id)
else:
for equip in list_equip_routers_ambient:
Ipv6Equipament().create(user, vlan_map['vlan']['id_network'], equip.equipamento.id)
if multiple_ips:
router_ip = Ipv6.get_first_available_ip6(vlan_map['vlan']['id_network'], True)
router_ip = str(router_ip).split(':')
ipv6_model2 = Ipv6()
ipv6_model2.block1 = router_ip[0]
ipv6_model2.block2 = router_ip[1]
ipv6_model2.block3 = router_ip[2]
ipv6_model2.block4 = router_ip[3]
ipv6_model2.block5 = router_ip[4]
ipv6_model2.block6 = router_ip[5]
ipv6_model2.block7 = router_ip[6]
ipv6_model2.block8 = router_ip[7]
ipv6_model2.networkipv6_id = vlan_map['vlan']['id_network']
ipv6_model2.save(user)
Ipv6Equipament().create(user,
ipv6_model2.id,
list_equip_routers_ambient[0].equipamento.id)
# Return XML
return self.response(dumps_networkapi(vlan_map))
except XMLError, e:
self.log.error(u'Error reading the XML request.')
return self.response_error(3, e)
except InvalidValueError, e:
return self.response_error(269, e.param, e.value)
except NetworkTypeNotFoundError, e:
self.log.error(u'The network_type parameter does not exist.')
return self.response_error(111)
except VlanNotFoundError, e:
self.log.error(u'Vlan not found')
return self.response_error(116)
except EnvironmentVipNotFoundError:
return self.response_error(283)
except NetworkIPv6AddressNotAvailableError:
return self.response_error(296)
except NetworkIPv6NotFoundError:
return self.response_error(286)
except ConfigEnvironmentInvalidError:
return self.response_error(294)
except IpNotAvailableError, e:
return self.response_error(150, e.message)
except (IpError, NetworkIPv6Error, GrupoError, VlanError):
return self.response_error(1)
| 40.644951
| 107
| 0.614682
|
ca856999d63d7d6d0658c2d43c8f5b8c6df94230
| 11,480
|
py
|
Python
|
src/oci/data_catalog/models/namespace.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/data_catalog/models/namespace.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/data_catalog/models/namespace.py
|
LaudateCorpus1/oci-python-sdk
|
b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class Namespace(object):
"""
Namespace Definition
"""
#: A constant which can be used with the lifecycle_state property of a Namespace.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a Namespace.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a Namespace.
#: This constant has a value of "INACTIVE"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
#: A constant which can be used with the lifecycle_state property of a Namespace.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a Namespace.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a Namespace.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED = "DELETED"
#: A constant which can be used with the lifecycle_state property of a Namespace.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
#: A constant which can be used with the lifecycle_state property of a Namespace.
#: This constant has a value of "MOVING"
LIFECYCLE_STATE_MOVING = "MOVING"
def __init__(self, **kwargs):
"""
Initializes a new Namespace object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param key:
The value to assign to the key property of this Namespace.
:type key: str
:param display_name:
The value to assign to the display_name property of this Namespace.
:type display_name: str
:param description:
The value to assign to the description property of this Namespace.
:type description: str
:param is_service_defined:
The value to assign to the is_service_defined property of this Namespace.
:type is_service_defined: bool
:param lifecycle_state:
The value to assign to the lifecycle_state property of this Namespace.
Allowed values for this property are: "CREATING", "ACTIVE", "INACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED", "MOVING", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param time_created:
The value to assign to the time_created property of this Namespace.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this Namespace.
:type time_updated: datetime
:param created_by_id:
The value to assign to the created_by_id property of this Namespace.
:type created_by_id: str
:param updated_by_id:
The value to assign to the updated_by_id property of this Namespace.
:type updated_by_id: str
"""
self.swagger_types = {
'key': 'str',
'display_name': 'str',
'description': 'str',
'is_service_defined': 'bool',
'lifecycle_state': 'str',
'time_created': 'datetime',
'time_updated': 'datetime',
'created_by_id': 'str',
'updated_by_id': 'str'
}
self.attribute_map = {
'key': 'key',
'display_name': 'displayName',
'description': 'description',
'is_service_defined': 'isServiceDefined',
'lifecycle_state': 'lifecycleState',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated',
'created_by_id': 'createdById',
'updated_by_id': 'updatedById'
}
self._key = None
self._display_name = None
self._description = None
self._is_service_defined = None
self._lifecycle_state = None
self._time_created = None
self._time_updated = None
self._created_by_id = None
self._updated_by_id = None
@property
def key(self):
"""
**[Required]** Gets the key of this Namespace.
Unique namespace key that is immutable.
:return: The key of this Namespace.
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""
Sets the key of this Namespace.
Unique namespace key that is immutable.
:param key: The key of this Namespace.
:type: str
"""
self._key = key
@property
def display_name(self):
"""
Gets the display_name of this Namespace.
Name of the Namespace
:return: The display_name of this Namespace.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this Namespace.
Name of the Namespace
:param display_name: The display_name of this Namespace.
:type: str
"""
self._display_name = display_name
@property
def description(self):
"""
Gets the description of this Namespace.
Description for the namespace
:return: The description of this Namespace.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this Namespace.
Description for the namespace
:param description: The description of this Namespace.
:type: str
"""
self._description = description
@property
def is_service_defined(self):
"""
Gets the is_service_defined of this Namespace.
If this field is defined by service or by a user
:return: The is_service_defined of this Namespace.
:rtype: bool
"""
return self._is_service_defined
@is_service_defined.setter
def is_service_defined(self, is_service_defined):
"""
Sets the is_service_defined of this Namespace.
If this field is defined by service or by a user
:param is_service_defined: The is_service_defined of this Namespace.
:type: bool
"""
self._is_service_defined = is_service_defined
@property
def lifecycle_state(self):
"""
Gets the lifecycle_state of this Namespace.
The current state of the namespace.
Allowed values for this property are: "CREATING", "ACTIVE", "INACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED", "MOVING", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this Namespace.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this Namespace.
The current state of the namespace.
:param lifecycle_state: The lifecycle_state of this Namespace.
:type: str
"""
allowed_values = ["CREATING", "ACTIVE", "INACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED", "MOVING"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def time_created(self):
"""
Gets the time_created of this Namespace.
The date and time the namespace was created, in the format defined by `RFC3339`__.
Example: `2019-03-25T21:10:29.600Z`
__ https://tools.ietf.org/html/rfc3339
:return: The time_created of this Namespace.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this Namespace.
The date and time the namespace was created, in the format defined by `RFC3339`__.
Example: `2019-03-25T21:10:29.600Z`
__ https://tools.ietf.org/html/rfc3339
:param time_created: The time_created of this Namespace.
:type: datetime
"""
self._time_created = time_created
@property
def time_updated(self):
"""
Gets the time_updated of this Namespace.
The last time that any change was made to the namespace. An `RFC3339`__ formatted datetime string.
__ https://tools.ietf.org/html/rfc3339
:return: The time_updated of this Namespace.
:rtype: datetime
"""
return self._time_updated
@time_updated.setter
def time_updated(self, time_updated):
"""
Sets the time_updated of this Namespace.
The last time that any change was made to the namespace. An `RFC3339`__ formatted datetime string.
__ https://tools.ietf.org/html/rfc3339
:param time_updated: The time_updated of this Namespace.
:type: datetime
"""
self._time_updated = time_updated
@property
def created_by_id(self):
"""
Gets the created_by_id of this Namespace.
OCID of the user who created the namespace.
:return: The created_by_id of this Namespace.
:rtype: str
"""
return self._created_by_id
@created_by_id.setter
def created_by_id(self, created_by_id):
"""
Sets the created_by_id of this Namespace.
OCID of the user who created the namespace.
:param created_by_id: The created_by_id of this Namespace.
:type: str
"""
self._created_by_id = created_by_id
@property
def updated_by_id(self):
"""
Gets the updated_by_id of this Namespace.
OCID of the user who last modified the namespace.
:return: The updated_by_id of this Namespace.
:rtype: str
"""
return self._updated_by_id
@updated_by_id.setter
def updated_by_id(self, updated_by_id):
"""
Sets the updated_by_id of this Namespace.
OCID of the user who last modified the namespace.
:param updated_by_id: The updated_by_id of this Namespace.
:type: str
"""
self._updated_by_id = updated_by_id
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 31.111111
| 245
| 0.637979
|
64c7a1fb284aa56acdde351d23d3918341a4fcb4
| 1,073
|
py
|
Python
|
mysite/wsgi.py
|
gbriones1/django-skelleton
|
ee067594e3994f1bac5bf754f618d365bb5248d8
|
[
"BSD-3-Clause"
] | null | null | null |
mysite/wsgi.py
|
gbriones1/django-skelleton
|
ee067594e3994f1bac5bf754f618d365bb5248d8
|
[
"BSD-3-Clause"
] | 10
|
2020-06-05T16:38:25.000Z
|
2022-03-11T23:12:12.000Z
|
mysite/wsgi.py
|
gbriones1/django-skelleton
|
ee067594e3994f1bac5bf754f618d365bb5248d8
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
WSGI config for mysite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 33.53125
| 78
| 0.7726
|
ef99246bdb95cd5a1de07b9fd0c4fb9d101a64b3
| 3,402
|
py
|
Python
|
nailgun/nailgun/api/v1/handlers/cluster_plugin_link.py
|
dnikishov/fuel-web
|
152c2072cf585fc61d7e157ccf9a7ea1d0377daa
|
[
"Apache-2.0"
] | null | null | null |
nailgun/nailgun/api/v1/handlers/cluster_plugin_link.py
|
dnikishov/fuel-web
|
152c2072cf585fc61d7e157ccf9a7ea1d0377daa
|
[
"Apache-2.0"
] | null | null | null |
nailgun/nailgun/api/v1/handlers/cluster_plugin_link.py
|
dnikishov/fuel-web
|
152c2072cf585fc61d7e157ccf9a7ea1d0377daa
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nailgun.api.v1.handlers import base
from nailgun.api.v1.handlers.base import content
from nailgun.api.v1.validators import plugin_link
from nailgun.errors import errors
from nailgun import objects
class ClusterPluginLinkHandler(base.SingleHandler):
validator = plugin_link.PluginLinkValidator
single = objects.ClusterPluginLink
def GET(self, cluster_id, obj_id):
""":returns: JSONized REST object.
:http: * 200 (OK)
* 404 (dashboard entry not found in db)
"""
self.get_object_or_404(objects.Cluster, cluster_id)
obj = self.get_object_or_404(self.single, obj_id)
return self.single.to_json(obj)
@content
def PUT(self, cluster_id, obj_id):
""":returns: JSONized REST object.
:http: * 200 (OK)
* 400 (invalid object data specified)
* 404 (object not found in db)
"""
obj = self.get_object_or_404(self.single, obj_id)
data = self.checked_data(
self.validator.validate_update,
instance=obj
)
self.single.update(obj, data)
return self.single.to_json(obj)
def PATCH(self, cluster_id, obj_id):
""":returns: JSONized REST object.
:http: * 200 (OK)
* 400 (invalid object data specified)
* 404 (object not found in db)
"""
return self.PUT(cluster_id, obj_id)
@content
def DELETE(self, cluster_id, obj_id):
""":returns: JSONized REST object.
:http: * 204 (OK)
* 404 (object not found in db)
"""
d_e = self.get_object_or_404(self.single, obj_id)
self.single.delete(d_e)
raise self.http(204)
class ClusterPluginLinkCollectionHandler(base.CollectionHandler):
collection = objects.ClusterPluginLinkCollection
validator = plugin_link.PluginLinkValidator
@content
def GET(self, cluster_id):
""":returns: Collection of JSONized ClusterPluginLink objects.
:http: * 200 (OK)
* 404 (cluster not found in db)
"""
self.get_object_or_404(objects.Cluster, cluster_id)
return self.collection.to_json(
self.collection.get_by_cluster_id(cluster_id)
)
@content
def POST(self, cluster_id):
""":returns: JSONized REST object.
:http: * 201 (object successfully created)
* 400 (invalid object data specified)
"""
data = self.checked_data()
try:
new_obj = self.collection.create_with_cluster_id(data, cluster_id)
except errors.CannotCreate as exc:
raise self.http(400, exc.message)
raise self.http(201, self.collection.single.to_json(new_obj))
| 31.211009
| 78
| 0.638448
|
b230dc159c9b009cb9c26ec992c061062940f95b
| 1,037
|
py
|
Python
|
mosei.py
|
jakehlee/alexa-stop
|
066b198bee4f1a39c8a75de4e297b614995f284c
|
[
"MIT"
] | null | null | null |
mosei.py
|
jakehlee/alexa-stop
|
066b198bee4f1a39c8a75de4e297b614995f284c
|
[
"MIT"
] | null | null | null |
mosei.py
|
jakehlee/alexa-stop
|
066b198bee4f1a39c8a75de4e297b614995f284c
|
[
"MIT"
] | null | null | null |
# y_test has sentiment from strongly negative -3 to strongly positive +3
import mmsdk
from mmsdk import mmdatasdk
import sys
import csv
# https://github.com/A2Zadeh/CMU-MultimodalSDK/issues/51
mydict={'myfeatures': 'CMU_MOSI_Opinion_Labels.csd'}
mydataset=mmdatasdk.mmdataset(mydict)
texts = open("../Downloads/files.txt")
label_file = open("labels_file.csv", "w")
writer = csv.writer(label_file, delimiter=',')
writer.writerow(["segment", "start", "end", "label"])
# https://github.com/A2Zadeh/CMU-MultimodalSDK/issues/54
for row in texts:
labels = mydataset.computational_sequences['myfeatures'].data[row.split(".")[0]]['features'][:]
intervals = mydataset.computational_sequences['myfeatures'].data[row.split(".")[0]]['intervals'][:]
for i in range(len(labels)):
if float(labels[i][0]) < 0:
writer.writerow([row.split(".")[0] + "_" + str(i+1), intervals[i][0], intervals[i][1], 0])
else:
writer.writerow([row.split(".")[0] + "_" + str(i+1), intervals[i][0], intervals[i][1], 1])
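# Illustration (not part of the original script; example values are assumed):
# the threshold above binarizes the continuous CMU-MOSI sentiment score, e.g.
#   score -2.4  ->  label 0  (negative)
#   score  1.2  ->  label 1  (non-negative)
# so each written row pairs a segment id and its interval with a binary label.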
| 41.48
| 103
| 0.672131
|
3dd93a1aff131a4b8b558f12034955ce759dedbf
| 1,494
|
py
|
Python
|
neutron/tests/unit/services/trunk/fakes.py
|
p0i0/openstack-neutron
|
df2ee28ae9a43cc511482bd6ece5396eb1288814
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/services/trunk/fakes.py
|
p0i0/openstack-neutron
|
df2ee28ae9a43cc511482bd6ece5396eb1288814
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/services/trunk/fakes.py
|
p0i0/openstack-neutron
|
df2ee28ae9a43cc511482bd6ece5396eb1288814
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.services.trunk.drivers import base
class FakeDriver(base.DriverBase):
@property
def is_loaded(self):
return True
@classmethod
def create(cls):
return cls('foo_name', ('foo_intfs',), ('foo_seg_types',))
class FakeDriver2(base.DriverBase):
@property
def is_loaded(self):
return True
@classmethod
def create(cls):
return cls('foo_name2', ('foo_intf2',), ('foo_seg_types',))
class FakeDriverCanTrunkBoundPort(base.DriverBase):
@property
def is_loaded(self):
return True
@classmethod
def create(cls):
return cls('foo_name3', ('foo_intfs',),
('foo_seg_types',), can_trunk_bound_port=True)
class FakeDriverWithAgent(base.DriverBase):
@property
def is_loaded(self):
return True
@classmethod
def create(cls):
return cls('foo_name4', ('foo_intfs',), ('foo_seg_types',), "foo_type")
| 24.9
| 79
| 0.682062
|
0dbc812b4dea8e9405b34a3e191b55d98a732ad0
| 9,501
|
py
|
Python
|
dlcv/object_detection/tensorflow_detect/core/matcher_test.py
|
Loonride/deeplens-cv
|
9e5b31c1a269d364e4912ba8266415fa04277e11
|
[
"MIT"
] | 11
|
2019-10-07T22:06:30.000Z
|
2020-08-26T22:10:53.000Z
|
dlcv/object_detection/tensorflow_detect/core/matcher_test.py
|
Loonride/deeplens-cv
|
9e5b31c1a269d364e4912ba8266415fa04277e11
|
[
"MIT"
] | 16
|
2019-11-02T00:32:00.000Z
|
2022-02-10T00:23:32.000Z
|
dlcv/object_detection/tensorflow_detect/core/matcher_test.py
|
Loonride/deeplens-cv
|
9e5b31c1a269d364e4912ba8266415fa04277e11
|
[
"MIT"
] | 9
|
2019-10-07T13:33:13.000Z
|
2020-09-27T09:50:58.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.matcher."""
import numpy as np
import tensorflow as tf
from object_detection.tensorflow_detect.core import matcher
class MatchTest(tf.test.TestCase):
def test_get_correct_matched_columnIndices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [0, 1, 3, 5]
matched_column_indices = match.matched_column_indices()
self.assertEquals(matched_column_indices.dtype, tf.int32)
with self.test_session() as sess:
matched_column_indices = sess.run(matched_column_indices)
self.assertAllEqual(matched_column_indices, expected_column_indices)
def test_get_correct_counts(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
exp_num_matched_columns = 4
exp_num_unmatched_columns = 2
exp_num_ignored_columns = 1
num_matched_columns = match.num_matched_columns()
num_unmatched_columns = match.num_unmatched_columns()
num_ignored_columns = match.num_ignored_columns()
self.assertEquals(num_matched_columns.dtype, tf.int32)
self.assertEquals(num_unmatched_columns.dtype, tf.int32)
self.assertEquals(num_ignored_columns.dtype, tf.int32)
with self.test_session() as sess:
(num_matched_columns_out, num_unmatched_columns_out,
num_ignored_columns_out) = sess.run(
[num_matched_columns, num_unmatched_columns, num_ignored_columns])
self.assertAllEqual(num_matched_columns_out, exp_num_matched_columns)
self.assertAllEqual(num_unmatched_columns_out, exp_num_unmatched_columns)
self.assertAllEqual(num_ignored_columns_out, exp_num_ignored_columns)
def testGetCorrectUnmatchedColumnIndices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [2, 4]
unmatched_column_indices = match.unmatched_column_indices()
self.assertEquals(unmatched_column_indices.dtype, tf.int32)
with self.test_session() as sess:
unmatched_column_indices = sess.run(unmatched_column_indices)
self.assertAllEqual(unmatched_column_indices, expected_column_indices)
def testGetCorrectMatchedRowIndices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_row_indices = [3, 1, 0, 5]
matched_row_indices = match.matched_row_indices()
self.assertEquals(matched_row_indices.dtype, tf.int32)
with self.test_session() as sess:
matched_row_inds = sess.run(matched_row_indices)
self.assertAllEqual(matched_row_inds, expected_row_indices)
def test_get_correct_ignored_column_indices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [6]
ignored_column_indices = match.ignored_column_indices()
self.assertEquals(ignored_column_indices.dtype, tf.int32)
with self.test_session() as sess:
ignored_column_indices = sess.run(ignored_column_indices)
self.assertAllEqual(ignored_column_indices, expected_column_indices)
def test_get_correct_matched_column_indicator(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indicator = [True, True, False, True, False, True, False]
matched_column_indicator = match.matched_column_indicator()
self.assertEquals(matched_column_indicator.dtype, tf.bool)
with self.test_session() as sess:
matched_column_indicator = sess.run(matched_column_indicator)
self.assertAllEqual(matched_column_indicator, expected_column_indicator)
def test_get_correct_unmatched_column_indicator(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indicator = [False, False, True, False, True, False, False]
unmatched_column_indicator = match.unmatched_column_indicator()
self.assertEquals(unmatched_column_indicator.dtype, tf.bool)
with self.test_session() as sess:
unmatched_column_indicator = sess.run(unmatched_column_indicator)
self.assertAllEqual(unmatched_column_indicator, expected_column_indicator)
def test_get_correct_ignored_column_indicator(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indicator = [False, False, False, False, False, False, True]
ignored_column_indicator = match.ignored_column_indicator()
self.assertEquals(ignored_column_indicator.dtype, tf.bool)
with self.test_session() as sess:
ignored_column_indicator = sess.run(ignored_column_indicator)
self.assertAllEqual(ignored_column_indicator, expected_column_indicator)
def test_get_correct_unmatched_ignored_column_indices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [2, 4, 6]
unmatched_ignored_column_indices = (match.
unmatched_or_ignored_column_indices())
self.assertEquals(unmatched_ignored_column_indices.dtype, tf.int32)
with self.test_session() as sess:
unmatched_ignored_column_indices = sess.run(
unmatched_ignored_column_indices)
self.assertAllEqual(unmatched_ignored_column_indices,
expected_column_indices)
def test_all_columns_accounted_for(self):
    # Note: deliberately using a small number so that not all possibilities
    # (matched, unmatched, ignored) necessarily appear
num_matches = 10
match_results = tf.random_uniform(
[num_matches], minval=-2, maxval=5, dtype=tf.int32)
match = matcher.Match(match_results)
matched_column_indices = match.matched_column_indices()
unmatched_column_indices = match.unmatched_column_indices()
ignored_column_indices = match.ignored_column_indices()
with self.test_session() as sess:
matched, unmatched, ignored = sess.run([
matched_column_indices, unmatched_column_indices,
ignored_column_indices
])
all_indices = np.hstack((matched, unmatched, ignored))
all_indices_sorted = np.sort(all_indices)
self.assertAllEqual(all_indices_sorted,
np.arange(num_matches, dtype=np.int32))
def test_scalar_gather_based_on_match(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
input_tensor = tf.constant([0, 1, 2, 3, 4, 5, 6, 7], dtype=tf.float32)
expected_gathered_tensor = [3, 1, 100, 0, 100, 5, 200]
match = matcher.Match(match_results)
gathered_tensor = match.gather_based_on_match(input_tensor,
unmatched_value=100.,
ignored_value=200.)
self.assertEquals(gathered_tensor.dtype, tf.float32)
with self.test_session():
gathered_tensor_out = gathered_tensor.eval()
self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out)
def test_multidimensional_gather_based_on_match(self):
match_results = tf.constant([1, -1, -2])
input_tensor = tf.constant([[0, 0.5, 0, 0.5], [0, 0, 0.5, 0.5]],
dtype=tf.float32)
expected_gathered_tensor = [[0, 0, 0.5, 0.5], [0, 0, 0, 0], [0, 0, 0, 0]]
match = matcher.Match(match_results)
gathered_tensor = match.gather_based_on_match(input_tensor,
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
self.assertEquals(gathered_tensor.dtype, tf.float32)
with self.test_session():
gathered_tensor_out = gathered_tensor.eval()
self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out)
def test_multidimensional_gather_based_on_match_with_matmul_gather_op(self):
match_results = tf.constant([1, -1, -2])
input_tensor = tf.constant([[0, 0.5, 0, 0.5], [0, 0, 0.5, 0.5]],
dtype=tf.float32)
expected_gathered_tensor = [[0, 0, 0.5, 0.5], [0, 0, 0, 0], [0, 0, 0, 0]]
match = matcher.Match(match_results, use_matmul_gather=True)
gathered_tensor = match.gather_based_on_match(input_tensor,
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
self.assertEquals(gathered_tensor.dtype, tf.float32)
with self.test_session() as sess:
self.assertTrue(
          all([op.name != 'Gather' for op in sess.graph.get_operations()]))
gathered_tensor_out = gathered_tensor.eval()
self.assertAllEqual(expected_gathered_tensor, gathered_tensor_out)
if __name__ == '__main__':
tf.test.main()
| 49.227979
| 80
| 0.706978
|
c662c7a1a930ea4d170d213dd69401c75329ddc1
| 1,174
|
py
|
Python
|
flaxlight/consensus/coinbase.py
|
Flax-Network/flax-light-wallet
|
1745850a28a47bbbc4b5f3d460f35b34b4ed4f25
|
[
"Apache-2.0"
] | 1
|
2021-12-02T14:38:11.000Z
|
2021-12-02T14:38:11.000Z
|
flaxlight/consensus/coinbase.py
|
Flax-Network/flax-light-wallet
|
1745850a28a47bbbc4b5f3d460f35b34b4ed4f25
|
[
"Apache-2.0"
] | null | null | null |
flaxlight/consensus/coinbase.py
|
Flax-Network/flax-light-wallet
|
1745850a28a47bbbc4b5f3d460f35b34b4ed4f25
|
[
"Apache-2.0"
] | 6
|
2021-11-21T00:38:27.000Z
|
2021-12-03T01:25:19.000Z
|
from blspy import G1Element
from flaxlight.types.blockchain_format.coin import Coin
from flaxlight.types.blockchain_format.sized_bytes import bytes32
from flaxlight.util.ints import uint32, uint64
from flaxlight.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import puzzle_for_pk
def create_puzzlehash_for_pk(pub_key: G1Element) -> bytes32:
return puzzle_for_pk(pub_key).get_tree_hash()
def pool_parent_id(block_height: uint32, genesis_challenge: bytes32) -> bytes32:
return bytes32(genesis_challenge[:16] + block_height.to_bytes(16, "big"))
def farmer_parent_id(block_height: uint32, genesis_challenge: bytes32) -> bytes32:
return bytes32(genesis_challenge[16:] + block_height.to_bytes(16, "big"))
def create_pool_coin(block_height: uint32, puzzle_hash: bytes32, reward: uint64, genesis_challenge: bytes32):
parent_id = pool_parent_id(block_height, genesis_challenge)
return Coin(parent_id, puzzle_hash, reward)
def create_farmer_coin(block_height: uint32, puzzle_hash: bytes32, reward: uint64, genesis_challenge: bytes32):
parent_id = farmer_parent_id(block_height, genesis_challenge)
return Coin(parent_id, puzzle_hash, reward)
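# Usage sketch (illustrative only; the placeholder values below are assumptions):
# the two helpers partition the 32-byte genesis challenge, so a reward coin's
# parent id can be recomputed from the block height alone.
#
#   gc = bytes32(b"\x00" * 32)          # placeholder genesis challenge
#   pool_parent_id(uint32(5), gc)       # bytes32(gc[:16] + (5).to_bytes(16, "big"))
#   farmer_parent_id(uint32(5), gc)     # bytes32(gc[16:] + (5).to_bytes(16, "big"))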
| 40.482759
| 111
| 0.809199
|
7dcb19c3fa08017ac623118c67304c1cc72a1b4b
| 3,524
|
py
|
Python
|
pkg/cortex/client/cortex/telemetry.py
|
cbensimon/cortex
|
013b5f7b7996517e3ec6b033a8f7b12d80906037
|
[
"Apache-2.0"
] | null | null | null |
pkg/cortex/client/cortex/telemetry.py
|
cbensimon/cortex
|
013b5f7b7996517e3ec6b033a8f7b12d80906037
|
[
"Apache-2.0"
] | null | null | null |
pkg/cortex/client/cortex/telemetry.py
|
cbensimon/cortex
|
013b5f7b7996517e3ec6b033a8f7b12d80906037
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
from uuid import uuid4
import sentry_sdk
from sentry_sdk.integrations.dedupe import DedupeIntegration
from sentry_sdk.integrations.stdlib import StdlibIntegration
from sentry_sdk.integrations.modules import ModulesIntegration
from cortex.exceptions import CortexBinaryException
from cortex.consts import (
CORTEX_VERSION,
CORTEX_TELEMETRY_SENTRY_DSN,
CORTEX_TELEMETRY_SENTRY_ENVIRONMENT,
)
def _sentry_client(
disabled: bool = False,
) -> sentry_sdk.Client:
"""
Initialize sentry. You can override the default values with the following env vars:
1. CORTEX_TELEMETRY_SENTRY_DSN
2. CORTEX_TELEMETRY_SENTRY_ENVIRONMENT
3. CORTEX_TELEMETRY_DISABLE
"""
dsn = CORTEX_TELEMETRY_SENTRY_DSN
environment = CORTEX_TELEMETRY_SENTRY_ENVIRONMENT
if disabled or os.getenv("CORTEX_TELEMETRY_DISABLE", "").lower() == "true":
return
if os.getenv("CORTEX_TELEMETRY_SENTRY_DSN", "") != "":
dsn = os.environ["CORTEX_TELEMETRY_SENTRY_DSN"]
if os.getenv("CORTEX_TELEMETRY_SENTRY_ENVIRONMENT", "") != "":
environment = os.environ["CORTEX_TELEMETRY_SENTRY_ENVIRONMENT"]
client = sentry_sdk.Client(
dsn=dsn,
environment=environment,
release=CORTEX_VERSION,
ignore_errors=[CortexBinaryException], # exclude CortexBinaryException exceptions
in_app_include=["cortex"], # for better grouping of events in sentry
attach_stacktrace=True,
default_integrations=False, # disable all default integrations
auto_enabling_integrations=False,
integrations=[
DedupeIntegration(), # prevent duplication of events
StdlibIntegration(), # adds breadcrumbs (aka more info)
ModulesIntegration(), # adds info about installed modules
],
# debug=True,
)
return client
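# Example (assumed shell usage; the DSN value is a placeholder): the defaults
# above can be overridden before the client library is imported, e.g.
#   export CORTEX_TELEMETRY_SENTRY_DSN="https://<key>@sentry.example.com/1"
#   export CORTEX_TELEMETRY_SENTRY_ENVIRONMENT="dev"
#   export CORTEX_TELEMETRY_DISABLE="true"    # turns reporting off entirely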
def _create_default_scope(optional_tags: dict = {}) -> sentry_sdk.Scope:
"""
Creates default scope. Adds user ID as tag to the reported event.
Can add optional tags.
"""
scope = sentry_sdk.Scope()
user_id = None
client_id_file_path = pathlib.Path.home() / ".cortex" / "client-id.txt"
if not client_id_file_path.is_file():
client_id_file_path.parent.mkdir(parents=True, exist_ok=True)
client_id_file_path.write_text(str(uuid4()))
user_id = client_id_file_path.read_text()
if user_id:
scope.set_user({"id": user_id})
for k, v in optional_tags.items():
scope.set_tag(k, v)
return scope
# only one instance of this is required
hub = sentry_sdk.Hub(_sentry_client(), _create_default_scope())
def sentry_wrapper(func):
def wrapper(*args, **kwargs):
with hub:
try:
return func(*args, **kwargs)
except:
sentry_sdk.capture_exception()
sentry_sdk.flush()
raise
return wrapper
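# Usage sketch (the wrapped function below is hypothetical, not from this
# module): public client entry points can be decorated so unhandled errors are
# captured, flushed to Sentry, and then re-raised to the caller.
#
#   @sentry_wrapper
#   def deploy(config_path):
#       ...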
| 30.912281
| 90
| 0.698354
|
878b4613c2a9110f0dfc9ed82473aaf92b6882c3
| 9,814
|
py
|
Python
|
notification/management/commands/sendout_tasks.py
|
aropan/clist
|
b0eda1c3a7147a3c9300f9b3eadb83c42cdc8bfe
|
[
"Apache-2.0"
] | 166
|
2019-05-16T23:46:08.000Z
|
2022-03-31T05:20:23.000Z
|
notification/management/commands/sendout_tasks.py
|
aropan/clist
|
b0eda1c3a7147a3c9300f9b3eadb83c42cdc8bfe
|
[
"Apache-2.0"
] | 92
|
2020-01-18T22:51:53.000Z
|
2022-03-12T01:23:57.000Z
|
notification/management/commands/sendout_tasks.py
|
aropan/clist
|
b0eda1c3a7147a3c9300f9b3eadb83c42cdc8bfe
|
[
"Apache-2.0"
] | 23
|
2020-02-09T17:38:43.000Z
|
2021-12-09T14:39:07.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from copy import deepcopy
from datetime import timedelta
from logging import getLogger
from smtplib import SMTPDataError, SMTPResponseException
from time import sleep
from traceback import format_exc
import tqdm
import yaml
from django.conf import settings
from django.core.mail.backends.smtp import EmailBackend
from django.core.mail.message import EmailMultiAlternatives
from django.core.management.base import BaseCommand
from django.db.models import Case, PositiveSmallIntegerField, Prefetch, Q, When
from django.template.loader import render_to_string
from django.utils.timezone import now
from django_print_sql import print_sql_decorator
from filelock import FileLock
from telegram.error import Unauthorized
from webpush import send_user_notification
from webpush.utils import WebPushException
from clist.models import Contest
from notification.models import Task
from tg.bot import Bot
from tg.models import Chat
logger = getLogger('notification.sendout.tasks')
class Command(BaseCommand):
help = 'Send out all unsent tasks'
TELEGRAM_BOT = Bot()
CONFIG_FILE = __file__ + '.yaml'
N_STOP_EMAIL_FAILED_LIMIT = 5
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.email_connection = None
self.n_messages_sent = 0
self.config = None
def add_arguments(self, parser):
parser.add_argument('--coders', nargs='+')
parser.add_argument('--dryrun', action='store_true', default=False)
def get_message(self, method, data, **kwargs):
subject_ = kwargs.pop('subject', None)
message_ = kwargs.pop('message', None)
if 'contests' in data:
contests = Contest.objects.filter(pk__in=data['contests'])
context = deepcopy(data.get('context', {}))
context.update({
'contests': contests,
'domain': settings.MAIN_HOST_,
})
context.update(kwargs)
subject = render_to_string('subject', context).strip()
subject = re.sub(r'\s+', ' ', subject)
context['subject'] = subject
method = method.split(':', 1)[0]
message = render_to_string('message/%s' % method, context).strip()
else:
subject = ''
message = ''
context = {}
if subject_:
subject = subject_ + subject
if message_:
message = message_ + message
return subject, message, context
def send_message(self, coder, method, data, **kwargs):
method, *args = method.split(':', 1)
subject, message, context = self.get_message(method=method, data=data, coder=coder, **kwargs)
if method == settings.NOTIFICATION_CONF.TELEGRAM:
if args:
try:
self.TELEGRAM_BOT.send_message(message, args[0], reply_markup=False)
except Unauthorized as e:
if 'bot was kicked from' in str(e):
if 'notification' in kwargs:
delete_info = kwargs['notification'].delete()
logger.error(f'{str(e)}, delete info = {delete_info}')
return 'removed'
elif coder.chat and coder.chat.chat_id:
try:
if not coder.settings.get('telegram', {}).get('unauthorized', False):
self.TELEGRAM_BOT.send_message(message, coder.chat.chat_id, reply_markup=False)
except Unauthorized as e:
if 'bot was blocked by the user' in str(e):
coder.chat.delete()
else:
coder.settings.setdefault('telegram', {})['unauthorized'] = True
coder.save()
elif 'notification' in kwargs:
delete_info = kwargs['notification'].delete()
logger.error(f'Strange notification, delete info = {delete_info}')
return 'removed'
elif method == settings.NOTIFICATION_CONF.EMAIL:
if self.n_messages_sent % 20 == 0:
if self.n_messages_sent:
sleep(10)
self.email_connection = EmailBackend()
mail = EmailMultiAlternatives(
subject=subject,
body=message,
from_email='CLIST <noreply@clist.by>',
to=[coder.user.email],
bcc=['noreply@clist.by'],
connection=self.email_connection,
alternatives=[(message, 'text/html')],
)
mail.send()
self.n_messages_sent += 1
sleep(2)
elif method == settings.NOTIFICATION_CONF.WEBBROWSER:
payload = {
'head': subject,
'body': message,
}
contests = list(context.get('contests', []))
if len(contests) == 1:
contest = contests[0]
payload['url'] = contest.url
payload['icon'] = f'{settings.HTTPS_HOST_}/imagefit/static_resize/64x64/{contest.resource.icon}'
try:
send_user_notification(
user=coder.user,
payload=payload,
ttl=300,
)
except WebPushException as e:
if '403 Forbidden' in str(e):
if 'notification' in kwargs:
delete_info = kwargs['notification'].delete()
logger.error(f'{str(e)}, delete info = {delete_info}')
return 'removed'
def load_config(self):
if os.path.exists(self.CONFIG_FILE):
with open(self.CONFIG_FILE, 'r') as fo:
self.config = yaml.safe_load(fo)
else:
self.config = {}
self.config.setdefault('stop_email', {})
self.config['stop_email'].setdefault('n_failed', 0)
def save_config(self):
lock = FileLock(self.CONFIG_FILE)
with lock.acquire(timeout=60):
with open(self.CONFIG_FILE, 'w') as fo:
yaml.dump(self.config, fo, indent=2)
@print_sql_decorator()
def handle(self, *args, **options):
self.load_config()
coders = options.get('coders')
dryrun = options.get('dryrun')
stop_email = settings.STOP_EMAIL_ and not dryrun
if (
self.config['stop_email']['n_failed'] >= self.N_STOP_EMAIL_FAILED_LIMIT
and now() - self.config['stop_email']['failed_time'] < timedelta(hours=2)
):
stop_email = True
clear_email_task = False
delete_info = Task.objects.filter(
Q(is_sent=True, modified__lte=now() - timedelta(hours=1)) |
Q(created__lte=now() - timedelta(days=1))
).delete()
logger.info(f'Tasks cleared: {delete_info}')
qs = Task.unsent.all()
qs = qs.select_related('notification__coder')
qs = qs.prefetch_related(
Prefetch(
'notification__coder__chat_set',
queryset=Chat.objects.filter(is_group=False),
to_attr='cchat',
)
)
if stop_email:
qs = qs.exclude(notification__method='email')
if coders:
qs = qs.filter(notification__coder__username__in=coders)
if dryrun:
qs = qs.order_by('modified')
else:
qs = qs.annotate(weight=Case(
When(notification__method='email', then=1),
default=0,
output_field=PositiveSmallIntegerField(),
))
qs = qs.order_by('weight', 'modified')
done = 0
failed = 0
deleted = 0
for task in tqdm.tqdm(qs.iterator(), 'sending'):
if stop_email and task.notification.method == settings.NOTIFICATION_CONF.EMAIL:
if clear_email_task:
contests = task.addition.get('contests', [])
if contests and not Contest.objects.filter(pk__in=contests, start_time__gt=now()).exists():
task.delete()
deleted += 1
continue
try:
notification = task.notification
coder = notification.coder
method = notification.method
status = self.send_message(
coder,
method,
task.addition,
subject=task.subject,
message=task.message,
notification=notification,
)
if status == 'removed':
continue
task.is_sent = True
task.save()
except Exception as e:
logger.error('Exception sendout task:\n%s' % format_exc())
task.is_sent = False
task.save()
if isinstance(e, (SMTPResponseException, SMTPDataError)):
stop_email = True
if self.n_messages_sent:
self.config['stop_email']['n_failed'] = 1
else:
self.config['stop_email']['n_failed'] += 1
if self.config['stop_email']['n_failed'] >= self.N_STOP_EMAIL_FAILED_LIMIT:
clear_email_task = True
self.config['stop_email']['failed_time'] = now()
if task.is_sent:
done += 1
else:
failed += 1
logger.info(f'Done: {done}, failed: {failed}, deleted: {deleted}')
self.save_config()
| 37.458015
| 112
| 0.547381
|
0bcece6a99b61b87741ae100e9c01a4f4d6d3235
| 4,014
|
py
|
Python
|
PaddleFL/federated/fl_trainer.py
|
BaptisteTomasin/Federated-Learning-Frameworks
|
ba1cc9dc6ee0e85d4169f32884890bfc7622d2ae
|
[
"Apache-2.0"
] | null | null | null |
PaddleFL/federated/fl_trainer.py
|
BaptisteTomasin/Federated-Learning-Frameworks
|
ba1cc9dc6ee0e85d4169f32884890bfc7622d2ae
|
[
"Apache-2.0"
] | null | null | null |
PaddleFL/federated/fl_trainer.py
|
BaptisteTomasin/Federated-Learning-Frameworks
|
ba1cc9dc6ee0e85d4169f32884890bfc7622d2ae
|
[
"Apache-2.0"
] | null | null | null |
from paddle_fl.paddle_fl.core.trainer.fl_trainer import FLTrainerFactory
from paddle_fl.paddle_fl.core.master.fl_job import FLRunTimeJob
import paddle
from tb_paddle import SummaryWriter
from os.path import join
import sys
import logging
import time
import json
import argparse
from tools import metrics, select_data
parser = argparse.ArgumentParser()
parser.add_argument('--config_path', help = "path to the config file")
parser.add_argument('--id', help = "id of the trainer")
args = parser.parse_args()
with open(args.config_path, 'r') as fp:
params = json.load(fp)
# Log
#########
logging.basicConfig(
filename="test.log",
filemode="w",
format="%(asctime)s %(name)s:%(levelname)s:%(message)s",
datefmt="%d-%M-%Y %H:%M:%S",
level=logging.DEBUG)
# Load configs
####################
trainer_id = int(args.id) # trainer id
job_path = params["federated"]["job_path"]
job = FLRunTimeJob()
job.load_trainer_job(job_path, trainer_id)
job._scheduler_ep = "127.0.0.1:" + str(params["federated"]["scheduler_port"])  # tell the trainer the scheduler's IP address
trainer = FLTrainerFactory().create_fl_trainer(job)
trainer._current_ep = "127.0.0.1:{}".format(params["federated"]["seed_of_clients_port"] + trainer_id)
place = paddle.fluid.CPUPlace()
trainer.start(place)
test_program = trainer._main_program.clone(for_test = True)
# Load data
###############
# dataset = Time_series_loader(distributed = params["federated"]["distributed"], ts_path = params["federated"]["clients_path"], number_of_clients = params["federated"]["number_of_clients"], lookback = params["federated"]["lookback"], lookforward = params["federated"]["lookforward"])
dataset = select_data(params)
train_reader = paddle.batch(reader = dataset.train_data(client = trainer_id),
batch_size = params["federated"]["batch_size"])
val_reader = paddle.batch(reader=dataset.val_data(client = trainer_id),
batch_size = params["federated"]["batch_size"])
if trainer_id == 0:
test_reader = paddle.batch(reader=dataset.test_data(),
batch_size = params["federated"]["batch_size"])
inp = paddle.fluid.layers.data(name ='inp', shape = params["federated"]["input_shape"], dtype = params["federated"]["input_dtype"])
label = paddle.fluid.layers.data(name ='label', shape = params["federated"]["label_shape"], dtype = params["federated"]["label_dtype"])
feeder = paddle.fluid.DataFeeder(feed_list = [inp, label], place = paddle.fluid.CPUPlace())
# Summary
###########
data_writer = SummaryWriter(logdir=join(join(params["federated"]["logdir"],"data"),f"client_{trainer_id}"))
# Run
#########
round_id = 0
while not trainer.stop():
round_id += 1
if round_id > params["federated"]["num_round"]:
break
for e in range(params["federated"]["num_epoch"]):
for data in train_reader():
trainer.run(feeder.feed(data), fetch=job._target_names)
train_metrics = metrics(trainer.exe, test_program,feeder, train_reader, job._target_names)
val_metrics = metrics(trainer.exe, test_program,feeder, val_reader, job._target_names)
if trainer_id == 0:
test_metrics = metrics(trainer.exe, test_program,feeder, test_reader, job._target_names)
txt_log = "{} Round {} ".format(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())),
round_id)
for metric in range(len(job._target_names)):
metric_name = job._target_names[metric]
txt_log += f"Train {metric_name}: {train_metrics[metric]} Val {metric_name}: {val_metrics[metric]}"
data_writer.add_scalar(f"train/{metric_name}", train_metrics[metric], round_id)
data_writer.add_scalar(f"val/{metric_name}", val_metrics[metric], round_id)
if trainer_id == 0:
txt_log += f" Test {metric_name}: {test_metrics[metric]} "
data_writer.add_scalar(f"test/{metric_name}", test_metrics[metric], round_id)
print(txt_log)
| 39.742574
| 283
| 0.682611
|
d8538ccc6e19e7455facbecca47338ac52088616
| 22,866
|
py
|
Python
|
test/functional/test_framework/util.py
|
FihlaTV/lbrycrd
|
c54af21ce2758490ae3f8340c73cf0f2401801cc
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/util.py
|
FihlaTV/lbrycrd
|
c54af21ce2758490ae3f8340c73cf0f2401801cc
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/util.py
|
FihlaTV/lbrycrd
|
c54af21ce2758490ae3f8340c73cf0f2401801cc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import unhexlify
from decimal import Decimal, ROUND_DOWN
from subprocess import CalledProcessError
import inspect
import json
import logging
import os
import random
import re
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
from io import BytesIO
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_approx(v, vexp, vspan=0.00001):
"""Assert that `v` is within `vspan` of `vexp`"""
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError(
"Expected substring not found in error message:\nsubstring: '{}'\nerror message: '{}'.".format(
message, e.error['message']))
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
time_end = time.time() + timeout
while attempt < attempts and time.time() < time_end:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.05)
# Print the cause of the timeout
predicate_source = "''''\n" + inspect.getsource(predicate) + "'''"
logger.error("wait_until() failed. Predicate: {}".format(predicate_source))
if attempt >= attempts:
raise AssertionError("Predicate {} not true after {} attempts".format(predicate_source, attempts))
elif time.time() >= time_end:
raise AssertionError("Predicate {} not true after {} seconds".format(predicate_source, timeout))
raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 12
# Don't assign rpc or p2p ports lower than this
PORT_MIN = int(os.getenv('TEST_RUNNER_PORT_MIN', default=11000))
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert n <= MAX_NODES
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, chain, rpchost):
rpc_u, rpc_p = get_auth_cookie(datadir, chain)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n, chain):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
# Translate chain name to config name
if chain == 'testnet3':
chain_name_conf_arg = 'testnet'
chain_name_conf_section = 'test'
else:
chain_name_conf_arg = chain
chain_name_conf_section = 'lbrycrdreg'
with open(os.path.join(datadir, "lbrycrd.conf"), 'w', encoding='utf8') as f:
f.write("{}=1\n".format(chain_name_conf_arg))
f.write("[{}]\n".format(chain_name_conf_section))
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("server=1\n")
f.write("keypool=1\n")
f.write("discover=0\n")
f.write("dnsseed=0\n")
f.write("listenonion=0\n")
f.write("printtoconsole=0\n")
f.write("upnp=0\n")
os.makedirs(os.path.join(datadir, 'stderr'), exist_ok=True)
os.makedirs(os.path.join(datadir, 'stdout'), exist_ok=True)
return datadir
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def append_config(datadir, options):
with open(os.path.join(datadir, "lbrycrd.conf"), 'a', encoding='utf8') as f:
for option in options:
f.write(option + "\n")
def get_auth_cookie(datadir, chain):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "lbrycrd.conf")):
with open(os.path.join(datadir, "lbrycrd.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
try:
with open(os.path.join(datadir, chain, ".cookie"), 'r', encoding="ascii") as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
except OSError:
pass
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir, chain):
if os.path.isfile(os.path.join(datadir, chain, ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, chain, ".cookie"))
def softfork_active(node, key):
"""Return whether a softfork is active."""
return node.getblockchaininfo()['softforks'][key]['active']
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for peer_id in [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(nodeid=peer_id)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: [peer['id'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
sync_blocks needs to be called with an rpc_connections set that has least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash.count(best_hash[0]) == len(rpc_connections):
return
time.sleep(wait)
raise AssertionError("Block sync timed out:{}".format("".join("\n {!r}".format(b) for b in best_hash)))
def sync_mempools(rpc_connections, *, wait=1, timeout=120, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
stop_time = time.time() + timeout
while time.time() <= stop_time:
pool = [set(r.getrawmempool()) for r in rpc_connections]
if pool.count(pool[0]) == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
raise AssertionError("Mempool sync timed out:{}".format("".join("\n {!r}".format(m) for m in pool)))
# Transaction/Block functions
#############################
def find_output(node, txid, amount, *, blockhash=None):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1, blockhash)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert confirmations_required >= 0
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransactionwithwallet(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], 0)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value / 2)
outputs[addr2] = satoshi_round(send_value / 2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransactionwithwallet(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert len(utxos) >= count
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = []
from .messages import CTxOut
txout = CTxOut()
txout.nValue = 0
txout.scriptPubKey = hex_str_to_bytes(script_pubkey)
for k in range(128):
txouts.append(txout)
return txouts
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
from .messages import CTransaction
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = satoshi_round(change)
rawtx = node.createrawtransaction(inputs, outputs)
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(rawtx)))
for txout in txouts:
tx.vout.append(txout)
newtx = tx.serialize().hex()
signresult = node.signrawtransactionwithwallet(newtx, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], 0)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
# generate a 66k transaction,
# and 14 of them is close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
def find_vout_for_address(node, txid, addr):
"""
Locate the vout index of the given transaction sending to the
given address. Raises runtime error exception if not found.
"""
tx = node.getrawtransaction(txid, True)
for i in range(len(tx["vout"])):
if any([addr == a for a in tx["vout"][i]["scriptPubKey"]["addresses"]]):
return i
raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr))
| 39.15411
| 140
| 0.649173
|
c76cd613b23afe52aaeef5c10a5231b3ef6199f8
| 3,860
|
py
|
Python
|
neighbour/views.py
|
francismuk/neighbourhood
|
393c6ae91242b010ef2aca4a845a0cb144c2189c
|
[
"MIT"
] | null | null | null |
neighbour/views.py
|
francismuk/neighbourhood
|
393c6ae91242b010ef2aca4a845a0cb144c2189c
|
[
"MIT"
] | 4
|
2020-06-05T22:23:34.000Z
|
2021-09-08T01:12:21.000Z
|
neighbour/views.py
|
francismuk/neighbourhood
|
393c6ae91242b010ef2aca4a845a0cb144c2189c
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404, HttpResponseRedirect, JsonResponse
from .models import Subscriber, Image, Location, Category, Comments, Profile
from .forms import SubscribeForm, NewPostForm, CommentForm
from .email import send_welcome_email
from django.contrib.auth.decorators import login_required
import datetime as dt
from django.contrib.auth.models import User
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializer import ImageSerializer, ProfileSerializer
from rest_framework import status
# Create your views here.
def index(request):
title = 'Home'
current_user = request.user
images = Image.objects.all()
if request.method == 'POST':
name = request.POST.get('your_name')
email = request.POST.get('email')
        recipient = Subscriber(name=name, email=email)
recipient.save()
send_welcome_email(name, email)
data = {'success': 'You have been successfully added to mailing list'}
return JsonResponse(data)
else:
form = SubscribeForm()
return render(request, 'index.html', {'title': title, 'images': images, 'letterForm': form})
@login_required(login_url='/accounts/login/')
def new_project(request):
current_user = request.user
if request.method == 'POST':
form = NewPostForm(request.POST, request.FILES)
if form.is_valid():
image = form.save(commit=False)
image.poster = current_user
image.save()
return redirect('index')
else:
form = NewPostForm()
return render(request, 'registration/new_post.html', {"form": form})
@login_required(login_url='/accounts/login/')
def search_projects(request):
    if 'image' in request.GET and request.GET["image"]:
        search_term = request.GET.get("image")
        searched_images = Image.search_images(search_term)
        message = f"{search_term}"
        return render(request, 'search.html', {"message": message, "images": searched_images})
    else:
        message = "You haven't searched for any image"
        return render(request, 'search.html', {"message": message})
def image(request, id):
try:
image = Image.objects.get(pk=id)
    except Image.DoesNotExist:
raise Http404()
current_user = request.user
comments = Comments.get_comment(Comments, id)
if request.method == 'POST':
form = CommentForm(request.POST)
if form.is_valid():
comment = form.cleaned_data['comment']
commentt = Comments()
commentt.image = image
commentt.user = current_user
commentt.comment = comment
commentt.save()
else:
form = CommentForm()
return render(request, 'single.html', {"image": image,'form': form,'comments': comments})
class Projects(APIView):
    # permission_classes = (IsAdminOrReadOnly,)
    def get(self, request, format=None):
        all_images = Image.objects.all()
        serializers = ImageSerializer(all_images, many=True)
        return Response(serializers.data)
    def post(self, request, format=None):
        serializers = ImageSerializer(data=request.data)
        if serializers.is_valid():
            serializers.save()
            return Response(serializers.data, status=status.HTTP_201_CREATED)
        return Response(serializers.errors, status=status.HTTP_400_BAD_REQUEST)
def newsletter(request):
name = request.POST.get('your_name')
email= request.POST.get('email')
recipient= Subscriber(name= name, email =email)
recipient.save()
send_welcome_email(name, email)
data= {'success': 'You have been successfully added to the newsletter mailing list'}
return JsonResponse(data)
| 32.711864
| 98
| 0.675648
|
68514e1c33ec327101399fbf411e27990f2b0334
| 1,709
|
py
|
Python
|
app/core/migrations/0001_initial.py
|
ardit171/recipe-api-api
|
b5c321c8b3087f06437190279b01b1ec3be16ae7
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
ardit171/recipe-api-api
|
b5c321c8b3087f06437190279b01b1ec3be16ae7
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
ardit171/recipe-api-api
|
b5c321c8b3087f06437190279b01b1ec3be16ae7
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2020-08-27 14:31
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.264706
| 266
| 0.63897
|
e8e72442c9dd0ab364b5613656a1c0dc21d244ee
| 9,987
|
py
|
Python
|
contrib/spendfrom/spendfrom.py
|
All-Mn/AllMNv2
|
2025698345ec6a5e397506f404fe1f2fc5ff1cb4
|
[
"MIT"
] | 3
|
2018-11-04T13:06:01.000Z
|
2019-02-09T08:19:15.000Z
|
contrib/spendfrom/spendfrom.py
|
All-Mn/AllMNv2
|
2025698345ec6a5e397506f404fe1f2fc5ff1cb4
|
[
"MIT"
] | null | null | null |
contrib/spendfrom/spendfrom.py
|
All-Mn/AllMNv2
|
2025698345ec6a5e397506f404fe1f2fc5ff1cb4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Use the raw transactions API to spend allmns received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a allmnd or ALLMN-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the ALLMN Core data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/ALLMNCore/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "ALLMNCore")
return os.path.expanduser("~/.allmncore")
def read_bitcoin_config(dbdir):
"""Read the allmn.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
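    # allmn.conf has no [section] headers, so FakeSecHead injects a dummy "[all]" section
    # and strips trailing comments so SafeConfigParser can parse it.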
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "allmn.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a ALLMN Core JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19991 if testnet else 9991
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the allmnd we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(allmnd):
info = allmnd.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
allmnd.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = allmnd.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(allmnd):
address_summary = dict()
address_to_account = dict()
for info in allmnd.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = allmnd.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = allmnd.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-allmn-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
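# list_available() maps each address to {"total": Decimal sum, "outputs": [listunspent entries], "account": account label}.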
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(allmnd, fromaddresses, toaddress, amount, fee):
all_coins = list_available(allmnd)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to allmnd.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = allmnd.createrawtransaction(inputs, outputs)
signed_rawtx = allmnd.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(allmnd, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = allmnd.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(allmnd, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = allmnd.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(allmnd, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        if kb > 1 and (total_in - total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and (total_in - total_out) < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get allmns from")
parser.add_option("--to", dest="to", default=None,
help="address to get send allmns to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of allmn.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
allmnd = connect_JSON(config)
if options.amount is None:
address_summary = list_available(allmnd)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(allmnd) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(allmnd, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(allmnd, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = allmnd.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| 37.264925
| 111
| 0.63062
|
4ee6347c27cda48f15df1de22ebf7ec369a8b050
| 8,027
|
py
|
Python
|
models/train_classifier.py
|
caiokinupp/udacity-disaster-response-pipeline
|
2086cb52daf7f9e79fa92683fd4348c5c8501c33
|
[
"MIT"
] | null | null | null |
models/train_classifier.py
|
caiokinupp/udacity-disaster-response-pipeline
|
2086cb52daf7f9e79fa92683fd4348c5c8501c33
|
[
"MIT"
] | null | null | null |
models/train_classifier.py
|
caiokinupp/udacity-disaster-response-pipeline
|
2086cb52daf7f9e79fa92683fd4348c5c8501c33
|
[
"MIT"
] | null | null | null |
"""
Classifier Trainer
Project: Udacity Nanodegree - Disaster Response Pipeline
Sample Script Syntax:
> python train_classifier.py <path to sqllite destination db> <path to the pickle file>
Sample Script Execution:
> python train_classifier.py ../data/disaster_response_db.db classifier.pkl
Arguments:
1) Path to SQLite destination database (e.g. disaster_response_db.db)
2) Path to pickle file name where ML model needs to be saved (e.g. classifier.pkl)
"""
# import libraries
import nltk
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
# import libraries
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', 500)
import sys
import os
import re
from sqlalchemy import create_engine
import pickle
from scipy.stats import gmean
# import relevant functions/modules from the sklearn
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import fbeta_score, make_scorer
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier, AdaBoostClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.base import BaseEstimator,TransformerMixin
def load_data(database_filepath):
"""
Load Data from the Database
Arguments:
database_filepath -> Path to SQLite destination database (e.g. disaster_response_db.db)
Output:
X -> a dataframe containing features
Y -> a dataframe containing labels
category_names -> List of categories name
"""
engine = create_engine('sqlite:///' + database_filepath)
table_name = os.path.basename(database_filepath).replace(".db","") + "_table"
df = pd.read_sql_table(table_name,engine)
# Remove child_alone as it has only zeros
df = df.drop(['child_alone'],axis=1)
    # The few rows with value 2 in the 'related' field are negligible, so they are likely errors.
    # There are two options:
    # Option 1: Replace the "2" values with the most frequent value (1)
    #df['related'] = df['related'].map(lambda x: 1 if x == 2 else x)
    # Option 2: Drop the rows with value "2", since they are very few compared with the whole df
df = df.drop(df[df['related'] == 2].index)
X = df['message']
y = df.iloc[:,4:]
#print(X)
#print(y.columns)
category_names = y.columns # This will be used for visualization purpose
return X, y, category_names
def tokenize(text, url_place_holder_string="urlplaceholder"):
"""
Tokenize the text
Arguments:
text -> Text message which needs to be tokenized
Output:
clean_tokens -> List of tokens extracted from the provided text
"""
# Replace all urls with a urlplaceholder string
url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
# Extract all the urls from the provided text
detected_urls = re.findall(url_regex, text)
# Replace url with a url placeholder string
for detected_url in detected_urls:
text = text.replace(detected_url, url_place_holder_string)
# Extract the word tokens from the provided text
tokens = nltk.word_tokenize(text)
    # Lemmatizer to remove inflectional and derivationally related forms of a word
lemmatizer = nltk.WordNetLemmatizer()
# List of clean tokens
clean_tokens = [lemmatizer.lemmatize(w).lower().strip() for w in tokens]
return clean_tokens
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
"""
Starting Verb Extractor
This class extract the starting verb of a sentence,
creating a new feature for the ML classifier
"""
def starting_verb(self, text):
sentence_list = nltk.sent_tokenize(text)
for sentence in sentence_list:
pos_tags = nltk.pos_tag(tokenize(sentence))
first_word, first_tag = pos_tags[0]
if first_tag in ['VB', 'VBP'] or first_word == 'RT':
return True
return False
# Given it is a tranformer we can return the self
def fit(self, X, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.starting_verb)
return pd.DataFrame(X_tagged)
def build_model():
"""
Build Pipeline
Output:
A Scikit ML Pipeline that process text messages and apply a classifier.
"""
pipeline = Pipeline([
('features', FeatureUnion([
('text_pipeline', Pipeline([('count_vectorizer', CountVectorizer(tokenizer=tokenize)),
('tfidf_transformer', TfidfTransformer())])),
('starting_verb_transformer', StartingVerbExtractor())])),
('classifier', MultiOutputClassifier(AdaBoostClassifier()))
])
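    # Small hyper-parameter grid for the AdaBoost estimator (learning rate, n_estimators), scored with micro-averaged F1 via GridSearchCV.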
parameters_grid = {'classifier__estimator__learning_rate': [0.01, 0.02, 0.05],
'classifier__estimator__n_estimators': [10, 20, 40]}
cv = GridSearchCV(pipeline, param_grid=parameters_grid, scoring='f1_micro', n_jobs=6)
return cv
def evaluate_model(model, X_test, Y_test, category_names):
"""
Evaluate Model
This function applies a ML pipeline to a test set and
prints out the model performance (accuracy and f1score)
Arguments:
model -> A valid scikit ML model
X_test -> Test features
Y_test -> Test labels
category_names -> label names (multi-output)
"""
Y_pred = model.predict(X_test)
overall_accuracy = (Y_pred == Y_test).mean().mean()
print('Average overall accuracy {0:.2f}%'.format(overall_accuracy*100))
# Print the whole classification report.
Y_pred = pd.DataFrame(Y_pred, columns = Y_test.columns)
for column in Y_test.columns:
print('Model Performance with Category: {}'.format(column))
print(classification_report(Y_test[column],Y_pred[column]))
def save_model(model, model_filepath):
"""
Save Model
This function saves trained model as Pickle file, to be loaded later.
Arguments:
model -> GridSearchCV or Scikit model object
pickle_filepath -> destination path to save .pkl file
"""
pickle.dump(model, open(model_filepath, 'wb'))
def main():
"""
Train Classifier Main function
This function applies the Machine Learning Pipeline:
1) Extract data from SQLite db
2) Train ML model on training set
3) Estimate model performance on test set
4) Save trained model as Pickle
"""
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print("Please provide the arguments correctly: \nSample Script Execution:\n\
> python train_classifier.py ../data/disaster_response_db.db classifier.pkl \n\
Arguments Description: \n\
1) Path to SQLite destination database (e.g. disaster_response_db.db)\n\
2) Path to pickle file name where ML model needs to be saved (e.g. classifier.pkl)")
if __name__ == '__main__':
main()
| 33.169421
| 103
| 0.669864
|
b9b92ee32eaf658557ea56c184dafd10aa726d69
| 2,020
|
py
|
Python
|
environments/mujoco/rand_param_envs/gym/envs/mujoco/humanoidstandup.py
|
lfeng1999/varibad
|
840f4bd56ccee96a6c162265d18ec54db8b77a1e
|
[
"MIT"
] | 119
|
2020-02-12T07:06:17.000Z
|
2022-03-24T08:37:34.000Z
|
environments/mujoco/rand_param_envs/gym/envs/mujoco/humanoidstandup.py
|
lfeng1999/varibad
|
840f4bd56ccee96a6c162265d18ec54db8b77a1e
|
[
"MIT"
] | 2
|
2021-01-13T14:58:50.000Z
|
2021-01-13T14:59:40.000Z
|
environments/mujoco/rand_param_envs/gym/envs/mujoco/humanoidstandup.py
|
lfeng1999/varibad
|
840f4bd56ccee96a6c162265d18ec54db8b77a1e
|
[
"MIT"
] | 26
|
2020-04-20T13:10:11.000Z
|
2022-03-22T10:21:10.000Z
|
import numpy as np
from environments.mujoco.rand_param_envs.gym import utils
from environments.mujoco.rand_param_envs.gym.envs.mujoco import mujoco_env
def mass_center(model):
mass = model.body_mass
xpos = model.data.xipos
return (np.sum(mass * xpos, 0) / np.sum(mass))[0]
class HumanoidStandupEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, 'humanoidstandup.xml', 5)
utils.EzPickle.__init__(self)
def _get_obs(self):
data = self.model.data
return np.concatenate([data.qpos.flat[2:],
data.qvel.flat,
data.cinert.flat,
data.cvel.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat])
def _step(self, a):
self.do_simulation(a, self.frame_skip)
pos_after = self.model.data.qpos[2][0]
data = self.model.data
uph_cost = (pos_after - 0) / self.model.opt.timestep
quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
quad_impact_cost = .5e-6 * np.square(data.cfrc_ext).sum()
quad_impact_cost = min(quad_impact_cost, 10)
reward = uph_cost - quad_ctrl_cost - quad_impact_cost + 1
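        # Total reward: upward progress per timestep minus control and (capped) impact penalties, plus a constant +1 alive bonus.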
done = bool(False)
return self._get_obs(), reward, done, dict(reward_linup=uph_cost, reward_quadctrl=-quad_ctrl_cost,
reward_impact=-quad_impact_cost)
def reset_model(self):
c = 0.01
self.set_state(
self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq),
self.init_qvel + self.np_random.uniform(low=-c, high=c, size=self.model.nv, )
)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.trackbodyid = 1
self.viewer.cam.distance = self.model.stat.extent * 1.0
self.viewer.cam.lookat[2] += .8
self.viewer.cam.elevation = -20
| 36.727273
| 106
| 0.598515
|
cd0805c9ebfe0082e687ce176b12007af506ed0b
| 1,080
|
py
|
Python
|
The ArcGIS GeoInformation Model/self-service-storymapping/__init__.py
|
esrinederland/DevSummit2020
|
09b14929552a6a5297ac7b117ebd5f21cf6eea37
|
[
"MIT"
] | 3
|
2021-04-07T14:26:25.000Z
|
2021-04-15T14:56:12.000Z
|
The ArcGIS GeoInformation Model/self-service-storymapping/__init__.py
|
esrinederland/DevSummit2021
|
09b14929552a6a5297ac7b117ebd5f21cf6eea37
|
[
"MIT"
] | null | null | null |
The ArcGIS GeoInformation Model/self-service-storymapping/__init__.py
|
esrinederland/DevSummit2021
|
09b14929552a6a5297ac7b117ebd5f21cf6eea37
|
[
"MIT"
] | null | null | null |
import logging
from . import SelfServiceStoryMapping2020
import urllib.parse
import os
import azure.functions as func
__version__ = "v202000901.01"
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info(f'{__version__} Python HTTP trigger function processed a request. SelfServiceStoryMapping2020')
try:
bodybytes = req.get_body()
bodystring = bodybytes.decode('utf-8')
# logging.info("bodystring: {}".format(bodystring))
bodyobject = urllib.parse.parse_qs(bodystring)
logging.info("bodyobject: {}".format(bodyobject))
logging.info("agolusername: {}".format(os.environ["AGOL_USERNAME"]))
SelfServiceStoryMapping2020.ParseBody(bodyobject)
return func.HttpResponse(f"This HTTP triggered function executed successfully. {__version__}")
except Exception as ex:
logging.exception("error parsing body")
return func.HttpResponse(
f"This HTTP triggered function executed unsuccessfully. v{__version__} Error: {ex}",
status_code=500
)
| 36
| 112
| 0.696296
|
ab4ed9f13c5433799c63f6dbbe5a8b13f0ccd9b2
| 2,048
|
py
|
Python
|
chatroom-server.py
|
Jacob-Yangman/chatroom-v1
|
a9a70e5cf7373bcea678920c4397cb59d4044805
|
[
"Apache-2.0"
] | null | null | null |
chatroom-server.py
|
Jacob-Yangman/chatroom-v1
|
a9a70e5cf7373bcea678920c4397cb59d4044805
|
[
"Apache-2.0"
] | null | null | null |
chatroom-server.py
|
Jacob-Yangman/chatroom-v1
|
a9a70e5cf7373bcea678920c4397cb59d4044805
|
[
"Apache-2.0"
] | null | null | null |
"""
Author : Jacob
Email : jacob7154@qq.com
Env : python3
Time : 2021-8-15
GroupChat Project
"""
from socket import *
from multiprocessing import Process
# Address of Server
HOST = "0.0.0.0"
PORT = 7500
ADDR = (HOST, PORT)
# A dict that stores the addresses and usernames of all connected clients
user = {}
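# Wire protocol handled by handle(): "LOGIN <name>", "CHAT <name>: <text>", "EXIT <name> ..." (request keyword and payload separated by a space).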
def join_group(sock, address, name):
if name in user:
sock.sendto(b"FAIL", address)
return
    # Tell the user that they have joined the group chat successfully.
sock.sendto(b"OK", address)
# Inform others that someone has joined in.
msg = f"Welcome {name}!"
for person in user:
sock.sendto(msg.encode(), user[person])
    # Register the client address of the new user in the user dict
user[name] = address
def chat(sock, addr, message):
name = message.split(":", 1)[0]
for u, a in user.items():
if u == name:
continue
sock.sendto(message.encode(), a)
def exit_group(sock, content):
name = content.split(" ", 1)[0]
# delete user info if exists.
if name in user:
del user[name]
for a in user.values():
sock.sendto(content.encode(), a)
def handle(sock):
while True:
request, addr = sock.recvfrom(1024)
info = request.decode().split(" ", 1)
request = info[0]
content = info[1]
if request == "LOGIN":
join_group(sock, addr, content)
elif request == "CHAT":
chat(sock, addr, content)
elif request == "EXIT":
exit_group(sock, content)
# Entry function
def main():
sock = socket(type=SOCK_DGRAM)
sock.bind(ADDR)
# Create a process to receive requests from clients.
p = Process(target=handle, args=(sock,), daemon=True)
p.start()
# To create a group notice anytime you want.
while True:
admin_notice = input("Group Notice:")
# Send group notice by child process.
notice = "CHAT " + "Group Notice:" + admin_notice
sock.sendto(notice.encode(), ADDR)
if __name__ == '__main__':
main()
| 24.380952
| 65
| 0.611816
|
7d8442f5c4e7741024852590d7d30e0672873a02
| 611
|
py
|
Python
|
web_app/services/basilica_service.py
|
jae-finger/twitoff
|
73a42c343dc5fbbe08c4cc470b7705e9ff8bb34c
|
[
"MIT"
] | null | null | null |
web_app/services/basilica_service.py
|
jae-finger/twitoff
|
73a42c343dc5fbbe08c4cc470b7705e9ff8bb34c
|
[
"MIT"
] | 3
|
2021-06-08T21:32:20.000Z
|
2022-03-12T00:32:35.000Z
|
web_app/services/basilica_service.py
|
jae-finger/twitoff
|
73a42c343dc5fbbe08c4cc470b7705e9ff8bb34c
|
[
"MIT"
] | null | null | null |
import basilica
import os
from dotenv import load_dotenv
load_dotenv()
BASILICA_API_KEY = os.getenv("BASILICA_API_KEY")
connection = basilica.Connection(BASILICA_API_KEY)
# print(type(connection)) #> <class 'basilica.Connection'>
# print(embedding)
# a list of 768 numbers?
if __name__ == "__main__":
embedding = connection.embed_sentence("hey this is a cool tweet", model="twitter")
tweets = ["Hello world", "artificial intelligence", "another tweet"]
embeddings = connection.embed_sentences(tweets, model="twitter")
for embed in embeddings:
print("-----")
print(len(embed))
| 30.55
| 86
| 0.720131
|
5ab3d5d9e962da359f04592e15e469609f5ed333
| 1,093
|
py
|
Python
|
consts.py
|
alucebur/snake
|
43df6b47f5f42f99a0365c20f3034c1d83b6c3d9
|
[
"Unlicense"
] | null | null | null |
consts.py
|
alucebur/snake
|
43df6b47f5f42f99a0365c20f3034c1d83b6c3d9
|
[
"Unlicense"
] | null | null | null |
consts.py
|
alucebur/snake
|
43df6b47f5f42f99a0365c20f3034c1d83b6c3d9
|
[
"Unlicense"
] | null | null | null |
"""Constants."""
from typing import Tuple, List, NewType
import pygame
# Size of grid units in pixels
BLOCK = (32, 32)
# Size of sprite images in pixels
SPRITE_BLOCK = (64, 64)
# Types
Point = NewType('Point', Tuple[int, int])
Size = NewType('Size', Tuple[int, int])
SnakeBody = NewType('SnakeBody', List[Tuple[Point, str]])
Color = NewType('Color', Tuple[int, int, int])
# Colors, pygame.Color() is not hashable
WHITE = Color((200, 200, 200))
BLACK = Color((0, 0, 0))
BGCOLOR = Color((40, 40, 40))
SNAKE_COLOR = Color((0, 200, 0))
APPLE_COLOR = Color((200, 0, 0))
# File names
PUN_FILE = "puns.json"
CONFIG_FILE = "settings.json"
OPPOSITE = {'up': "down", 'down': "up", 'left': "right", 'right': "left"}
# Default config
DEFAULT_SETTINGS = {
'sound': 1.0,
'music': 0.8,
'classic': False
}
DEFAULT_KEYMAPPING = {
'direction':
{
pygame.K_UP: "up",
pygame.K_DOWN: "down",
pygame.K_LEFT: "left",
pygame.K_RIGHT: "right"
},
'grid': pygame.K_g,
'pause': pygame.K_p,
'exit': pygame.K_ESCAPE,
'accept': pygame.K_RETURN
}
| 22.306122
| 73
| 0.618481
|
7c4b4818f4acc990e2501eb60c9b2d48e66beb85
| 3,012
|
py
|
Python
|
criterium/models.py
|
andywar65/rpnew_root
|
9281cb16783313a1cd23b1394f2bad485ac1b33d
|
[
"BSD-2-Clause"
] | null | null | null |
criterium/models.py
|
andywar65/rpnew_root
|
9281cb16783313a1cd23b1394f2bad485ac1b33d
|
[
"BSD-2-Clause"
] | null | null | null |
criterium/models.py
|
andywar65/rpnew_root
|
9281cb16783313a1cd23b1394f2bad485ac1b33d
|
[
"BSD-2-Clause"
] | null | null | null |
from datetime import datetime
from django.db import models
from users.models import Member
from pagine.models import Event, Location, generate_unique_slug
from .choices import *
class Race(models.Model):
title = models.CharField('Nome', help_text="Il nome della gara",
max_length = 50)
slug = models.SlugField(max_length=50, editable=False, null=True)
event = models.ForeignKey(Event, on_delete=models.SET_NULL,
blank= True, null=True, verbose_name = 'Evento',
related_name = 'event_race')
date = models.DateField('Data', blank= True, null=True,
help_text="In mancanza di Evento", )
location = models.ForeignKey(Location, on_delete=models.SET_NULL,
blank= True, null=True, verbose_name = 'Luogo',
help_text="In mancanza di Evento", )
type = models.CharField('Tipo', choices = TYPE, max_length = 4,
blank= True, null=True, )
description = models.CharField('Descrizione', blank= True, null=True,
max_length = 500)
def get_date(self):
if self.event:
return self.event.date.date()
return self.date
get_date.short_description = 'Data'
def get_edition(self):
date = self.get_date()
year = date.year
month = date.month
if month >= 11:
return str(year) + '-' + str(year+1)
else:
return str(year-1) + '-' + str(year)
get_edition.short_description = 'Edizione'
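    # get_edition(): seasons run from November to October, e.g. races in Dec 2020 and Mar 2021 both fall in edition "2020-2021".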
def get_path(self):
return '/criterium/' + self.get_edition() + '/' + self.slug
def get_location(self):
if self.event:
return self.event.location
return self.location
get_location.short_description = 'Luogo'
def save(self, *args, **kwargs):
if not self.slug: # create
self.slug = generate_unique_slug(Race, self.title)
if not self.date:
self.date = self.event.date
super(Race, self).save(*args, **kwargs)
def __str__(self):
return self.title
class Meta:
verbose_name = 'Gara'
verbose_name_plural = 'Gare'
ordering = ('-date', )
class AthleteManager(models.Manager):
def get_queryset(self):
return super().get_queryset().order_by('member__last_name',
'member__first_name')
class Athlete(models.Model):
member = models.ForeignKey(Member, on_delete=models.CASCADE,
verbose_name = 'Iscritto', null = True, )
race = models.ForeignKey(Race, on_delete=models.CASCADE,
editable = False, null = True, )
points = models.IntegerField('Punti')
placement = models.IntegerField('Piazzamento assoluto', blank = True,
null = True, )
time = models.TimeField('Tempo', blank = True, null = True, )
objects = AthleteManager()
def __str__(self):
return self.member.get_full_name()
def get_full_name(self):
return self.member.get_full_name()
class Meta:
verbose_name = 'Atleta'
verbose_name_plural = 'Atleti/e'
| 33.098901
| 73
| 0.63579
|
b92247f21be46007102083c5724ebb64905fb15f
| 3,268
|
py
|
Python
|
scripts/providers/akamai/list_edgehostnames.py
|
satroutr/poppy
|
27417f86854d9e0a04726acc263ef0a2ce9f8f6e
|
[
"Apache-2.0"
] | 3
|
2017-07-05T20:09:59.000Z
|
2018-11-27T22:02:57.000Z
|
scripts/providers/akamai/list_edgehostnames.py
|
satroutr/poppy
|
27417f86854d9e0a04726acc263ef0a2ce9f8f6e
|
[
"Apache-2.0"
] | 24
|
2017-04-18T15:14:04.000Z
|
2019-03-20T19:09:07.000Z
|
scripts/providers/akamai/list_edgehostnames.py
|
satroutr/poppy
|
27417f86854d9e0a04726acc263ef0a2ce9f8f6e
|
[
"Apache-2.0"
] | 8
|
2017-04-03T13:24:27.000Z
|
2021-11-08T20:28:10.000Z
|
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ConfigParser
import json
import os
import requests
import sys
from akamai.edgegrid import EdgeGridAuth
def main(args):
if len(args) != 2:
print("usage: python list_edgehostnames.py [env]")
print(
"example : python list_edgehostnames.py [prod|test]")
sys.exit(2)
env = args[1]
config_parser = ConfigParser.RawConfigParser()
config_path = os.path.expanduser('~/.poppy/akamai.conf')
config_parser.read(config_path)
# print("querying akamai api for property hostnames: ")
akamai_request(env, config_parser)
def edge_session(env, config):
s = requests.Session()
s.auth = EdgeGridAuth(
# This is akamai credential
client_token=config.get(env, 'client_token'),
client_secret=config.get(env, 'client_secret'),
access_token=config.get(env, 'access_token'))
return s
def akamai_request(env, config):
base_url = config.get(env, 'base_url')
group_id = config.get(env, 'group_id')
contract_id = config.get(env, 'contract_id')
policy_num = config.get(env, 'policy_number')
# get the latest version number
version_url = (
'{0}papi/v1/properties/prp_{1}/versions/' +
'?contractId=ctr_{2}&groupId=grp_{3}')
version_url = version_url.format(
base_url,
policy_num,
contract_id,
group_id
)
# print("Querying: ", version_url)
s = edge_session(env, config)
response = s.get(version_url,
headers={
'Content-type': 'application/json'
})
version_dict = response.json()
version_num = 1
for item in version_dict['versions']['items']:
if item['productionStatus'] == 'ACTIVE':
version_num = item['propertyVersion']
break
# get the hostname information
policy_url = (
'{0}papi/v1/properties/prp_{1}/versions/{4}/hostnames/' +
'?contractId=ctr_{2}&groupId=grp_{3}')
policy_url = policy_url.format(
base_url,
policy_num,
contract_id,
group_id,
version_num
)
# print("Querying: ", policy_url)
s = edge_session(env, config)
response = s.get(policy_url,
headers={
'Content-type': 'application/json'
})
resp_dict = response.json()
# print resp_dict
domains_dict = {}
for item in resp_dict['hostnames']['items']:
domains_dict.setdefault(
item['cnameTo'], list()).append(item['cnameFrom'])
print(json.dumps(domains_dict, indent=4, sort_keys=True))
if __name__ == '__main__':
main(sys.argv)
| 28.920354
| 69
| 0.632497
|
dff98cb14f0e04669675bc340e6e2eaab99e10b1
| 1,666
|
py
|
Python
|
app.py
|
sadilchamishka/EMOSENSE
|
e2cb389d095dfa9cd385354c178593ad5e42bee5
|
[
"MIT"
] | null | null | null |
app.py
|
sadilchamishka/EMOSENSE
|
e2cb389d095dfa9cd385354c178593ad5e42bee5
|
[
"MIT"
] | null | null | null |
app.py
|
sadilchamishka/EMOSENSE
|
e2cb389d095dfa9cd385354c178593ad5e42bee5
|
[
"MIT"
] | null | null | null |
from flask import Flask, request, jsonify
from UtterenceModel import predictUtterence
from DeepLearntFeatures import featureMean,feature20BinMeans
from ConversationModel import predictConversationOffline, predictConversationOnline
from flask_cors import CORS
app = Flask(__name__)
cors = CORS(app)
utterence_folder = './utterences/'
@app.route("/")
def home():
return "success"
@app.route("/utterence",methods = ['POST'])
def utterenceEmotionPrediction():
file = request.files['audio']
utterence_path = utterence_folder+'utt.wav'
file.save(utterence_path)
prediction = predictUtterence(utterence_path)
return jsonify({'prediction': prediction[0]})
@app.route("/conversation/offline",methods = ['POST'])
def conversationEmotionPredictionOffline():
files = request.files
data = request.args['speakers']
prediction,attention_f,attention_b = predictConversationOffline(files,data)
emotion_predictions = []
i=0
for p in prediction.tolist():
temp = {}
temp['timestep'] = i
temp['Happy'] = p[0]
temp['Sad'] = p[1]
temp['Neutral'] = p[2]
temp['Angry'] = p[3]
temp['Excited'] = p[4]
temp['Frustrated'] = p[5]
emotion_predictions.append(temp)
i+=1
#return jsonify({'prediction': prediction.tolist(), 'attentionf':attention_f, 'attentionb':attention_b})
return jsonify({'prediction':emotion_predictions})
@app.route("/conversation/online",methods = ['POST'])
def conversationEmotionPredictionOnline():
files = request.files
data = request.args['speakers']
prediction = predictConversationOnline(files,data)
print(prediction)
return "success"
if __name__ == "__main__":
app.run(host='0.0.0.0')
| 29.22807
| 106
| 0.728091
|
53c062b0958b140fe91790404f1953f991d107d2
| 981
|
py
|
Python
|
app/models/user.py
|
OmarGC/testing-fastapi
|
d0fb8595da229f040d7884f3a171b527a27d686d
|
[
"Apache-2.0"
] | null | null | null |
app/models/user.py
|
OmarGC/testing-fastapi
|
d0fb8595da229f040d7884f3a171b527a27d686d
|
[
"Apache-2.0"
] | null | null | null |
app/models/user.py
|
OmarGC/testing-fastapi
|
d0fb8595da229f040d7884f3a171b527a27d686d
|
[
"Apache-2.0"
] | null | null | null |
#Third party libraries
from sqlalchemy.sql.schema import Column, Table
from sqlalchemy.sql.sqltypes import Boolean, Integer, String, DateTime, Date
from sqlalchemy.sql.functions import func
#Own's Libraries
from app.config.connection import db
user = Table(
"USUARIO",
db.meta,
Column( "id_usuario", Integer, primary_key=True, nullable=False ),
Column( "created_at", DateTime(timezone=True), server_default=func.now(), nullable=False ),
Column( "updated_at", DateTime(timezone=True), onupdate=func.now(), nullable=True ),
Column( "nombre_usuario", String(120), nullable=False ),
Column( "apellido_paterno_usuario", String(120), nullable=True ),
Column( "apellido_materno_usuario", String(120), nullable=True ),
Column( "email_usuario", String(150), nullable=False, unique=True ),
Column( "telefono_usuario", String(20), nullable=False, unique=True ),
Column( "activo_usuario", Boolean, nullable=False )
)
db.meta.create_all(db.engine)
| 42.652174
| 95
| 0.733945
|
b6b3d963ecebcda7f0a98ba1b110ff733a1f6cd9
| 4,400
|
py
|
Python
|
pif_ingestor/core.py
|
CitrineInformatics/pif-ingestor
|
7512ad2cf86ba8bee8cf18dc7181d5fc9977f290
|
[
"Apache-2.0"
] | 1
|
2018-01-04T18:17:36.000Z
|
2018-01-04T18:17:36.000Z
|
pif_ingestor/core.py
|
CitrineInformatics/pif-ingestor
|
7512ad2cf86ba8bee8cf18dc7181d5fc9977f290
|
[
"Apache-2.0"
] | 11
|
2017-06-23T17:41:43.000Z
|
2018-10-26T18:34:04.000Z
|
pif_ingestor/core.py
|
CitrineInformatics/pif-ingestor
|
7512ad2cf86ba8bee8cf18dc7181d5fc9977f290
|
[
"Apache-2.0"
] | 4
|
2017-09-14T12:18:09.000Z
|
2018-04-24T19:05:02.000Z
|
from .ui import get_cli
from .manager import IngesterManager
from .enrichment import add_tags, add_license, add_contact
from .uploader import upload
from .packager import create_package
from .globus import push_to_globus
import os.path
from os import walk, listdir
from pypif import pif
from pypif.obj.system import System
import json
import logging
import types
from .ext.matmeta_wrapper import add_metadata
from pypif_sdk.func import replace_by_key
def _stream_write(fname, pifs_iterable):
with open(fname, "w") as f:
f.write("[\n")
first = True
for p in pifs_iterable:
if not first:
f.write(",\n")
first = False
pif.dump(p, f)
f.write("\n]")
def _handle_pif(path, ingest_name, convert_args, enrich_args, metadata, ingest_manager, path_replace):
"""Ingest and enrich pifs from a path, returning affected paths"""
# Run an ingest extension
if ingest_name == "auto":
pifs = ingest_manager.run_extensions([path], convert_args)
elif ingest_name == "merge":
pifs = ingest_manager.run_extensions([path], convert_args, merge=True)
else:
pifs = ingest_manager.run_extension(ingest_name, path, convert_args)
if isinstance(pifs, System):
pifs = [pifs]
if len(metadata) > 0:
pifs = [add_metadata(x, metadata) for x in pifs]
if len(path_replace) > 0:
pifs = [replace_by_key(x, "relative_path", path_replace, new_key="url", remove=False) for x in pifs]
# Perform enrichment
add_tags(pifs, enrich_args['tags'])
add_license(pifs, enrich_args['license'])
add_contact(pifs, enrich_args['contact'])
# Write the pif
if os.path.isfile(path):
pif_name = "{}_{}".format(path, "pif.json")
res = [path, pif_name]
else:
pif_name = os.path.join(path, "pif.json")
res = [path]
_stream_write(pif_name, pifs)
logging.info("Created pif at {}".format(pif_name))
return res
def _enumerate_files(path, recursive):
if os.path.isfile(path):
return [path]
if os.path.isdir(path) and not recursive:
        return [os.path.join(path, x) for x in listdir(path)
                if os.path.isfile(os.path.join(path, x))]
res = []
for root, dirs, files in walk(path):
res.extend(os.path.join(root, x) for x in files)
return res
def main(args):
"""Main driver for pif-ingestor"""
enrichment_args = {
'tags': args.tags,
'license': args.license,
'contact': args.contact
}
# Load the ingest extensions
ingest_manager = IngesterManager()
path_replace = {}
if args.globus_collection:
globus_remap = push_to_globus(_enumerate_files(args.path, args.recursive), collection=args.globus_collection)
path_replace = {k: v["http_url"] for k, v in globus_remap.items() if "http_url" in v}
metadata = {}
if args.meta:
with open(args.meta, "r") as f:
metadata = json.load(f)
all_files = []
exceptions = {}
if args.recursive:
for root, dirs, files in walk(args.path):
try:
new = _handle_pif(root, args.format, args.converter_arguments, enrichment_args, metadata, ingest_manager, path_replace)
all_files.extend(new)
except Exception as err:
exceptions[root] = err
else:
all_files.extend(_handle_pif(args.path, args.format, args.converter_arguments, enrichment_args, metadata, ingest_manager, path_replace))
if len(all_files) == 0 and len(exceptions) > 0:
raise ValueError("Unable to parse any subdirectories. Exceptions:\n{}".format(
"\n".join(["{}: {}".format(k, str(v)) for k, v in exceptions.items()]))
)
with open("ingestor.log", "w") as f:
f.write("Exceptions:\n")
for root, err in exceptions.items():
f.write("{}: {}\n".format(root, str(err)))
# Upload the pif and associated files
if args.dataset:
upload(all_files, args.dataset)
if args.zip:
if args.zip[-4:] == ".zip":
zipname = args.zip
else:
zipname = args.zip + ".zip"
create_package(all_files, zipname, format="zip")
if args.tar:
if args.tar[-4:] == ".tar":
tarname = args.tar
else:
tarname = args.tar + ".tar"
create_package(all_files, tarname, format="tar")
| 31.428571
| 144
| 0.626136
|
4d2ba47355e11ed59dbcdea8e4be78ae58de329b
| 14,318
|
py
|
Python
|
Packs/Infinipoint/Integrations/Infinipoint/Infinipoint.py
|
coralogix/content
|
bba0fb1fdaf44a09efafe268ff610215cb698977
|
[
"MIT"
] | null | null | null |
Packs/Infinipoint/Integrations/Infinipoint/Infinipoint.py
|
coralogix/content
|
bba0fb1fdaf44a09efafe268ff610215cb698977
|
[
"MIT"
] | 1
|
2020-07-29T21:48:58.000Z
|
2020-07-29T21:48:58.000Z
|
Packs/Infinipoint/Integrations/Infinipoint/Infinipoint.py
|
coralogix/content
|
bba0fb1fdaf44a09efafe268ff610215cb698977
|
[
"MIT"
] | null | null | null |
from CommonServerPython import *
from typing import Any, Dict, List, Optional, cast
''' IMPORTS '''
import jwt
import math
import dateparser
from datetime import timezone
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBAL VARS '''
BASE_URL = "https://console.infinipoint.io"
MAX_INCIDENTS_TO_FETCH = 1000
COMMANDS_CONFIG = {
"infinipoint-get-assets-programs": {
"args": {
"name": "contains",
"device_risk": "contains",
"publisher": "contains",
"version": "contains"
},
"route": "/api/assets/programs",
"outputs_prefix": "Infinipoint.Assets.Programs",
"outputs_key_field": "name"
},
"infinipoint-get-vulnerable-devices": {
"args": {
"device_os": "=",
"device_risk": ">="
},
"route": "/api/vulnerability/devices",
"outputs_prefix": "Infinipoint.Vulnerability.Devices",
"outputs_key_field": "$host"
},
"infinipoint-get-cve": {
"args": {
"cve_id": "="
},
"route": "/api/vulnerability/{cve_id}/details",
"outputs_prefix": "Infinipoint.Cve.Details",
"outputs_key_field": "ReportID",
"pagination": False,
"get_req": True
},
"infinipoint-get-device": {
"args": {
"host": "contains",
"osType": "=",
"osName": "contains",
"status": "=",
"agentVersion": "="
},
"route": "/api/devices",
"outputs_prefix": "Infinipoint.Devices",
"outputs_key_field": "osName"
},
"infinipoint-get-tag": {
"args": {
"name": "contains"
},
"route": "/api/tags",
"outputs_prefix": "Infinipoint.Tags",
"outputs_key_field": "tagId"
},
"infinipoint-get-networks": {
"args": {
"alias": "=",
"cidr": "="
},
"route": "/api/networks",
"outputs_prefix": "Infinipoint.Networks.Info",
"outputs_key_field": "alias"
},
"infinipoint-get-assets-hardware": {
"args": {
"host": "contains",
"os_type": "contains"
},
"route": "/api/assets/hardware",
"outputs_prefix": "Infinipoint.Assets.Hardware",
"outputs_key_field": "$host"
},
"infinipoint-get-assets-cloud": {
"args": {
"host": "contains",
"os_type": "contains",
"source": "contains"
},
"route": "/api/assets/cloud",
"outputs_prefix": "Infinipoint.Assets.Cloud",
"outputs_key_field": "$host"
},
"infinipoint-get-assets-users": {
"args": {
"host": "contains",
"username": "contains"
},
"route": "/api/assets/users",
"outputs_prefix": "Infinipoint.Assets.User",
"outputs_key_field": "$host"
},
"infinipoint-get-queries": {
"args": {
"name": "contains"
},
"route": "/api/all-scripts/search",
"outputs_prefix": "Infinipoint.Scripts.Search",
"outputs_key_field": "actionId"
},
"infinipoint-run-queries": {
"args": {
"id": "contains",
"target": "contains"
},
"route": "/api/all-scripts/execute",
"outputs_prefix": "Infinipoint.Scripts.execute",
"outputs_key_field": "actionId",
"pagination": False,
"pass_args": True
},
"infinipoint-get-events": {
"args": {
"offset": "contains",
"limit": "contains"
},
"route": "/api/demisto/events",
"outputs_prefix": "Infinipoint.Compliance.Incidents",
"outputs_key_field": "deviceID",
"pagination": False,
"pass_args": True
},
"infinipoint-get-device-details": {
"args": {
"discoveryId": "contains"
},
"route": "/api/discover/details/{discoveryId}",
"outputs_prefix": "Infinipoint.Device.Details",
"outputs_key_field": "$device",
"pagination": False,
"get_req": True,
},
"infinipoint-get-action": {
"args": {
"action_id": "contains"
},
"route": "/api/responses/{action_id}",
"outputs_prefix": "Infinipoint.Responses",
"outputs_key_field": "$host",
"format_route": True
},
"infinipoint-get-compliance-status": {
"args": {
"device_id": "="
},
"route": "/api/compliance/device/{device_id}",
"outputs_prefix": "Infinipoint.Compliance.Device",
"outputs_key_field": "success",
"pagination": False,
"get_req": True
}
}
class Client(BaseClient):
def call_command(self, url_suffix: str, args: Dict[str, Any], pagination=True, page_index=0, method='POST') \
-> Dict[str, Any]:
"""
        Send a single request to the Infinipoint API, adding the page index when pagination is enabled.
"""
if args and pagination:
args['page'] = page_index
return self._http_request(
method=method,
url_suffix=url_suffix,
json_data=args
)
def call_api(self, route: str, rules, pagination=True, condition='AND', method='POST'):
"""
        Loop over pages when the total item count is bigger than the page size (100).
"""
if not pagination:
res = self.call_command(route, rules, pagination=pagination, method=method)
return res
else:
query = {
'pageSize': 100,
'page': 0,
'ruleSet': {
'condition': condition,
'rules': rules
}
}
results: List[Dict[str, Any]] = []
res = self.call_command(route, query, method=method)
results = results + res['items']
for i in range(1, math.ceil(res['itemsTotal'] / 100)):
res = self.call_command(route, query, page_index=i, method=method)
results = results + res['items']
return results
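# Hedged illustration (added for clarity, derived from call_api above): a
# paginated request body sent to a search route looks like
# {"pageSize": 100, "page": 0,
#  "ruleSet": {"condition": "AND",
#              "rules": [{"field": "host", "operator": "contains", "value": "srv"}]}}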
'''HELPER FUNCTIONS'''
def get_auth_headers(access_key, private_key):
"""
    Sign a short-lived JWT with the integration's ES256 private key and return
    request headers that carry it as a Bearer authorization token.
"""
try:
payload = {
"iat": int(time.time()),
"sub": access_key
}
token = jwt.encode(payload, private_key.replace('\\n', '\n'), 'ES256').decode("utf-8")
return {"Content-Type": "application/json",
"Authorization": f"Bearer {token}"}
except Exception as e:
return_error(f"Error while signing JWT token - check your private/access keys!\nError message:\n{e}")
def arg_to_timestamp(arg: Any, arg_name: str, required: bool = False) -> Optional[int]:
if arg is None:
if required is True:
raise ValueError(f'Missing "{arg_name}"')
return None
if isinstance(arg, str) and arg.isdigit():
return int(arg)
if isinstance(arg, str):
date = dateparser.parse(arg, settings={'TIMEZONE': 'UTC', 'RETURN_AS_TIMEZONE_AWARE': True})
if date is None:
# if d is None it means dateparser failed to parse it
raise ValueError(f'Invalid date: {arg_name}')
return int(date.timestamp())
if isinstance(arg, (int, float)):
# Convert to int if the input is a float
return int(arg)
raise ValueError(f'Invalid date: "{arg_name}"')
def arg_to_int(arg: Any, arg_name: str, required: bool = False) -> Optional[int]:
if arg is None:
if required is True:
raise ValueError(f'Missing "{arg_name}"')
return None
if isinstance(arg, str):
if arg.isdigit():
return int(arg)
raise ValueError(f'Invalid number: "{arg_name}"="{arg}"')
if isinstance(arg, int):
return arg
raise ValueError(f'Invalid number: "{arg_name}"')
def fetch_incidents(client, last_run: Dict[str, int], first_fetch_time: Optional[int]):
max_results = arg_to_int(
arg=demisto.params().get('max_fetch'),
arg_name='max_fetch',
required=False
)
if not max_results or max_results > MAX_INCIDENTS_TO_FETCH:
max_results = MAX_INCIDENTS_TO_FETCH
last_fetch = last_run.get('last_fetch', None)
subscription = demisto.params().get('incident_type', ["event", "alert"])
if last_fetch is None:
last_fetch = first_fetch_time
else:
last_fetch = int(last_fetch)
latest_created_time = cast(int, last_fetch)
incidents: List[Dict[str, Any]] = []
args = {
'limit': max_results,
'offset': last_fetch
}
alerts = infinipoint_command(client, args, COMMANDS_CONFIG['infinipoint-get-events'])
if alerts:
for alert in alerts.outputs:
if alert.get("subscription") in subscription:
incident_created_epoch_time = int(alert.get('timestamp', '0'))
incident_created_time = datetime.fromtimestamp(int(alert.get('timestamp', '0')), timezone.utc)
incident = {
'name': f'Infinipoint {alert.get("name")}',
'type': f'Infinipoint {alert.get("type")}',
'occurred': incident_created_time.isoformat(),
'rawJSON': json.dumps(alert.get('rawJSON'))
}
incidents.append(incident)
if incident_created_epoch_time > latest_created_time:
latest_created_time = incident_created_epoch_time
next_run = {'last_fetch': latest_created_time}
demisto.setLastRun(next_run)
demisto.incidents(incidents)
'''MAIN FUNCTIONS'''
def test_module(route, base_url, verify_ssl, headers):
    """Tests API connectivity and authentication.
    A successful (HTTP 200) response indicates that the signed request reached
    the service and that the integration is configured correctly.
"""
res = requests.request(
"POST",
base_url + route,
headers=headers,
        verify=verify_ssl
)
res.raise_for_status()
def infinipoint_command(client: Client, args=None, optional_args=None, pagination=True):
rules = None
cve = []
method = "POST"
# Cancel pagination if necessary
if "pagination" in optional_args:
pagination = optional_args['pagination']
# Pass arguments as is
if "pass_args" in optional_args:
rules = args
# Move request type to GET
elif "get_req" in optional_args:
optional_args['route'] = optional_args['route'].format(**args)
method = "GET"
# Change url - Post request
elif "format_route" in optional_args:
optional_args['route'] = optional_args['route'].format(**args)
else:
rules = []
for k, v in optional_args['args'].items():
if args.get(k):
rules.append({'field': k, "operator": v, "value": f"{args[k]}"})
res = client.call_api(optional_args['route'], rules, pagination=pagination, method=method)
if res:
for node in res:
# Handle time format - convert to ISO from epoch
if '$time' in node and isinstance(node['$time'], int):
created_time = datetime.fromtimestamp(int(node.get('$time', '0')), timezone.utc)
node['$time'] = created_time.isoformat()
# CVE reputation
if "cve_id" in res:
cve = [Common.CVE(
id=res['cve_id'],
cvss=res['cve_dynamic_data']['base_metric_v2']['base_score'],
description=res['cve_description'],
published='',
modified=''
)]
return CommandResults(outputs_prefix=optional_args['outputs_prefix'],
outputs_key_field=optional_args['outputs_key_field'],
outputs=res,
indicators=cve)
def run_queries_command(client: Client, args: Dict, optional_args=None):
target = args.get('target')
node = {'id': args.get('id')}
if target:
node['target'] = {'ids': args.get('target')}
res = client.call_api(route=optional_args['route'], rules=node, pagination=False)
if res:
command_results = CommandResults(
outputs_prefix=optional_args['outputs_prefix'],
outputs_key_field=optional_args['outputs_key_field'],
outputs=res)
return command_results
''' EXECUTION '''
def main():
verify_ssl = not demisto.params().get('insecure', False)
access_key = demisto.params().get('access_key')
private_key = demisto.params().get('private_key')
first_fetch_time = arg_to_timestamp(arg=demisto.params().get('first_fetch', '3 days'),
arg_name='First fetch time', required=True)
proxy = demisto.params().get('proxy', False)
demisto.info(f'command is {demisto.command()}')
try:
headers = get_auth_headers(access_key, private_key)
client = Client(
base_url=BASE_URL,
verify=verify_ssl,
headers=headers,
proxy=proxy)
if demisto.command() == 'test-module':
test_module("/api/auth/health/", BASE_URL, verify_ssl, headers)
demisto.results('ok')
elif demisto.command() == 'fetch-incidents':
fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_time)
elif demisto.command() == "infinipoint-run-queries":
return_results(run_queries_command(client=client, args=demisto.args(),
optional_args=COMMANDS_CONFIG["infinipoint-run-queries"]))
elif demisto.command() in COMMANDS_CONFIG:
return_results(infinipoint_command(client=client, args=demisto.args(),
optional_args=COMMANDS_CONFIG[demisto.command()]))
except Exception as e:
err_msg = f'Error - Infinipoint Integration [{e}]'
return_error(err_msg, error=e)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
| 30.725322
| 113
| 0.563766
|
b80099a1c0ca87184e7dc440bc850de2421228cb
| 4,613
|
py
|
Python
|
data_generator_oct.py
|
mlaves/3doct-pose-dataset
|
ec067b95f5cb94f5f962ed79e952f0381e8aa2ae
|
[
"MIT"
] | 1
|
2020-07-06T13:36:05.000Z
|
2020-07-06T13:36:05.000Z
|
data_generator_oct.py
|
mlaves/3doct-pose-dataset
|
ec067b95f5cb94f5f962ed79e952f0381e8aa2ae
|
[
"MIT"
] | 1
|
2020-08-10T08:48:27.000Z
|
2020-08-10T09:24:04.000Z
|
data_generator_oct.py
|
mlaves/3doct-pose-dataset
|
ec067b95f5cb94f5f962ed79e952f0381e8aa2ae
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from skimage import io
import PIL
from torchvision import transforms
from scipy.ndimage.interpolation import zoom
from tqdm import tqdm
from glob import glob
class OCTDataset(Dataset):
"""
Loads the OCT dataset.
"""
def __init__(self, data_dir, resize_to=(256, 256), augment=False, preload=False, preloaded_data_from=None):
"""
        Initializes the data set from the given root directory.
        :param data_dir: directory containing the raw .npz volumes
        :param resize_to: target (height, width) of the projected images
        :param augment: if True, apply color-jitter augmentation in __getitem__
        :param preload: if True, load and project all volumes up front
        :param preloaded_data_from: optional OCTDataset whose preloaded data is reused
"""
self._resize_to = resize_to
self._data_dir = data_dir
self._augment = augment
self._preload = preload
# max values of output for normalization
self._max_vals = np.array([0.999612, 0.999535, 0.599804, 5.99884, 5.998696, 7.998165])
if not preloaded_data_from:
self._img_file_names = sorted(glob(data_dir + "/*.npz"))
self._imgs = [] # list of PILs or empty
self._labels = [] # list of normalized x,y pixel coordinates of tool base
if self._preload:
for fname in tqdm(self._img_file_names):
img, label = self._load_npz(fname)
img = self._argmax_project(img)
img = self._to_pil_and_resize(img, self._resize_to)
self._imgs.append(img)
self._labels.append(label/self._max_vals)
else:
if preloaded_data_from:
self._labels = preloaded_data_from._labels
self._imgs = preloaded_data_from._imgs
self._img_file_names = preloaded_data_from._img_file_names
self._resize_to = preloaded_data_from._resize_to
self._preload = True
@staticmethod
def _to_pil_and_resize(x, new_size):
trans_always1 = [
transforms.ToPILImage(),
transforms.Resize(new_size, interpolation=1),
]
trans = transforms.Compose(trans_always1)
x = trans(x)
return x
@staticmethod
def _argmax_project(x):
y = [np.argmax(x, axis=0), np.argmax(x, axis=1), np.argmax(x, axis=2)]
return np.stack(y, axis=-1).astype(np.uint8)
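    # Hedged illustration (added comment): for a cubic volume of shape
    # (N, N, N), argmax along each axis yields three (N, N) maps that are
    # stacked into an (N, N, 3) uint8 image, which the PIL/torchvision
    # transforms used elsewhere in this class can then resize and augment.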
@staticmethod
def _load_npz(file_name, rescale=True):
f = np.load(file_name)
img = f['data']
pos = f['pos']
img = img[8:] # crop top 8 rows due to reflection artifacts
min_shape = np.min(img.shape)
if rescale:
img = zoom(img,
zoom=(min_shape / img.shape[0],
min_shape / img.shape[1],
min_shape / img.shape[2]),
order=0)
img = img.transpose(2, 0, 1) # permute data as it is in FORTRAN order
return img, pos
def __len__(self):
return len(self._img_file_names)
def __getitem__(self, idx):
if self._preload:
x = self._imgs[idx]
y = np.array(self._labels[idx], dtype=np.float32)
else:
x, label = self._load_npz(self._img_file_names[idx])
label = label/self._max_vals
x = self._argmax_project(x)
x = self._to_pil_and_resize(x, self._resize_to)
y = np.array(label, dtype=np.float32)
trans_augment = []
if self._augment:
trans_augment.append(transforms.RandomApply([transforms.ColorJitter(brightness=0.2, contrast=0.2,
saturation=0.2, hue=0.1)], p=0.5))
trans_always2 = [
transforms.ToTensor(),
]
trans = transforms.Compose(trans_augment + trans_always2)
x = trans(x)
return x, y
def demo():
from matplotlib import pyplot as plt
dataset_train = OCTDataset(data_dir='/media/data/oct_data_needle/data',
augment=False, preload=False)
data_loader_train = DataLoader(dataset_train, batch_size=1, shuffle=True)
print("Train dataset length:", len(data_loader_train))
for i_batch, b in enumerate(data_loader_train):
x, y = b
print(i_batch, y)
fig, ax = plt.subplots(3, 1)
ax[0].imshow(x.data.cpu().numpy()[0, 0])
ax[1].imshow(x.data.cpu().numpy()[0, 1])
ax[2].imshow(x.data.cpu().numpy()[0, 2])
fig.show()
ret = plt.waitforbuttonpress(0.0)
if ret:
break
plt.close()
if __name__ == "__main__":
demo()
| 31.59589
| 114
| 0.574463
|
8133f35a5f11c125c79edc8e8d9a3b3a5b4bd85f
| 2,783
|
py
|
Python
|
mkt/submit/serializers.py
|
spasovski/zamboni
|
c7f4714029e3b2dc918ddfc2103f8e051193c14d
|
[
"BSD-3-Clause"
] | 1
|
2017-07-14T19:22:39.000Z
|
2017-07-14T19:22:39.000Z
|
mkt/submit/serializers.py
|
imclab/olympia
|
35bc9c484e384bafab520ca8b5d5b0f8da5b62c0
|
[
"BSD-3-Clause"
] | 6
|
2021-02-02T23:08:48.000Z
|
2021-09-08T02:47:17.000Z
|
mkt/submit/serializers.py
|
imclab/olympia
|
35bc9c484e384bafab520ca8b5d5b0f8da5b62c0
|
[
"BSD-3-Clause"
] | null | null | null |
import json
from django.core.urlresolvers import reverse
from rest_framework import serializers
import amo
from addons.models import Preview
from files.models import FileUpload
from mkt.api.fields import ReverseChoiceField
from mkt.webapps.models import Webapp
class AppStatusSerializer(serializers.ModelSerializer):
status = ReverseChoiceField(choices_dict=amo.STATUS_CHOICES_API,
required=False)
disabled_by_user = serializers.BooleanField(required=False)
allowed_statuses = {
# You can push to the pending queue.
amo.STATUS_NULL: amo.STATUS_PENDING,
# You can push to public if you've been reviewed.
amo.STATUS_PUBLIC_WAITING: amo.STATUS_PUBLIC,
}
class Meta:
model = Webapp
fields = ('status', 'disabled_by_user')
def validate_status(self, attrs, source):
if not self.object:
raise serializers.ValidationError(u'Error getting app.')
        if source not in attrs:
return attrs
# An incomplete app's status can not be changed.
if not self.object.is_fully_complete():
raise serializers.ValidationError(
self.object.completion_error_msgs())
# Only some specific changes are possible depending on the app current
# status.
if (self.object.status not in self.allowed_statuses or
attrs[source] != self.allowed_statuses[self.object.status]):
raise serializers.ValidationError(
'App status can not be changed to the one you specified.')
return attrs
class FileUploadSerializer(serializers.ModelSerializer):
id = serializers.CharField(source='pk', read_only=True)
processed = serializers.BooleanField(read_only=True)
class Meta:
model = FileUpload
fields = ('id', 'processed', 'valid', 'validation')
def transform_validation(self, obj, value):
return json.loads(value) if value else value
class PreviewSerializer(serializers.ModelSerializer):
filetype = serializers.CharField()
id = serializers.IntegerField(source='pk')
image_url = serializers.CharField(read_only=True)
resource_uri = serializers.SerializerMethodField('get_resource_uri')
thumbnail_url = serializers.CharField(read_only=True)
class Meta:
model = Preview
fields = ['filetype', 'image_url', 'id', 'resource_uri',
'thumbnail_url']
def get_resource_uri(self, request):
if self.object is None:
return None
return reverse('app-preview-detail', kwargs={'pk': self.object.pk})
class SimplePreviewSerializer(PreviewSerializer):
class Meta(PreviewSerializer.Meta):
fields = ['image_url', 'thumbnail_url']
| 32.741176
| 78
| 0.681639
|
b962117885f14bf861751484f2830798a20cd9f0
| 3,914
|
py
|
Python
|
scripts/check_ifndefs.py
|
mohankumarSriram/katana
|
77c8a4bfba9f5808d63e1e936fc4b70c4830429d
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/check_ifndefs.py
|
mohankumarSriram/katana
|
77c8a4bfba9f5808d63e1e936fc4b70c4830429d
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/check_ifndefs.py
|
mohankumarSriram/katana
|
77c8a4bfba9f5808d63e1e936fc4b70c4830429d
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# check_ifndefs.py [-fix] <file or directory>...
#
# Check and optionally fix ifndef guards in files.
#
# The standard define pattern is:
#
# KATANA_<PATH_WITH_UNDERSCORES>_H_
#
# where path is the path to the header file without its extension and with
# the components "include", "src", "test", "tests" removed.
from __future__ import print_function
import argparse
import os
import re
import sys
import tempfile
import shutil
guard_pattern = re.compile(
r"""^\#ifndef \s* (.*)$ \n
^\#define \s* (.*)$""",
re.MULTILINE | re.VERBOSE,
)
def no_ext(path):
last_sep = path.rfind(os.path.sep)
if last_sep < 0:
return path
# Plus one for after the separator. Plus one again to discount filenames
# that are all extension, i.e., dotfiles.
first_dot = path.find(".", last_sep + 1 + 1)
if first_dot < 0:
return path
return path[:first_dot]
def make_guard(root, filename):
p = os.path.relpath(filename, root)
    # We can't use os.path.splitext directly because files may have multiple
# extensions (e.g., config.h.in).
p = no_ext(p)
p = p.upper()
p = p.replace("/INCLUDE/", "/", 1)
p = p.replace("/SRC/", "/", 1)
p = p.replace("/TESTS/", "/", 1)
p = p.replace("/TEST/", "/", 1)
# Just in case, remove characters that can't be part of macros
p = re.sub("[+\-*%=<>?~&\^|#:;{}.[\]]","", p)
# Differentiate between snake_case file names and directories
p = p.replace("_", "", -1)
p = p.replace("/", "_")
return "KATANA_{p}_H_".format(p=p)
def run_check(root, filename):
with open(filename, "r") as f:
contents = f.read()
m = guard_pattern.search(contents)
if not m:
return False
g1 = m.group(1)
g2 = m.group(2)
expected = make_guard(root, filename)
# Python2 is still kicking in some of our build environments. Minimize
# the difference between Python3.6 f-strings and Python2 string format.
d = {
"g1": g1,
"g2": g2,
"filename": filename,
"expected": expected,
}
if g1 != g2:
print("{filename}: ifndef {g1} not equal define {g2}".format(**d), file=sys.stderr)
return True
if g1 != expected:
print("{filename}: expected {expected} but found {g1}".format(**d), file=sys.stderr)
return True
return False
def run_fix(root, filename):
with open(filename, "r") as f:
contents = f.read()
expected = make_guard(root, filename)
replacement = "#ifndef {expected}\n#define {expected}".format(expected=expected)
contents, num_subs = guard_pattern.subn(replacement, contents, count=1)
if num_subs == 0:
return False
with tempfile.NamedTemporaryFile(mode="w", delete=False) as f:
f.write(contents)
shutil.move(f.name, filename)
return False
def main(files, root, fix):
if fix:
process = run_fix
else:
process = run_check
any_errors = False
for name in files:
if not os.path.isdir(name):
any_errors = process(root, name) or any_errors
for dir, _, subfiles in os.walk(name):
for subname in [os.path.join(dir, s) for s in subfiles]:
any_errors = process(root, subname) or any_errors
if any_errors:
return 1
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="check or fix ifndef guards in files")
parser.add_argument("files", nargs="+", help="files or directories to examine")
parser.add_argument("--root", help="root directory to determine import name", required=True)
parser.add_argument("-fix", help="fix files instead of checking them", action="store_true", default=False)
args = parser.parse_args()
sys.exit(main(**vars(args)))
| 30.107692
| 110
| 0.60884
|
8e1acfb9885e0d7c1d3e6d514027d8743057daf3
| 655
|
py
|
Python
|
thippiproject/modelapp/views.py
|
Anandgowda18/djangocomplete
|
c9eebe7834e404c73deca295289142a1e95ab573
|
[
"Apache-2.0"
] | null | null | null |
thippiproject/modelapp/views.py
|
Anandgowda18/djangocomplete
|
c9eebe7834e404c73deca295289142a1e95ab573
|
[
"Apache-2.0"
] | null | null | null |
thippiproject/modelapp/views.py
|
Anandgowda18/djangocomplete
|
c9eebe7834e404c73deca295289142a1e95ab573
|
[
"Apache-2.0"
] | 1
|
2021-08-31T10:20:49.000Z
|
2021-08-31T10:20:49.000Z
|
from django.shortcuts import render
from modelapp.forms import ProjectForm
from modelapp.models import Project
# Create your views here.
def index(request):
return render(request,'modelapp/index.html')
def listproject(request):
projectlist = Project.objects.all()
return render(request,'modelapp/listproject.html',{'project':projectlist})
def addproject(request):
form = ProjectForm()
if request.method == 'POST':
form = ProjectForm(request.POST)
if form.is_valid():
form.save()
return index(request)
print("nothing Saved")
return render(request,'modelapp/addproject.html',{'form':form})
| 31.190476
| 78
| 0.703817
|
29de8b74c57468ab33da4892d3062b4d16315555
| 473
|
py
|
Python
|
packages/python/plotly/plotly/validators/scatter/marker/line/_cmin.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/scatter/marker/line/_cmin.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/validators/scatter/marker/line/_cmin.py
|
mastermind88/plotly.py
|
efa70710df1af22958e1be080e105130042f1839
|
[
"MIT"
] | null | null | null |
import _plotly_utils.basevalidators
class CminValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="cmin", parent_name="scatter.marker.line", **kwargs):
super(CminValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
implied_edits=kwargs.pop("implied_edits", {"cauto": False}),
**kwargs,
)
| 36.384615
| 88
| 0.661734
|
fd4e1a94302a78bbbb774f79b70e2adafe08c31b
| 2,719
|
py
|
Python
|
exp/eval/calibration/calib_current.py
|
stefantkeller/VECSELsetup
|
c1740e170b54be40f7315808e451c0731a5d7f3b
|
[
"MIT"
] | null | null | null |
exp/eval/calibration/calib_current.py
|
stefantkeller/VECSELsetup
|
c1740e170b54be40f7315808e451c0731a5d7f3b
|
[
"MIT"
] | null | null | null |
exp/eval/calibration/calib_current.py
|
stefantkeller/VECSELsetup
|
c1740e170b54be40f7315808e451c0731a5d7f3b
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import errorvalues as ev # github.com/stefantkeller/errorvalues
from VECSELsetup.eval.varycolor import varycolor
from VECSELsetup.eval.gen_functions import extract
def plot_current_current(logfile,colors,textxy):
current_set, current = extract(logfile, identifiers=['Current'])
T = current_set.keys()[0] # we care only about one temperature, actually, for calibration Temp is irrelevant anyway
xymin, xymax = np.min(current_set[T]), np.max(current_set[T])
textx, texty = textxy[0], textxy[1]
# linreg
q0,m0 = ev.linreg(current_set[T],current[T].v(),current[T].e(),overwrite_zeroerrors=True)
# plot
plt.errorbar(current_set[T],current[T].v(),
yerr=current[T].e(),
c=colors[0],linestyle=' ')
plt.plot(current_set[T],m0.v()*current_set[T]+q0.v(),c=colors[1])
summary = r'(${0}$) $\times$ c_set + (${1}$) A'.format(m0.round(2),q0.round(2))
plt.text(textx,texty, summary,color='k')
return xymin, xymax
def check_current_integrity(logfiles,calibplot_c):
cols = varycolor(3*len(logfiles)) # 3 per to have decent distinction
textx, texty = 5, 15
xymin, xymax = 0, 0
plt.clf()
plt.subplot(1,1,1)
for ls in range(len(logfiles)):
xymin_, xymax_ = plot_current_current(logfiles[ls],cols[ls*3:],(textx,texty-2*ls))
if xymin_ < xymin: xymin = xymin_
if xymax_ > xymax: xymax = xymax_
xlim = [xymin,xymax]
ylim = xlim
title = 'Current -- set vs get'
plt.title(title)
plt.xlabel('Current set (A)')
plt.ylabel('Current actually applied (A)')
plt.xlim(xlim)
plt.ylim(ylim)
plt.grid('on')
#plt.show()
plt.savefig(calibplot_c)
print u'Current calibration finished:\n{0}'.format(calibplot_c)
def main():
logfile1 = '20141128_calib/1_pump_calib.csv' # thermal PM at sample pos
logfile2 = '20141128_calib/2_refl_calib.csv' # thermal PM behind refl lens, before beam sampler; sees what is incident on BS
logfile3 = '20141128_calib/3_emission_calib.csv' # PM_th behind lens without BS
logfile4 = '20141128_calib/4_emission_calib.csv' # PM_th with lens
rootpath = '/'.join(logfile1.split('/')[:-1])
lut_folder = '/LUTs'
calibplot_c = rootpath+lut_folder+'/calib_current.png'
#logfile1 = '../1_pump_calib.csv' # from here: take 'Current'
#logfile2 = '../2_refl_calib.csv' # ...
#logfile3 = '../3_emission_calib.csv' # ...
#logfile4 = '../4_emission_calib.csv' # ...
check_current_integrity([logfile1,logfile2,logfile3,logfile4],calibplot_c)
if __name__ == "__main__":
main()
| 32.369048
| 128
| 0.660169
|
7bf0f865298b372db0283eb6fe4dc77de1bde6c0
| 2,633
|
py
|
Python
|
nova/api/openstack/compute/plugins/v3/admin_password.py
|
SnabbCo/nova
|
d156d7fdf241569da2c27ae02ec88e6ef448f7e2
|
[
"Apache-2.0"
] | 2
|
2016-04-19T08:20:39.000Z
|
2021-10-03T16:00:37.000Z
|
nova/api/openstack/compute/plugins/v3/admin_password.py
|
SnabbCo/nova
|
d156d7fdf241569da2c27ae02ec88e6ef448f7e2
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/plugins/v3/admin_password.py
|
SnabbCo/nova
|
d156d7fdf241569da2c27ae02ec88e6ef448f7e2
|
[
"Apache-2.0"
] | 1
|
2020-07-24T06:34:03.000Z
|
2020-07-24T06:34:03.000Z
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import admin_password
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.i18n import _
ALIAS = "os-admin-password"
authorize = extensions.extension_authorizer('compute', 'v3:%s' % ALIAS)
class AdminPasswordController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(AdminPasswordController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
@wsgi.action('change_password')
@wsgi.response(204)
@extensions.expected_errors((400, 404, 409, 501))
@validation.schema(admin_password.change_password)
def change_password(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
password = body['change_password']['admin_password']
instance = common.get_instance(self.compute_api, context, id,
want_objects=True)
try:
self.compute_api.set_admin_password(context, instance, password)
except exception.InstancePasswordSetFailed as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as e:
raise common.raise_http_conflict_for_instance_invalid_state(
e, 'change_password')
except NotImplementedError:
msg = _("Unable to set password on instance")
raise exc.HTTPNotImplemented(explanation=msg)
class AdminPassword(extensions.V3APIExtensionBase):
"""Admin password management support."""
name = "AdminPassword"
alias = ALIAS
version = 1
def get_resources(self):
return []
def get_controller_extensions(self):
controller = AdminPasswordController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
| 35.581081
| 79
| 0.703
|
b349e0fddaf66af7ac736508c20a795887abddb1
| 6,086
|
py
|
Python
|
src/scene_vis/vtk_wrapper/vtk_voxel_grid.py
|
salarim/scene_vis
|
8e146195599aaa7598137dd223e9ce2b9e0b25a3
|
[
"MIT"
] | 33
|
2019-07-16T19:52:43.000Z
|
2022-03-17T15:30:59.000Z
|
src/scene_vis/vtk_wrapper/vtk_voxel_grid.py
|
salarim/scene_vis
|
8e146195599aaa7598137dd223e9ce2b9e0b25a3
|
[
"MIT"
] | null | null | null |
src/scene_vis/vtk_wrapper/vtk_voxel_grid.py
|
salarim/scene_vis
|
8e146195599aaa7598137dd223e9ce2b9e0b25a3
|
[
"MIT"
] | 10
|
2021-12-25T06:36:18.000Z
|
2022-03-15T11:25:00.000Z
|
"""https://github.com/kujason/scene_vis"""
import numpy as np
import vtk
from vtk.util import numpy_support
class VtkVoxelGrid:
"""Display cubes from a vtkCubeSource to visualize voxels from a VoxelGrid object.
Scalar arrays such as height or point density can also be added and visualized.
"""
def __init__(self):
# Default Options
self.use_heights_as_scalars = True
# References to the converted numpy arrays to avoid seg faults
self.np_to_vtk_points = None
self.np_to_vtk_cells = None
self.scalar_dict = {}
# VTK Data
self.vtk_poly_data = vtk.vtkPolyData()
self.vtk_points = vtk.vtkPoints()
self.vtk_cells = vtk.vtkCellArray()
self.vtk_poly_data.SetPoints(self.vtk_points)
self.vtk_poly_data.SetVerts(self.vtk_cells)
self.vtk_poly_data.Modified()
# Cube Source
self.vtk_cube_source = vtk.vtkCubeSource()
# Glyph 3D
self.vtk_glyph_3d = vtk.vtkGlyph3D()
self.vtk_glyph_3d.SetSourceConnection(self.vtk_cube_source.GetOutputPort())
self.vtk_glyph_3d.SetInputData(self.vtk_poly_data)
self.vtk_glyph_3d.ScalingOff()
self.vtk_glyph_3d.Update()
# Mapper
self.vtk_poly_data_mapper = vtk.vtkPolyDataMapper()
self.vtk_poly_data_mapper.SetColorModeToDefault()
self.vtk_poly_data_mapper.SetScalarRange(0, 1.0)
self.vtk_poly_data_mapper.SetScalarVisibility(True)
self.vtk_poly_data_mapper.SetInputConnection(self.vtk_glyph_3d.GetOutputPort())
# Voxel Grid Actor
self.vtk_actor = vtk.vtkActor()
self.vtk_actor.SetMapper(self.vtk_poly_data_mapper)
def set_voxels(self, voxel_grid):
"""Sets the voxel positions to visualize
Args:
voxel_grid: VoxelGrid
"""
# Get voxels from VoxelGrid
voxels = voxel_grid.voxel_indices
num_voxels = len(voxels)
# Shift voxels based on extents and voxel size
voxel_positions = (voxels + voxel_grid.min_voxel_coord) * voxel_grid.voxel_size
voxel_positions += voxel_grid.voxel_size / 2.0
# Resize the cube source based on voxel size
self.vtk_cube_source.SetXLength(voxel_grid.voxel_size)
self.vtk_cube_source.SetYLength(voxel_grid.voxel_size)
self.vtk_cube_source.SetZLength(voxel_grid.voxel_size)
# Set the voxels
flattened_points = np.array(voxel_positions).flatten()
flattened_points = flattened_points.astype(np.float32)
self.np_to_vtk_points = numpy_support.numpy_to_vtk(
flattened_points, deep=True, array_type=vtk.VTK_TYPE_FLOAT32)
self.np_to_vtk_points.SetNumberOfComponents(3)
self.vtk_points.SetData(self.np_to_vtk_points)
# Save the heights as a scalar array
if self.use_heights_as_scalars:
self.set_scalar_array("Height", voxels.transpose()[1])
self.set_active_scalars("Height")
# Create cells, one per voxel, cells in the form: [length, point index]
cell_lengths = np.ones(num_voxels)
cell_indices = np.arange(0, num_voxels)
flattened_cells = np.array([cell_lengths, cell_indices]).transpose().flatten()
flattened_cells = flattened_cells.astype(np.int32)
# Convert list of cells to vtk format and set the cells
self.np_to_vtk_cells = numpy_support.numpy_to_vtk(
flattened_cells, deep=True, array_type=vtk.VTK_ID_TYPE)
self.np_to_vtk_cells.SetNumberOfComponents(2)
self.vtk_cells.SetCells(num_voxels, self.np_to_vtk_cells)
def set_scalar_array(self, scalar_name, scalars, scalar_range=None):
"""Sets a scalar array in the scalar_dict, which can be used
to modify the colouring of each voxel.
Use set_active_scalars to choose the scalar array to be visualized.
If a scalar range is not given, the scalar array will be set based on
the minimum and maximum scalar values.
Args:
scalar_name: Name of scalar array, used as dictionary key
scalars: 1D array of scalar values corresponding to each cell
scalar_range: (optional) Custom scalar range
"""
if scalar_range is not None:
range_min = scalar_range[0]
range_max = scalar_range[1]
if range_min == range_max:
raise ValueError("Scalar range maximum cannot equal minimum")
else:
# Remap to range
map_range = range_max - range_min
remapped_scalar_values = (scalars - range_min) / map_range
remapped_scalar_values = \
remapped_scalar_values.astype(np.float32)
else:
# Calculate scalar range if not specified
scalar_min = np.amin(scalars)
scalar_max = np.amax(scalars)
if scalar_min == scalar_max:
remapped_scalar_values = np.full(scalars.shape,
scalar_min,
dtype=np.float32)
else:
map_range = scalar_max - scalar_min
remapped_scalar_values = (scalars - scalar_min) / map_range
remapped_scalar_values = \
remapped_scalar_values.astype(np.float32)
# Convert numpy array to vtk format
vtk_scalar_array = numpy_support.numpy_to_vtk(
remapped_scalar_values, deep=True, array_type=vtk.VTK_TYPE_FLOAT32)
vtk_scalar_array.SetNumberOfComponents(1)
vtk_scalar_array.SetName(scalar_name)
# Add scalar array to the PolyData
self.vtk_poly_data.GetPointData().AddArray(vtk_scalar_array)
# Save the scalar array into a dict entry
self.scalar_dict[scalar_name] = vtk_scalar_array
def set_active_scalars(self, scalar_name):
"""Sets the active scalar array for display
"""
self.vtk_poly_data.GetPointData().SetActiveScalars(scalar_name)
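# Hedged usage sketch (not part of the original module): typical call order for
# this wrapper. "voxel_grid" stands in for a VoxelGrid object exposing the
# voxel_indices, min_voxel_coord and voxel_size attributes that set_voxels expects.
def _example_usage(voxel_grid):  # illustrative only
    vtk_voxel_grid = VtkVoxelGrid()
    vtk_voxel_grid.set_voxels(voxel_grid)
    vtk_voxel_grid.set_scalar_array(
        "PointDensity", np.ones(len(voxel_grid.voxel_indices)))
    vtk_voxel_grid.set_active_scalars("Height")
    return vtk_voxel_grid.vtk_actor  # add this actor to a vtkRenderer to display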
| 39.264516
| 87
| 0.656918
|
a6ecd7ae1fae9a91a8ea1c00e50fecbca91316bc
| 9,671
|
py
|
Python
|
src/prefect/agent/local/agent.py
|
nathaniel-md/prefect
|
467bc5b1dcd83716bd896eff549f6bceb59da8cf
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/prefect/agent/local/agent.py
|
nathaniel-md/prefect
|
467bc5b1dcd83716bd896eff549f6bceb59da8cf
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/prefect/agent/local/agent.py
|
nathaniel-md/prefect
|
467bc5b1dcd83716bd896eff549f6bceb59da8cf
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import os
import socket
import sys
from subprocess import STDOUT, Popen, DEVNULL
from typing import Iterable, List
from prefect import config
from prefect.agent import Agent
from prefect.environments.storage import GCS, S3, Azure, Local, GitHub
from prefect.serialization.storage import StorageSchema
from prefect.utilities.graphql import GraphQLResult
class LocalAgent(Agent):
"""
Agent which deploys flow runs locally as subprocesses. There are a range of kwarg
options to control information which may be provided to these subprocesses.
    Optional import paths may be specified to append dependency modules to the PYTHONPATH:
```
prefect agent start local --import-path "/usr/local/my_module" --import-path "~/other_module"
# Now the local scripts/packages my_module and other_module will be importable in
# the flow's subprocess
```
Environment variables may be set on the agent to be provided to each flow run's subprocess:
```
prefect agent start local --env MY_SECRET_KEY=secret --env OTHER_VAR=$OTHER_VAR
```
Args:
- name (str, optional): An optional name to give this agent. Can also be set through
the environment variable `PREFECT__CLOUD__AGENT__NAME`. Defaults to "agent"
- labels (List[str], optional): a list of labels, which are arbitrary string identifiers used by Prefect
Agents when polling for work
- env_vars (dict, optional): a dictionary of environment variables and values that will be set
on each flow run that this agent submits for execution
- max_polls (int, optional): maximum number of times the agent will poll Prefect Cloud for flow runs;
defaults to infinite
- agent_address (str, optional): Address to serve internal api at. Currently this is
just health checks for use by an orchestration layer. Leave blank for no api server (default).
- no_cloud_logs (bool, optional): Disable logging to a Prefect backend for this agent and all deployed flow runs
- import_paths (List[str], optional): system paths which will be provided to each Flow's runtime environment;
useful for Flows which import from locally hosted scripts or packages
- show_flow_logs (bool, optional): a boolean specifying whether the agent should re-route Flow run logs
to stdout; defaults to `False`
- hostname_label (boolean, optional): a boolean specifying whether this agent should auto-label itself
with the hostname of the machine it is running on. Useful for flows which are stored on the local
filesystem.
"""
def __init__(
self,
name: str = None,
labels: Iterable[str] = None,
env_vars: dict = None,
import_paths: List[str] = None,
show_flow_logs: bool = False,
hostname_label: bool = True,
max_polls: int = None,
agent_address: str = None,
no_cloud_logs: bool = False,
) -> None:
self.processes = set()
self.import_paths = import_paths or []
self.show_flow_logs = show_flow_logs
super().__init__(
name=name,
labels=labels,
env_vars=env_vars,
max_polls=max_polls,
agent_address=agent_address,
no_cloud_logs=no_cloud_logs,
)
hostname = socket.gethostname()
# Resolve common Docker hostname by using IP
if hostname == "docker-desktop":
hostname = socket.gethostbyname(hostname)
if hostname_label and (hostname not in self.labels):
assert isinstance(self.labels, list)
self.labels.append(hostname)
self.labels.extend(
[
"azure-flow-storage",
"gcs-flow-storage",
"s3-flow-storage",
"github-flow-storage",
]
)
self.logger.debug(f"Import paths: {self.import_paths}")
self.logger.debug(f"Show flow logs: {self.show_flow_logs}")
def heartbeat(self) -> None:
for process in list(self.processes):
if process.poll() is not None:
self.processes.remove(process)
if process.returncode:
self.logger.info(
"Process PID {} returned non-zero exit code".format(process.pid)
)
super().heartbeat()
def deploy_flow(self, flow_run: GraphQLResult) -> str:
"""
        Deploy a flow run on your local machine as a subprocess
Args:
- flow_run (GraphQLResult): A GraphQLResult flow run object
Returns:
- str: Information about the deployment
Raises:
- ValueError: if deployment attempted on unsupported Storage type
"""
self.logger.info(
"Deploying flow run {}".format(flow_run.id) # type: ignore
)
if not isinstance(
StorageSchema().load(flow_run.flow.storage), (Local, Azure, GCS, S3, GitHub)
):
self.logger.error(
"Storage for flow run {} is not a supported type.".format(flow_run.id)
)
raise ValueError("Unsupported Storage type")
env_vars = self.populate_env_vars(flow_run=flow_run)
current_env = os.environ.copy()
current_env.update(env_vars)
python_path = []
if current_env.get("PYTHONPATH"):
python_path.append(current_env.get("PYTHONPATH"))
python_path.append(os.getcwd())
if self.import_paths:
python_path += self.import_paths
current_env["PYTHONPATH"] = ":".join(python_path)
stdout = sys.stdout if self.show_flow_logs else DEVNULL
# note: we will allow these processes to be orphaned if the agent were to exit
# before the flow runs have completed. The lifecycle of the agent should not
# dictate the lifecycle of the flow run. However, if the user has elected to
# show flow logs, these log entries will continue to stream to the users terminal
# until these child processes exit, even if the agent has already exited.
p = Popen(
["prefect", "execute", "cloud-flow"],
stdout=stdout,
stderr=STDOUT,
env=current_env,
)
self.processes.add(p)
self.logger.debug(
"Submitted flow run {} to process PID {}".format(flow_run.id, p.pid)
)
return "PID: {}".format(p.pid)
def populate_env_vars(self, flow_run: GraphQLResult) -> dict:
"""
Populate metadata and variables in the environment variables for a flow run
Args:
- flow_run (GraphQLResult): A flow run object
Returns:
- dict: a dictionary representing the populated environment variables
"""
all_vars = {
"PREFECT__CLOUD__API": config.cloud.api,
"PREFECT__CLOUD__AUTH_TOKEN": self.client._api_token,
"PREFECT__CLOUD__AGENT__LABELS": str(self.labels),
"PREFECT__CONTEXT__FLOW_RUN_ID": flow_run.id, # type: ignore
"PREFECT__CONTEXT__FLOW_ID": flow_run.flow.id, # type: ignore
"PREFECT__CLOUD__USE_LOCAL_SECRETS": "false",
"PREFECT__LOGGING__LOG_TO_CLOUD": str(self.log_to_cloud).lower(),
"PREFECT__LOGGING__LEVEL": "DEBUG",
"PREFECT__ENGINE__FLOW_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudFlowRunner",
"PREFECT__ENGINE__TASK_RUNNER__DEFAULT_CLASS": "prefect.engine.cloud.CloudTaskRunner",
**self.env_vars,
}
return {k: v for k, v in all_vars.items() if v is not None}
@staticmethod
def generate_supervisor_conf(
token: str = None,
labels: Iterable[str] = None,
import_paths: List[str] = None,
show_flow_logs: bool = False,
) -> str:
"""
Generate and output an installable supervisorctl configuration file for the agent.
Args:
- token (str, optional): A `RUNNER` token to give the agent
- labels (List[str], optional): a list of labels, which are arbitrary string
identifiers used by Prefect Agents when polling for work
- import_paths (List[str], optional): system paths which will be provided to each Flow's runtime environment;
useful for Flows which import from locally hosted scripts or packages
- show_flow_logs (bool, optional): a boolean specifying whether the agent should re-route Flow run logs
to stdout; defaults to `False`
Returns:
- str: A string representation of the generated configuration file
"""
# Use defaults if not provided
token = token or ""
labels = labels or []
import_paths = import_paths or []
with open(
os.path.join(os.path.dirname(__file__), "supervisord.conf"), "r"
) as conf_file:
conf = conf_file.read()
add_opts = ""
add_opts += "-t {token} ".format(token=token) if token else ""
add_opts += "-f " if show_flow_logs else ""
add_opts += (
" ".join("-l {label} ".format(label=label) for label in labels)
if labels
else ""
)
add_opts += (
" ".join("-p {path}".format(path=path) for path in import_paths)
if import_paths
else ""
)
conf = conf.replace("{{OPTS}}", add_opts)
return conf
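# Hedged usage sketch (illustrative only; the token and label below are placeholders):
# conf_str = LocalAgent.generate_supervisor_conf(
#     token="<RUNNER token>", labels=["gpu"], show_flow_logs=True)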
if __name__ == "__main__":
LocalAgent().start()
| 39.473469
| 121
| 0.622066
|
3b2e7c476ac1c10db917bc5f884505e8bc96d197
| 18,973
|
py
|
Python
|
android_webview/tools/cts_utils_test.py
|
sarang-apps/darshan_browser
|
173649bb8a7c656dc60784d19e7bb73e07c20daa
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
android_webview/tools/cts_utils_test.py
|
sarang-apps/darshan_browser
|
173649bb8a7c656dc60784d19e7bb73e07c20daa
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
android_webview/tools/cts_utils_test.py
|
sarang-apps/darshan_browser
|
173649bb8a7c656dc60784d19e7bb73e07c20daa
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import tempfile
import shutil
import sys
import unittest
import zipfile
sys.path.append(
os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, 'third_party',
'pymock'))
from mock import patch # pylint: disable=import-error
sys.path.append(
os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, 'third_party',
'catapult', 'common', 'py_utils'))
from py_utils import tempfile_ext
import cts_utils
CIPD_DATA = {}
CIPD_DATA['template'] = """# Copyright notice.
# cipd create instructions.
package: %s
description: Dummy Archive
data:
- file: %s
- file: %s
- file: %s
- file: %s
"""
CIPD_DATA['package'] = 'chromium/android_webview/tools/cts_archive'
CIPD_DATA['file1'] = 'arch1/platform1/file1.zip'
CIPD_DATA['file1_arch'] = 'arch1'
CIPD_DATA['file1_platform'] = 'platform1'
CIPD_DATA['file2'] = 'arch1/platform2/file2.zip'
CIPD_DATA['file3'] = 'arch2/platform1/file3.zip'
CIPD_DATA['file4'] = 'arch2/platform2/file4.zip'
CIPD_DATA['yaml'] = CIPD_DATA['template'] % (
CIPD_DATA['package'], CIPD_DATA['file1'], CIPD_DATA['file2'],
CIPD_DATA['file3'], CIPD_DATA['file4'])
CONFIG_DATA = {}
CONFIG_DATA['json'] = """{
"platform1": {
"arch": {
"arch1": {
"filename": "arch1/platform1/file1.zip",
"_origin": "https://a1.p1/f1.zip"
},
"arch2": {
"filename": "arch2/platform1/file3.zip",
"_origin": "https://a2.p1/f3.zip"
}
},
"test_runs": [
{
"apk": "p1/test.apk"
}
]
},
"platform2": {
"arch": {
"arch1": {
"filename": "arch1/platform2/file2.zip",
"_origin": "https://a1.p2/f2.zip"
},
"arch2": {
"filename": "arch2/platform2/file4.zip",
"_origin": "https://a2.p2/f4.zip"
}
},
"test_runs": [
{
"apk": "p2/test1.apk"
},
{
"apk": "p2/test2.apk"
}
]
}
}
"""
CONFIG_DATA['origin11'] = 'https://a1.p1/f1.zip'
CONFIG_DATA['base11'] = 'f1.zip'
CONFIG_DATA['file11'] = 'arch1/platform1/file1.zip'
CONFIG_DATA['origin12'] = 'https://a2.p1/f3.zip'
CONFIG_DATA['base12'] = 'f3.zip'
CONFIG_DATA['file12'] = 'arch2/platform1/file3.zip'
CONFIG_DATA['apk1'] = 'p1/test.apk'
CONFIG_DATA['origin21'] = 'https://a1.p2/f2.zip'
CONFIG_DATA['base21'] = 'f2.zip'
CONFIG_DATA['file21'] = 'arch1/platform2/file2.zip'
CONFIG_DATA['origin22'] = 'https://a2.p2/f4.zip'
CONFIG_DATA['base22'] = 'f4.zip'
CONFIG_DATA['file22'] = 'arch2/platform2/file4.zip'
CONFIG_DATA['apk2a'] = 'p2/test1.apk'
CONFIG_DATA['apk2b'] = 'p2/test2.apk'
DEPS_DATA = {}
DEPS_DATA['template'] = """deps = {
'src/android_webview/tools/cts_archive': {
'packages': [
{
'package': '%s',
'version': '%s',
},
],
'condition': 'checkout_android',
'dep_type': 'cipd',
},
}
"""
DEPS_DATA['revision'] = 'ctsarchiveversion'
DEPS_DATA['deps'] = DEPS_DATA['template'] % (CIPD_DATA['package'],
DEPS_DATA['revision'])
SUITES_DATA = {}
SUITES_DATA['template'] = """{
# Test suites.
'basic_suites': {
'suite1': {
'webview_cts_tests': {
'swarming': {
'shards': 2,
'cipd_packages': [
{
"cipd_package": 'chromium/android_webview/tools/cts_archive',
'location': 'android_webview/tools/cts_archive',
'revision': '%s',
}
]
},
},
},
'suite2': {
'webview_cts_tests': {
'swarming': {
'shards': 2,
'cipd_packages': [
{
"cipd_package": 'chromium/android_webview/tools/cts_archive',
'location': 'android_webview/tools/cts_archive',
'revision': '%s',
}
]
},
},
},
}
}"""
SUITES_DATA['pyl'] = SUITES_DATA['template'] % (DEPS_DATA['revision'],
DEPS_DATA['revision'])
GENERATE_BUILDBOT_JSON = os.path.join('testing', 'buildbot',
'generate_buildbot_json.py')
_CIPD_REFERRERS = [
'DEPS', os.path.join('testing', 'buildbot', 'test_suites.pyl')
]
# Used by check_tempdir.
with tempfile.NamedTemporaryFile() as _f:
_TEMP_DIR = os.path.dirname(_f.name) + os.path.sep
class FakeCIPD(object):
"""Fake CIPD service that supports create and ensure operations."""
_ensure_regex = r'\$ParanoidMode CheckIntegrity[\n\r]+' \
r'@Subdir ([\w/-]+)[\n\r]+' \
r'([\w/-]+) ([\w:]+)[\n\r]*'
_package_json = """{
"result": {
"package": "%s",
"instance_id": "%s"
}
}"""
def __init__(self):
self._yaml = {}
self._fake_version = 0
self._latest_version = {}
def add_package(self, package_def, version):
"""Adds a version, which then becomes available for ensure operations.
Args:
package_def: path to package definition in cipd yaml format. The
contents of each file will be set to the file name string.
version: cipd version
Returns:
json string with same format as that of cipd ensure -json-output
"""
with open(package_def) as def_file:
yaml_dict = cts_utils.CTSCIPDYaml.parse(def_file.readlines())
package = yaml_dict['package']
if package not in self._yaml:
self._yaml[package] = {}
if version in self._yaml[package]:
raise Exception('Attempting to add existing version: ' + version)
self._yaml[package][version] = {}
self._yaml[package][version]['yaml'] = yaml_dict
self._latest_version[package] = version
return self._package_json % (yaml_dict['package'], version)
def get_package(self, package, version):
"""Gets the yaml dict of the package at version
Args:
package: name of cipd package
version: version of cipd package
Returns:
Dictionary of the package in cipd yaml format
"""
return self._yaml[package][version]['yaml']
def get_latest_version(self, package):
return self._latest_version.get(package)
def create(self, package_def, output=None):
"""Implements cipd create -pkg-def <pakcage_def> [-json-output <path>]
Args:
package_def: path to package definition in cipd yaml format. The
contents of each file will be set to the file name string.
output: output file to write json formatted result
Returns:
json string with same format as that of cipd ensure -json-output
"""
version = 'fake_version_' + str(self._fake_version)
json_result = self.add_package(package_def, version)
self._fake_version += 1
if output:
writefile(json_result, output)
return version
def ensure(self, ensure_root, ensure_file):
"""Implements cipd ensure -root <ensure_root> -ensure-file <ensure_file>
Args:
ensure_root: Base directory to copy files to
ensure_file: Path to the cipd ensure file specifying the package version
Raises:
Exception if package and/or version was not previously added
or if ensure file format is not as expected.
"""
ensure_contents = readfile(ensure_file)
match = re.match(self._ensure_regex, ensure_contents)
if match:
subdir = match.group(1)
package = match.group(2)
version = match.group(3)
if package not in self._yaml:
raise Exception('Package not found: ' + package)
if version not in self._yaml[package]:
raise Exception('Version not found: ' + version)
else:
raise Exception('Ensure file not recognized: ' + ensure_contents)
for file_name in [e['file'] for e in \
self._yaml[package][version]['yaml']['data']]:
writefile(file_name,
os.path.join(os.path.abspath(ensure_root), subdir, file_name))
class FakeRunCmd(object):
"""Fake RunCmd that can perform cipd and cp operstions."""
def __init__(self, cipd=None):
self._cipd = cipd
def run_cmd(self, args):
"""Implement devil.utils.cmd_helper.RunCmd.
This doesn't implement cwd kwarg since it's not used by cts_utils
Args:
args: list of args
"""
if (len(args) == 6 and args[:3] == ['cipd', 'ensure', '-root']
and args[4] == '-ensure-file'):
# cipd ensure -root <root> -ensure-file <file>
check_tempdir(os.path.dirname(args[3]))
self._cipd.ensure(args[3], args[5])
elif (len(args) == 6 and args[:3] == ['cipd', 'create', '-pkg-def']
and args[4] == '-json-output'):
# cipd create -pkg-def <def file> -json-output <output file>
check_tempdir(os.path.dirname(args[5]))
self._cipd.create(args[3], args[5])
elif len(args) == 4 and args[:2] == ['cp', '--reflink=never']:
# cp --reflink=never <src> <dest>
check_tempdir(os.path.dirname(args[3]))
shutil.copyfile(args[2], args[3])
elif len(args) == 3 and args[0] == 'cp':
# cp <src> <dest>
check_tempdir(os.path.dirname(args[2]))
shutil.copyfile(args[1], args[2])
else:
raise Exception('Unknown cmd: ' + str(args))
class CTSUtilsTest(unittest.TestCase):
"""Unittests for the cts_utils.py."""
def testCTSCIPDYamlSanity(self):
yaml_data = cts_utils.CTSCIPDYaml(cts_utils.CIPD_PATH)
self.assertTrue(yaml_data.get_package())
self.assertTrue(yaml_data.get_files())
with tempfile.NamedTemporaryFile() as outputFile:
yaml_data.write(outputFile.name)
with open(cts_utils.CIPD_PATH) as cipdFile:
self.assertEqual(cipdFile.readlines(), outputFile.readlines())
def testCTSCIPDYamlOperations(self):
with tempfile.NamedTemporaryFile() as yamlFile:
yamlFile.writelines(CIPD_DATA['yaml'])
yamlFile.flush()
yaml_data = cts_utils.CTSCIPDYaml(yamlFile.name)
self.assertEqual(CIPD_DATA['package'], yaml_data.get_package())
self.assertEqual([
CIPD_DATA['file1'], CIPD_DATA['file2'], CIPD_DATA['file3'],
CIPD_DATA['file4']
], yaml_data.get_files())
yaml_data.append_file('arch2/platform3/file5.zip')
self.assertEqual([
CIPD_DATA['file1'], CIPD_DATA['file2'], CIPD_DATA['file3'],
CIPD_DATA['file4']
] + ['arch2/platform3/file5.zip'], yaml_data.get_files())
yaml_data.remove_file(CIPD_DATA['file1'])
self.assertEqual([
CIPD_DATA['file2'], CIPD_DATA['file3'], CIPD_DATA['file4'],
'arch2/platform3/file5.zip'
], yaml_data.get_files())
with tempfile.NamedTemporaryFile() as yamlFile:
yaml_data.write(yamlFile.name)
new_yaml_contents = readfile(yamlFile.name)
self.assertEqual(
CIPD_DATA['template'] %
(CIPD_DATA['package'], CIPD_DATA['file2'], CIPD_DATA['file3'],
CIPD_DATA['file4'], 'arch2/platform3/file5.zip'), new_yaml_contents)
@patch('devil.utils.cmd_helper.RunCmd')
def testCTSCIPDDownload(self, run_mock):
fake_cipd = FakeCIPD()
fake_run_cmd = FakeRunCmd(cipd=fake_cipd)
run_mock.side_effect = fake_run_cmd.run_cmd
with tempfile.NamedTemporaryFile() as yamlFile,\
tempfile_ext.NamedTemporaryDirectory() as tempDir:
yamlFile.writelines(CIPD_DATA['yaml'])
yamlFile.flush()
fake_version = fake_cipd.create(yamlFile.name)
archive = cts_utils.CTSCIPDYaml(yamlFile.name)
cts_utils.cipd_download(archive, fake_version, tempDir)
self.assertEqual(CIPD_DATA['file1'],
readfile(os.path.join(tempDir, CIPD_DATA['file1'])))
self.assertEqual(CIPD_DATA['file2'],
readfile(os.path.join(tempDir, CIPD_DATA['file2'])))
def testCTSConfigSanity(self):
cts_config = cts_utils.CTSConfig()
platforms = cts_config.get_platforms()
self.assertTrue(platforms)
platform = platforms[0]
archs = cts_config.get_archs(platform)
self.assertTrue(archs)
self.assertTrue(cts_config.get_cipd_zip(platform, archs[0]))
self.assertTrue(cts_config.get_origin(platform, archs[0]))
self.assertTrue(cts_config.get_apks(platform))
def testCTSConfig(self):
with tempfile.NamedTemporaryFile() as configFile:
configFile.writelines(CONFIG_DATA['json'])
configFile.flush()
cts_config = cts_utils.CTSConfig(configFile.name)
self.assertEquals(['platform1', 'platform2'], cts_config.get_platforms())
self.assertEquals(['arch1', 'arch2'], cts_config.get_archs('platform1'))
self.assertEquals(['arch1', 'arch2'], cts_config.get_archs('platform2'))
self.assertEquals('arch1/platform1/file1.zip',
cts_config.get_cipd_zip('platform1', 'arch1'))
self.assertEquals('arch2/platform1/file3.zip',
cts_config.get_cipd_zip('platform1', 'arch2'))
self.assertEquals('arch1/platform2/file2.zip',
cts_config.get_cipd_zip('platform2', 'arch1'))
self.assertEquals('arch2/platform2/file4.zip',
cts_config.get_cipd_zip('platform2', 'arch2'))
self.assertEquals('https://a1.p1/f1.zip',
cts_config.get_origin('platform1', 'arch1'))
self.assertEquals('https://a2.p1/f3.zip',
cts_config.get_origin('platform1', 'arch2'))
self.assertEquals('https://a1.p2/f2.zip',
cts_config.get_origin('platform2', 'arch1'))
self.assertEquals('https://a2.p2/f4.zip',
cts_config.get_origin('platform2', 'arch2'))
      self.assertEquals(['p1/test.apk'], cts_config.get_apks('platform1'))
      self.assertEquals(['p2/test1.apk', 'p2/test2.apk'],
                        cts_config.get_apks('platform2'))
def testFilterZip(self):
with tempfile_ext.NamedTemporaryDirectory() as workDir,\
cts_utils.chdir(workDir):
writefile('abc', 'a/b/one.apk')
writefile('def', 'a/b/two.apk')
writefile('ghi', 'a/b/three.apk')
movetozip(['a/b/one.apk', 'a/b/two.apk', 'a/b/three.apk'],
'downloaded.zip')
cts_utils.filterzip('downloaded.zip', ['a/b/one.apk', 'a/b/two.apk'],
'filtered.zip')
zf = zipfile.ZipFile('filtered.zip', 'r')
self.assertEquals(2, len(zf.namelist()))
self.assertEquals('abc', zf.read('a/b/one.apk'))
self.assertEquals('def', zf.read('a/b/two.apk'))
@patch('cts_utils.filterzip')
def testFilterCTS(self, filterzip_mock): # pylint: disable=no-self-use
with tempfile.NamedTemporaryFile() as configFile:
configFile.writelines(CONFIG_DATA['json'])
configFile.flush()
cts_config = cts_utils.CTSConfig(configFile.name)
cts_utils.filter_cts_file(cts_config, CONFIG_DATA['base11'], '/filtered')
filterzip_mock.assert_called_with(
CONFIG_DATA['base11'], [CONFIG_DATA['apk1']],
os.path.join('/filtered', CONFIG_DATA['base11']))
@patch('devil.utils.cmd_helper.RunCmd')
def testUpdateCIPDPackage(self, run_mock):
fake_cipd = FakeCIPD()
fake_run_cmd = FakeRunCmd(cipd=fake_cipd)
run_mock.side_effect = fake_run_cmd.run_cmd
with tempfile_ext.NamedTemporaryDirectory() as tempDir,\
cts_utils.chdir(tempDir):
writefile(CIPD_DATA['yaml'], 'cipd.yaml')
version = cts_utils.update_cipd_package('cipd.yaml')
uploaded = fake_cipd.get_package(CIPD_DATA['package'], version)
self.assertEquals(CIPD_DATA['package'], uploaded['package'])
uploaded_files = [e['file'] for e in uploaded['data']]
self.assertEquals(4, len(uploaded_files))
for i in range(1, 5):
self.assertTrue(CIPD_DATA['file' + str(i)] in uploaded_files)
def testChromiumRepoHelper(self):
with tempfile_ext.NamedTemporaryDirectory() as tempDir,\
cts_utils.chdir(tempDir):
setup_fake_repo('.')
helper = cts_utils.ChromiumRepoHelper(root_dir='.')
self.assertEquals(DEPS_DATA['revision'], helper.get_cipd_dependency_rev())
self.assertEquals(
os.path.join(tempDir, 'a', 'b'), helper.rebase('a', 'b'))
helper.update_cts_cipd_rev('newversion')
self.assertEquals('newversion', helper.get_cipd_dependency_rev())
expected_deps = DEPS_DATA['template'] % (CIPD_DATA['package'],
'newversion')
self.assertEquals(expected_deps, readfile(_CIPD_REFERRERS[0]))
expected_suites = SUITES_DATA['template'] % ('newversion', 'newversion')
self.assertEquals(expected_suites, readfile(_CIPD_REFERRERS[1]))
writefile('#deps not referring to cts cipd', _CIPD_REFERRERS[0])
with self.assertRaises(Exception):
helper.update_cts_cipd_rev('anothernewversion')
@patch('urllib.urlretrieve')
@patch('os.makedirs')
# pylint: disable=no-self-use
def testDownload(self, mock_makedirs, mock_retrieve):
t1 = cts_utils.download('http://www.download.com/file1.zip',
'/download_dir/file1.zip')
t2 = cts_utils.download('http://www.download.com/file2.zip',
'/download_dir/file2.zip')
t1.join()
t2.join()
mock_makedirs.assert_called_with('/download_dir')
mock_retrieve.assert_any_call('http://www.download.com/file1.zip',
'/download_dir/file1.zip')
mock_retrieve.assert_any_call('http://www.download.com/file2.zip',
'/download_dir/file2.zip')
def setup_fake_repo(repoRoot):
"""Populates various files needed for testing cts_utils.
Args:
    repoRoot: Root of the fake repo under which to write config files
"""
with cts_utils.chdir(repoRoot):
writefile(DEPS_DATA['deps'], cts_utils.DEPS_FILE)
writefile(CONFIG_DATA['json'],
os.path.join(cts_utils.TOOLS_DIR, cts_utils.CONFIG_FILE))
writefile(CIPD_DATA['yaml'],
os.path.join(cts_utils.TOOLS_DIR, cts_utils.CIPD_FILE))
writefile(SUITES_DATA['pyl'], cts_utils.TEST_SUITES_FILE)
def readfile(fpath):
"""Returns contents of file at fpath."""
with open(fpath) as f:
return f.read()
def writefile(contents, path):
"""Writes contents to file at path."""
dir_path = os.path.dirname(os.path.abspath(path))
if not os.path.isdir(dir_path):
os.makedirs(os.path.dirname(path))
with open(path, 'w') as f:
f.write(contents)
def movetozip(fileList, outputPath):
"""Move files in fileList to zip file at outputPath"""
with zipfile.ZipFile(outputPath, 'a') as zf:
for f in fileList:
zf.write(f)
os.remove(f)
def check_tempdir(path):
"""Check if directory at path is under tempdir.
Args:
path: path of directory to check
Raises:
AssertionError if directory is not under tempdir.
"""
abs_path = os.path.abspath(path) + os.path.sep
if abs_path[:len(_TEMP_DIR)] != _TEMP_DIR:
raise AssertionError(
'"%s" is not under tempdir "%s".' % (abs_path, _TEMP_DIR))
if __name__ == '__main__':
unittest.main()
| 34.812844
| 80
| 0.636852
|
c7605894fb9d49997d242a8d5d4958bff5ea51e1
| 4,215
|
py
|
Python
|
app/odl/odl_constants.py
|
kukkalli/orchestrator
|
0b53e3f95c0a886a739cf08d611ea76c958bc691
|
[
"Apache-2.0"
] | 1
|
2022-03-02T09:43:45.000Z
|
2022-03-02T09:43:45.000Z
|
app/odl/odl_constants.py
|
kukkalli/orchestrator
|
0b53e3f95c0a886a739cf08d611ea76c958bc691
|
[
"Apache-2.0"
] | null | null | null |
app/odl/odl_constants.py
|
kukkalli/orchestrator
|
0b53e3f95c0a886a739cf08d611ea76c958bc691
|
[
"Apache-2.0"
] | null | null | null |
import logging
from configuration_constants import ConfigurationConstants
LOG = logging.getLogger(__name__)
class ODLConstants(object):
# OpenDayLight Constants
TOPOLOGY = ConfigurationConstants.ODL_URL + "/restconf/operational/network-topology:network-topology"
NODES = ConfigurationConstants.ODL_URL + "/restconf/operational/opendaylight-inventory:nodes"
NODE = NODES + "/node/{}"
PORT = NODE + '/node-connector/{}'
HEADER = {"Content-Type": "application/json", "Accept": "application/json"}
PUT_FLOW_URL = ConfigurationConstants.ODL_URL + '/restconf/config/opendaylight-inventory:nodes/node/{' \
'node_id}/table/{table}/flow/{flow_id}'
PUT_XML_HEADER = {"content-type": "application/xml", "Accept": "application/json"}
ETHER_TYPE_ARP_MATCHER = "2054" # constant decimal value of 0x0806 that matches with only ARP frames
ETHER_TYPE_IP_MATCHER = "2048" # constant decimal value of 0x0800 used to match IP
ARP_REQUEST_OP = "1" # this opcode is for ARP request
ARP_REPLY_OP = "2" # This opcode is only for ARP reply
    # Important: the template string must not start with an empty line.
    # The following template is formatted correctly.
    # Note also that the placeholder keys inside {} use "_" instead of "-", because "-" is not
    # a valid character in Python format-field names.
FLOW_XML = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<flow xmlns="urn:opendaylight:flow:inventory">
<hard-timeout>{hard_timeout}</hard-timeout>
<idle-timeout>{idle_timeout}</idle-timeout>
<cookie>{cookie}</cookie>
<priority>{priority}</priority>
<id>{flow_id}</id>
<table_id>{table}</table_id>
<instructions>
<instruction>
<order>0</order>
<apply-actions>
<action>
<output-action>
<output-node-connector>{output_action}</output-node-connector>
</output-action>
<order>0</order>
</action>
</apply-actions>
</instruction>
</instructions>
</flow>
"""
# the original xml which also has the matching part.
FLOW_XML_backup = """<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<flow xmlns="urn:opendaylight:flow:inventory">
<hard-timeout>{hard_timeout}</hard-timeout>
<idle-timeout>{idle_timeout}</idle-timeout>
<cookie>{cookie}</cookie>
<priority>{priority}</priority>
<match>
<in-port>{in_port}</in-port>
</match>
<id>{flow_id}</id>
<table_id>{table}</table_id>
<instructions>
<instruction>
<order>0</order>
<apply-actions>
<action>
<output-action>
<output-node-connector>{output_action}</output-node-connector>
</output-action>
<order>0</order>
</action>
</apply-actions>
</instruction>
</instructions>
</flow>
"""
FLOW_JSON = """
{"flow-node-inventory:flow": [
{
"id": "{flow_id}",
"table_id": {table_id},
"idle-timeout": {idle_timeout},
"priority": {priority},
"hard-timeout": {hard_timeout},
"match": {
"ipv4-source": "{src_ip}",
"ipv4-destination": "{dst_ip}",
"ethernet-match": {
"ethernet-type": {
"type": 2048
}
}
},
"cookie": 1,
"instructions": {
"instruction": [
{
"order": 0,
"apply-actions": {
"action": [
{
"order": 0,
"output-action": {
"output-node-connector": "3"}
}
]
}
}
]
}
}
]
}
"""
| 35.420168
| 111
| 0.507948
|
f94f8f89918e073c83355e1548563f64c0d39e62
| 1,725
|
py
|
Python
|
challenges/Enslavednode_chat/poller/for-release/machine.py
|
pingjuiliao/cb-multios
|
64ededd0b87030eda7c40c4388a4ad8283712d8e
|
[
"MIT"
] | 473
|
2016-08-01T12:48:16.000Z
|
2022-03-09T18:13:14.000Z
|
challenges/Enslavednode_chat/poller/for-release/machine.py
|
pingjuiliao/cb-multios
|
64ededd0b87030eda7c40c4388a4ad8283712d8e
|
[
"MIT"
] | 71
|
2016-08-01T03:33:44.000Z
|
2022-03-09T18:37:04.000Z
|
challenges/Enslavednode_chat/poller/for-release/machine.py
|
pingjuiliao/cb-multios
|
64ededd0b87030eda7c40c4388a4ad8283712d8e
|
[
"MIT"
] | 121
|
2016-08-01T04:07:53.000Z
|
2022-03-07T11:08:09.000Z
|
#!/usr/bin/env python
import math
import random
import string
import sys
from generator.actions import Actions
class Machine(Actions):
WELCOME = (
"\nWelcome to the Enslavednode chat server!\n"
"The bots here love to talk, but they are shy...\n"
"You need to get the ball rolling.\n"
)
PROMPT = "you > "
def random_sentence(self):
s = ''
for i in xrange(random.randint(0, 16)):
for k in xrange(random.randint(0, 256)):
s += random.choice(string.letters)
s += ' '
return s
def start(self):
self.read(expect=self.WELCOME, length=len(self.WELCOME))
def read_until_prompt(self):
self.read(expect='.*', expect_format='pcre', delim=self.PROMPT)
def dosomething(self):
pass
def call_reverse(self):
to_rev = self.random_sentence()
inp = ' '.join(['/rev', to_rev])
self.write(inp + '\n')
self.read(expect='case > ' + to_rev[::-1], delim='\n')
def call_hello(self):
inp = ' '.join(['/hello', self.random_sentence()])
self.write(inp + '\n')
self.read(expect='case > Hello, you.', delim='\n')
def call_howsmart(self):
inp = ' '.join(['/howsmart', self.random_sentence()])
self.write(inp + '\n')
self.read(expect='case > I have an intelligence rating of:.*',
expect_format='pcre', delim='\n')
def call_pont(self):
inp = ' '.join(['/pont', self.random_sentence()])
self.write(inp + '\n')
def jabber(self):
self.write(self.random_sentence() + '\n')
def part(self):
self.write('/part\n')
self.read(expect='BYE!', delim='\n')
| 27.380952
| 71
| 0.561159
|
32a74f1612cab6ffa086975a113edb0c9ea235fd
| 741
|
py
|
Python
|
core/diagnostics.py
|
RobertoPrevato/PythonUtilities
|
9137e3c1e357e0c7b62c005ee0ab7efdfdaf7d94
|
[
"MIT"
] | 1
|
2019-06-09T12:12:59.000Z
|
2019-06-09T12:12:59.000Z
|
core/diagnostics.py
|
RobertoPrevato/PythonUtilities
|
9137e3c1e357e0c7b62c005ee0ab7efdfdaf7d94
|
[
"MIT"
] | null | null | null |
core/diagnostics.py
|
RobertoPrevato/PythonUtilities
|
9137e3c1e357e0c7b62c005ee0ab7efdfdaf7d94
|
[
"MIT"
] | null | null | null |
import time
class StopWatch:
def __init__(self, func=time.perf_counter):
self.elapsed = 0.0
self._func = func
self._start = None
def start(self):
if self._start is not None:
raise RuntimeError('Already started')
self._start = self._func()
def stop(self):
if self._start is None:
raise RuntimeError('Not started')
end = self._func()
self.elapsed += end - self._start
self._start = None
def reset(self):
self.elapsed = 0.0
@property
def running(self):
return self._start is not None
def __enter__(self):
self.start()
return self
def __exit__(self, *args):
self.stop()
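# Minimal usage sketch (illustrative, not part of the original module): time a block of work
# using the context-manager protocol defined above.
if __name__ == '__main__':
    watch = StopWatch()
    with watch:
        total = sum(range(1000000))  # arbitrary work to time
    print('elapsed seconds:', watch.elapsed)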
| 21.794118
| 49
| 0.569501
|
87d36e6153265bb390013a48850396f24f08c1c3
| 10,406
|
py
|
Python
|
statsmodels/sandbox/distributions/genpareto.py
|
yarikoptic/statsmodels
|
f990cb1a1ef0c9883c9394444e6f9d027efabec6
|
[
"BSD-3-Clause"
] | 34
|
2018-07-13T11:30:46.000Z
|
2022-01-05T13:48:10.000Z
|
venv/lib/python3.6/site-packages/statsmodels/sandbox/distributions/genpareto.py
|
HeyWeiPan/vnpy_crypto
|
844381797a475a01c05a4e162592a5a6e3a48032
|
[
"MIT"
] | 6
|
2015-08-28T16:59:03.000Z
|
2019-04-12T22:29:01.000Z
|
venv/lib/python3.6/site-packages/statsmodels/sandbox/distributions/genpareto.py
|
HeyWeiPan/vnpy_crypto
|
844381797a475a01c05a4e162592a5a6e3a48032
|
[
"MIT"
] | 28
|
2015-04-01T20:02:25.000Z
|
2021-07-03T00:09:28.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 12 14:59:03 2010
Warning: not tried out or tested yet, Done
Author: josef-pktd
"""
from __future__ import print_function
import numpy as np
from scipy import stats
from scipy.misc import comb
from scipy.stats.distributions import rv_continuous
from numpy import where, inf
from numpy import abs as np_abs
## Generalized Pareto with reversed sign of c as in literature
class genpareto2_gen(rv_continuous):
def _argcheck(self, c):
c = np.asarray(c)
self.b = where(c > 0, 1.0/np_abs(c), inf)
return where(c==0, 0, 1)
def _pdf(self, x, c):
Px = np.power(1-c*x,-1.0+1.0/c)
return Px
def _logpdf(self, x, c):
return (-1.0+1.0/c) * np.log1p(-c*x)
def _cdf(self, x, c):
return 1.0 - np.power(1-c*x,1.0/c)
def _ppf(self, q, c):
vals = -1.0/c * (np.power(1-q, c)-1)
return vals
def _munp(self, n, c):
k = np.arange(0,n+1)
val = (1.0/c)**n * np.sum(comb(n,k)*(-1)**k / (1.0+c*k),axis=0)
return where(c*n > -1, val, inf)
def _entropy(self, c):
if (c < 0):
return 1-c
else:
self.b = 1.0 / c
return rv_continuous._entropy(self, c)
genpareto2 = genpareto2_gen(a=0.0,name='genpareto',
longname="A generalized Pareto",
shapes='c',extradoc="""
Generalized Pareto distribution
genpareto2.pdf(x,c) = (1-c*x)**(-1+1/c)
for c != 0, and for x >= 0 for all c, and x < 1/abs(c) for c > 0.
"""
)
shape, loc, scale = 0.5, 0, 1
rv = np.arange(5)
quant = [0.01, 0.1, 0.5, 0.9, 0.99]
for method, x in [('pdf', rv),
('cdf', rv),
('sf', rv),
('ppf', quant),
('isf', quant)]:
print(getattr(genpareto2, method)(x, shape, loc, scale))
print(getattr(stats.genpareto, method)(x, -shape, loc, scale))
print(genpareto2.stats(shape, loc, scale, moments='mvsk'))
print(stats.genpareto.stats(-shape, loc, scale, moments='mvsk'))
print(genpareto2.entropy(shape, loc, scale))
print(stats.genpareto.entropy(-shape, loc, scale))
def paramstopot(thresh, shape, scale):
'''transform shape scale for peak over threshold
y = x-u|x>u ~ GPD(k, sigma-k*u) if x ~ GPD(k, sigma)
notation of de Zea Bermudez, Kotz
k, sigma is shape, scale
'''
return shape, scale - shape*thresh
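# Worked example (illustrative values): paramstopot(5, -0.5, 10)
# returns (-0.5, 10 - (-0.5)*5) = (-0.5, 12.5)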
def paramsfrompot(thresh, shape, scalepot):
return shape, scalepot + shape*thresh
def warnif(cond, msg):
if not cond:
print(msg, 'does not hold')
def meanexcess(thresh, shape, scale):
'''mean excess function of genpareto
assert are inequality conditions in de Zea Bermudez, Kotz
'''
warnif(shape > -1, 'shape > -1')
warnif(thresh >= 0, 'thresh >= 0') #make it weak inequality
warnif((scale - shape*thresh) > 0, '(scale - shape*thresh) > 0')
return (scale - shape*thresh) / (1 + shape)
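# Worked example matching the print below: meanexcess(5, -0.5, 10)
# = (10 - (-0.5)*5) / (1 + (-0.5)) = 12.5 / 0.5 = 25.0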
def meanexcess_plot(data, params=None, lidx=100, uidx=10, method='emp', plot=0):
if method == 'est':
#doesn't make much sense yet,
#estimate the parameters and use theoretical meanexcess
if params is None:
raise NotImplementedError
else:
pass #estimate parames
elif method == 'emp':
#calculate meanexcess from data
datasorted = np.sort(data)
meanexcess = (datasorted[::-1].cumsum())/np.arange(1,len(data)+1) - datasorted[::-1]
meanexcess = meanexcess[::-1]
if plot:
plt.plot(datasorted[:-uidx], meanexcess[:-uidx])
if not params is None:
shape, scale = params
plt.plot(datasorted[:-uidx], (scale - datasorted[:-uidx] * shape) / (1. + shape))
return datasorted, meanexcess
print(meanexcess(5, -0.5, 10))
print(meanexcess(5, -2, 10))
import matplotlib.pyplot as plt
data = genpareto2.rvs(-0.75, scale=5, size=1000)
#data = np.random.uniform(50, size=1000)
#data = stats.norm.rvs(0, np.sqrt(50), size=1000)
#data = stats.pareto.rvs(1.5, np.sqrt(50), size=1000)
tmp = meanexcess_plot(data, params=(-0.75, 5), plot=1)
print(tmp[1][-20:])
print(tmp[0][-20:])
#plt.show()
def meanexcess_emp(data):
datasorted = np.sort(data).astype(float)
meanexcess = (datasorted[::-1].cumsum())/np.arange(1,len(data)+1) - datasorted[::-1]
meancont = (datasorted[::-1].cumsum())/np.arange(1,len(data)+1)
meanexcess = meanexcess[::-1]
return datasorted, meanexcess, meancont[::-1]
def meanexcess_dist(self, lb, *args, **kwds):
#default function in expect is identity
# need args in call
if np.ndim(lb) == 0:
return self.expect(lb=lb, conditional=True)
else:
return np.array([self.expect(lb=lbb, conditional=True) for
lbb in lb])
ds, me, mc = meanexcess_emp(1.*np.arange(1,10))
print(ds)
print(me)
print(mc)
print(meanexcess_dist(stats.norm, lb=0.5))
print(meanexcess_dist(stats.norm, lb=[-np.inf, -0.5, 0, 0.5]))
rvs = stats.norm.rvs(size=100000)
rvs = rvs - rvs.mean()
print(rvs.mean(), rvs[rvs>-0.5].mean(), rvs[rvs>0].mean(), rvs[rvs>0.5].mean())
'''
C:\Programs\Python25\lib\site-packages\matplotlib-0.99.1-py2.5-win32.egg\matplotlib\rcsetup.py:117: UserWarning: rcParams key "numerix" is obsolete and has no effect;
please delete it from your matplotlibrc file
warnings.warn('rcParams key "numerix" is obsolete and has no effect;\n'
[ 1. 0.5 0. 0. 0. ]
[ 1. 0.5 0. 0. 0. ]
[ 0. 0.75 1. 1. 1. ]
[ 0. 0.75 1. 1. 1. ]
[ 1. 0.25 0. 0. 0. ]
[ 1. 0.25 0. 0. 0. ]
[ 0.01002513 0.1026334 0.58578644 1.36754447 1.8 ]
[ 0.01002513 0.1026334 0.58578644 1.36754447 1.8 ]
[ 1.8 1.36754447 0.58578644 0.1026334 0.01002513]
[ 1.8 1.36754447 0.58578644 0.1026334 0.01002513]
(array(0.66666666666666674), array(0.22222222222222243), array(0.56568542494923058), array(-0.60000000000032916))
(array(0.66666666666666674), array(0.22222222222222243), array(0.56568542494923058), array(-0.60000000000032916))
0.5
0.5
25.0
shape > -1 does not hold
-20
[ 41.4980671 42.83145298 44.24197578 45.81622844 47.57145212
49.52692287 51.70553275 54.0830766 56.61358997 59.53409167
62.8970042 66.73494156 71.04227973 76.24015612 82.71835988
89.79611663 99.4252195 106.2372462 94.83432424 0. ]
[ 15.79736355 16.16373531 17.44204268 17.47968055 17.73264951
18.23939099 19.02638455 20.79746264 23.7169161 24.48807136
25.90496638 28.35556795 32.27623618 34.65714495 37.37093362
47.32957609 51.27970515 78.98913941 129.04309012 189.66864848]
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> meanexcess_emp(np.arange(10))
(array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), array([4, 4, 5, 5, 5, 6, 6, 5, 4, 0]), array([9, 8, 8, 7, 7, 6, 6, 5, 5, 4]))
>>> meanexcess_emp(1*np.arange(10))
(array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), array([4, 4, 5, 5, 5, 6, 6, 5, 4, 0]), array([9, 8, 8, 7, 7, 6, 6, 5, 5, 4]))
>>> meanexcess_emp(1.*np.arange(10))
(array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]), array([ 4.5 , 4.88888889, 5.25 , 5.57142857, 5.83333333,
6. , 6. , 5.66666667, 4.5 , 0. ]), array([ 9. , 8.5, 8. , 7.5, 7. , 6.5, 6. , 5.5, 5. , 4.5]))
>>> meanexcess_emp(0.5**np.arange(10))
(array([ 0.00195313, 0.00390625, 0.0078125 , 0.015625 , 0.03125 ,
0.0625 , 0.125 , 0.25 , 0.5 , 1. ]), array([ 0.19960938, 0.22135417, 0.24804688, 0.28125 , 0.32291667,
0.375 , 0.4375 , 0.5 , 0.5 , 0. ]), array([ 1. , 0.75 , 0.58333333, 0.46875 , 0.3875 ,
0.328125 , 0.28348214, 0.24902344, 0.22178819, 0.19980469]))
>>> meanexcess_emp(np.arange(10)**0.5)
(array([ 0. , 1. , 1.41421356, 1.73205081, 2. ,
2.23606798, 2.44948974, 2.64575131, 2.82842712, 3. ]), array([ 1.93060005, 2.03400006, 2.11147337, 2.16567659, 2.19328936,
2.18473364, 2.11854461, 1.94280904, 1.5 , 0. ]), array([ 3. , 2.91421356, 2.82472615, 2.73091704, 2.63194723,
2.52662269, 2.41311242, 2.28825007, 2.14511117, 1.93060005]))
>>> meanexcess_emp(np.arange(10)**-2)
(array([-2147483648, 0, 0, 0, 0,
0, 0, 0, 0, 1]), array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), array([ 1, 0, 0, 0, 0,
0, 0, 0, 0, -214748365]))
>>> meanexcess_emp(np.arange(10)**(-0.5))
(array([ 0.33333333, 0.35355339, 0.37796447, 0.40824829, 0.4472136 ,
0.5 , 0.57735027, 0.70710678, 1. , Inf]), array([ Inf, Inf, Inf, Inf, Inf, Inf, Inf, Inf, Inf, NaN]), array([ Inf, Inf, Inf, Inf, Inf, Inf, Inf, Inf, Inf, Inf]))
>>> np.arange(10)**(-0.5)
array([ Inf, 1. , 0.70710678, 0.57735027, 0.5 ,
0.4472136 , 0.40824829, 0.37796447, 0.35355339, 0.33333333])
>>> meanexcess_emp(np.arange(1,10)**(-0.5))
(array([ 0.33333333, 0.35355339, 0.37796447, 0.40824829, 0.4472136 ,
0.5 , 0.57735027, 0.70710678, 1. ]), array([ 0.4857152 , 0.50223543, 0.51998842, 0.53861177, 0.55689141,
0.57111426, 0.56903559, 0.5 , 0. ]), array([ 1. , 0.85355339, 0.76148568, 0.69611426, 0.64633413,
0.60665316, 0.57398334, 0.5464296 , 0.52275224]))
>>> meanexcess_emp(np.arange(1,10))
(array([1, 2, 3, 4, 5, 6, 7, 8, 9]), array([4, 5, 5, 5, 6, 6, 5, 4, 0]), array([9, 8, 8, 7, 7, 6, 6, 5, 5]))
>>> meanexcess_emp(1.*np.arange(1,10))
(array([ 1., 2., 3., 4., 5., 6., 7., 8., 9.]), array([ 4.88888889, 5.25 , 5.57142857, 5.83333333, 6. ,
6. , 5.66666667, 4.5 , 0. ]), array([ 9. , 8.5, 8. , 7.5, 7. , 6.5, 6. , 5.5, 5. ]))
>>> datasorted = np.sort(1.*np.arange(1,10))
>>> (datasorted[::-1].cumsum()-datasorted[::-1])
array([ 0., 9., 17., 24., 30., 35., 39., 42., 44.])
>>> datasorted[::-1].cumsum()
array([ 9., 17., 24., 30., 35., 39., 42., 44., 45.])
>>> datasorted[::-1]
array([ 9., 8., 7., 6., 5., 4., 3., 2., 1.])
>>>
'''
| 43.358333
| 211
| 0.552085
|
6fa868612ac81e980a8629fa352dc7a95e4cc4e4
| 6,470
|
py
|
Python
|
cv2videoFilePINLocations.py
|
cliffeby/duckpin1
|
de00ec2e675ca073edcbad58865300a2a957f04b
|
[
"MIT"
] | null | null | null |
cv2videoFilePINLocations.py
|
cliffeby/duckpin1
|
de00ec2e675ca073edcbad58865300a2a957f04b
|
[
"MIT"
] | 1
|
2018-04-23T21:35:32.000Z
|
2018-10-04T03:15:00.000Z
|
cv2videoFilePINLocations.py
|
cliffeby/Duckpin2
|
9b1b0891e898625373409f7b4b7d4e058184c45e
|
[
"MIT"
] | null | null | null |
# import the necessary packages
import time
import cv2
import numpy
from matplotlib import pyplot as plt
def show_color_histogram(image):
# for i, col in enumerate(['b', 'g', 'r']):
# draw_image_histogram(image, [i], color=col)
# plt.show()
draw_image_histogram(image, [0])
def with_open_cv(image):
    # Zero the blue and red channels in place so only the green channel remains, then display it.
    image[:, :, [0, 2]] = 0
    cv2.imshow('result.jpg', image)
def draw_image_histogram(image, channels):
hist = cv2.calcHist([image], channels, None, [3], [10, 250])
# plt.plot(hist, color=color)
# plt.xlim([0, 10])
print('Green', hist, frameNo)
def isFrameNext(current, previous):
if current-previous == 1:
return True
else:
return False
def ballOrReset( frame_img,center):
global PBFN, PBFD,mask
if PBFN > 6:
return
if PBFN == 0:
PBFD = []
mask = frame_img
if PBFN < 7:
PBFD.append(center) #append data
PBFN = PBFN+1
return
def writeImageSeries(frameNoStart, numberOfFrames):
if frameNoStart <= frameNo:
if frameNo <= frameNoStart+numberOfFrames:
print ('Saving ../videos/video3dFrame'+ str(frameNo) +'.jpg')
cv2.imwrite('../videos/video3dFrame'+ str(frameNo) +'.jpg',img_rgb)
def isPinSetter():
global setterPresent
global frameNo
global img_rgb
global firstSetterFrame
# Convert BGR to HSV
frame = img_rgb[150:450, 650:1600]
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# define range of green color in HSV
lower_green = numpy.array([65,60,60])
upper_green = numpy.array([80,255,255])
# Threshold the HSV image to get only green colors
mask = cv2.inRange(hsv, lower_green, upper_green)
res = cv2.bitwise_and(frame,frame, mask=mask)
_,thrshed = cv2.threshold(cv2.cvtColor(res,cv2.COLOR_BGR2GRAY),3,255,cv2.THRESH_BINARY)
_,contours,_ = cv2.findContours(thrshed,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
setterPresent = False
area = 0
for cnt in contours:
#Contour area is taken
area = cv2.contourArea(cnt) +area
if area >1000:
setterPresent = True
firstSetterFrame = frameNo
if setterPresent:
print("Green", area, frameNo)
else:
firstSetterFrame = 0
cv2.imshow('frame',frame)
# initialize the camera and grab a reference to the raw camera capture
# cap = cv2.VideoCapture('../feb28/bffl28.h264')
cap = cv2.VideoCapture('../videos/video3d.h264')
setterPresent = False
PBFN = 0
PBFD= []
newSeries = True
x=-0
x1=0 +x
y=-0
y1=0 + y
lower_red = numpy.array([0,0,100]) # lower_red = np.array([0,100,0])
upper_red = numpy.array([110, 110, 255]) # upper_red = np.array([180,255,255])
crop_ranges = ([1100,1700, 220,2800],[0,0,0,0])
pts = []
kernel = numpy.ones((5,5), numpy.uint8)
frameNo = 0
prevFrame = 0
ballCounter = [0]*3
origCounter = 0
for i in range(0,1):
a =(int(crop_ranges[i][2]/2)+x,int(crop_ranges[i][0]/2)+y)
b = (int(crop_ranges[i][3]/2)+x1, int(crop_ranges[i][1]/2)+y1)
ret, frame1 = cap.read()
# frame1= frame1[1100:220, 1700:2850]
mask= frame1[550:850, 250:1600]
frame1 = mask
while(cap.isOpened()):
ret, frame2 = cap.read()
frameNo = frameNo +1
img_rgb = frame2
if setterPresent:
if firstSetterFrame + 34 > frameNo:
continue
isPinSetter()
frame2= frame2[550:850, 250:1600]
hist = cv2.calcHist(frame2,[1],None,[4], [10,50])
img_gray1 = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
img_gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
diff = cv2.absdiff(img_gray1,img_gray2)
ret, thresh = cv2.threshold(diff, 100,200,cv2.THRESH_BINARY)
frame = thresh
# thresh = cv2.erode(thresh, kernel, iterations=1)
# thresh = cv2.dilate(thresh, kernel, iterations=1)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
# only proceed if at least one contour was found
# if len(cnts) > 3:
# continue
if len(cnts) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
ballCounter[0]=0
ballCounter[1]=0
ballCounter[2]=0
# only proceed if the radius meets a minimum size
if radius > 30:
if radius < 120 :
# draw the circle and centroid on the frame,
# then update the list of tracked points
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
cv2.circle(img_gray2, center, int(radius), (0, 0, 255), -1)
if center < (1100,200):
# if newSeries==True:
# prevFrame = frameNo-1
# newSeries = False
# PBFN = 0
# if isFrameNext(frameNo, prevFrame):
# prevFrame = frameNo
# ballOrReset(frame1,center)
print('CENTER',center, radius, frameNo, len(cnts))
cv2.imwrite('P:videos/cv2Img'+str(frameNo)+'.jpg',img_gray2)
# else:
print('Final',frameNo, PBFD)
newSeries = True
# update the points queue
# pts.append(center)
# # loop over the set of tracked points
# for i in range(1, len(pts)):
# # if either of the tracked points are None, ignore
# # them
# if pts[i - 1] is None or pts[i] is None:
# continue
# # otherwise, compute the thickness of the line and
# # draw the connecting lines
# # thickness = int(numpy.sqrt(args["buffer"] / float(i + 1)) * 2.5)
# cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), 1)
# # if pts[i][0] < 1100:
# # print('PTS',pts[i])
# # show the frame to our screen
cv2.imshow("Frame", img_rgb)
cv2.rectangle(img_rgb,b, a, 255,2)
# cv2.imshow("Frame1/Mask", frame1)
# cv2.imshow('BO.jpg', img_gray1)
cv2.imshow('Diff', thresh)
# writeImageSeries(135,20)
key = cv2.waitKey(1) & 0xFF
# clear the stream in preparation for the next frame
# frame.truncate(0)
# mask = frame1
# frame1=frame2
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
| 31.871921
| 91
| 0.594281
|
dee34a1d84b2d274f1d9bff473634b18a2744a60
| 406
|
py
|
Python
|
sriracha/view.py
|
pcranaway/sriracha
|
cba5703e05cf8bee1b83d2ff163400f448e9e88a
|
[
"MIT"
] | null | null | null |
sriracha/view.py
|
pcranaway/sriracha
|
cba5703e05cf8bee1b83d2ff163400f448e9e88a
|
[
"MIT"
] | null | null | null |
sriracha/view.py
|
pcranaway/sriracha
|
cba5703e05cf8bee1b83d2ff163400f448e9e88a
|
[
"MIT"
] | null | null | null |
from liquid import Template, Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader('views/'))
cache = {}
def render_view(name, model):
"""
    Renders a view with a model using the Liquid templating engine.
"""
# cache the template if it's not already cached
if not name in cache:
cache[name] = env.get_template('{}.html'.format(name))
return cache[name].render(**model)
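# Illustrative usage (assumes a views/home.html Liquid template exists):
#   html = render_view('home', {'title': 'Hello'})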
| 25.375
| 62
| 0.679803
|
87534ffd5899dfed0f9bc7722baba126ada11938
| 6,009
|
py
|
Python
|
AutomatedTesting/Gem/PythonTests/physics/C13508019_Terrain_TerrainTexturePainterWorks.py
|
cypherdotXd/o3de
|
bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676
|
[
"Apache-2.0",
"MIT"
] | 1
|
2022-03-12T14:13:45.000Z
|
2022-03-12T14:13:45.000Z
|
AutomatedTesting/Gem/PythonTests/physics/C13508019_Terrain_TerrainTexturePainterWorks.py
|
cypherdotXd/o3de
|
bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676
|
[
"Apache-2.0",
"MIT"
] | 2
|
2022-01-13T04:29:38.000Z
|
2022-03-12T01:05:31.000Z
|
AutomatedTesting/Gem/PythonTests/physics/C13508019_Terrain_TerrainTexturePainterWorks.py
|
cypherdotXd/o3de
|
bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
# Test case ID : C13508019
# Test Case Title : Verify terrain materials are updated after using terrain texture layer painter.
# fmt: off
class Tests:
enter_game_mode = ("Entered game mode", "Failed to enter game mode")
find_box_bounce = ("Entity Bounce Box found", "Bounce Box not found")
find_box_no_bounce = ("Entity No Bounce Box found", "No Bounce Box not found")
find_terrain = ("Terrain found", "Terrain not found")
entities_actions_finished = ("Entity actions completed", "Entity actions not completed")
nonbouncy_box_not_bounced = ("Non-Bouncy Box didn't bounce", "Non-Bouncy Box did bounce")
bouncy_box_not_bounced = ("Bouncy Box did bounce", "Bouncy Box didn't bounce")
exit_game_mode = ("Exited game mode", "Couldn't exit game mode")
# fmt: on
def C13508019_Terrain_TerrainTexturePainterWorks():
    # This test opens a level and validates that terrain materials are updated after using the
    # terrain texture painter. It does this by:
    # 1) Opening a level with a terrain painted by two different physical materials, with a box above each
    # 2) Entering game mode
    # 3) Finding the entities in the scene
    # 4) Listening for the boxes colliding with the terrain
    # 5) Listening for the boxes to exit (bounce) the collision or not (not bounce)
    # 6) Validating the results (the box above the blue terrain does not bounce and the box above the red terrain does)
    # 7) Exiting game mode and the editor
    # Expected Result: Both boxes are affected by the physical material on the terrain underneath them.
    # One box will bounce and the other won't.
# Setup path
import os, sys
import ImportPathHelper as imports
imports.init()
from editor_python_test_tools.utils import Report
from editor_python_test_tools.utils import TestHelper as helper
import azlmbr.legacy.general as general
import azlmbr.bus
TIME_OUT = 5.0 # Max time to complete test
class Box: # Holds box object attributes
def __init__(self, boxID):
self.box_id = boxID
self.collided_with_terrain = False
self.bounced = False
helper.init_idle()
# 1) Open level
helper.open_level("Physics", "C13508019_Terrain_TerrainTexturePainterWorks")
# 2) Enter game mode
helper.enter_game_mode(Tests.enter_game_mode)
    # Create box objects and set ids
# Box that is above terrain that will bounce
bounce_box = Box(general.find_game_entity("Box_Yes_Bounce"))
Report.result(Tests.find_box_bounce, bounce_box.box_id.IsValid())
# Box that is above terrain that will not bounce
no_bounce_box = Box(general.find_game_entity("Box_No_Bounce"))
Report.result(Tests.find_box_no_bounce, no_bounce_box.box_id.IsValid())
terrain_id = general.find_game_entity("Terrain")
Report.result(Tests.find_terrain, terrain_id.IsValid())
# Listen for a collision to begin
def on_collision_begin(args):
other_id = args[0]
if other_id.Equal(no_bounce_box.box_id) and no_bounce_box.collided_with_terrain is False:
Report.info("Non-Bouncy box collided with terrain ")
no_bounce_box.collided_with_terrain = True
if other_id.Equal(bounce_box.box_id) and bounce_box.collided_with_terrain is False:
Report.info("Bouncy box collided with terrain ")
bounce_box.collided_with_terrain = True
# Listen for a collision to end
def on_collision_end(args):
other_id = args[0]
if other_id.Equal(no_bounce_box.box_id):
no_bounce_box.bounced = True
if other_id.Equal(bounce_box.box_id):
bounce_box.bounced = True
handler = azlmbr.physics.CollisionNotificationBusHandler()
handler.connect(terrain_id)
handler.add_callback("OnCollisionBegin", on_collision_begin)
handler.add_callback("OnCollisionEnd", on_collision_end)
# The test_entities_actions_completed function below returns a boolean of if the following actions happened:
# Did the bounce box hit the terrain
# Did the no bounce box hit the terrain
# Did the bounce box bounce
# Did the no bounce box not bounce
def test_entities_actions_completed():
return (
bounce_box.collided_with_terrain
and no_bounce_box.collided_with_terrain
and bounce_box.bounced
and not no_bounce_box.bounced # not here because the no bounce box should not have bounced
)
entities_actions_finished = helper.wait_for_condition(test_entities_actions_completed, TIME_OUT)
    # Report whether the entities in the level finished their actions
Report.result(Tests.entities_actions_finished, entities_actions_finished)
# Report Info
Report.info("Bouncy Box hit terrain: Expected = True Actual = {}".format(bounce_box.collided_with_terrain))
Report.info(
"Non-Bouncy Box hit terrain: Expected = True Actual = {}".format(no_bounce_box.collided_with_terrain)
)
Report.info("Did Bouncy Box bounce: Expected = True Actual = {}".format(bounce_box.bounced))
Report.info("Did Non-Bounce Box bounce: Expected = False Actual = {}".format(no_bounce_box.bounced))
# Check if the above test completed.
if entities_actions_finished:
Report.result(Tests.bouncy_box_not_bounced, bounce_box.bounced)
Report.result(Tests.nonbouncy_box_not_bounced, not no_bounce_box.bounced)
    # 7) Exit game mode
helper.exit_game_mode(Tests.exit_game_mode)
if __name__ == "__main__":
import ImportPathHelper as imports
imports.init()
from editor_python_test_tools.utils import Report
Report.start_test(C13508019_Terrain_TerrainTexturePainterWorks)
| 41.441379
| 119
| 0.707605
|
34a7ce21a9ddc9f74b4889af1b2a4fbceafacacf
| 11,675
|
py
|
Python
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/global_/afi_safis/afi_safi/graceful_restart/state/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 64
|
2016-10-20T15:47:18.000Z
|
2021-11-11T11:57:32.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/global_/afi_safis/afi_safi/graceful_restart/state/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 126
|
2016-10-05T10:36:14.000Z
|
2019-05-15T08:43:23.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/global_/afi_safis/afi_safi/graceful_restart/state/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 63
|
2016-11-07T15:23:08.000Z
|
2021-09-22T14:41:16.000Z
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/graceful-restart/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State information for BGP graceful-restart
"""
__slots__ = ("_path_helper", "_extmethods", "__enabled")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"afi-safis",
"afi-safi",
"graceful-restart",
"state",
]
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/graceful_restart/state/enabled (boolean)
YANG Description: This leaf indicates whether graceful-restart is enabled for
this AFI-SAFI
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/graceful_restart/state/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: This leaf indicates whether graceful-restart is enabled for
this AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
enabled = __builtin__.property(_get_enabled)
_pyangbind_elements = OrderedDict([("enabled", enabled)])
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/graceful-restart/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State information for BGP graceful-restart
"""
__slots__ = ("_path_helper", "_extmethods", "__enabled")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"afi-safis",
"afi-safi",
"graceful-restart",
"state",
]
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/graceful_restart/state/enabled (boolean)
YANG Description: This leaf indicates whether graceful-restart is enabled for
this AFI-SAFI
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/graceful_restart/state/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: This leaf indicates whether graceful-restart is enabled for
this AFI-SAFI
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
enabled = __builtin__.property(_get_enabled)
_pyangbind_elements = OrderedDict([("enabled", enabled)])
| 37.300319
| 370
| 0.596317
|
936578b67fcdc0a22e99a31bcec678a0e3661ebb
| 3,502
|
py
|
Python
|
wfirst_cgi/models_phaseb/python/wfirst_phaseb_proper/examples/run_hlc_input_fields.py
|
PhillipKP/proper-models
|
5a859a2c65113c58d52488b43d9f8db6691b536b
|
[
"Apache-2.0"
] | 3
|
2019-05-22T22:23:45.000Z
|
2022-03-26T02:26:14.000Z
|
wfirst_cgi/models_phaseb/python/wfirst_phaseb_proper/examples/run_hlc_input_fields.py
|
PhillipKP/proper-models
|
5a859a2c65113c58d52488b43d9f8db6691b536b
|
[
"Apache-2.0"
] | null | null | null |
wfirst_cgi/models_phaseb/python/wfirst_phaseb_proper/examples/run_hlc_input_fields.py
|
PhillipKP/proper-models
|
5a859a2c65113c58d52488b43d9f8db6691b536b
|
[
"Apache-2.0"
] | 4
|
2020-06-18T21:15:28.000Z
|
2021-09-30T08:19:58.000Z
|
# Copyright 2019 California Institute of Technology
# ------------------------------------------------------------------
import proper
import numpy as np
import matplotlib.pylab as plt
from matplotlib.colors import LogNorm
from matplotlib.patches import Circle
import wfirst_phaseb_proper
from wfirst_phaseb_proper import trim
import time
def run_hlc_input_fields():
nlam = 7
lam0 = 0.575
bandwidth = 0.1
minlam = lam0 * (1 - bandwidth/2)
maxlam = lam0 * (1 + bandwidth/2)
lam_array = np.linspace( minlam, maxlam, nlam )
n = 256 # output image dimension
final_sampling = 0.1 # output sampling in lam0/D
# compute coronagraphic field with full prescription
print( 'Computing field with full prescription' )
t1 = time.time()
(fields, sampling) = proper.prop_run_multi('wfirst_phaseb', lam_array, n, QUIET=True, \
PASSVALUE={'use_errors':1,'polaxis':-2,'zindex':[4],'zval_m':[0.19e-9],'final_sampling_lam0':final_sampling} )
t2 = time.time()
print( 'Time (sec) for full prescription = ' + str(t2-t1) )
images = np.abs(fields)**2
image_full = np.sum( images, 0 ) / nlam
# write FPM exit pupil fields with full prescription
print( 'Writing FPM exit pupil fields using full prescription' )
(fields, sampling) = proper.prop_run_multi('wfirst_phaseb', lam_array, n, QUIET=True, \
PASSVALUE={'use_errors':1,'polaxis':-2,'zindex':[4],'zval_m':[0.19e-9],'use_hlc_dm_patterns':0,'use_fpm':0,'end_at_fpm_exit_pupil':1, \
'output_field_rootname':'hlc_input_field'} )
# compute coronagraphic field with compact prescription and input fields
print( 'Computing field with compact prescription and pre-computed input fields' )
t1 = time.time()
(fields, sampling) = proper.prop_run_multi('wfirst_phaseb_compact', lam_array, n, QUIET=True, \
PASSVALUE={'input_field_rootname':'hlc_input_field','polaxis':-2,'final_sampling_lam0':final_sampling} )
t2 = time.time()
print( 'Time (sec) for compact prescription with input fields = ' + str(t2-t1) )
images = np.abs(fields)**2
image = np.sum( images, 0 ) / nlam
# move source to 7 lam/D
print( 'Computing offset source to compute NI' )
(fields, sampling) = proper.prop_run_multi('wfirst_phaseb_compact', lam_array, n, QUIET=True, \
PASSVALUE={'source_x_offset':7.0,'final_sampling_lam0':final_sampling} )
psfs = np.abs(fields)**2
psf = np.sum( psfs, 0 ) / nlam
max_psf = np.max(psf)
ni_full = image_full / max_psf
ni = image / max_psf
fig, ax = plt.subplots( nrows=1, ncols=2, figsize=(9,4) )
im = ax[0].imshow(ni_full, norm=LogNorm(vmin=1e-7,vmax=1e-2), cmap=plt.get_cmap('jet'))
circ = Circle((n/2,n/2),3/final_sampling,edgecolor='white', facecolor='none')
ax[0].add_patch(circ)
circ = Circle((n/2,n/2),9/final_sampling,edgecolor='white', facecolor='none')
ax[0].add_patch(circ)
ax[0].set_title('Full prescription')
fig.colorbar( im, ax=ax[0], shrink=0.5 )
im = ax[1].imshow(ni, norm=LogNorm(vmin=1e-7,vmax=1e-2), cmap=plt.get_cmap('jet'))
circ = Circle((n/2,n/2),3/final_sampling,edgecolor='white', facecolor='none')
ax[1].add_patch(circ)
circ = Circle((n/2,n/2),9/final_sampling,edgecolor='white', facecolor='none')
ax[1].add_patch(circ)
ax[1].set_title('Compact prescription')
fig.colorbar( im, ax=ax[1], shrink=0.5 )
plt.show()
if __name__ == '__main__':
run_hlc_input_fields()
| 39.795455
| 143
| 0.661051
|
41674c18e362d94785a7c0d0f013705b4676bcdd
| 2,753
|
py
|
Python
|
src/core/toga/widgets/scrollcontainer.py
|
d3r3kk/toga
|
2d8c0eb30371c4ef4f0610251233569e9c618e93
|
[
"BSD-3-Clause"
] | 2
|
2019-02-19T17:19:24.000Z
|
2020-04-13T21:22:24.000Z
|
src/core/toga/widgets/scrollcontainer.py
|
d3r3kk/toga
|
2d8c0eb30371c4ef4f0610251233569e9c618e93
|
[
"BSD-3-Clause"
] | 2
|
2019-10-26T20:54:06.000Z
|
2019-10-26T21:43:43.000Z
|
src/core/toga/widgets/scrollcontainer.py
|
d3r3kk/toga
|
2d8c0eb30371c4ef4f0610251233569e9c618e93
|
[
"BSD-3-Clause"
] | 4
|
2019-02-13T17:54:15.000Z
|
2019-10-26T21:16:27.000Z
|
from .base import Widget
class ScrollContainer(Widget):
""" Instantiate a new instance of the scrollable container widget.
Args:
id (str): An identifier for this widget.
style (:obj:`Style`): An optional style object.
If no style is provided then a new one will be created for the widget.
horizontal (bool): If True enable horizontal scroll bar.
vertical (bool): If True enable vertical scroll bar.
content (:class:`toga.Widget`): The content of the scroll window.
factory (:module:): A provided factory module will be used to create the
implementation of the ScrollContainer.
"""
MIN_WIDTH = 100
MIN_HEIGHT = 100
def __init__(self, id=None, style=None, horizontal=True,
vertical=True, content=None, factory=None):
super().__init__(id=id, style=style, factory=factory)
self._content = None
# Create a platform specific implementation of a Scroll Container
self._impl = self.factory.ScrollContainer(interface=self)
# Set all attributes
self.horizontal = horizontal
self.vertical = vertical
self.content = content
@property
def content(self):
""" Content of the scroll container.
Returns:
The content of the widget (:class:`toga.Widget`).
"""
return self._content
@content.setter
def content(self, widget):
if widget:
widget.app = self.app
widget.window = self.window
self._content = widget
self._impl.set_content(widget._impl)
self._impl.rehint()
widget.refresh()
def refresh(self):
"""Refresh the layout and appearance of this widget."""
super().refresh()
# If the scroll container has content, refresh that layout too.
if self.content:
self.content.refresh()
@property
def vertical(self):
""" Shows whether vertical scrolling is enabled.
Returns:
(bool) True if enabled, False if disabled.
"""
return self._vertical
@vertical.setter
def vertical(self, value):
self._vertical = value
self._impl.set_vertical(value)
@property
def horizontal(self):
""" Shows whether horizontal scrolling is enabled.
Returns:
(bool) True if enabled, False if disabled.
"""
return self._horizontal
@horizontal.setter
def horizontal(self, value):
self._horizontal = value
self._impl.set_horizontal(value)
def refresh_sublayouts(self):
"""Refresh the layout and appearance of this widget."""
self._content.refresh()
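# --- Illustrative usage sketch (an addition, not part of the original module; it
# --- assumes the public toga API of the same release: toga.App, toga.Box, toga.Label).
# A minimal app that wraps a tall Box in a ScrollContainer with only vertical
# scrolling enabled, matching the constructor arguments documented above.
#
# import toga
#
# def build(app):
#     rows = toga.Box(children=[toga.Label('Row %d' % i) for i in range(50)])
#     return toga.ScrollContainer(content=rows, horizontal=False, vertical=True)
#
# if __name__ == '__main__':
#     toga.App('ScrollContainer demo', 'org.example.scrolldemo', startup=build).main_loop()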
| 29.287234
| 82
| 0.615692
|
e108872f01048fdcff09766c68c8a113ef82c671
| 2,384
|
py
|
Python
|
src/sdk/pynni/tests/test_assessor.py
|
PeterouZh/nni
|
0a6c234a2ebb2c368d9bbfe2685e14ad12afc6ff
|
[
"MIT"
] | 52
|
2020-08-17T03:59:32.000Z
|
2022-01-25T20:48:00.000Z
|
src/sdk/pynni/tests/test_assessor.py
|
PeterouZh/nni
|
0a6c234a2ebb2c368d9bbfe2685e14ad12afc6ff
|
[
"MIT"
] | 21
|
2020-11-13T19:01:01.000Z
|
2022-02-27T09:12:51.000Z
|
src/sdk/pynni/tests/test_assessor.py
|
PeterouZh/nni
|
0a6c234a2ebb2c368d9bbfe2685e14ad12afc6ff
|
[
"MIT"
] | 22
|
2020-08-25T03:31:44.000Z
|
2022-01-05T03:47:34.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import nni.protocol
from nni.protocol import CommandType, send, receive
from nni.assessor import Assessor, AssessResult
from nni.msg_dispatcher import MsgDispatcher
from io import BytesIO
import json
from unittest import TestCase, main
_trials = []
_end_trials = []
class NaiveAssessor(Assessor):
def assess_trial(self, trial_job_id, trial_history):
_trials.append(trial_job_id)
if sum(trial_history) % 2 == 0:
return AssessResult.Good
else:
return AssessResult.Bad
def trial_end(self, trial_job_id, success):
_end_trials.append((trial_job_id, success))
_in_buf = BytesIO()
_out_buf = BytesIO()
def _reverse_io():
_in_buf.seek(0)
_out_buf.seek(0)
nni.protocol._out_file = _in_buf
nni.protocol._in_file = _out_buf
def _restore_io():
_in_buf.seek(0)
_out_buf.seek(0)
nni.protocol._in_file = _in_buf
nni.protocol._out_file = _out_buf
class AssessorTestCase(TestCase):
def test_assessor(self):
_reverse_io()
send(CommandType.ReportMetricData, '{"trial_job_id":"A","type":"PERIODICAL","sequence":0,"value":"2"}')
send(CommandType.ReportMetricData, '{"trial_job_id":"B","type":"PERIODICAL","sequence":0,"value":"2"}')
send(CommandType.ReportMetricData, '{"trial_job_id":"A","type":"PERIODICAL","sequence":1,"value":"3"}')
send(CommandType.TrialEnd, '{"trial_job_id":"A","event":"SYS_CANCELED"}')
send(CommandType.TrialEnd, '{"trial_job_id":"B","event":"SUCCEEDED"}')
send(CommandType.NewTrialJob, 'null')
_restore_io()
assessor = NaiveAssessor()
dispatcher = MsgDispatcher(None, assessor)
nni.msg_dispatcher_base._worker_fast_exit_on_terminate = False
dispatcher.run()
e = dispatcher.worker_exceptions[0]
self.assertIs(type(e), AssertionError)
self.assertEqual(e.args[0], 'Unsupported command: CommandType.NewTrialJob')
self.assertEqual(_trials, ['A', 'B', 'A'])
self.assertEqual(_end_trials, [('A', False), ('B', True)])
_reverse_io()
command, data = receive()
self.assertIs(command, CommandType.KillTrialJob)
self.assertEqual(data, '"A"')
self.assertEqual(len(_out_buf.read()), 0)
if __name__ == '__main__':
main()
| 30.177215
| 111
| 0.670302
|
3cf9cd2d5d3c4abaefb20029c37971b213c909e2
| 7,672
|
py
|
Python
|
python/cuml/test/test_dbscan.py
|
teju85/cuml
|
91ddc9eb557d7dc948e8394755890ee4a3102efd
|
[
"Apache-2.0"
] | 1
|
2021-04-06T14:24:25.000Z
|
2021-04-06T14:24:25.000Z
|
python/cuml/test/test_dbscan.py
|
teju85/cuml
|
91ddc9eb557d7dc948e8394755890ee4a3102efd
|
[
"Apache-2.0"
] | 1
|
2020-03-05T02:25:50.000Z
|
2020-03-05T02:25:50.000Z
|
python/cuml/test/test_dbscan.py
|
teju85/cuml
|
91ddc9eb557d7dc948e8394755890ee4a3102efd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
from cuml.test.utils import get_handle
from cuml import DBSCAN as cuDBSCAN
from cuml.test.utils import get_pattern, unit_param, \
quality_param, stress_param
from sklearn.cluster import DBSCAN as skDBSCAN
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import adjusted_rand_score
dataset_names = ['noisy_moons', 'varied', 'aniso', 'blobs',
'noisy_circles', 'no_structure']
@pytest.mark.parametrize('max_mbytes_per_batch', [1e9, 5e9])
@pytest.mark.parametrize('datatype', [np.float32, np.float64])
@pytest.mark.parametrize('use_handle', [True, False])
@pytest.mark.parametrize('nrows', [unit_param(500), quality_param(5000),
stress_param(500000)])
@pytest.mark.parametrize('ncols', [unit_param(20), quality_param(100),
stress_param(1000)])
@pytest.mark.parametrize('out_dtype', [unit_param("int32"),
unit_param(np.int32),
unit_param("int64"),
unit_param(np.int64),
quality_param("int32"),
stress_param("int32")])
def test_dbscan(datatype, use_handle, nrows, ncols,
max_mbytes_per_batch, out_dtype):
n_samples = nrows
n_feats = ncols
X, y = make_blobs(n_samples=n_samples, cluster_std=0.01,
n_features=n_feats, random_state=0)
handle, stream = get_handle(use_handle)
cudbscan = cuDBSCAN(handle=handle, eps=1, min_samples=2,
max_mbytes_per_batch=max_mbytes_per_batch,
output_type='numpy')
cu_labels = cudbscan.fit_predict(X, out_dtype=out_dtype)
if nrows < 500000:
skdbscan = skDBSCAN(eps=1, min_samples=2, algorithm="brute")
sk_labels = skdbscan.fit_predict(X)
score = adjusted_rand_score(cu_labels, sk_labels)
assert score == 1
if out_dtype == "int32" or out_dtype == np.int32:
assert cu_labels.dtype == np.int32
elif out_dtype == "int64" or out_dtype == np.int64:
assert cu_labels.dtype == np.int64
@pytest.mark.parametrize("name", [
'noisy_moons',
'blobs',
'no_structure'])
@pytest.mark.parametrize('nrows', [unit_param(500), quality_param(5000),
stress_param(500000)])
def test_dbscan_sklearn_comparison(name, nrows):
default_base = {'quantile': .3,
'eps': .5,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 2}
n_samples = nrows
pat = get_pattern(name, n_samples)
params = default_base.copy()
params.update(pat[1])
X, y = pat[0]
X = StandardScaler().fit_transform(X)
cuml_dbscan = cuDBSCAN(eps=params['eps'], min_samples=5,
output_type='numpy')
cu_y_pred = cuml_dbscan.fit_predict(X)
if nrows < 500000:
dbscan = skDBSCAN(eps=params['eps'], min_samples=5)
sk_y_pred = dbscan.fit_predict(X)
score = adjusted_rand_score(sk_y_pred, cu_y_pred)
assert(score == 1.0)
@pytest.mark.parametrize("name", [
'noisy_moons',
'blobs',
'no_structure'])
def test_dbscan_default(name):
default_base = {'quantile': .3,
'eps': .5,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 2}
n_samples = 500
pat = get_pattern(name, n_samples)
params = default_base.copy()
params.update(pat[1])
X, y = pat[0]
X = StandardScaler().fit_transform(X)
cuml_dbscan = cuDBSCAN(output_type='numpy')
cu_y_pred = cuml_dbscan.fit_predict(X)
dbscan = skDBSCAN(eps=params['eps'], min_samples=5)
sk_y_pred = dbscan.fit_predict(X)
score = adjusted_rand_score(sk_y_pred, cu_y_pred)
assert(score == 1.0)
@pytest.mark.xfail(strict=True, raises=ValueError)
def test_dbscan_out_dtype_fails_invalid_input():
X, _ = make_blobs(n_samples=500)
cudbscan = cuDBSCAN(output_type='numpy')
cudbscan.fit_predict(X, out_dtype="bad_input")
def test_core_point_prop1():
params = {'eps': 1.1, 'min_samples': 4}
# The input looks like a latin cross or a star with a chain:
# .
# . . . . .
# .
# There is 1 core-point (intersection of the bars)
# and the two points to the very right are not reachable from it
# So there should be one cluster (the plus/star on the left)
# and two noise points
X = np.array([
[0, 0],
[1, 0],
[1, 1],
[1, -1],
[2, 0],
[3, 0],
[4, 0]
], dtype=np.float32)
cudbscan = cuDBSCAN(**params)
cu_y_pred = cudbscan.fit_predict(X)
dbscan = skDBSCAN(**params)
sk_y_pred = dbscan.fit_predict(X)
score = adjusted_rand_score(sk_y_pred, cu_y_pred)
assert(score == 1.0)
def test_core_point_prop2():
params = {'eps': 1.1, 'min_samples': 4}
    # The input looks like a long two-barred (orthodox) cross or
# two stars next to each other:
# . .
# . . . . . .
# . .
# There are 2 core-points but they are not reachable from each other
# So there should be two clusters, both in the form of a plus/star
X = np.array([
[0, 0],
[1, 0],
[1, 1],
[1, -1],
[2, 0],
[3, 0],
[4, 0],
[4, 1],
[4, -1],
[5, 0]
], dtype=np.float32)
cudbscan = cuDBSCAN(**params)
cu_y_pred = cudbscan.fit_predict(X)
dbscan = skDBSCAN(**params)
sk_y_pred = dbscan.fit_predict(X)
score = adjusted_rand_score(sk_y_pred, cu_y_pred)
assert(score == 1.0)
def test_core_point_prop3():
params = {'eps': 1.1, 'min_samples': 4}
    # The input looks like a two-barred (orthodox) cross or
# two stars sharing a link:
# . .
# . . . . .
# . .
# There are 2 core-points but they are not reachable from each other
# So there should be two clusters.
# However, the link that is shared between the stars
# actually has an ambiguous label (to the best of my knowledge)
# as it will depend on the order in which we process the core-points.
# So we exclude that point from the comparison with sklearn
X = np.array([
[0, 0],
[1, 0],
[1, 1],
[1, -1],
[3, 0],
[4, 0],
[4, 1],
[4, -1],
[5, 0],
[2, 0]
], dtype=np.float32)
cudbscan = cuDBSCAN(**params)
cu_y_pred = cudbscan.fit_predict(X)
dbscan = skDBSCAN(**params)
sk_y_pred = dbscan.fit_predict(X)
score = adjusted_rand_score(sk_y_pred[:-1], cu_y_pred[:-1])
assert(score == 1.0)
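# --- Illustrative standalone run (a sketch, not part of the original test suite).
# --- It reuses only names imported above (make_blobs, cuDBSCAN) and assumes a
# --- cuML-capable GPU is available, just as the tests themselves do.
if __name__ == '__main__':
    X, _ = make_blobs(n_samples=500, n_features=20, cluster_std=0.01, random_state=0)
    labels = cuDBSCAN(eps=1, min_samples=2, output_type='numpy').fit_predict(X)
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    print('clusters found:', n_clusters)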
| 31.834025
| 74
| 0.586288
|
6d9e824779307496f0f4b69ab3f1a4d70b4d8692
| 3,584
|
py
|
Python
|
examples/play_stream.py
|
richardbarlow/python-sounddevice
|
afe0a822b5591c2e3ae7fe9f1517d7342e048c46
|
[
"MIT"
] | null | null | null |
examples/play_stream.py
|
richardbarlow/python-sounddevice
|
afe0a822b5591c2e3ae7fe9f1517d7342e048c46
|
[
"MIT"
] | null | null | null |
examples/play_stream.py
|
richardbarlow/python-sounddevice
|
afe0a822b5591c2e3ae7fe9f1517d7342e048c46
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Play a web stream.
ffmpeg-python (https://github.com/kkroening/ffmpeg-python) has to be installed.
If you don't know a stream URL, try http://icecast.spc.org:8000/longplayer
(see https://longplayer.org/ for a description).
"""
import argparse
import queue
import sys
import ffmpeg
import sounddevice as sd
def int_or_str(text):
"""Helper function for argument parsing."""
try:
return int(text)
except ValueError:
return text
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'-l', '--list-devices', action='store_true',
help='show list of audio devices and exit')
args, remaining = parser.parse_known_args()
if args.list_devices:
print(sd.query_devices())
parser.exit(0)
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
parents=[parser])
parser.add_argument(
'url', metavar='URL',
help='stream URL')
parser.add_argument(
'-d', '--device', type=int_or_str,
help='output device (numeric ID or substring)')
parser.add_argument(
'-b', '--blocksize', type=int, default=1024,
help='block size (default: %(default)s)')
parser.add_argument(
'-q', '--buffersize', type=int, default=20,
help='number of blocks used for buffering (default: %(default)s)')
args = parser.parse_args(remaining)
if args.blocksize == 0:
parser.error('blocksize must not be zero')
if args.buffersize < 1:
parser.error('buffersize must be at least 1')
q = queue.Queue(maxsize=args.buffersize)
print('Getting stream information ...')
try:
info = ffmpeg.probe(args.url)
except ffmpeg.Error as e:
sys.stderr.buffer.write(e.stderr)
parser.exit(e)
streams = info.get('streams', [])
if len(streams) != 1:
parser.exit('There must be exactly one stream available')
stream = streams[0]
if stream.get('codec_type') != 'audio':
parser.exit('The stream must be an audio stream')
channels = stream['channels']
samplerate = float(stream['sample_rate'])
def callback(outdata, frames, time, status):
assert frames == args.blocksize
if status.output_underflow:
print('Output underflow: increase blocksize?', file=sys.stderr)
raise sd.CallbackAbort
assert not status
try:
data = q.get_nowait()
except queue.Empty:
print('Buffer is empty: increase buffersize?', file=sys.stderr)
raise sd.CallbackAbort
assert len(data) == len(outdata)
outdata[:] = data
try:
print('Opening stream ...')
process = ffmpeg.input(
args.url
).output(
'pipe:',
format='f32le',
acodec='pcm_f32le',
ac=channels,
ar=samplerate,
loglevel='quiet',
).run_async(pipe_stdout=True)
stream = sd.RawOutputStream(
samplerate=samplerate, blocksize=args.blocksize,
device=args.device, channels=channels, dtype='float32',
callback=callback)
read_size = args.blocksize * channels * stream.samplesize
print('Buffering ...')
for _ in range(args.buffersize):
q.put_nowait(process.stdout.read(read_size))
print('Starting Playback ...')
with stream:
timeout = args.blocksize * args.buffersize / samplerate
while True:
q.put(process.stdout.read(read_size), timeout=timeout)
except KeyboardInterrupt:
parser.exit('\nInterrupted by user')
except queue.Full:
# A timeout occurred, i.e. there was an error in the callback
parser.exit(1)
except Exception as e:
parser.exit(type(e).__name__ + ': ' + str(e))
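# --- Illustrative invocation (a sketch; the device index, block size and buffer
# --- size below are placeholders, the URL is the one suggested in the docstring):
#
#   python3 play_stream.py http://icecast.spc.org:8000/longplayer -d 0 -b 2048 -q 40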
| 28.444444
| 79
| 0.674944
|
a308adc4b8f6b5aa81fe7b9faaeeeb23d121e9a9
| 862
|
py
|
Python
|
visualize/visual_csv_submit.py
|
lyw615/code_accumulation
|
5b2a5c039a725def37e19f226906ed1087880118
|
[
"MIT"
] | 3
|
2021-11-09T07:15:00.000Z
|
2021-12-11T06:30:47.000Z
|
visualize/visual_csv_submit.py
|
lyw615/code_accumulation
|
5b2a5c039a725def37e19f226906ed1087880118
|
[
"MIT"
] | null | null | null |
visualize/visual_csv_submit.py
|
lyw615/code_accumulation
|
5b2a5c039a725def37e19f226906ed1087880118
|
[
"MIT"
] | null | null | null |
import sys, os
file_path = os.path.abspath(__file__)
sys.path.append(os.path.abspath(os.path.join(file_path, "..", "..")))
from code_aculat.visualize.visual_base import show_two_image
import pandas as pd
import os.path as osp
from PIL import Image, ImageDraw
csv_path = r"/home/lyw/t_ensemble.csv"
with open(csv_path, 'r') as f:
record = f.readlines()
record.pop(0)
image_dir = r"/home/data1/yw/mmdetection/data/water_detection/split_folds/fold_v1/foldv1_yolo/test/tt/images"
for _ in record:
_ = _.strip('\n').split(',')
_[3:] = list(map(int, _[3:]))
show_img = Image.open(osp.join(image_dir, _[1] + ".jpg"))
draw = ImageDraw.Draw(show_img)
draw.rectangle([(_[3], _[4]), (_[5], _[6])], width=3)
import numpy as np
mat_show = np.array(show_img)
print(_[3:])
show_two_image(mat_show, mat_show, _[1])
print("pp")
| 27.806452
| 109
| 0.674014
|
01547ae202864010adda84ec270bfdb9ab5bdf04
| 8,967
|
py
|
Python
|
crfmnes/alg.py
|
bakanaouji/crfmnes
|
e21ae1f084f796feaec61cb1b4b7b3680c755ce7
|
[
"MIT"
] | null | null | null |
crfmnes/alg.py
|
bakanaouji/crfmnes
|
e21ae1f084f796feaec61cb1b4b7b3680c755ce7
|
[
"MIT"
] | null | null | null |
crfmnes/alg.py
|
bakanaouji/crfmnes
|
e21ae1f084f796feaec61cb1b4b7b3680c755ce7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import numpy as np
import copy
# evaluation value of the infeasible solution
INFEASIBLE = np.inf
def get_h_inv(dim):
f = lambda a, b: ((1. + a * a) * math.exp(a * a / 2.) / 0.24) - 10. - dim
f_prime = lambda a: (1. / 0.24) * a * math.exp(a * a / 2.) * (3. + a * a)
h_inv = 1.0
while abs(f(h_inv, dim)) > 1e-10:
h_inv = h_inv - 0.5 * (f(h_inv, dim) / f_prime(h_inv))
return h_inv
def sort_indices_by(evals, z):
lam = evals.size
sorted_indices = np.argsort(evals)
sorted_evals = evals[sorted_indices]
no_of_feasible_solutions = np.where(sorted_evals != INFEASIBLE)[0].size
if no_of_feasible_solutions != lam:
infeasible_z = z[:, np.where(evals == INFEASIBLE)[0]]
distances = np.sum(infeasible_z ** 2, axis=0)
infeasible_indices = sorted_indices[no_of_feasible_solutions:]
indices_sorted_by_distance = np.argsort(distances)
sorted_indices[no_of_feasible_solutions:] = infeasible_indices[indices_sorted_by_distance]
return sorted_indices
class CRFMNES:
def __init__(self, dim, f, m, sigma, lamb, **kwargs):
if 'seed' in kwargs.keys():
np.random.seed(kwargs['seed'])
self.dim = dim
self.f = f
self.m = m
self.sigma = sigma
self.lamb = lamb
self.v = kwargs.get('v', np.random.randn(dim, 1) / np.sqrt(dim))
self.D = np.ones([dim, 1])
self.constraint = kwargs.get('constraint', [[- np.inf, np.inf] for _ in range(dim)])
self.penalty_coef = kwargs.get('penalty_coef', 1e5)
self.use_constraint_violation = kwargs.get('use_constraint_violation', True)
self.w_rank_hat = (np.log(self.lamb / 2 + 1) - np.log(np.arange(1, self.lamb + 1))).reshape(self.lamb, 1)
self.w_rank_hat[np.where(self.w_rank_hat < 0)] = 0
self.w_rank = self.w_rank_hat / sum(self.w_rank_hat) - (1. / self.lamb)
self.mueff = 1 / ((self.w_rank + (1 / self.lamb)).T @ (self.w_rank + (1 / self.lamb)))[0][0]
self.cs = (self.mueff + 2.) / (self.dim + self.mueff + 5.)
self.cc = (4. + self.mueff / self.dim) / (self.dim + 4. + 2. * self.mueff / self.dim)
self.c1_cma = 2. / (math.pow(self.dim + 1.3, 2) + self.mueff)
# initialization
self.chiN = np.sqrt(self.dim) * (1. - 1. / (4. * self.dim) + 1. / (21. * self.dim * self.dim))
self.pc = np.zeros([self.dim, 1])
self.ps = np.zeros([self.dim, 1])
# distance weight parameter
self.h_inv = get_h_inv(self.dim)
self.alpha_dist = lambda lambF: self.h_inv * min(1., math.sqrt(float(self.lamb) / self.dim)) * math.sqrt(
float(lambF) / self.lamb)
self.w_dist_hat = lambda z, lambF: math.exp(self.alpha_dist(lambF) * np.linalg.norm(z))
# learning rate
self.eta_m = 1.0
self.eta_move_sigma = 1.
self.eta_stag_sigma = lambda lambF: math.tanh((0.024 * lambF + 0.7 * self.dim + 20.) / (self.dim + 12.))
self.eta_conv_sigma = lambda lambF: 2. * math.tanh((0.025 * lambF + 0.75 * self.dim + 10.) / (self.dim + 4.))
self.c1 = lambda lambF: self.c1_cma * (self.dim - 5) / 6 * (float(lambF) / self.lamb)
self.eta_B = lambda lambF: np.tanh((min(0.02 * lambF, 3 * np.log(self.dim)) + 5) / (0.23 * self.dim + 25))
self.g = 0
self.no_of_evals = 0
self.idxp = np.arange(self.lamb / 2, dtype=int)
self.idxm = np.arange(self.lamb / 2, self.lamb, dtype=int)
self.z = np.zeros([self.dim, self.lamb])
self.f_best = float('inf')
self.x_best = np.empty(self.dim)
def calc_violations(self, x):
violations = np.zeros(self.lamb)
for i in range(self.lamb):
for j in range(self.dim):
violations[i] += (- min(0, x[j][i] - self.constraint[j][0]) + max(0, x[j][i] - self.constraint[j][1])) * self.penalty_coef
return violations
def optimize(self, iterations):
for _ in range(iterations):
# print("f_best:{}".format(self.f_best))
_ = self.one_iteration()
return self.x_best, self.f_best
def one_iteration(self):
d = self.dim
lamb = self.lamb
zhalf = np.random.randn(d, int(lamb / 2)) # dim x lamb/2
self.z[:, self.idxp] = zhalf
self.z[:, self.idxm] = -zhalf
normv = np.linalg.norm(self.v)
normv2 = normv ** 2
vbar = self.v / normv
y = self.z + (np.sqrt(1 + normv2) - 1) * vbar @ (vbar.T @ self.z)
x = self.m + self.sigma * y * self.D
evals_no_sort = np.array([self.f(np.array(x[:, i].reshape(self.dim, 1))) for i in range(self.lamb)])
xs_no_sort = [x[:, i] for i in range(lamb)]
violations = np.zeros(lamb)
if self.use_constraint_violation:
violations = self.calc_violations(x)
sorted_indices = sort_indices_by(evals_no_sort + violations, self.z)
else:
sorted_indices = sort_indices_by(evals_no_sort, self.z)
best_eval_id = sorted_indices[0]
f_best = evals_no_sort[best_eval_id]
x_best = x[:, best_eval_id]
self.z = self.z[:, sorted_indices]
y = y[:, sorted_indices]
x = x[:, sorted_indices]
self.no_of_evals += self.lamb
self.g += 1
if f_best < self.f_best:
self.f_best = f_best
self.x_best = x_best
# This operation assumes that if the solution is infeasible, infinity comes in as input.
lambF = np.sum(evals_no_sort < np.finfo(float).max)
# evolution path p_sigma
self.ps = (1 - self.cs) * self.ps + np.sqrt(self.cs * (2. - self.cs) * self.mueff) * (self.z @ self.w_rank)
ps_norm = np.linalg.norm(self.ps)
# distance weight
w_tmp = np.array(
[self.w_rank_hat[i] * self.w_dist_hat(np.array(self.z[:, i]), lambF) for i in range(self.lamb)]).reshape(
self.lamb, 1)
weights_dist = w_tmp / sum(w_tmp) - 1. / self.lamb
# switching weights and learning rate
weights = weights_dist if ps_norm >= self.chiN else self.w_rank
eta_sigma = self.eta_move_sigma if ps_norm >= self.chiN else self.eta_stag_sigma(
lambF) if ps_norm >= 0.1 * self.chiN else self.eta_conv_sigma(lambF)
l_c = 1.0 if ps_norm >= self.chiN else 0.0
# update pc, m
wxm = (x - self.m) @ weights
self.pc = (1. - self.cc) * self.pc + np.sqrt(self.cc * (2. - self.cc) * self.mueff) * wxm / self.sigma
self.m += self.eta_m * wxm
# calculate s, t
# step1
normv4 = normv2 ** 2
exY = np.append(y, self.pc / self.D, axis=1) # dim x lamb+1
yy = exY * exY # dim x lamb+1
ip_yvbar = vbar.T @ exY
        yvbar = exY * vbar  # dim x lamb+1. vbar is multiplied into each column of exY
gammav = 1. + normv2
vbarbar = vbar * vbar
alphavd = np.min(
[1, np.sqrt(normv4 + (2 * gammav - np.sqrt(gammav)) / np.max(vbarbar)) / (2 + normv2)]) # scalar
t = exY * ip_yvbar - vbar * (ip_yvbar ** 2 + gammav) / 2 # dim x lamb+1
b = -(1 - alphavd ** 2) * normv4 / gammav + 2 * alphavd ** 2
H = np.ones([self.dim, 1]) * 2 - (b + 2 * alphavd ** 2) * vbarbar # dim x 1
invH = H ** (-1)
s_step1 = yy - normv2 / gammav * (yvbar * ip_yvbar) - np.ones([self.dim, self.lamb + 1]) # dim x lamb+1
ip_vbart = vbar.T @ t # 1 x lamb+1
s_step2 = s_step1 - alphavd / gammav * ((2 + normv2) * (t * vbar) - normv2 * vbarbar @ ip_vbart) # dim x lamb+1
invHvbarbar = invH * vbarbar
ip_s_step2invHvbarbar = invHvbarbar.T @ s_step2 # 1 x lamb+1
s = (s_step2 * invH) - b / (
1 + b * vbarbar.T @ invHvbarbar) * invHvbarbar @ ip_s_step2invHvbarbar # dim x lamb+1
ip_svbarbar = vbarbar.T @ s # 1 x lamb+1
t = t - alphavd * ((2 + normv2) * (s * vbar) - vbar @ ip_svbarbar) # dim x lamb+1
# update v, D
exw = np.append(self.eta_B(lambF) * weights, np.array([l_c * self.c1(lambF)]).reshape(1, 1),
axis=0) # lamb+1 x 1
oldv = copy.deepcopy(self.v)
self.v = self.v + (t @ exw) / normv
oldD = copy.deepcopy(self.D)
self.D = self.D + (s @ exw) * self.D
# calculate detAold, detA
nthrootdetAold = np.exp(np.sum(np.log(oldD)) / self.dim + np.log(1 + oldv.T @ oldv) / (2 * self.dim))[0][0]
nthrootdetA = np.exp(np.sum(np.log(self.D)) / self.dim + np.log(1 + self.v.T @ self.v) / (2 * self.dim))[0][0]
        # update sigma, D
G_s = np.sum((self.z * self.z - np.ones([self.dim, self.lamb])) @ weights) / self.dim
l_s = 1.0 if ps_norm >= self.chiN and G_s < 0 else 0.0
self.sigma = self.sigma * np.exp((1 - l_s) * eta_sigma / 2 * G_s) * nthrootdetA / nthrootdetAold
self.D = self.D / nthrootdetA
return xs_no_sort, evals_no_sort, violations
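# --- Illustrative usage sketch (an addition, not part of the original module) ---
# Minimises a simple sphere function. The objective receives a (dim, 1) column
# vector, matching the reshape performed in one_iteration(), and the population
# size must be even because half of the sampled population is mirrored.
if __name__ == '__main__':
    dim = 10
    sphere = lambda x: float(np.sum(x ** 2))
    m0 = np.random.randn(dim, 1)            # initial mean as a column vector
    optimizer = CRFMNES(dim, sphere, m0, sigma=1.0, lamb=16, seed=0)
    x_best, f_best = optimizer.optimize(100)
    print('best value found:', f_best)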
| 46.46114
| 138
| 0.568752
|
ca73a40e19bad146cd257494d537b1a2c30875b0
| 75
|
py
|
Python
|
Glyph Nanny.roboFontExt/lib/glyphNannyToggleObserverVisibility.py
|
miguelsousa/glyph-nanny
|
9115cf2fbeb19043f2f1114bbdf1fc7415d92211
|
[
"MIT"
] | null | null | null |
Glyph Nanny.roboFontExt/lib/glyphNannyToggleObserverVisibility.py
|
miguelsousa/glyph-nanny
|
9115cf2fbeb19043f2f1114bbdf1fc7415d92211
|
[
"MIT"
] | null | null | null |
Glyph Nanny.roboFontExt/lib/glyphNannyToggleObserverVisibility.py
|
miguelsousa/glyph-nanny
|
9115cf2fbeb19043f2f1114bbdf1fc7415d92211
|
[
"MIT"
] | null | null | null |
from glyphNanny import toggleObserverVisibility
toggleObserverVisibility()
| 25
| 47
| 0.906667
|
9a09ac1c2fb8ac46523abde2f593c93e4109c63a
| 1,847
|
py
|
Python
|
mopidy_iris/__init__.py
|
diabl0w/Iris
|
bf6b80bfb0bec0c8f920b7d795abe0743b9ede13
|
[
"Apache-2.0"
] | null | null | null |
mopidy_iris/__init__.py
|
diabl0w/Iris
|
bf6b80bfb0bec0c8f920b7d795abe0743b9ede13
|
[
"Apache-2.0"
] | null | null | null |
mopidy_iris/__init__.py
|
diabl0w/Iris
|
bf6b80bfb0bec0c8f920b7d795abe0743b9ede13
|
[
"Apache-2.0"
] | null | null | null |
import logging
import pathlib
from mopidy import config, ext
__version__ = "3.54.1"
logger = logging.getLogger(__name__)
##
# Core extension class
#
# Loads config and gets the party started. Initiates any additional frontends, etc.
##
class Extension(ext.Extension):
dist_name = "Mopidy-Iris"
ext_name = "iris"
version = __version__
def get_default_config(self):
return config.read(pathlib.Path(__file__).parent / "ext.conf")
def get_config_schema(self):
schema = config.ConfigSchema(self.ext_name)
schema["enabled"] = config.Boolean()
schema["country"] = config.String()
schema["locale"] = config.String()
schema["spotify_authorization_url"] = config.String()
schema["lastfm_authorization_url"] = config.String()
schema["genius_authorization_url"] = config.String()
schema["data_dir"] = config.String() # Deprecated
return schema
def setup(self, registry):
from .frontend import IrisFrontend
# Add web extension
registry.add(
"http:app", {"name": self.ext_name, "factory": iris_factory}
)
# Add our frontend
registry.add("frontend", IrisFrontend)
##
# Frontend factory
##
def iris_factory(config, core):
from tornado.web import StaticFileHandler
from .handlers import HttpHandler, ReactRouterHandler, WebsocketHandler
path = pathlib.Path(__file__).parent / "static"
return [
(r"/http/([^/]*)", HttpHandler, {"core": core, "config": config}),
(r"/ws/?", WebsocketHandler, {"core": core, "config": config}),
(r"/assets/(.*)", StaticFileHandler, {"path": path / "assets"}),
(r"/((.*)(?:css|js|json|map)$)", StaticFileHandler, {"path": path}),
(r"/(.*)", ReactRouterHandler, {"path": path / "index.html"}),
]
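# --- Illustrative mopidy.conf section (a sketch; every value below is a
# --- placeholder, and the keys mirror get_config_schema() above):
#
# [iris]
# enabled = true
# country = NZ
# locale = en_NZ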
| 28.859375
| 83
| 0.63346
|
f6a9acb8e422dab30c389f592770d5a45563366b
| 3,934
|
py
|
Python
|
venv/lib/python3.7/site-packages/convertdate/positivist.py
|
vchiapaikeo/prophet
|
e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/convertdate/positivist.py
|
vchiapaikeo/prophet
|
e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/convertdate/positivist.py
|
vchiapaikeo/prophet
|
e8c250ca7bfffc280baa7dabc80a2c2d1f72c6a7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# This file is part of convertdate.
# http://github.com/fitnr/convertdate
# Licensed under the MIT license:
# http://opensource.org/licenses/MIT
# Copyright (c) 2016, fitnr <fitnr@fakeisthenewreal>
from calendar import isleap
from . import gregorian
from .utils import floor
from .data import positivist as data
'''
Convert between Gregorian/Julian Day and Comte's Positivist calendar.
The Positivist calendar has 13 months and one or two festival days.
Festival days are given as the fourteenth month.
The Gregorian date 1789-01-01 is Positivist 0001-01-01.
'''
# Positivist calendar has 13 28-day months plus one festival day (two in leap years)
EPOCH = 2374479.5
YEAR_EPOCH = 1789
DAYS_IN_YEAR = 365
MONTHS = (
'Moses', 'Homer', 'Aristotle', 'Archimedes',
'Caesar', 'Saint Paul', 'Charlemagne', 'Dante',
'Gutenberg', 'Shakespeare', 'Descartes', 'Frederic',
'Bichat', ''
)
def legal_date(year, month, day):
'''Checks if a given date is a legal positivist date'''
try:
assert year >= 1
assert 0 < month <= 14
assert 0 < day <= 28
if month == 14:
if isleap(year + YEAR_EPOCH - 1):
assert day <= 2
else:
assert day == 1
except AssertionError:
raise ValueError("Invalid Positivist date: ({}, {}, {})".format(year, month, day))
return True
def to_jd(year, month, day):
'''Convert a Positivist date to Julian day count.'''
legal_date(year, month, day)
gyear = year + YEAR_EPOCH - 1
return (
gregorian.EPOCH - 1 + (365 * (gyear - 1)) +
floor((gyear - 1) / 4) + (-floor((gyear - 1) / 100)) +
floor((gyear - 1) / 400) + (month - 1) * 28 + day
)
def from_jd(jd):
'''Convert a Julian day count to Positivist date.'''
try:
assert jd >= EPOCH
except AssertionError:
raise ValueError('Invalid Julian day')
depoch = floor(jd - 0.5) + 0.5 - gregorian.EPOCH
quadricent = floor(depoch / gregorian.INTERCALATION_CYCLE_DAYS)
dqc = depoch % gregorian.INTERCALATION_CYCLE_DAYS
cent = floor(dqc / gregorian.LEAP_SUPPRESSION_DAYS)
dcent = dqc % gregorian.LEAP_SUPPRESSION_DAYS
quad = floor(dcent / gregorian.LEAP_CYCLE_DAYS)
dquad = dcent % gregorian.LEAP_CYCLE_DAYS
yindex = floor(dquad / gregorian.YEAR_DAYS)
year = (
quadricent * gregorian.INTERCALATION_CYCLE_YEARS +
cent * gregorian.LEAP_SUPPRESSION_YEARS +
quad * gregorian.LEAP_CYCLE_YEARS + yindex
)
if yindex == 4:
yearday = 365
year = year - 1
else:
yearday = (
depoch -
quadricent * gregorian.INTERCALATION_CYCLE_DAYS -
cent * gregorian.LEAP_SUPPRESSION_DAYS -
quad * gregorian.LEAP_CYCLE_DAYS -
yindex * gregorian.YEAR_DAYS
)
month = floor(yearday / 28)
return (year - YEAR_EPOCH + 2, month + 1, int(yearday - (month * 28)) + 1)
def from_gregorian(year, month, day):
return from_jd(gregorian.to_jd(year, month, day))
def to_gregorian(year, month, day):
return gregorian.from_jd(to_jd(year, month, day))
def dayname(year, month, day):
'''
Give the name of the month and day for a given date.
Returns:
tuple month_name, day_name
'''
legal_date(year, month, day)
yearday = (month - 1) * 28 + day
if isleap(year + YEAR_EPOCH - 1):
dname = data.day_names_leap[yearday - 1]
else:
dname = data.day_names[yearday - 1]
return MONTHS[month - 1], dname
def weekday(day):
'''
Gives the weekday (0=Monday) of a positivist month and day.
Note that the festival month does not have a day.
'''
return (day % 7) - 1
def festival(month, day):
'''
Gives the festival day for a month and day.
Returns None if inapplicable.
'''
return data.festivals.get((month, day))
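# --- Illustrative round trip (a sketch, not part of the original module) ---
# Per the module docstring, Gregorian 1789-01-01 is Positivist 0001-01-01.
if __name__ == '__main__':
    print(from_gregorian(1789, 1, 1))   # expected: (1, 1, 1)
    print(to_gregorian(1, 1, 1))        # expected: (1789, 1, 1)
    print(dayname(1, 1, 1))             # ('Moses', <first day name from the data tables>)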
| 26.05298
| 90
| 0.624047
|
1ccc541a4d90272d13a56e006166a53c4a674eac
| 10,728
|
py
|
Python
|
3. Motion Estimation/Assignment_3_ME.py
|
IbrahimEl-Shal/Digital_Video_Processing
|
ce5649dba94ba5c50bc3fe6740d3059a99a6ea8f
|
[
"MIT"
] | 2
|
2021-03-08T01:59:33.000Z
|
2021-03-08T01:59:39.000Z
|
3. Motion Estimation/Assignment_3_ME.py
|
IbrahimEl-Shal/Digital_Video_Processing
|
ce5649dba94ba5c50bc3fe6740d3059a99a6ea8f
|
[
"MIT"
] | null | null | null |
3. Motion Estimation/Assignment_3_ME.py
|
IbrahimEl-Shal/Digital_Video_Processing
|
ce5649dba94ba5c50bc3fe6740d3059a99a6ea8f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 4 00:32:16 2019
@author: Ibrahim El-Shal
Assignment 3:
- Develop a Motion Estimation algorithm using Window Search to encode two images in YUV format.
- Use 8x8 block.
- Reconstruct the images using Motion Compensation.
- Compute PSNR between the Source Image and the Reconstructed Images.
- Compare the two algorithms
- Bonus: you may choose more than one matching criterion
- Bonus: you may choose more than these two algorithms
"""
# In[1]: Import Packages
import os
import sys
import cv2
import math
import time
import numpy as np
# In[1-2]:
GRID_SIZE = 8
OVERLAPPED_WIDTH = 10
OVERLAPPED_HEIGHT = 10
# In[2]: Functions of Image
def ReadFrames(FrameNumber):
return(cv2.imread("frames/frame%d.jpg"%FrameNumber))
def RGB2YUV(RGBImage):
return(cv2.cvtColor(RGBImage, cv2.COLOR_BGR2YUV))
def YUV2RGB(YUVImage):
return(cv2.cvtColor(YUVImage,cv2.COLOR_YUV2BGR))
def Split_Channels(img):
return (cv2.split((img)))
def Create_Dir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
def Save_Image(Image, FileName):
    # cv2.imwrite expects (filename, image array)
    return(cv2.imwrite(FileName, Image))
def Get_PSNR(arr):
mse = (arr ** 2).mean()
psnr = 10 * math.log10((255 ** 2) / mse)
return psnr
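# Worked example (a sketch): for a unit-error array the MSE is 1, so
# Get_PSNR(np.ones((8, 8))) == 10 * log10(255**2 / 1) ~= 48.13 dB.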
def psnr(original_picture, compressed_picture):
    # peak signal-to-noise ratio between a reference image and its reconstruction
    mse = 0
    # mean squared error
    for i in range(len(original_picture)):
        for j in range(len(original_picture[i])):
            diff = original_picture[i][j] - compressed_picture[i][j]
            mse = mse + diff * diff
    mse = mse / (len(original_picture) * len(original_picture[i]))
    mx_value = 0
    for lst in original_picture:
        value = max(lst)
        if value > mx_value:
            mx_value = value
    psnr_ = 10 * math.log(mx_value * mx_value / mse, 10)
    return psnr_
# In[3]: Convert Video Into frames
def Video2Frames(VideoName):
cap = cv2.VideoCapture(VideoName)
Create_Dir('./frames/')
frame_counter = 0
if not cap.isOpened():
print('{} not opened'.format(VideoName))
sys.exit(1)
while(1):
return_flag, frame = cap.read()
if not return_flag:
print('Video Reach End')
break
#Start
cv2.imwrite('./frames/' + 'frame%d.jpg' % frame_counter, frame)
frame_counter += 1
#End
cap.release()
return(1)
# In[4]: Get the needed search area
def Search_Range(w_min, h_min, w_max, h_max, curr_w, curr_h, w_size, h_size):
start_w = curr_w - w_size if curr_w - w_size > w_min else w_min
end_w = curr_w + w_size if curr_w + w_size < w_max else curr_w
start_h = curr_h - h_size if curr_h - h_size > h_min else h_min
end_h = curr_h + h_size if curr_h + h_size < h_max else curr_h
return (start_w, start_h, end_w, end_h)
# In[5]: Get the needed 8x8 blocks
def Needed_Blocks(Frame):
img_blocks = []
img_blocks_idx = []
# shape of image
height, width = Frame.shape
for h_idx in range(0, height, GRID_SIZE):
micro_block_per_row = []
micro_block_idx_per_row = []
for w_idx in range(0, width, GRID_SIZE):
micro_block_per_row.append(Frame[h_idx: h_idx + GRID_SIZE, w_idx: w_idx + GRID_SIZE])
micro_block_idx_per_row.append((w_idx, h_idx))
img_blocks_idx.append(micro_block_idx_per_row)
img_blocks.append(micro_block_per_row)
return(img_blocks_idx, img_blocks)
# In[6]: Get the motion vector of each picked block for comparison with others
def MotionVector(Current_Block, Next_Frame, x_micro, y_micro, search_area):
mv = (0, 0)
min_value = np.inf
start_w, start_h, end_w, end_h = search_area
for y in range(start_h, end_h + 1):
for x in range(start_w, end_w + 1):
# search range
window_block = Next_Frame[y:y + GRID_SIZE, x:x + GRID_SIZE]
value = np.sum(np.abs(Current_Block - window_block))
if value < min_value:
mv = (x - x_micro, y - y_micro)
min_value = value
return(mv)
# In[7]:
def Block_Matching(curr_frame, next_frame):
height, width = curr_frame.shape
block_idx_list, block_list = Needed_Blocks(curr_frame)
frame_motion_vector = [[0 for j in range(len(block_idx_list[0]))] for i in range(len(block_list))]
for h in range(len(block_idx_list)):
for w in range(len(block_list[0])):
# search range
micro_x, micro_y = block_idx_list[h][w]
Grid_Block = block_list[h][w]
search_range = Search_Range(0, 0, width, height, micro_x, micro_y, GRID_SIZE, GRID_SIZE)
frame_motion_vector[h][w] = MotionVector(Grid_Block,next_frame,
micro_x, micro_y, search_range)
return frame_motion_vector
# In[8]:
def TSS_Block_Matching(curr_frame, next_frame):
TSS_GRID_SIZE = GRID_SIZE
height, width = curr_frame.shape
block_idx_list, block_list = Needed_Blocks(curr_frame)
frame_motion_vector = [[(0,0) for j in range(len(block_idx_list[0]))] for i in range(len(block_list))]
for h in range(len(block_idx_list)-1):
for w in range(len(block_list[0])-1):
# search range
micro_x, micro_y = block_idx_list[h][w]
Grid_Block = block_list[h][w]
TSS_GRID_SIZE = GRID_SIZE
for i in range(3):
TSS_GRID_SIZE = TSS_GRID_SIZE // 2
search_range = Search_Range(0, 0, width, height, micro_x, micro_y,
TSS_GRID_SIZE, TSS_GRID_SIZE)
frame_motion_vector[h][w] = MotionVector(Grid_Block,next_frame,
micro_x, micro_y, search_range)
micro_x, micro_y = frame_motion_vector[h][w]
return frame_motion_vector
# In[8]:
def Overlapped_Motion_Vector(Current_frame, motion_vector):
height, width = Current_frame.shape
Current_frame = Current_frame.astype(np.uint32)
overlapped_range = [[[] for j in range(len(motion_vector[i]))] for i in range(len(motion_vector))]
overlapped_width = int((OVERLAPPED_WIDTH - GRID_SIZE) / 2)
overlapped_height = int((OVERLAPPED_HEIGHT - GRID_SIZE) / 2)
overlapped_motion_vector = [[[] for j in range(width)] for i in range(height)]
for h in range(0, int(height / GRID_SIZE)):
for w in range(0, int(width / GRID_SIZE)):
temp_w = w * GRID_SIZE
temp_h = h * GRID_SIZE
s_x = temp_w - overlapped_width if temp_w - overlapped_width >= 0 else temp_w
s_y = temp_h - overlapped_height if temp_h - overlapped_height >= 0 else temp_h
e_x = (w + 1) * GRID_SIZE
e_x = e_x + overlapped_width if e_x + overlapped_width < width else e_x
e_y = (h + 1) * GRID_SIZE
e_y = e_y + overlapped_height if e_y + overlapped_height < height else e_y
overlapped_range[h][w] = (motion_vector[h][w], [[s_x, s_y], [e_x, e_y]])
for y in range(s_y, e_y):
for x in range(s_x, e_x):
overlapped_motion_vector[y][x].append(motion_vector[h][w])
return(overlapped_motion_vector)
# In[9]:
#Function to reconstruct a frame from a reference frame given the motion vectors in a macroblock
#Inputs: Reference Frame, Macroblocks containing motion vectors
#Outputs:reconstructed_frame
def Create_Compressed_Image(Curr_frame, Post_frame, overlapped_MV):
height, width = Curr_frame.shape
Post_frame = Post_frame.astype(np.uint32)
interpolated_frame = [[0 for j in range(width)] for i in range(height)]
for y in range(height):
for x in range(width):
sum = 0
for mv in overlapped_MV[y][x]:
prev_y = y + mv[1]
if prev_y >= height or prev_y < 0:
prev_y = 0 if prev_y < 0 else height - 1
prev_x = x + mv[0]
if prev_x >= width or prev_x < 0:
prev_x = 0 if prev_x < 0 else width - 1
next_y = y - mv[1]
if next_y >= height or next_y < 0:
next_y = 0 if next_y < 0 else height - 1
next_x = x - mv[0]
if next_x >= width or next_x < 0:
next_x = 0 if next_x < 0 else width - 1
sum += Curr_frame[prev_y][prev_x] + Post_frame[next_y, next_x]
l = len(overlapped_MV[y][x]) * 2
res = sum / l
res = np.array(res).T
interpolated_frame[y][x] = res.astype(np.uint8)
Final_Image = np.array(interpolated_frame)
return(Final_Image)
# In[10]:
def Window_Full_Search():
current_frame = ReadFrames(0)
next_frame = ReadFrames(1)
#Convert to YUV Image
current_yuv = RGB2YUV(current_frame)
next_yuv = RGB2YUV(next_frame)
###Get Channels
curr_Y, curr_U, curr_V = Split_Channels(current_yuv)
next_Y, next_U, next_V = Split_Channels(next_yuv)
Mv = Block_Matching(curr_Y,next_Y)
Overlapped_Mv = Overlapped_Motion_Vector(curr_Y, Mv)
Img = Create_Compressed_Image(curr_Y, next_Y, Overlapped_Mv)
return(Img)
def TSS_Search():
current_frame = ReadFrames(0)
next_frame = ReadFrames(1)
#Convert to YUV Image
current_yuv = RGB2YUV(current_frame)
next_yuv = RGB2YUV(next_frame)
###Get Channels
curr_Y, curr_U, curr_V = Split_Channels(current_yuv)
next_Y, next_U, next_V = Split_Channels(next_yuv)
Save_Image(curr_Y,"Original Img.jpg")
Mv = TSS_Block_Matching(curr_Y,next_Y)
Overlapped_Mv = Overlapped_Motion_Vector(curr_Y, Mv)
Img = Create_Compressed_Image(curr_Y, next_Y, Overlapped_Mv)
return(Img)
# In[11]:
def main():
#Video2Frames('./video.mp4')
start = time.time()
WinImg = Window_Full_Search()
end = time.time()
res_psnr = Get_PSNR(WinImg)
print('PSNR at Window Matching:',res_psnr)
print('Window Matching Running Time:',(end - start))
start = time.time()
TssImg = TSS_Search()
end = time.time()
    res_psnr = Get_PSNR(TssImg)
print('\nPSNR at TSS:',res_psnr)
print('TSS Running Time:',(end - start))
return(WinImg,TssImg)
# In[11]:
## call the main function
if __name__ == '__main__':
WinImg,TssImg = main()
Save_Image(WinImg,"Img of Window.jpg")
    Save_Image(TssImg,"Img of Three Step.jpg")
| 31.276968
| 117
| 0.617263
|