Dataset columns (each row below lists its values in this order):

| Column | Type | Length / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
e213749b77d191808c8d741cf75cf8e041640c18
| 1,336
|
py
|
Python
|
config/schema.py
|
creimers/knowd-backend
|
189a8bf28ae9b1e22fd2ac9542d5130fa8b550ca
|
[
"MIT"
] | null | null | null |
config/schema.py
|
creimers/knowd-backend
|
189a8bf28ae9b1e22fd2ac9542d5130fa8b550ca
|
[
"MIT"
] | 2
|
2020-05-05T20:37:52.000Z
|
2020-05-05T21:01:49.000Z
|
config/schema.py
|
creimers/knowd-backend
|
189a8bf28ae9b1e22fd2ac9542d5130fa8b550ca
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
import graphene
import graphql_jwt
from graphene_django.types import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from apps.custom_user.mutations import (
Register,
ConfirmEmail,
ResetPassword,
ResetPasswordConfirm,
)
USER = get_user_model()
class UserType(DjangoObjectType):
class Meta:
model = USER
exclude = ("password", "is_superuser")
class ViewerType(graphene.ObjectType):
user = graphene.Field(UserType)
def resolve_user(self, info, **kwargs):
return info.context.user
class RootQuery(graphene.ObjectType):
viewer = graphene.Field(ViewerType)
def resolve_viewer(self, info, **kwargs):
if info.context.user.is_authenticated:
return info.context.user
return None
class Mutation(graphene.ObjectType):
token_auth = graphql_jwt.ObtainJSONWebToken.Field()
verify_token = graphql_jwt.Verify.Field()
refresh_token = graphql_jwt.Refresh.Field()
revoke_token = graphql_jwt.Revoke.Field()
# account
register = Register.Field()
confirm_email = ConfirmEmail.Field()
reset_password = ResetPassword.Field()
reset_password_confirm = ResetPasswordConfirm.Field()
schema = graphene.Schema(query=RootQuery, mutation=Mutation)
| 24.740741
| 62
| 0.737275
|
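The schema in this first row wires `graphql_jwt` token handling and the custom-user mutations into a single endpoint. As a rough illustration of what it exposes, here are two operations a client could send; this is a hedged sketch: graphene camelCases field names, and the `tokenAuth` argument names depend on the custom user model's `USERNAME_FIELD`, which is not shown in this row.

```python
# Hypothetical client-side operations against the schema above (names are
# assumptions, not taken from the repository; adjust to the actual user model).
VIEWER_QUERY = """
{
  viewer {
    user {
      id
    }
  }
}
"""

TOKEN_AUTH_MUTATION = """
mutation TokenAuth($username: String!, $password: String!) {
  tokenAuth(username: $username, password: $password) {
    token
  }
}
"""
```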
18baa1bd20f36df40379f243a933db3fd91aec9c
| 1,672
|
py
|
Python
|
openaerostruct/structures/non_intersecting_thickness.py
|
carlosferpereira/OpenAeroStruct
|
35e1ff8aac5c67e40b1925829cfbc203ba1b2f2d
|
[
"Apache-2.0"
] | null | null | null |
openaerostruct/structures/non_intersecting_thickness.py
|
carlosferpereira/OpenAeroStruct
|
35e1ff8aac5c67e40b1925829cfbc203ba1b2f2d
|
[
"Apache-2.0"
] | null | null | null |
openaerostruct/structures/non_intersecting_thickness.py
|
carlosferpereira/OpenAeroStruct
|
35e1ff8aac5c67e40b1925829cfbc203ba1b2f2d
|
[
"Apache-2.0"
] | 1
|
2021-04-09T16:45:27.000Z
|
2021-04-09T16:45:27.000Z
|
import numpy as np
import openmdao.api as om
class NonIntersectingThickness(om.ExplicitComponent):
"""
Create a constraint so the thickness of the spar does not intersect
itself in the center of the spar. Basically, the thickness must be less
than or equal to the radius.
Parameters
----------
thickness[ny-1] : numpy array
Thickness of each element of the FEM spar.
radius[ny-1] : numpy array
Radius of each element of the FEM spar.
Returns
-------
thickness_intersects[ny-1] : numpy array
If all the values are negative, each element does not intersect itself.
If a value is positive, then the thickness within the hollow spar
intersects itself and presents an impossible design.
Add a constraint as
`OASProblem.add_constraint('thickness_intersects', upper=0.)`
"""
def initialize(self):
self.options.declare('surface', types=dict)
def setup(self):
self.surface = surface = self.options['surface']
self.ny = surface['mesh'].shape[1]
self.add_input('thickness', val=np.zeros((self.ny-1)), units='m')
self.add_input('radius', val=np.zeros((self.ny-1)), units='m')
self.add_output('thickness_intersects', val=np.zeros((self.ny-1)), units='m')
arange = np.arange(self.ny-1)
self.declare_partials('thickness_intersects', 'thickness', rows=arange, cols=arange, val=1.)
self.declare_partials('thickness_intersects', 'radius', rows=arange, cols=arange, val=-1.)
def compute(self, inputs, outputs):
outputs['thickness_intersects'] = inputs['thickness'] - inputs['radius']
| 35.574468
| 100
| 0.662679
|
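The component above reduces to an elementwise `thickness - radius` check, with constant partials of +1 and -1, which is why the Jacobian is declared with fixed values. A standalone numpy sketch of the same quantity, using made-up spar numbers (not from any OpenAeroStruct case):

```python
import numpy as np

# Hypothetical per-element spar data (ny - 1 = 4 elements), in metres.
thickness = np.array([0.004, 0.006, 0.010, 0.012])
radius = np.array([0.050, 0.040, 0.012, 0.010])

# Same output the component computes: feasible elements are <= 0.
thickness_intersects = thickness - radius
print(thickness_intersects)                        # [-0.046 -0.034 -0.002  0.002]
print(bool(np.all(thickness_intersects <= 0.0)))   # False: the last element violates the constraint
```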
b897d387a7fd88708f4561b2c3d036a725b273f0
| 1,648
|
py
|
Python
|
Scalable-NLP-with-Apache-Spark/Solutions/Includes/Classroom-Setup.py
|
databricks-academy/natural-language-processing
|
e4cae957cf0513c2d23eee658961a422cd38ed1c
|
[
"CC0-1.0"
] | null | null | null |
Scalable-NLP-with-Apache-Spark/Solutions/Includes/Classroom-Setup.py
|
databricks-academy/natural-language-processing
|
e4cae957cf0513c2d23eee658961a422cd38ed1c
|
[
"CC0-1.0"
] | null | null | null |
Scalable-NLP-with-Apache-Spark/Solutions/Includes/Classroom-Setup.py
|
databricks-academy/natural-language-processing
|
e4cae957cf0513c2d23eee658961a422cd38ed1c
|
[
"CC0-1.0"
] | 1
|
2022-02-22T13:40:19.000Z
|
2022-02-22T13:40:19.000Z
|
# Databricks notebook source
import sys
# Suggested fix from Jason Kim & Ka-Hing Cheung
# https://databricks.atlassian.net/browse/ES-176458
wsfsPaths = list(filter(lambda p : p.startswith("/Workspace"), sys.path))
defaultPaths = list(filter(lambda p : not p.startswith("/Workspace"), sys.path))
sys.path = defaultPaths + wsfsPaths
spark.conf.set("com.databricks.training.module-name", "nlp")
# filter out warnings from python
# issue: https://github.com/RaRe-Technologies/smart_open/issues/319
import warnings
warnings.filterwarnings("ignore")
displayHTML("Preparing the learning environment...")
# COMMAND ----------
# MAGIC %run "./Class-Utility-Methods"
# COMMAND ----------
# MAGIC %run "./Dataset-Mounts"
# COMMAND ----------
def init_mlflow_as_job():
import mlflow
job_experiment_id = sc._jvm.scala.collection.JavaConversions.mapAsJavaMap(
dbutils.entry_point.getDbutils().notebook().getContext().tags()
)["jobId"]
if job_experiment_id:
mlflow.set_experiment(f"/Curriculum/Test Results/Experiments/{job_experiment_id}")
init_mlflow_as_job()
# COMMAND ----------
courseType = "il"
username = getUsername()
userhome = getUserhome()
workingDir = getWorkingDir(courseType).replace("_pil", "")
# COMMAND ----------
courseAdvertisements = dict()
courseAdvertisements["username"] = (
"v",
username,
"No additional information was provided.",
)
courseAdvertisements["userhome"] = (
"v",
userhome,
"No additional information was provided.",
)
courseAdvertisements["workingDir"] = (
"v",
workingDir,
"No additional information was provided.",
)
allDone(courseAdvertisements)
| 24.597015
| 86
| 0.707524
|
184c5c33efc5773d0cbede5980c691722e8e914f
| 66
|
py
|
Python
|
run_examples.py
|
akabos/NearPy
|
485ee37952fe68fd7fed85f4bf8f47e2076e9e36
|
[
"MIT"
] | 2
|
2015-06-04T03:18:13.000Z
|
2016-01-12T08:01:36.000Z
|
run_examples.py
|
akabos/NearPy
|
485ee37952fe68fd7fed85f4bf8f47e2076e9e36
|
[
"MIT"
] | null | null | null |
run_examples.py
|
akabos/NearPy
|
485ee37952fe68fd7fed85f4bf8f47e2076e9e36
|
[
"MIT"
] | null | null | null |
import unittest
from nearpy.examples import example2
example2()
| 11
| 36
| 0.818182
|
64b18ffabe63390ccaf825a15e918b1723b0d0bf
| 5,281
|
py
|
Python
|
pontoon/base/signals.py
|
julen/pontoon
|
872496bd2cd148a2e5a60d669d5be538166f75e7
|
[
"BSD-3-Clause"
] | null | null | null |
pontoon/base/signals.py
|
julen/pontoon
|
872496bd2cd148a2e5a60d669d5be538166f75e7
|
[
"BSD-3-Clause"
] | null | null | null |
pontoon/base/signals.py
|
julen/pontoon
|
872496bd2cd148a2e5a60d669d5be538166f75e7
|
[
"BSD-3-Clause"
] | null | null | null |
from guardian.models import GroupObjectPermission
from django.contrib.auth.models import User, Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import post_save, pre_save, post_delete, pre_delete
from django.dispatch import receiver
from pontoon.base import errors
from pontoon.base.models import (
Locale,
Project,
ProjectLocale,
TranslatedResource,
UserProfile,
)
@receiver(post_delete, sender=ProjectLocale)
def project_locale_removed(sender, **kwargs):
"""
When locale is removed from a project, delete TranslatedResources
and aggregate project and locale stats.
"""
project_locale = kwargs.get('instance', None)
if project_locale is not None:
project = project_locale.project
locale = project_locale.locale
TranslatedResource.objects.filter(resource__project=project, locale=locale).delete()
project.aggregate_stats()
locale.aggregate_stats()
@receiver(pre_delete, sender=Locale)
def locale_deleted(sender, **kwargs):
"""
Before locale is deleted, aggregate stats for all locale projects.
"""
locale = kwargs.get('instance', None)
if locale is not None:
for project in locale.project_set.all():
project.aggregate_stats()
@receiver(pre_delete, sender=Project)
def project_deleted(sender, **kwargs):
"""
Before project is deleted, aggregate stats for all project locales.
"""
project = kwargs.get('instance', None)
if project is not None:
for locale in project.locales.all():
locale.aggregate_stats()
def create_group(instance, group_name, perms, name_prefix):
"""
Create all objects related to a group of users, e.g. translators, managers.
"""
ct = ContentType.objects.get(app_label='base', model=instance.__class__.__name__.lower())
group, _ = Group.objects.get_or_create(name='{} {}'.format(name_prefix, group_name))
for perm_name in perms:
perm = Permission.objects.get(content_type=ct, codename=perm_name)
group.permissions.add(perm)
setattr(instance, '{}_group'.format(group_name), group)
def assign_group_permissions(instance, group_name, perms):
"""
Create group object permissions.
"""
ct = ContentType.objects.get(app_label='base', model=instance.__class__.__name__.lower())
for perm_name in perms:
perm = Permission.objects.get(content_type=ct, codename=perm_name)
group = getattr(instance, '{}_group'.format(group_name))
GroupObjectPermission.objects.get_or_create(object_pk=instance.pk,
content_type=ct,
group=group,
permission=perm)
@receiver(pre_save, sender=Locale)
def create_locale_permissions_groups(sender, **kwargs):
"""
Creates translators and managers groups for a given Locale.
"""
instance = kwargs['instance']
if kwargs['raw'] or instance.managers_group is not None:
return
try:
create_group(instance, 'translators', ['can_translate_locale'], '{} translators'.format(instance.code))
create_group(instance, 'managers', ['can_translate_locale', 'can_manage_locale'], '{} managers'.format(instance.code)) # noqa
except ObjectDoesNotExist as e:
errors.send_exception(e)
@receiver(pre_save, sender=ProjectLocale)
def create_project_locale_permissions_groups(sender, **kwargs):
"""
Creates translators group for a given ProjectLocale.
"""
instance = kwargs['instance']
if kwargs['raw'] or instance.translators_group is not None:
return
try:
create_group(instance, 'translators', ['can_translate_project_locale'], '{}/{} translators'.format(
instance.project.slug, instance.locale.code,
))
except ObjectDoesNotExist as e:
errors.send_exception(e)
@receiver(post_save, sender=Locale)
def assign_locale_group_permissions(sender, **kwargs):
"""
After creation of locale, we have to assign translation and management
permissions to groups of translators and managers assigned to locale.
"""
if kwargs['raw'] or not kwargs['created']:
return
instance = kwargs['instance']
try:
assign_group_permissions(instance, 'translators', ['can_translate_locale'])
assign_group_permissions(instance, 'managers', ['can_translate_locale', 'can_manage_locale'])
except ObjectDoesNotExist as e:
errors.send_exception(e)
@receiver(post_save, sender=ProjectLocale)
def assign_project_locale_group_permissions(sender, **kwargs):
"""
Assign permissions group to a given ProjectLocale.
"""
if kwargs['raw'] or not kwargs['created']:
return
instance = kwargs['instance']
try:
assign_group_permissions(instance, 'translators', ['can_translate_project_locale'])
except ObjectDoesNotExist as e:
errors.send_exception(e)
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
| 33.213836
| 134
| 0.689074
|
2a7f1fbd51b6af61a284e2968e3214332ae5067d
| 2,857
|
py
|
Python
|
tests/cli/web/test_capture.py
|
EasyPost/biggraphite
|
d4f8b0ed76605c41d982a572c7462dea5b79de70
|
[
"Apache-2.0"
] | 125
|
2016-05-02T17:10:32.000Z
|
2022-02-12T17:23:27.000Z
|
tests/cli/web/test_capture.py
|
EasyPost/biggraphite
|
d4f8b0ed76605c41d982a572c7462dea5b79de70
|
[
"Apache-2.0"
] | 515
|
2016-05-03T13:23:02.000Z
|
2022-03-31T23:21:38.000Z
|
tests/cli/web/test_capture.py
|
EasyPost/biggraphite
|
d4f8b0ed76605c41d982a572c7462dea5b79de70
|
[
"Apache-2.0"
] | 43
|
2016-05-03T17:46:48.000Z
|
2022-02-21T10:57:57.000Z
|
#!/usr/bin/env python
# Copyright 2018 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Capture."""
import logging
import sys
from unittest import TestCase
from biggraphite.cli.web.capture import Capture
class TestCapture(TestCase):
def test_capture_should_get_stdout_content_from_outside_context(self):
content = "foo"
with Capture() as capture:
sys.stdout.write(content)
self.assertEqual(capture.get_content(), content)
def test_capture_should_get_stdout_content_from_inside_context(self):
content = "foo"
with Capture() as capture:
sys.stdout.write(content)
self.assertEqual(capture.get_content(), content)
def test_capture_should_get_stderr_content_from_outside_context(self):
content = "foo"
with Capture() as capture:
sys.stderr.write(content)
self.assertEqual(capture.get_content(), content)
def test_capture_should_get_stderr_content_from_inside_context(self):
content = "foo"
with Capture() as capture:
sys.stderr.write(content)
self.assertEqual(capture.get_content(), content)
def test_capture_should_get_stdout_content_only_from_context(self):
unexpected_content = "foo"
expected_content = "bar"
sys.stdout.write(unexpected_content)
with Capture() as capture:
sys.stdout.write(expected_content)
self.assertEqual(capture.get_content(), expected_content)
def test_capture_should_get_logger_content_with_line_break(self):
content = "foo"
logger = logging.getLogger("test-logger")
with Capture() as capture:
logger.info(content)
self.assertEqual(capture.get_content(), content + "\n")
def test_capture_should_get_print_content(self):
content = "Hello"
with Capture() as capture:
print(content)
self.assertEqual(capture.get_content(), content + "\n")
def test_capture_should_handle_line_breaks(self):
content_line_1 = "Hello"
content_line_2 = "World"
expected_result = "%s\n%s\n" % (content_line_1, content_line_2)
with Capture() as capture:
print(content_line_1)
print(content_line_2)
self.assertEqual(capture.get_content(), expected_result)
| 36.628205
| 74
| 0.694435
|
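The tests above only exercise `Capture`'s observable behaviour: anything written to stdout, stderr, or a logger inside the `with` block comes back from `get_content()`. The following is a minimal stand-in written for illustration only, not biggraphite's actual implementation (the real class presumably also hooks logging, which this sketch approximates by pointing a `StreamHandler` at the same buffer):

```python
import io
import logging
import sys


class CaptureSketch:
    """Illustrative approximation of biggraphite's Capture context manager."""

    def __enter__(self):
        self._buffer = io.StringIO()
        self._old_out, self._old_err = sys.stdout, sys.stderr
        sys.stdout = sys.stderr = self._buffer
        # Route log records into the same buffer so logger output is captured too.
        self._handler = logging.StreamHandler(self._buffer)
        logging.getLogger().addHandler(self._handler)
        return self

    def __exit__(self, exc_type, exc, tb):
        logging.getLogger().removeHandler(self._handler)
        sys.stdout, sys.stderr = self._old_out, self._old_err
        return False

    def get_content(self):
        return self._buffer.getvalue()


with CaptureSketch() as capture:
    print("Hello")
print(capture.get_content())   # "Hello\n"
```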
b390f2da7094dd273de91f73944bec5b33564fdf
| 870
|
py
|
Python
|
stanCode_Projects/boggle_game_solver_recursion/largest_digit.py
|
b911533/sc-projects
|
5841706113161f6aa49b001a47335eca45d0ae2e
|
[
"MIT"
] | null | null | null |
stanCode_Projects/boggle_game_solver_recursion/largest_digit.py
|
b911533/sc-projects
|
5841706113161f6aa49b001a47335eca45d0ae2e
|
[
"MIT"
] | null | null | null |
stanCode_Projects/boggle_game_solver_recursion/largest_digit.py
|
b911533/sc-projects
|
5841706113161f6aa49b001a47335eca45d0ae2e
|
[
"MIT"
] | null | null | null |
"""
File: largest_digit.py
Name: Jim Chan
----------------------------------
This file recursively prints the biggest digit in
5 different integers, 12345, 281, 6, -111, -9453
If your implementation is correct, you should see
5, 8, 6, 1, 9 on Console.
"""
def main():
print(find_largest_digit(12345)) # 5
print(find_largest_digit(281)) # 8
print(find_largest_digit(6)) # 6
print(find_largest_digit(-111)) # 1
print(find_largest_digit(-9453)) # 9
def find_largest_digit(n):
"""
:param n: int.
:return: int. The largest digit found in n.
"""
highest = -float('inf')
if n < 0:
n = -n
return find_largest_digit_helper(n, highest)
def find_largest_digit_helper(n, highest):
if n < 1:
return highest
else:
last_num = n % 10
highest = max(highest, last_num)
return find_largest_digit_helper(n//10, highest)
if __name__ == '__main__':
main()
| 20.714286
| 50
| 0.644828
|
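For comparison with the recursive helper above, the same digit scan can be written as a loop; this iterative version is only an illustration and not part of the original assignment:

```python
def find_largest_digit_iterative(n):
    """Return the largest decimal digit of n, scanning with a loop instead of recursion."""
    n = abs(n)
    highest = 0          # every non-negative integer contains at least the digit 0
    while n > 0:
        n, last = divmod(n, 10)   # peel off the last digit, same as n % 10 and n // 10 above
        highest = max(highest, last)
    return highest


assert find_largest_digit_iterative(12345) == 5
assert find_largest_digit_iterative(-9453) == 9
```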
4034c62da57e762f8d6673ffbb0a84c272f28aa3
| 1,240
|
py
|
Python
|
altair/vegalite/tests/test_common.py
|
hydrosquall/altair
|
ded897b0967a88a467828b1e2c133bd92862de23
|
[
"BSD-3-Clause"
] | null | null | null |
altair/vegalite/tests/test_common.py
|
hydrosquall/altair
|
ded897b0967a88a467828b1e2c133bd92862de23
|
[
"BSD-3-Clause"
] | null | null | null |
altair/vegalite/tests/test_common.py
|
hydrosquall/altair
|
ded897b0967a88a467828b1e2c133bd92862de23
|
[
"BSD-3-Clause"
] | null | null | null |
"""Tests of functionality that should work in all vegalite versions"""
import pytest
from .. import v1, v2
@pytest.fixture
def basic_spec():
return {
'data': {'url': 'data.csv'},
'mark': 'line',
'encoding': {
'color': {'type': 'nominal', 'field': 'color'},
'x': {'type': 'quantitative', 'field': 'xval'},
'y': {'type': 'ordinal', 'field': 'yval'}
},
'height': 300,
'width': 400
}
@pytest.mark.parametrize('alt', [v1, v2])
def test_basic_chart_to_dict(alt, basic_spec):
chart = alt.Chart('data.csv').mark_line().encode(
alt.X('xval:Q'),
y = alt.Y('yval:O'),
color = 'color:N'
)
dct = chart.to_dict()
# schema should be in the top level
assert dct.pop('$schema').startswith('http')
# remainder of spec should match the basic spec
assert dct == basic_spec
@pytest.mark.parametrize('alt', [v1, v2])
def test_basic_chart_from_dict(alt, basic_spec):
chart = alt.Chart.from_dict(basic_spec)
dct = chart.to_dict()
# schema should be in the top level
assert dct.pop('$schema').startswith('http')
# remainder of spec should match the basic spec
assert dct == basic_spec
| 25.306122
| 70
| 0.58871
|
a1f6d9897310e5d5f8e648a7bf2b5a643c0fccb2
| 4,448
|
py
|
Python
|
adept_envs/components/robot/robot_test.py
|
isabella232/DBAP-simulation
|
bdba0b58c4a01e0742e97299ce3bd1587ad2aa25
|
[
"Apache-2.0"
] | 2
|
2021-08-09T23:45:17.000Z
|
2021-11-05T15:28:41.000Z
|
adept_envs/components/robot/robot_test.py
|
google-research/DBAP-simulation
|
bdba0b58c4a01e0742e97299ce3bd1587ad2aa25
|
[
"Apache-2.0"
] | 1
|
2022-01-16T14:03:20.000Z
|
2022-01-16T14:03:20.000Z
|
adept_envs/components/robot/robot_test.py
|
isabella232/DBAP-simulation
|
bdba0b58c4a01e0742e97299ce3bd1587ad2aa25
|
[
"Apache-2.0"
] | 2
|
2021-11-05T15:28:31.000Z
|
2022-01-16T12:00:17.000Z
|
"""
Copyright 2021 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Unit tests for RobotComponent and RobotGroupConfig."""
from typing import Any
from absl.testing import absltest
import numpy as np
from adept_envs.components.robot.robot import ControlMode, RobotComponent
from adept_envs.utils.testing.mock_sim_scene import MockSimScene
class RobotComponentTest(absltest.TestCase):
"""Unit test class for RobotComponent."""
def test_get_state(self):
"""Tests querying the state of multiple groups."""
sim_scene = MockSimScene(nq=10) # type: Any
sim_scene.data.qpos[:] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
sim_scene.data.qvel[:] = [11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
robot = RobotComponent(
sim_scene,
groups={
'a': {
'qpos_indices': [0, 1, 3, 5],
},
'b': {
'qpos_indices': [2, 6],
'qvel_indices': [7, 8, 9],
},
})
a_state, b_state = robot.get_state(['a', 'b'])
np.testing.assert_array_equal(a_state.qpos, [1, 2, 4, 6])
np.testing.assert_array_equal(a_state.qvel, [11, 12, 14, 16])
np.testing.assert_array_equal(b_state.qpos, [3, 7])
np.testing.assert_array_equal(b_state.qvel, [18, 19, 20])
def test_step(self):
"""Tests stepping with an action for multiple groups."""
sim_scene = MockSimScene(nq=10, ctrl_range=[-1, 1]) # type: Any
robot = RobotComponent(
sim_scene,
groups={
'a': {
'qpos_indices': [0, 1, 3, 5],
},
'b': {
'actuator_indices': [7, 8, 9],
},
})
robot.step({
'a': np.array([.2, .4, .6, .8]),
'b': np.array([.1, .3, .5])
})
np.testing.assert_allclose(sim_scene.data.ctrl,
[.2, .4, 0, .6, 0, .8, 0, .1, .3, .5])
self.assertEqual(sim_scene.advance.call_count, 1)
def test_step_denormalize(self):
"""Tests denormalizing the actions to the sim control range."""
sim_scene = MockSimScene(nq=5, ctrl_range=[0, 10]) # type: Any
robot = RobotComponent(
sim_scene, groups={'a': {
'qpos_indices': [0, 1, 2, 3, 4],
}})
robot.step({
'a': np.array([-1, 1, -0.5, 0.5, 0]),
})
np.testing.assert_allclose(sim_scene.data.ctrl, [0, 10, 2.5, 7.5, 5])
def test_step_position_control_bounds(self):
"""Tests action clamping when doing position control."""
sim_scene = MockSimScene(nq=5, ctrl_range=[-1, 1]) # type: Any
sim_scene.data.qpos[:] = [-0.4, -0.2, 0, 0.2, 0.4]
robot = RobotComponent(
sim_scene,
groups={
'a': {
'qpos_indices': [0, 1, 2, 3, 4],
'qpos_range': [(-0.5, 0.5)] * 5,
'qvel_range': [(-0.2, 0.2)] * 5,
}
})
robot.step({'a': np.array([-1, -1, 0.2, 1, 1])})
np.testing.assert_allclose(sim_scene.data.ctrl,
[-0.5, -0.4, 0.1, 0.4, 0.5])
def test_step_velocity_control_bounds(self):
"""Tests action clamping when doing velocity control."""
sim_scene = MockSimScene(nq=3, ctrl_range=[-10, 10]) # type: Any
robot = RobotComponent(
sim_scene,
groups={
'a': {
'control_mode': ControlMode.JOINT_VELOCITY,
'qpos_indices': [0, 1, 2],
'qvel_range': [(-2, 2), (-1, 5), (-7, -4)],
}
})
robot.step({'a': np.array([-0.5, 1, -1])})
np.testing.assert_allclose(sim_scene.data.ctrl, [-1, 5, -7])
if __name__ == '__main__':
absltest.main()
| 36.459016
| 77
| 0.529227
|
670a59845030135169434f76d06f791ea8665fb6
| 381
|
py
|
Python
|
fdw/wsgi.py
|
nautilebleu/django-fdw
|
e31690bb9a0dd27b5ea5cacbab51c2b087f63c18
|
[
"BSD-2-Clause"
] | 5
|
2015-03-20T09:27:18.000Z
|
2021-05-06T02:17:50.000Z
|
fdw/wsgi.py
|
nautilebleu/django-fdw
|
e31690bb9a0dd27b5ea5cacbab51c2b087f63c18
|
[
"BSD-2-Clause"
] | null | null | null |
fdw/wsgi.py
|
nautilebleu/django-fdw
|
e31690bb9a0dd27b5ea5cacbab51c2b087f63c18
|
[
"BSD-2-Clause"
] | null | null | null |
"""
WSGI config for fdw project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fdw.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 25.4
| 78
| 0.784777
|
141fa60e49bcea970a5f9cc8960a39a8127709cc
| 1,889
|
py
|
Python
|
tools/harness-automation/cases_R140/commissioner_8_1_6.py
|
AdityaHPatwardhan/openthread
|
a201e9d5d0273bb51fa20efc8758be20a725018e
|
[
"BSD-3-Clause"
] | 2,962
|
2016-05-11T15:06:06.000Z
|
2022-03-27T20:06:16.000Z
|
tools/harness-automation/cases_R140/commissioner_8_1_6.py
|
AdityaHPatwardhan/openthread
|
a201e9d5d0273bb51fa20efc8758be20a725018e
|
[
"BSD-3-Clause"
] | 5,899
|
2016-05-11T19:21:49.000Z
|
2022-03-31T18:17:20.000Z
|
tools/harness-automation/cases_R140/commissioner_8_1_6.py
|
AdityaHPatwardhan/openthread
|
a201e9d5d0273bb51fa20efc8758be20a725018e
|
[
"BSD-3-Clause"
] | 1,113
|
2016-05-11T15:37:42.000Z
|
2022-03-31T09:37:04.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class Commissioner_8_1_6(HarnessCase):
role = HarnessCase.ROLE_COMMISSIONER
case = '8 1 6'
golden_devices_required = 1
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| 41.065217
| 77
| 0.771837
|
c5cd57f9ffeb41ad7af77770fca2ea31325ec2c6
| 906
|
py
|
Python
|
savepointradio/core/querysets.py
|
RecursiveGreen/spradio-django2
|
ef7c6e535fdf4bf096b0a1669005600fa8e5ac92
|
[
"MIT"
] | null | null | null |
savepointradio/core/querysets.py
|
RecursiveGreen/spradio-django2
|
ef7c6e535fdf4bf096b0a1669005600fa8e5ac92
|
[
"MIT"
] | null | null | null |
savepointradio/core/querysets.py
|
RecursiveGreen/spradio-django2
|
ef7c6e535fdf4bf096b0a1669005600fa8e5ac92
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.utils import timezone
class EnabledQuerySet(models.QuerySet):
"""
Queryset to select all objects that are enabled or not.
"""
def enabled(self):
return self.filter(disabled=False)
def disabled(self):
return self.filter(disabled=True)
class PublishedQuerySet(models.QuerySet):
"""
Queryset to select all objects that have been published or not.
"""
def published(self):
results = self.filter(
models.Q(published_date__isnull=False) &
models.Q(published_date__lte=timezone.now())
)
return results
def unpublished(self):
results = self.filter(
models.Q(published_date__isnull=True) |
models.Q(published_date__gte=timezone.now())
)
return results
| 27.454545
| 67
| 0.59713
|
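These querysets become chainable manager methods via Django's `QuerySet.as_manager()`. A hedged sketch of the wiring, assuming a configured Django app; `Episode` is a hypothetical model, not one from the savepointradio project, and the import path is inferred from the file location shown above:

```python
from django.db import models

# Assumed import path based on core/querysets.py above.
from core.querysets import PublishedQuerySet


class Episode(models.Model):
    """Hypothetical model used only to illustrate the manager wiring."""

    title = models.CharField(max_length=200)
    published_date = models.DateTimeField(null=True, blank=True)

    objects = PublishedQuerySet.as_manager()

# Usage (chainable like any queryset):
#   Episode.objects.published()
#   Episode.objects.unpublished().filter(title__icontains="radio")
```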
f8f3a9eb4c0dafe4ca2d6c3addab9cc54492a44f
| 2,567
|
py
|
Python
|
Lib/objc/_SetupAssistant.py
|
snazari/Pyto
|
bcea7bbef35cab21ce73087b1a0c00a07d07ec72
|
[
"MIT"
] | 701
|
2018-10-22T11:54:09.000Z
|
2022-03-31T14:39:30.000Z
|
Lib/objc/_SetupAssistant.py
|
snazari/Pyto
|
bcea7bbef35cab21ce73087b1a0c00a07d07ec72
|
[
"MIT"
] | 229
|
2018-10-24T09:15:31.000Z
|
2021-12-24T16:51:37.000Z
|
Lib/objc/_SetupAssistant.py
|
snazari/Pyto
|
bcea7bbef35cab21ce73087b1a0c00a07d07ec72
|
[
"MIT"
] | 131
|
2018-11-25T18:33:03.000Z
|
2022-03-24T03:18:07.000Z
|
"""
Classes from the 'SetupAssistant' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
BYAnalyticsExpressRestore = _Class("BYAnalyticsExpressRestore")
BYAnalyticsEventRecommendedLocale = _Class("BYAnalyticsEventRecommendedLocale")
BYBuddyDaemonMigrationSourceClient = _Class("BYBuddyDaemonMigrationSourceClient")
BYAnalyticsManager = _Class("BYAnalyticsManager")
BYLicenseAgreement = _Class("BYLicenseAgreement")
BYSetupStateNotifier = _Class("BYSetupStateNotifier")
BYAnalyticsEvent = _Class("BYAnalyticsEvent")
BYNetworkMonitor = _Class("BYNetworkMonitor")
BYTelephonyStateNotifier = _Class("BYTelephonyStateNotifier")
BYBuddyDaemonProximitySourceClient = _Class("BYBuddyDaemonProximitySourceClient")
BYBuddyDaemonProximityTargetClient = _Class("BYBuddyDaemonProximityTargetClient")
BYAnalyticsLazyEvent = _Class("BYAnalyticsLazyEvent")
BYDeviceSetupSourceSession = _Class("BYDeviceSetupSourceSession")
BYRunState = _Class("BYRunState")
BYPaneAnalyticsManager = _Class("BYPaneAnalyticsManager")
BYSettingsManagerClient = _Class("BYSettingsManagerClient")
BYLocationController = _Class("BYLocationController")
BYDeviceConfiguration = _Class("BYDeviceConfiguration")
BFFSettingsManager = _Class("BFFSettingsManager")
BYDeviceMigrationManager = _Class("BYDeviceMigrationManager")
BYSourceDeviceMigration = _Class("BYSourceDeviceMigration")
BYAppleIDAccountsManager = _Class("BYAppleIDAccountsManager")
BYFlowSkipController = _Class("BYFlowSkipController")
BYChronicle = _Class("BYChronicle")
BYChronicleEntry = _Class("BYChronicleEntry")
BYPreferencesController = _Class("BYPreferencesController")
BYMigrationSourceController = _Class("BYMigrationSourceController")
BYBuddyDaemonGeneralClient = _Class("BYBuddyDaemonGeneralClient")
BYLocaleCountry = _Class("BYLocaleCountry")
BYSilentLoginUpgradeGuarantor = _Class("BYSilentLoginUpgradeGuarantor")
BYManagedAppleIDBootstrap = _Class("BYManagedAppleIDBootstrap")
BYDevice = _Class("BYDevice")
BYDeviceForTest = _Class("BYDeviceForTest")
BYLocaleDataSource = _Class("BYLocaleDataSource")
BYCapabilities = _Class("BYCapabilities")
BYXPCActivity = _Class("BYXPCActivity")
BYXPCActivityRegistrar = _Class("BYXPCActivityRegistrar")
BYSetupStateManager = _Class("BYSetupStateManager")
BYBuddyDaemonCloudSyncClient = _Class("BYBuddyDaemonCloudSyncClient")
BYSiriUtilities = _Class("BYSiriUtilities")
BYBackupMetadata = _Class("BYBackupMetadata")
| 42.081967
| 81
| 0.835216
|
a009a0ffab5f3e1f1d48fb55b4f1d86564bb5d8c
| 991
|
py
|
Python
|
sorting/selection_sort.py
|
okebinda/algorithms.python
|
b62d69a09fac4ca4e4b02ba99e4aae986e766e89
|
[
"MIT"
] | null | null | null |
sorting/selection_sort.py
|
okebinda/algorithms.python
|
b62d69a09fac4ca4e4b02ba99e4aae986e766e89
|
[
"MIT"
] | null | null | null |
sorting/selection_sort.py
|
okebinda/algorithms.python
|
b62d69a09fac4ca4e4b02ba99e4aae986e766e89
|
[
"MIT"
] | null | null | null |
"""Sorting Algorithm: Selection Sort"""
def selection_sort(a):
"""Selection Sort
Time complexity: O(n^2)
Space complexity: O(1)
:param a: A list to be sorted
:type a: list
:return: A new sorted list
:rtype: list
"""
b = [*a]
n = len(b)
for i in range(n):
min = i
for j in range(i+1, n):
if b[j] < b[min]:
min = j
b[i], b[min] = b[min], b[i]
return b
if __name__ == "__main__":
import unittest
from random import shuffle
class TestSelectionSort(unittest.TestCase):
def setUp(self):
self.ordered = [x for x in range(20)]
self.shuffled = [*self.ordered]
while self.ordered == self.shuffled:
shuffle(self.shuffled)
def test_selection_sort(self):
self.assertEqual(self.ordered, selection_sort(self.shuffled))
self.assertNotEqual(self.ordered, self.shuffled)
unittest.main()
| 21.085106
| 73
| 0.557013
|
d8feea50e76a4f5d528073470f376ec77eb11b58
| 6,496
|
py
|
Python
|
grappelli/templatetags/grp_tags.py
|
browniebroke/django-grappelli
|
0de7e9c7937b28b6f7181e26d9e4d21e59ff5681
|
[
"BSD-3-Clause"
] | 2,210
|
2015-01-01T09:44:32.000Z
|
2022-03-31T19:57:35.000Z
|
grappelli/templatetags/grp_tags.py
|
browniebroke/django-grappelli
|
0de7e9c7937b28b6f7181e26d9e4d21e59ff5681
|
[
"BSD-3-Clause"
] | 416
|
2015-01-02T11:39:49.000Z
|
2022-02-23T16:26:30.000Z
|
grappelli/templatetags/grp_tags.py
|
browniebroke/django-grappelli
|
0de7e9c7937b28b6f7181e26d9e4d21e59ff5681
|
[
"BSD-3-Clause"
] | 532
|
2015-01-06T09:38:25.000Z
|
2022-03-28T04:23:14.000Z
|
# coding: utf-8
import json
from functools import wraps
from django import template
from django.contrib.contenttypes.models import ContentType
from django.template.loader import get_template
from django.utils.formats import get_format
from django.utils.safestring import mark_safe
from django.utils.translation import get_language
from django.utils.translation import gettext as _
from grappelli.settings import (ADMIN_TITLE, ADMIN_URL, CLEAN_INPUT_TYPES,
SWITCH_USER, SWITCH_USER_ORIGINAL,
SWITCH_USER_TARGET)
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
from django.contrib.auth.models import User
register = template.Library()
# GENERIC OBJECTS
class do_get_generic_objects(template.Node):
def __init__(self):
pass
def render(self, context):
objects = {}
for c in ContentType.objects.all().order_by('id'):
objects[c.id] = {'pk': c.id, 'app': c.app_label, 'model': c.model}
return json.dumps(objects)
@register.tag
def get_content_types(parser, token):
"""
Returns a list of installed applications and models.
Needed for lookup of generic relationships.
"""
return do_get_generic_objects()
# ADMIN_TITLE
@register.simple_tag
def get_admin_title():
"""
Returns the Title for the Admin-Interface.
"""
return ADMIN_TITLE
# SITE_TITLE
@register.simple_tag
def get_site_title():
"""
Returns the Title for the Admin-Interface.
"""
return ADMIN_TITLE or _("Django site admin")
# RETURNS CURRENT LANGUAGE
@register.simple_tag
def get_lang():
return get_language()
# ADMIN_URL
@register.simple_tag
def get_admin_url():
"""
Returns the URL for the Admin-Interface.
"""
return ADMIN_URL
@register.simple_tag
def get_date_format():
return get_format('DATE_INPUT_FORMATS')[0]
@register.simple_tag
def get_time_format():
return get_format('TIME_INPUT_FORMATS')[0]
@register.simple_tag
def get_datetime_format():
return get_format('DATETIME_INPUT_FORMATS')[0]
@register.simple_tag
def grappelli_admin_title():
return ADMIN_TITLE
@register.simple_tag
def grappelli_clean_input_types():
return CLEAN_INPUT_TYPES
@register.filter
def classname(obj, arg=None):
classname = obj.__class__.__name__.lower()
if arg:
if arg.lower() == classname:
return True
return False
return classname
@register.filter
def classpath(obj):
module = obj.__module__
classname = obj.__class__.__name__
return "%s,%s" % (module, classname)
# FORMSETSORT FOR SORTABLE INLINES
@register.filter
def formsetsort(formset, arg):
"""
Takes a list of formset dicts, returns that list sorted by the sortable field.
"""
if arg:
sorted_list = []
unsorted_list = []
for item in formset:
position = item.form[arg].value()
if isinstance(position, int) and item.original: # normal view
sorted_list.append((position, item))
elif position and hasattr(item.form, 'cleaned_data'): # error validation
sorted_list.append((int(position), item))
else:
unsorted_list.append(item)
sorted_list.sort(key=lambda i: i[0])
sorted_list = [item[1] for item in sorted_list] + unsorted_list
else:
sorted_list = formset
return sorted_list
# RELATED LOOKUPS
def safe_json_else_list_tag(f):
"""
Decorator. Registers function as a simple_tag.
Try: Return value of the decorated function marked safe and json encoded.
Except: Return []
"""
@wraps(f)
def inner(model_admin):
try:
return mark_safe(json.dumps(f(model_admin)))
except:
return []
return register.simple_tag(inner)
@safe_json_else_list_tag
def get_related_lookup_fields_fk(model_admin):
return model_admin.related_lookup_fields.get("fk", [])
@safe_json_else_list_tag
def get_related_lookup_fields_m2m(model_admin):
return model_admin.related_lookup_fields.get("m2m", [])
@safe_json_else_list_tag
def get_related_lookup_fields_generic(model_admin):
return model_admin.related_lookup_fields.get("generic", [])
# AUTOCOMPLETES
@safe_json_else_list_tag
def get_autocomplete_lookup_fields_fk(model_admin):
return model_admin.autocomplete_lookup_fields.get("fk", [])
@safe_json_else_list_tag
def get_autocomplete_lookup_fields_m2m(model_admin):
return model_admin.autocomplete_lookup_fields.get("m2m", [])
@safe_json_else_list_tag
def get_autocomplete_lookup_fields_generic(model_admin):
return model_admin.autocomplete_lookup_fields.get("generic", [])
# SORTABLE EXCLUDES
@safe_json_else_list_tag
def get_sortable_excludes(model_admin):
return model_admin.sortable_excludes
@register.filter
def prettylabel(value):
return mark_safe(value.replace(":</label>", "</label>"))
# CUSTOM ADMIN LIST FILTER
# WITH TEMPLATE DEFINITION
@register.simple_tag
def admin_list_filter(cl, spec):
field_name = getattr(spec, "field", None)
parameter_name = getattr(spec, "parameter_name", None)
if field_name is not None:
field_name = spec.field.name
elif parameter_name is not None:
field_name = spec.parameter_name
try:
tpl = get_template(cl.model_admin.change_list_filter_template)
except: # noqa
tpl = get_template(spec.template)
return tpl.render({
'title': spec.title,
'choices': list(spec.choices(cl)),
'field_name': field_name,
'spec': spec,
})
@register.simple_tag(takes_context=True)
def switch_user_dropdown(context):
if SWITCH_USER:
tpl = get_template("admin/includes_grappelli/switch_user_dropdown.html")
request = context["request"]
session_user = request.session.get("original_user", {"id": request.user.id, "username": request.user.get_username()})
try:
original_user = User.objects.get(pk=session_user["id"], is_staff=True)
except User.DoesNotExist:
return ""
if SWITCH_USER_ORIGINAL(original_user):
object_list = [user for user in User.objects.filter(is_staff=True).exclude(pk=original_user.pk) if SWITCH_USER_TARGET(original_user, user)]
return tpl.render({
'request': request,
'object_list': object_list
})
return ""
| 26.299595
| 151
| 0.694889
|
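The `formsetsort` filter above is a decorate/sort/append pattern: items with a usable position are sorted by it, and the rest keep their original order at the end. A toy sketch with plain dicts standing in for formset items (hypothetical data, not from grappelli):

```python
items = [
    {"name": "c", "position": 2},
    {"name": "a", "position": 0},
    {"name": "x", "position": None},   # no sortable position: stays at the end
    {"name": "b", "position": 1},
]

decorated = [(item["position"], item) for item in items if item["position"] is not None]
leftover = [item for item in items if item["position"] is None]
decorated.sort(key=lambda pair: pair[0])   # same key as sorted_list.sort(key=lambda i: i[0])
ordered = [item for _, item in decorated] + leftover
print([item["name"] for item in ordered])   # ['a', 'b', 'c', 'x']
```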
d724d6681e94f02813aad7e7e6ee44cb22e662c7
| 86,765
|
py
|
Python
|
ibm_platform_services/iam_identity_v1.py
|
zachsirotto/platform-services-python-sdk
|
32a080b7a93567f9528867a31bd0b47423297bab
|
[
"Apache-2.0"
] | null | null | null |
ibm_platform_services/iam_identity_v1.py
|
zachsirotto/platform-services-python-sdk
|
32a080b7a93567f9528867a31bd0b47423297bab
|
[
"Apache-2.0"
] | null | null | null |
ibm_platform_services/iam_identity_v1.py
|
zachsirotto/platform-services-python-sdk
|
32a080b7a93567f9528867a31bd0b47423297bab
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# (C) Copyright IBM Corp. 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# IBM OpenAPI SDK Code Generator Version: 99-SNAPSHOT-8d569e8f-20201030-111043
"""
The IAM Identity Service API allows for the management of Identities (Service IDs,
ApiKeys).
"""
from enum import Enum
from typing import Dict, List
import json
from ibm_cloud_sdk_core import BaseService, DetailedResponse
from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator
from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment
from ibm_cloud_sdk_core.utils import convert_model
from .common import get_sdk_headers
##############################################################################
# Service
##############################################################################
class IamIdentityV1(BaseService):
"""The iam_identity V1 service."""
DEFAULT_SERVICE_URL = 'https://iam.cloud.ibm.com'
DEFAULT_SERVICE_NAME = 'iam_identity'
@classmethod
def new_instance(cls,
service_name: str = DEFAULT_SERVICE_NAME,
) -> 'IamIdentityV1':
"""
Return a new client for the iam_identity service using the specified
parameters and external configuration.
"""
authenticator = get_authenticator_from_environment(service_name)
service = cls(
authenticator
)
service.configure_service(service_name)
return service
def __init__(self,
authenticator: Authenticator = None,
) -> None:
"""
Construct a new client for the iam_identity service.
:param Authenticator authenticator: The authenticator specifies the authentication mechanism.
Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md
about initializing the authenticator of your choice.
"""
BaseService.__init__(self,
service_url=self.DEFAULT_SERVICE_URL,
authenticator=authenticator)
#########################
# Identity Operations
#########################
def list_api_keys(self,
*,
account_id: str = None,
iam_id: str = None,
pagesize: int = None,
pagetoken: str = None,
scope: str = None,
type: str = None,
sort: str = None,
order: str = None,
include_history: bool = None,
**kwargs
) -> DetailedResponse:
"""
Get API keys for a given service or user IAM ID and account ID.
Returns the list of API key details for a given service or user IAM ID and account
ID. Users can manage user API keys for themself, or service ID API keys for
service IDs that are bound to an entity they have access to. In case of service
IDs and their API keys, a user must be either an account owner, a IBM Cloud org
manager or IBM Cloud space developer in order to manage service IDs of the
entity.
:param str account_id: (optional) Account ID of the API keys(s) to query.
If a service IAM ID is specified in iam_id then account_id must match the
account of the IAM ID. If a user IAM ID is specified in iam_id then then
account_id must match the account of the Authorization token.
:param str iam_id: (optional) IAM ID of the API key(s) to be queried. The
IAM ID may be that of a user or a service. For a user IAM ID iam_id must
match the Authorization token.
:param int pagesize: (optional) Optional size of a single page. Default is
20 items per page. Valid range is 1 to 100.
:param str pagetoken: (optional) Optional Prev or Next page token returned
from a previous query execution. Default is start with first page.
:param str scope: (optional) Optional parameter to define the scope of the
queried API Keys. Can be 'entity' (default) or 'account'.
:param str type: (optional) Optional parameter to filter the type of the
queried API Keys. Can be 'user' or 'serviceid'.
:param str sort: (optional) Optional sort property, valid values are name,
description, created_at and created_by. If specified, the items are sorted
by the value of this property.
:param str order: (optional) Optional sort order, valid values are asc and
desc. Default: asc.
:param bool include_history: (optional) Defines if the entity history is
included in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ApiKeyList` object
"""
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_api_keys')
headers.update(sdk_headers)
params = {
'account_id': account_id,
'iam_id': iam_id,
'pagesize': pagesize,
'pagetoken': pagetoken,
'scope': scope,
'type': type,
'sort': sort,
'order': order,
'include_history': include_history
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/v1/apikeys'
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def create_api_key(self,
name: str,
iam_id: str,
*,
description: str = None,
account_id: str = None,
apikey: str = None,
store_value: bool = None,
entity_lock: str = None,
**kwargs
) -> DetailedResponse:
"""
Create an API key.
Creates an API key for a UserID or service ID. Users can manage user API keys for
themself, or service ID API keys for service IDs that are bound to an entity they
have access to.
:param str name: Name of the API key. The name is not checked for
uniqueness. Therefore multiple names with the same value can exist. Access
is done via the UUID of the API key.
:param str iam_id: The iam_id that this API key authenticates.
:param str description: (optional) The optional description of the API key.
The 'description' property is only available if a description was provided
during a create of an API key.
:param str account_id: (optional) The account ID of the API key.
:param str apikey: (optional) You can optionally passthrough the API key
value for this API key. If passed, NO validation of that apiKey value is
done, i.e. the value can be non-URL safe. If omitted, the API key
management will create an URL safe opaque API key value. The value of the
API key is checked for uniqueness. Please ensure enough variations when
passing in this value.
:param bool store_value: (optional) Send true or false to set whether the
API key value is retrievable in the future by using the Get details of an
API key request. If you create an API key for a user, you must specify
`false` or omit the value. We don't allow storing of API keys for users.
:param str entity_lock: (optional) Indicates if the API key is locked for
further write operations. False by default.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ApiKey` object
"""
if name is None:
raise ValueError('name must be provided')
if iam_id is None:
raise ValueError('iam_id must be provided')
headers = {
'Entity-Lock': entity_lock
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='create_api_key')
headers.update(sdk_headers)
data = {
'name': name,
'iam_id': iam_id,
'description': description,
'account_id': account_id,
'apikey': apikey,
'store_value': store_value
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/v1/apikeys'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
def get_api_keys_details(self,
*,
iam_api_key: str = None,
include_history: bool = None,
**kwargs
) -> DetailedResponse:
"""
Get details of an API key by its value.
Returns the details of an API key by its value. Users can manage user API keys for
themself, or service ID API keys for service IDs that are bound to an entity they
have access to.
:param str iam_api_key: (optional) API key value.
:param bool include_history: (optional) Defines if the entity history is
included in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ApiKey` object
"""
headers = {
'IAM-ApiKey': iam_api_key
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_api_keys_details')
headers.update(sdk_headers)
params = {
'include_history': include_history
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/v1/apikeys/details'
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def get_api_key(self,
id: str,
*,
include_history: bool = None,
**kwargs
) -> DetailedResponse:
"""
Get details of an API key.
Returns the details of an API key. Users can manage user API keys for themself, or
service ID API keys for service IDs that are bound to an entity they have access
to. In case of service IDs and their API keys, a user must be either an account
owner, a IBM Cloud org manager or IBM Cloud space developer in order to manage
service IDs of the entity.
:param str id: Unique ID of the API key.
:param bool include_history: (optional) Defines if the entity history is
included in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ApiKey` object
"""
if id is None:
raise ValueError('id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_api_key')
headers.update(sdk_headers)
params = {
'include_history': include_history
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['id']
path_param_values = self.encode_path_vars(id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v1/apikeys/{id}'.format(**path_param_dict)
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def update_api_key(self,
id: str,
if_match: str,
*,
name: str = None,
description: str = None,
**kwargs
) -> DetailedResponse:
"""
Updates an API key.
Updates properties of an API key. This does NOT affect existing access tokens.
Their token content will stay unchanged until the access token is refreshed. To
update an API key, pass the property to be modified. To delete one property's
value, pass the property with an empty value "".Users can manage user API keys for
themself, or service ID API keys for service IDs that are bound to an entity they
have access to.
:param str id: Unique ID of the API key to be updated.
:param str if_match: Version of the API key to be updated. Specify the
version that you retrieved when reading the API key. This value helps
identifying parallel usage of this API. Pass * to indicate to update any
version available. This might result in stale updates.
:param str name: (optional) The name of the API key to update. If specified
in the request the parameter must not be empty. The name is not checked for
uniqueness. Failure to this will result in an Error condition.
:param str description: (optional) The description of the API key to
update. If specified an empty description will clear the description of the
API key. If a non empty value is provided the API key will be updated.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ApiKey` object
"""
if id is None:
raise ValueError('id must be provided')
if if_match is None:
raise ValueError('if_match must be provided')
headers = {
'If-Match': if_match
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='update_api_key')
headers.update(sdk_headers)
data = {
'name': name,
'description': description
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['id']
path_param_values = self.encode_path_vars(id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v1/apikeys/{id}'.format(**path_param_dict)
request = self.prepare_request(method='PUT',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
def delete_api_key(self,
id: str,
**kwargs
) -> DetailedResponse:
"""
Deletes an API key.
Deletes an API key. Existing tokens will remain valid until expired. Refresh
tokens will not work any more for this API key. Users can manage user API keys
for themself, or service ID API keys for service IDs that are bound to an entity
they have access to.
:param str id: Unique ID of the API key.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if id is None:
raise ValueError('id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='delete_api_key')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
path_param_keys = ['id']
path_param_values = self.encode_path_vars(id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v1/apikeys/{id}'.format(**path_param_dict)
request = self.prepare_request(method='DELETE',
url=url,
headers=headers)
response = self.send(request)
return response
def lock_api_key(self,
id: str,
**kwargs
) -> DetailedResponse:
"""
Lock the API key.
Locks an API key by ID. Users can manage user API keys for themself, or service ID
API keys for service IDs that are bound to an entity they have access to. In case
of service IDs and their API keys, a user must be either an account owner, a IBM
Cloud org manager or IBM Cloud space developer in order to manage service IDs of
the entity.
:param str id: Unique ID of the API key.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if id is None:
raise ValueError('id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='lock_api_key')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
path_param_keys = ['id']
path_param_values = self.encode_path_vars(id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v1/apikeys/{id}/lock'.format(**path_param_dict)
request = self.prepare_request(method='POST',
url=url,
headers=headers)
response = self.send(request)
return response
def unlock_api_key(self,
id: str,
**kwargs
) -> DetailedResponse:
"""
Unlock the API key.
Unlocks an API key by ID. Users can manage user API keys for themself, or service
ID API keys for service IDs that are bound to an entity they have access to. In
case of service IDs and their API keys, a user must be either an account owner, a
IBM Cloud org manager or IBM Cloud space developer in order to manage service IDs
of the entity.
:param str id: Unique ID of the API key.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if id is None:
raise ValueError('id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='unlock_api_key')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
path_param_keys = ['id']
path_param_values = self.encode_path_vars(id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v1/apikeys/{id}/lock'.format(**path_param_dict)
request = self.prepare_request(method='DELETE',
url=url,
headers=headers)
response = self.send(request)
return response
def list_service_ids(self,
*,
account_id: str = None,
name: str = None,
pagesize: int = None,
pagetoken: str = None,
sort: str = None,
order: str = None,
include_history: bool = None,
**kwargs
) -> DetailedResponse:
"""
List service IDs.
Returns a list of service IDs. Users can manage user API keys for themself, or
service ID API keys for service IDs that are bound to an entity they have access
to.
:param str account_id: (optional) Account ID of the service ID(s) to query.
This parameter is required (unless using a pagetoken).
:param str name: (optional) Name of the service ID(s) to query. Optional.
:param int pagesize: (optional) Optional size of a single page. Default is
20 items per page. Valid range is 1 to 100.
:param str pagetoken: (optional) Optional Prev or Next page token returned
from a previous query execution. Default is start with first page.
:param str sort: (optional) Optional sort property, valid values are name,
description, created_at and modified_at. If specified, the items are sorted
by the value of this property.
:param str order: (optional) Optional sort order, valid values are asc and
desc. Default: asc.
:param bool include_history: (optional) Defines if the entity history is
included in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ServiceIdList` object
"""
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='list_service_ids')
headers.update(sdk_headers)
params = {
'account_id': account_id,
'name': name,
'pagesize': pagesize,
'pagetoken': pagetoken,
'sort': sort,
'order': order,
'include_history': include_history
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/v1/serviceids/'
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
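    # Usage sketch (not part of the generated SDK): listing service IDs for an
    # account. `service` and `account_id` are placeholders; the `next` link of
    # each returned page carries the pagetoken for the following request.
    #
    #     response = service.list_service_ids(account_id=account_id, pagesize=20)
    #     service_id_list = response.get_result()
    #     for service_id in service_id_list['serviceids']:
    #         print(service_id['name'])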
def create_service_id(self,
account_id: str,
name: str,
*,
description: str = None,
unique_instance_crns: List[str] = None,
apikey: 'CreateApiKeyRequest' = None,
entity_lock: str = None,
**kwargs
) -> DetailedResponse:
"""
Create a service ID.
Creates a service ID for an IBM Cloud account. Users can manage user API keys for
themself, or service ID API keys for service IDs that are bound to an entity they
have access to.
:param str account_id: ID of the account the service ID belongs to.
:param str name: Name of the Service Id. The name is not checked for
uniqueness. Therefore multiple names with the same value can exist. Access
is done via the UUID of the Service Id.
:param str description: (optional) The optional description of the Service
Id. The 'description' property is only available if a description was
provided during a create of a Service Id.
:param List[str] unique_instance_crns: (optional) Optional list of CRNs
(string array) which point to the services connected to the service ID.
:param CreateApiKeyRequest apikey: (optional) Input body parameters for the
Create API key V1 REST request.
:param str entity_lock: (optional) Indicates if the service ID is locked
for further write operations. False by default.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ServiceId` object
"""
if account_id is None:
raise ValueError('account_id must be provided')
if name is None:
raise ValueError('name must be provided')
if apikey is not None:
apikey = convert_model(apikey)
headers = {
'Entity-Lock': entity_lock
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='create_service_id')
headers.update(sdk_headers)
data = {
'account_id': account_id,
'name': name,
'description': description,
'unique_instance_crns': unique_instance_crns,
'apikey': apikey
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
url = '/v1/serviceids/'
request = self.prepare_request(method='POST',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
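    # Usage sketch (not part of the generated SDK): creating a bare service ID.
    # `service` and `account_id` are placeholders; an API key can optionally be
    # created along with it by passing a CreateApiKeyRequest model as `apikey`.
    #
    #     response = service.create_service_id(
    #         account_id=account_id,
    #         name='my-serviceid',
    #         description='example service ID')
    #     service_id = response.get_result()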
def get_service_id(self,
id: str,
*,
include_history: bool = None,
**kwargs
) -> DetailedResponse:
"""
Get details of a service ID.
Returns the details of a service ID. Users can manage user API keys for themself,
or service ID API keys for service IDs that are bound to an entity they have
access to.
:param str id: Unique ID of the service ID.
:param bool include_history: (optional) Defines if the entity history is
included in the response.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ServiceId` object
"""
if id is None:
raise ValueError('id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='get_service_id')
headers.update(sdk_headers)
params = {
'include_history': include_history
}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['id']
path_param_values = self.encode_path_vars(id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v1/serviceids/{id}'.format(**path_param_dict)
request = self.prepare_request(method='GET',
url=url,
headers=headers,
params=params)
response = self.send(request)
return response
def update_service_id(self,
id: str,
if_match: str,
*,
name: str = None,
description: str = None,
unique_instance_crns: List[str] = None,
**kwargs
) -> DetailedResponse:
"""
Update service ID.
Updates properties of a service ID. This does NOT affect existing access tokens.
Their token content will stay unchanged until the access token is refreshed. To
update a service ID, pass the property to be modified. To delete one property's
        value, pass the property with an empty value "". Users can manage user API keys for
themself, or service ID API keys for service IDs that are bound to an entity they
have access to.
:param str id: Unique ID of the service ID to be updated.
:param str if_match: Version of the service ID to be updated. Specify the
version that you retrieved as entity_tag (ETag header) when reading the
               service ID. This value helps identify parallel usage of this API. Pass *
to indicate to update any version available. This might result in stale
updates.
        :param str name: (optional) The name of the service ID to update. If
               specified in the request, the parameter must not be empty; an empty
               value results in an error. The name is not checked for uniqueness.
        :param str description: (optional) The description of the service ID to
               update. If specified, an empty description will clear the description
               of the service ID. If a non-empty value is provided, the service ID
               will be updated.
        :param List[str] unique_instance_crns: (optional) List of CRNs which point
               to the services connected to this service ID. If specified, an empty
               list will clear all existing unique instance crns of the service ID.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse with `dict` result representing a `ServiceId` object
"""
if id is None:
raise ValueError('id must be provided')
if if_match is None:
raise ValueError('if_match must be provided')
headers = {
'If-Match': if_match
}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='update_service_id')
headers.update(sdk_headers)
data = {
'name': name,
'description': description,
'unique_instance_crns': unique_instance_crns
}
data = {k: v for (k, v) in data.items() if v is not None}
data = json.dumps(data)
headers['content-type'] = 'application/json'
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
headers['Accept'] = 'application/json'
path_param_keys = ['id']
path_param_values = self.encode_path_vars(id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v1/serviceids/{id}'.format(**path_param_dict)
request = self.prepare_request(method='PUT',
url=url,
headers=headers,
data=data)
response = self.send(request)
return response
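    # Usage sketch (not part of the generated SDK): the ETag returned when the
    # service ID is read is passed back as `if_match` so that concurrent
    # modifications are detected. `service` and `serviceid_id` are placeholders.
    #
    #     read = service.get_service_id(id=serviceid_id)
    #     etag = read.get_headers()['Etag']
    #     service.update_service_id(
    #         id=serviceid_id,
    #         if_match=etag,
    #         description='updated description')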
def delete_service_id(self,
id: str,
**kwargs
) -> DetailedResponse:
"""
Deletes a service ID and associated API keys.
Deletes a service ID and all API keys associated to it. Before deleting the
        service ID, all associated API keys are deleted. In case of a Delete Conflict (status
        code 409), a retry of the request may help, as the service ID is only deleted if the
        associated API keys were successfully deleted first. Users can manage user API
keys for themself, or service ID API keys for service IDs that are bound to an
entity they have access to.
:param str id: Unique ID of the service ID.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if id is None:
raise ValueError('id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='delete_service_id')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
path_param_keys = ['id']
path_param_values = self.encode_path_vars(id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v1/serviceids/{id}'.format(**path_param_dict)
request = self.prepare_request(method='DELETE',
url=url,
headers=headers)
response = self.send(request)
return response
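    # Usage sketch (not part of the generated SDK): retrying a delete that hit the
    # 409 conflict described above. `service` and `serviceid_id` are placeholders;
    # ApiException is the error raised by ibm_cloud_sdk_core for non-2xx responses.
    #
    #     from ibm_cloud_sdk_core import ApiException
    #     try:
    #         service.delete_service_id(id=serviceid_id)
    #     except ApiException as err:
    #         if err.code == 409:
    #             service.delete_service_id(id=serviceid_id)  # one retry
    #         else:
    #             raise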
def lock_service_id(self,
id: str,
**kwargs
) -> DetailedResponse:
"""
Lock the service ID.
Locks a service ID by ID. Users can manage user API keys for themself, or service
ID API keys for service IDs that are bound to an entity they have access to. In
        case of service IDs and their API keys, a user must be either an account owner, an
        IBM Cloud org manager or an IBM Cloud space developer in order to manage service IDs
of the entity.
:param str id: Unique ID of the service ID.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if id is None:
raise ValueError('id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='lock_service_id')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
path_param_keys = ['id']
path_param_values = self.encode_path_vars(id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v1/serviceids/{id}/lock'.format(**path_param_dict)
request = self.prepare_request(method='POST',
url=url,
headers=headers)
response = self.send(request)
return response
def unlock_service_id(self,
id: str,
**kwargs
) -> DetailedResponse:
"""
Unlock the service ID.
Unlocks a service ID by ID. Users can manage user API keys for themself, or
service ID API keys for service IDs that are bound to an entity they have access
to. In case of service IDs and their API keys, a user must be either an account
        owner, an IBM Cloud org manager or an IBM Cloud space developer in order to manage
service IDs of the entity.
:param str id: Unique ID of the service ID.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if id is None:
raise ValueError('id must be provided')
headers = {}
sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME,
service_version='V1',
operation_id='unlock_service_id')
headers.update(sdk_headers)
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
path_param_keys = ['id']
path_param_values = self.encode_path_vars(id)
path_param_dict = dict(zip(path_param_keys, path_param_values))
url = '/v1/serviceids/{id}/lock'.format(**path_param_dict)
request = self.prepare_request(method='DELETE',
url=url,
headers=headers)
response = self.send(request)
return response
class ListApiKeysEnums:
"""
Enums for list_api_keys parameters.
"""
class Scope(str, Enum):
"""
Optional parameter to define the scope of the queried API Keys. Can be 'entity'
(default) or 'account'.
"""
ENTITY = 'entity'
ACCOUNT = 'account'
class Type(str, Enum):
"""
Optional parameter to filter the type of the queried API Keys. Can be 'user' or
'serviceid'.
"""
USER = 'user'
SERVICEID = 'serviceid'
class Order(str, Enum):
"""
Optional sort order, valid values are asc and desc. Default: asc.
"""
ASC = 'asc'
DESC = 'desc'
class ListServiceIdsEnums:
"""
Enums for list_service_ids parameters.
"""
class Order(str, Enum):
"""
Optional sort order, valid values are asc and desc. Default: asc.
"""
ASC = 'asc'
DESC = 'desc'
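# Usage sketch (not part of the generated SDK): the enum classes above provide
# the accepted values for the corresponding list parameters. `service` and
# `account_id` are placeholders, and the keyword names are assumed from the enum
# docstrings above.
#
#     response = service.list_api_keys(
#         account_id=account_id,
#         scope=ListApiKeysEnums.Scope.ENTITY.value,
#         type=ListApiKeysEnums.Type.USER.value,
#         order=ListApiKeysEnums.Order.ASC.value)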
##############################################################################
# Models
##############################################################################
class ApiKey():
"""
Response body format for API key V1 REST requests.
:attr ResponseContext context: (optional) Context with key properties for
problem determination.
:attr str id: Unique identifier of this API Key.
:attr str entity_tag: (optional) Version of the API Key details object. You need
to specify this value when updating the API key to avoid stale updates.
:attr str crn: Cloud Resource Name of the item. Example Cloud Resource Name:
'crn:v1:bluemix:public:iam-identity:us-south:a/myaccount::apikey:1234-9012-5678'.
:attr bool locked: The API key cannot be changed if set to true.
:attr str created_at: (optional) If set contains a date time string of the
creation date in ISO format.
:attr str created_by: IAM ID of the user or service which created the API key.
:attr str modified_at: (optional) If set contains a date time string of the last
modification date in ISO format.
:attr str name: Name of the API key. The name is not checked for uniqueness.
Therefore multiple names with the same value can exist. Access is done via the
UUID of the API key.
:attr str description: (optional) The optional description of the API key. The
'description' property is only available if a description was provided during a
create of an API key.
:attr str iam_id: The iam_id that this API key authenticates.
:attr str account_id: ID of the account that this API key authenticates for.
:attr str apikey: The API key value. This property only contains the API key
value for the following cases: create an API key, update a service ID API key
that stores the API key value as retrievable, or get a service ID API key that
stores the API key value as retrievable. All other operations don't return the
API key value, for example all user API key related operations, except for
create, don't contain the API key value.
:attr List[EnityHistoryRecord] history: (optional) History of the API key.
"""
def __init__(self,
id: str,
crn: str,
locked: bool,
created_by: str,
name: str,
iam_id: str,
account_id: str,
apikey: str,
*,
context: 'ResponseContext' = None,
entity_tag: str = None,
created_at: str = None,
modified_at: str = None,
description: str = None,
history: List['EnityHistoryRecord'] = None) -> None:
"""
Initialize a ApiKey object.
:param str id: Unique identifier of this API Key.
:param str crn: Cloud Resource Name of the item. Example Cloud Resource
Name:
'crn:v1:bluemix:public:iam-identity:us-south:a/myaccount::apikey:1234-9012-5678'.
:param bool locked: The API key cannot be changed if set to true.
:param str created_by: IAM ID of the user or service which created the API
key.
:param str name: Name of the API key. The name is not checked for
uniqueness. Therefore multiple names with the same value can exist. Access
is done via the UUID of the API key.
:param str iam_id: The iam_id that this API key authenticates.
:param str account_id: ID of the account that this API key authenticates
for.
:param str apikey: The API key value. This property only contains the API
key value for the following cases: create an API key, update a service ID
API key that stores the API key value as retrievable, or get a service ID
API key that stores the API key value as retrievable. All other operations
don't return the API key value, for example all user API key related
operations, except for create, don't contain the API key value.
:param ResponseContext context: (optional) Context with key properties for
problem determination.
:param str entity_tag: (optional) Version of the API Key details object.
You need to specify this value when updating the API key to avoid stale
updates.
:param str created_at: (optional) If set contains a date time string of the
creation date in ISO format.
:param str modified_at: (optional) If set contains a date time string of
the last modification date in ISO format.
:param str description: (optional) The optional description of the API key.
The 'description' property is only available if a description was provided
during a create of an API key.
:param List[EnityHistoryRecord] history: (optional) History of the API key.
"""
self.context = context
self.id = id
self.entity_tag = entity_tag
self.crn = crn
self.locked = locked
self.created_at = created_at
self.created_by = created_by
self.modified_at = modified_at
self.name = name
self.description = description
self.iam_id = iam_id
self.account_id = account_id
self.apikey = apikey
self.history = history
@classmethod
def from_dict(cls, _dict: Dict) -> 'ApiKey':
"""Initialize a ApiKey object from a json dictionary."""
args = {}
if 'context' in _dict:
args['context'] = ResponseContext.from_dict(_dict.get('context'))
if 'id' in _dict:
args['id'] = _dict.get('id')
else:
raise ValueError('Required property \'id\' not present in ApiKey JSON')
if 'entity_tag' in _dict:
args['entity_tag'] = _dict.get('entity_tag')
if 'crn' in _dict:
args['crn'] = _dict.get('crn')
else:
raise ValueError('Required property \'crn\' not present in ApiKey JSON')
if 'locked' in _dict:
args['locked'] = _dict.get('locked')
else:
raise ValueError('Required property \'locked\' not present in ApiKey JSON')
if 'created_at' in _dict:
args['created_at'] = _dict.get('created_at')
if 'created_by' in _dict:
args['created_by'] = _dict.get('created_by')
else:
raise ValueError('Required property \'created_by\' not present in ApiKey JSON')
if 'modified_at' in _dict:
args['modified_at'] = _dict.get('modified_at')
if 'name' in _dict:
args['name'] = _dict.get('name')
else:
raise ValueError('Required property \'name\' not present in ApiKey JSON')
if 'description' in _dict:
args['description'] = _dict.get('description')
if 'iam_id' in _dict:
args['iam_id'] = _dict.get('iam_id')
else:
raise ValueError('Required property \'iam_id\' not present in ApiKey JSON')
if 'account_id' in _dict:
args['account_id'] = _dict.get('account_id')
else:
raise ValueError('Required property \'account_id\' not present in ApiKey JSON')
if 'apikey' in _dict:
args['apikey'] = _dict.get('apikey')
else:
raise ValueError('Required property \'apikey\' not present in ApiKey JSON')
if 'history' in _dict:
args['history'] = [EnityHistoryRecord.from_dict(x) for x in _dict.get('history')]
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ApiKey object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'context') and self.context is not None:
_dict['context'] = self.context.to_dict()
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
if hasattr(self, 'entity_tag') and self.entity_tag is not None:
_dict['entity_tag'] = self.entity_tag
if hasattr(self, 'crn') and self.crn is not None:
_dict['crn'] = self.crn
if hasattr(self, 'locked') and self.locked is not None:
_dict['locked'] = self.locked
if hasattr(self, 'created_at') and self.created_at is not None:
_dict['created_at'] = self.created_at
if hasattr(self, 'created_by') and self.created_by is not None:
_dict['created_by'] = self.created_by
if hasattr(self, 'modified_at') and self.modified_at is not None:
_dict['modified_at'] = self.modified_at
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'description') and self.description is not None:
_dict['description'] = self.description
if hasattr(self, 'iam_id') and self.iam_id is not None:
_dict['iam_id'] = self.iam_id
if hasattr(self, 'account_id') and self.account_id is not None:
_dict['account_id'] = self.account_id
if hasattr(self, 'apikey') and self.apikey is not None:
_dict['apikey'] = self.apikey
if hasattr(self, 'history') and self.history is not None:
_dict['history'] = [x.to_dict() for x in self.history]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ApiKey object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ApiKey') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ApiKey') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
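# Usage sketch (not part of the generated SDK): the model converts between its
# attribute form and the JSON dictionaries used on the wire. All sample values
# below are placeholders; only the required properties are supplied.
#
#     api_key = ApiKey.from_dict({
#         'id': 'ApiKey-0000',
#         'crn': 'crn:v1:bluemix:public:iam-identity::::apikey:ApiKey-0000',
#         'locked': False,
#         'created_by': 'IBMid-0000',
#         'name': 'example-key',
#         'iam_id': 'IBMid-0000',
#         'account_id': 'account-0000',
#         'apikey': 'example-apikey-value'})
#     assert api_key.to_dict()['name'] == 'example-key'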
class ApiKeyList():
"""
Response body format for the List API keys V1 REST request.
:attr ResponseContext context: (optional) Context with key properties for
problem determination.
:attr int offset: (optional) The offset of the current page.
:attr int limit: (optional) Optional size of a single page. Default is 20 items
per page. Valid range is 1 to 100.
:attr str first: (optional) Link to the first page.
:attr str previous: (optional) Link to the previous available page. If
'previous' property is not part of the response no previous page is available.
:attr str next: (optional) Link to the next available page. If 'next' property
is not part of the response no next page is available.
    :attr List[ApiKey] apikeys: List of API keys based on the query parameters and
          the page size. The apikeys array is always part of the response but might be
          empty depending on the query parameter values provided.
"""
def __init__(self,
apikeys: List['ApiKey'],
*,
context: 'ResponseContext' = None,
offset: int = None,
limit: int = None,
first: str = None,
previous: str = None,
next: str = None) -> None:
"""
Initialize a ApiKeyList object.
        :param List[ApiKey] apikeys: List of API keys based on the query parameters
               and the page size. The apikeys array is always part of the response but
               might be empty depending on the query parameter values provided.
:param ResponseContext context: (optional) Context with key properties for
problem determination.
:param int offset: (optional) The offset of the current page.
:param int limit: (optional) Optional size of a single page. Default is 20
items per page. Valid range is 1 to 100.
:param str first: (optional) Link to the first page.
:param str previous: (optional) Link to the previous available page. If
'previous' property is not part of the response no previous page is
available.
:param str next: (optional) Link to the next available page. If 'next'
property is not part of the response no next page is available.
"""
self.context = context
self.offset = offset
self.limit = limit
self.first = first
self.previous = previous
self.next = next
self.apikeys = apikeys
@classmethod
def from_dict(cls, _dict: Dict) -> 'ApiKeyList':
"""Initialize a ApiKeyList object from a json dictionary."""
args = {}
if 'context' in _dict:
args['context'] = ResponseContext.from_dict(_dict.get('context'))
if 'offset' in _dict:
args['offset'] = _dict.get('offset')
if 'limit' in _dict:
args['limit'] = _dict.get('limit')
if 'first' in _dict:
args['first'] = _dict.get('first')
if 'previous' in _dict:
args['previous'] = _dict.get('previous')
if 'next' in _dict:
args['next'] = _dict.get('next')
if 'apikeys' in _dict:
args['apikeys'] = [ApiKey.from_dict(x) for x in _dict.get('apikeys')]
else:
raise ValueError('Required property \'apikeys\' not present in ApiKeyList JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ApiKeyList object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'context') and self.context is not None:
_dict['context'] = self.context.to_dict()
if hasattr(self, 'offset') and self.offset is not None:
_dict['offset'] = self.offset
if hasattr(self, 'limit') and self.limit is not None:
_dict['limit'] = self.limit
if hasattr(self, 'first') and self.first is not None:
_dict['first'] = self.first
if hasattr(self, 'previous') and self.previous is not None:
_dict['previous'] = self.previous
if hasattr(self, 'next') and self.next is not None:
_dict['next'] = self.next
if hasattr(self, 'apikeys') and self.apikeys is not None:
_dict['apikeys'] = [x.to_dict() for x in self.apikeys]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ApiKeyList object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ApiKeyList') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ApiKeyList') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class CreateApiKeyRequest():
"""
Input body parameters for the Create API key V1 REST request.
:attr str name: Name of the API key. The name is not checked for uniqueness.
Therefore multiple names with the same value can exist. Access is done via the
UUID of the API key.
:attr str description: (optional) The optional description of the API key. The
'description' property is only available if a description was provided during a
create of an API key.
:attr str iam_id: The iam_id that this API key authenticates.
:attr str account_id: (optional) The account ID of the API key.
    :attr str apikey: (optional) You can optionally pass through the API key value
          for this API key. If passed, NO validation of that apiKey value is done, i.e.
          the value can be non-URL safe. If omitted, the API key management will create
          a URL-safe opaque API key value. The value of the API key is checked for
          uniqueness. Please ensure enough variation when passing in this value.
:attr bool store_value: (optional) Send true or false to set whether the API key
value is retrievable in the future by using the Get details of an API key
request. If you create an API key for a user, you must specify `false` or omit
the value. We don't allow storing of API keys for users.
"""
def __init__(self,
name: str,
iam_id: str,
*,
description: str = None,
account_id: str = None,
apikey: str = None,
store_value: bool = None) -> None:
"""
Initialize a CreateApiKeyRequest object.
:param str name: Name of the API key. The name is not checked for
uniqueness. Therefore multiple names with the same value can exist. Access
is done via the UUID of the API key.
:param str iam_id: The iam_id that this API key authenticates.
:param str description: (optional) The optional description of the API key.
The 'description' property is only available if a description was provided
during a create of an API key.
:param str account_id: (optional) The account ID of the API key.
        :param str apikey: (optional) You can optionally pass through the API key
               value for this API key. If passed, NO validation of that apiKey value is
               done, i.e. the value can be non-URL safe. If omitted, the API key
               management will create a URL-safe opaque API key value. The value of the
               API key is checked for uniqueness. Please ensure enough variation when
               passing in this value.
:param bool store_value: (optional) Send true or false to set whether the
API key value is retrievable in the future by using the Get details of an
API key request. If you create an API key for a user, you must specify
`false` or omit the value. We don't allow storing of API keys for users.
"""
self.name = name
self.description = description
self.iam_id = iam_id
self.account_id = account_id
self.apikey = apikey
self.store_value = store_value
@classmethod
def from_dict(cls, _dict: Dict) -> 'CreateApiKeyRequest':
"""Initialize a CreateApiKeyRequest object from a json dictionary."""
args = {}
if 'name' in _dict:
args['name'] = _dict.get('name')
else:
raise ValueError('Required property \'name\' not present in CreateApiKeyRequest JSON')
if 'description' in _dict:
args['description'] = _dict.get('description')
if 'iam_id' in _dict:
args['iam_id'] = _dict.get('iam_id')
else:
raise ValueError('Required property \'iam_id\' not present in CreateApiKeyRequest JSON')
if 'account_id' in _dict:
args['account_id'] = _dict.get('account_id')
if 'apikey' in _dict:
args['apikey'] = _dict.get('apikey')
if 'store_value' in _dict:
args['store_value'] = _dict.get('store_value')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a CreateApiKeyRequest object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'description') and self.description is not None:
_dict['description'] = self.description
if hasattr(self, 'iam_id') and self.iam_id is not None:
_dict['iam_id'] = self.iam_id
if hasattr(self, 'account_id') and self.account_id is not None:
_dict['account_id'] = self.account_id
if hasattr(self, 'apikey') and self.apikey is not None:
_dict['apikey'] = self.apikey
if hasattr(self, 'store_value') and self.store_value is not None:
_dict['store_value'] = self.store_value
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this CreateApiKeyRequest object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'CreateApiKeyRequest') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'CreateApiKeyRequest') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
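# Usage sketch (not part of the generated SDK): this model is what the
# create_service_id() method above accepts as its `apikey` argument. The name
# and iam_id values are placeholders.
#
#     apikey_request = CreateApiKeyRequest(
#         name='example-key',
#         iam_id='iam-ServiceId-0000',
#         description='key created together with a service ID',
#         store_value=True)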
class EnityHistoryRecord():
"""
Response body format for an entity history record.
:attr str timestamp: Timestamp when the action was triggered.
:attr str iam_id: IAM ID of the identity which triggered the action.
:attr str iam_id_account: Account of the identity which triggered the action.
:attr str action: Action of the history entry.
:attr List[str] params: Params of the history entry.
:attr str message: Message which summarizes the executed action.
"""
def __init__(self,
timestamp: str,
iam_id: str,
iam_id_account: str,
action: str,
params: List[str],
message: str) -> None:
"""
Initialize a EnityHistoryRecord object.
:param str timestamp: Timestamp when the action was triggered.
:param str iam_id: IAM ID of the identity which triggered the action.
:param str iam_id_account: Account of the identity which triggered the
action.
:param str action: Action of the history entry.
:param List[str] params: Params of the history entry.
:param str message: Message which summarizes the executed action.
"""
self.timestamp = timestamp
self.iam_id = iam_id
self.iam_id_account = iam_id_account
self.action = action
self.params = params
self.message = message
@classmethod
def from_dict(cls, _dict: Dict) -> 'EnityHistoryRecord':
"""Initialize a EnityHistoryRecord object from a json dictionary."""
args = {}
if 'timestamp' in _dict:
args['timestamp'] = _dict.get('timestamp')
else:
raise ValueError('Required property \'timestamp\' not present in EnityHistoryRecord JSON')
if 'iam_id' in _dict:
args['iam_id'] = _dict.get('iam_id')
else:
raise ValueError('Required property \'iam_id\' not present in EnityHistoryRecord JSON')
if 'iam_id_account' in _dict:
args['iam_id_account'] = _dict.get('iam_id_account')
else:
raise ValueError('Required property \'iam_id_account\' not present in EnityHistoryRecord JSON')
if 'action' in _dict:
args['action'] = _dict.get('action')
else:
raise ValueError('Required property \'action\' not present in EnityHistoryRecord JSON')
if 'params' in _dict:
args['params'] = _dict.get('params')
else:
raise ValueError('Required property \'params\' not present in EnityHistoryRecord JSON')
if 'message' in _dict:
args['message'] = _dict.get('message')
else:
raise ValueError('Required property \'message\' not present in EnityHistoryRecord JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a EnityHistoryRecord object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'timestamp') and self.timestamp is not None:
_dict['timestamp'] = self.timestamp
if hasattr(self, 'iam_id') and self.iam_id is not None:
_dict['iam_id'] = self.iam_id
if hasattr(self, 'iam_id_account') and self.iam_id_account is not None:
_dict['iam_id_account'] = self.iam_id_account
if hasattr(self, 'action') and self.action is not None:
_dict['action'] = self.action
if hasattr(self, 'params') and self.params is not None:
_dict['params'] = self.params
if hasattr(self, 'message') and self.message is not None:
_dict['message'] = self.message
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this EnityHistoryRecord object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'EnityHistoryRecord') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'EnityHistoryRecord') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ResponseContext():
"""
Context with key properties for problem determination.
:attr str transaction_id: (optional) The transaction ID of the inbound REST
request.
:attr str operation: (optional) The operation of the inbound REST request.
:attr str user_agent: (optional) The user agent of the inbound REST request.
:attr str url: (optional) The URL of that cluster.
:attr str instance_id: (optional) The instance ID of the server instance
processing the request.
:attr str thread_id: (optional) The thread ID of the server instance processing
the request.
:attr str host: (optional) The host of the server instance processing the
request.
:attr str start_time: (optional) The start time of the request.
:attr str end_time: (optional) The finish time of the request.
:attr str elapsed_time: (optional) The elapsed time in msec.
:attr str cluster_name: (optional) The cluster name.
"""
def __init__(self,
*,
transaction_id: str = None,
operation: str = None,
user_agent: str = None,
url: str = None,
instance_id: str = None,
thread_id: str = None,
host: str = None,
start_time: str = None,
end_time: str = None,
elapsed_time: str = None,
cluster_name: str = None) -> None:
"""
Initialize a ResponseContext object.
:param str transaction_id: (optional) The transaction ID of the inbound
REST request.
:param str operation: (optional) The operation of the inbound REST request.
:param str user_agent: (optional) The user agent of the inbound REST
request.
:param str url: (optional) The URL of that cluster.
:param str instance_id: (optional) The instance ID of the server instance
processing the request.
:param str thread_id: (optional) The thread ID of the server instance
processing the request.
:param str host: (optional) The host of the server instance processing the
request.
:param str start_time: (optional) The start time of the request.
:param str end_time: (optional) The finish time of the request.
:param str elapsed_time: (optional) The elapsed time in msec.
:param str cluster_name: (optional) The cluster name.
"""
self.transaction_id = transaction_id
self.operation = operation
self.user_agent = user_agent
self.url = url
self.instance_id = instance_id
self.thread_id = thread_id
self.host = host
self.start_time = start_time
self.end_time = end_time
self.elapsed_time = elapsed_time
self.cluster_name = cluster_name
@classmethod
def from_dict(cls, _dict: Dict) -> 'ResponseContext':
"""Initialize a ResponseContext object from a json dictionary."""
args = {}
if 'transaction_id' in _dict:
args['transaction_id'] = _dict.get('transaction_id')
if 'operation' in _dict:
args['operation'] = _dict.get('operation')
if 'user_agent' in _dict:
args['user_agent'] = _dict.get('user_agent')
if 'url' in _dict:
args['url'] = _dict.get('url')
if 'instance_id' in _dict:
args['instance_id'] = _dict.get('instance_id')
if 'thread_id' in _dict:
args['thread_id'] = _dict.get('thread_id')
if 'host' in _dict:
args['host'] = _dict.get('host')
if 'start_time' in _dict:
args['start_time'] = _dict.get('start_time')
if 'end_time' in _dict:
args['end_time'] = _dict.get('end_time')
if 'elapsed_time' in _dict:
args['elapsed_time'] = _dict.get('elapsed_time')
if 'cluster_name' in _dict:
args['cluster_name'] = _dict.get('cluster_name')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ResponseContext object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'transaction_id') and self.transaction_id is not None:
_dict['transaction_id'] = self.transaction_id
if hasattr(self, 'operation') and self.operation is not None:
_dict['operation'] = self.operation
if hasattr(self, 'user_agent') and self.user_agent is not None:
_dict['user_agent'] = self.user_agent
if hasattr(self, 'url') and self.url is not None:
_dict['url'] = self.url
if hasattr(self, 'instance_id') and self.instance_id is not None:
_dict['instance_id'] = self.instance_id
if hasattr(self, 'thread_id') and self.thread_id is not None:
_dict['thread_id'] = self.thread_id
if hasattr(self, 'host') and self.host is not None:
_dict['host'] = self.host
if hasattr(self, 'start_time') and self.start_time is not None:
_dict['start_time'] = self.start_time
if hasattr(self, 'end_time') and self.end_time is not None:
_dict['end_time'] = self.end_time
if hasattr(self, 'elapsed_time') and self.elapsed_time is not None:
_dict['elapsed_time'] = self.elapsed_time
if hasattr(self, 'cluster_name') and self.cluster_name is not None:
_dict['cluster_name'] = self.cluster_name
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ResponseContext object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ResponseContext') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ResponseContext') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ServiceId():
"""
Response body format for service ID V1 REST requests.
:attr ResponseContext context: (optional) Context with key properties for
problem determination.
:attr str id: Unique identifier of this Service Id.
:attr str iam_id: Cloud wide identifier for identities of this service ID.
:attr str entity_tag: (optional) Version of the service ID details object. You
need to specify this value when updating the service ID to avoid stale updates.
:attr str crn: Cloud Resource Name of the item. Example Cloud Resource Name:
'crn:v1:bluemix:public:iam-identity:us-south:a/myaccount::serviceid:1234-5678-9012'.
:attr bool locked: The service ID cannot be changed if set to true.
:attr str created_at: (optional) If set contains a date time string of the
creation date in ISO format.
:attr str modified_at: (optional) If set contains a date time string of the last
modification date in ISO format.
:attr str account_id: ID of the account the service ID belongs to.
:attr str name: Name of the Service Id. The name is not checked for uniqueness.
Therefore multiple names with the same value can exist. Access is done via the
UUID of the Service Id.
:attr str description: (optional) The optional description of the Service Id.
The 'description' property is only available if a description was provided
during a create of a Service Id.
:attr List[str] unique_instance_crns: (optional) Optional list of CRNs (string
array) which point to the services connected to the service ID.
:attr List[EnityHistoryRecord] history: (optional) History of the Service ID.
:attr ApiKey apikey: Response body format for API key V1 REST requests.
"""
def __init__(self,
id: str,
iam_id: str,
crn: str,
locked: bool,
account_id: str,
name: str,
apikey: 'ApiKey',
*,
context: 'ResponseContext' = None,
entity_tag: str = None,
created_at: str = None,
modified_at: str = None,
description: str = None,
unique_instance_crns: List[str] = None,
history: List['EnityHistoryRecord'] = None) -> None:
"""
Initialize a ServiceId object.
:param str id: Unique identifier of this Service Id.
:param str iam_id: Cloud wide identifier for identities of this service ID.
:param str crn: Cloud Resource Name of the item. Example Cloud Resource
Name:
'crn:v1:bluemix:public:iam-identity:us-south:a/myaccount::serviceid:1234-5678-9012'.
:param bool locked: The service ID cannot be changed if set to true.
:param str account_id: ID of the account the service ID belongs to.
:param str name: Name of the Service Id. The name is not checked for
uniqueness. Therefore multiple names with the same value can exist. Access
is done via the UUID of the Service Id.
:param ApiKey apikey: Response body format for API key V1 REST requests.
:param ResponseContext context: (optional) Context with key properties for
problem determination.
:param str entity_tag: (optional) Version of the service ID details object.
You need to specify this value when updating the service ID to avoid stale
updates.
:param str created_at: (optional) If set contains a date time string of the
creation date in ISO format.
:param str modified_at: (optional) If set contains a date time string of
the last modification date in ISO format.
:param str description: (optional) The optional description of the Service
Id. The 'description' property is only available if a description was
provided during a create of a Service Id.
:param List[str] unique_instance_crns: (optional) Optional list of CRNs
(string array) which point to the services connected to the service ID.
:param List[EnityHistoryRecord] history: (optional) History of the Service
ID.
"""
self.context = context
self.id = id
self.iam_id = iam_id
self.entity_tag = entity_tag
self.crn = crn
self.locked = locked
self.created_at = created_at
self.modified_at = modified_at
self.account_id = account_id
self.name = name
self.description = description
self.unique_instance_crns = unique_instance_crns
self.history = history
self.apikey = apikey
@classmethod
def from_dict(cls, _dict: Dict) -> 'ServiceId':
"""Initialize a ServiceId object from a json dictionary."""
args = {}
if 'context' in _dict:
args['context'] = ResponseContext.from_dict(_dict.get('context'))
if 'id' in _dict:
args['id'] = _dict.get('id')
else:
raise ValueError('Required property \'id\' not present in ServiceId JSON')
if 'iam_id' in _dict:
args['iam_id'] = _dict.get('iam_id')
else:
raise ValueError('Required property \'iam_id\' not present in ServiceId JSON')
if 'entity_tag' in _dict:
args['entity_tag'] = _dict.get('entity_tag')
if 'crn' in _dict:
args['crn'] = _dict.get('crn')
else:
raise ValueError('Required property \'crn\' not present in ServiceId JSON')
if 'locked' in _dict:
args['locked'] = _dict.get('locked')
else:
raise ValueError('Required property \'locked\' not present in ServiceId JSON')
if 'created_at' in _dict:
args['created_at'] = _dict.get('created_at')
if 'modified_at' in _dict:
args['modified_at'] = _dict.get('modified_at')
if 'account_id' in _dict:
args['account_id'] = _dict.get('account_id')
else:
raise ValueError('Required property \'account_id\' not present in ServiceId JSON')
if 'name' in _dict:
args['name'] = _dict.get('name')
else:
raise ValueError('Required property \'name\' not present in ServiceId JSON')
if 'description' in _dict:
args['description'] = _dict.get('description')
if 'unique_instance_crns' in _dict:
args['unique_instance_crns'] = _dict.get('unique_instance_crns')
if 'history' in _dict:
args['history'] = [EnityHistoryRecord.from_dict(x) for x in _dict.get('history')]
if 'apikey' in _dict:
args['apikey'] = ApiKey.from_dict(_dict.get('apikey'))
else:
raise ValueError('Required property \'apikey\' not present in ServiceId JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ServiceId object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'context') and self.context is not None:
_dict['context'] = self.context.to_dict()
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
if hasattr(self, 'iam_id') and self.iam_id is not None:
_dict['iam_id'] = self.iam_id
if hasattr(self, 'entity_tag') and self.entity_tag is not None:
_dict['entity_tag'] = self.entity_tag
if hasattr(self, 'crn') and self.crn is not None:
_dict['crn'] = self.crn
if hasattr(self, 'locked') and self.locked is not None:
_dict['locked'] = self.locked
if hasattr(self, 'created_at') and self.created_at is not None:
_dict['created_at'] = self.created_at
if hasattr(self, 'modified_at') and self.modified_at is not None:
_dict['modified_at'] = self.modified_at
if hasattr(self, 'account_id') and self.account_id is not None:
_dict['account_id'] = self.account_id
if hasattr(self, 'name') and self.name is not None:
_dict['name'] = self.name
if hasattr(self, 'description') and self.description is not None:
_dict['description'] = self.description
if hasattr(self, 'unique_instance_crns') and self.unique_instance_crns is not None:
_dict['unique_instance_crns'] = self.unique_instance_crns
if hasattr(self, 'history') and self.history is not None:
_dict['history'] = [x.to_dict() for x in self.history]
if hasattr(self, 'apikey') and self.apikey is not None:
_dict['apikey'] = self.apikey.to_dict()
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ServiceId object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ServiceId') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ServiceId') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
class ServiceIdList():
"""
Response body format for the list service ID V1 REST request.
:attr ResponseContext context: (optional) Context with key properties for
problem determination.
:attr int offset: (optional) The offset of the current page.
:attr int limit: (optional) Optional size of a single page. Default is 20 items
per page. Valid range is 1 to 100.
:attr str first: (optional) Link to the first page.
:attr str previous: (optional) Link to the previous available page. If
'previous' property is not part of the response no previous page is available.
:attr str next: (optional) Link to the next available page. If 'next' property
is not part of the response no next page is available.
:attr List[ServiceId] serviceids: List of service IDs based on the query
          parameters and the page size. The service IDs array is always part of the
response but might be empty depending on the query parameter values provided.
"""
def __init__(self,
serviceids: List['ServiceId'],
*,
context: 'ResponseContext' = None,
offset: int = None,
limit: int = None,
first: str = None,
previous: str = None,
next: str = None) -> None:
"""
Initialize a ServiceIdList object.
:param List[ServiceId] serviceids: List of service IDs based on the query
               parameters and the page size. The service IDs array is always part of the
response but might be empty depending on the query parameter values
provided.
:param ResponseContext context: (optional) Context with key properties for
problem determination.
:param int offset: (optional) The offset of the current page.
:param int limit: (optional) Optional size of a single page. Default is 20
items per page. Valid range is 1 to 100.
:param str first: (optional) Link to the first page.
:param str previous: (optional) Link to the previous available page. If
'previous' property is not part of the response no previous page is
available.
:param str next: (optional) Link to the next available page. If 'next'
property is not part of the response no next page is available.
"""
self.context = context
self.offset = offset
self.limit = limit
self.first = first
self.previous = previous
self.next = next
self.serviceids = serviceids
@classmethod
def from_dict(cls, _dict: Dict) -> 'ServiceIdList':
"""Initialize a ServiceIdList object from a json dictionary."""
args = {}
if 'context' in _dict:
args['context'] = ResponseContext.from_dict(_dict.get('context'))
if 'offset' in _dict:
args['offset'] = _dict.get('offset')
if 'limit' in _dict:
args['limit'] = _dict.get('limit')
if 'first' in _dict:
args['first'] = _dict.get('first')
if 'previous' in _dict:
args['previous'] = _dict.get('previous')
if 'next' in _dict:
args['next'] = _dict.get('next')
if 'serviceids' in _dict:
args['serviceids'] = [ServiceId.from_dict(x) for x in _dict.get('serviceids')]
else:
raise ValueError('Required property \'serviceids\' not present in ServiceIdList JSON')
return cls(**args)
@classmethod
def _from_dict(cls, _dict):
"""Initialize a ServiceIdList object from a json dictionary."""
return cls.from_dict(_dict)
def to_dict(self) -> Dict:
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'context') and self.context is not None:
_dict['context'] = self.context.to_dict()
if hasattr(self, 'offset') and self.offset is not None:
_dict['offset'] = self.offset
if hasattr(self, 'limit') and self.limit is not None:
_dict['limit'] = self.limit
if hasattr(self, 'first') and self.first is not None:
_dict['first'] = self.first
if hasattr(self, 'previous') and self.previous is not None:
_dict['previous'] = self.previous
if hasattr(self, 'next') and self.next is not None:
_dict['next'] = self.next
if hasattr(self, 'serviceids') and self.serviceids is not None:
_dict['serviceids'] = [x.to_dict() for x in self.serviceids]
return _dict
def _to_dict(self):
"""Return a json dictionary representing this model."""
return self.to_dict()
def __str__(self) -> str:
"""Return a `str` version of this ServiceIdList object."""
return json.dumps(self.to_dict(), indent=2)
def __eq__(self, other: 'ServiceIdList') -> bool:
"""Return `true` when self and other are equal, false otherwise."""
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other: 'ServiceIdList') -> bool:
"""Return `true` when self and other are not equal, false otherwise."""
return not self == other
| 43.556727
| 107
| 0.601222
|
89a3b041ce03407ff6b0ccc891c0b5d75236cb51
| 10,868
|
py
|
Python
|
synapse/http/federation/well_known_resolver.py
|
ThiefMaster/synapse
|
f2af3e4fc550e7e93be1b0f425c3e9c484b96293
|
[
"Apache-2.0"
] | 1
|
2020-07-21T17:51:02.000Z
|
2020-07-21T17:51:02.000Z
|
synapse/http/federation/well_known_resolver.py
|
mjvaldez/synapse
|
de119063f248981510e961e83f1515a3add19a21
|
[
"Apache-2.0"
] | null | null | null |
synapse/http/federation/well_known_resolver.py
|
mjvaldez/synapse
|
de119063f248981510e961e83f1515a3add19a21
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import random
import time
import attr
from twisted.internet import defer
from twisted.web.client import RedirectAgent, readBody
from twisted.web.http import stringToDatetime
from twisted.web.http_headers import Headers
from synapse.logging.context import make_deferred_yieldable
from synapse.util import Clock
from synapse.util.caches.ttlcache import TTLCache
from synapse.util.metrics import Measure
# period to cache .well-known results for by default
WELL_KNOWN_DEFAULT_CACHE_PERIOD = 24 * 3600
# jitter factor to add to the .well-known default cache ttls
WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER = 0.1
# period to cache failure to fetch .well-known for
WELL_KNOWN_INVALID_CACHE_PERIOD = 1 * 3600
# period to cache failure to fetch .well-known if there has recently been a
# valid well-known for that domain.
WELL_KNOWN_DOWN_CACHE_PERIOD = 2 * 60
# period to remember there was a valid well-known after valid record expires
WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID = 2 * 3600
# cap for .well-known cache period
WELL_KNOWN_MAX_CACHE_PERIOD = 48 * 3600
# lower bound for .well-known cache period
WELL_KNOWN_MIN_CACHE_PERIOD = 5 * 60
# Attempt to refetch a cached well-known N% of the TTL before it expires.
# e.g. if set to 0.2 and we have a cached entry with a TTL of 5mins, then
# we'll start trying to refetch 1 minute before it expires.
WELL_KNOWN_GRACE_PERIOD_FACTOR = 0.2
# Number of times we retry fetching a well-known for a domain we know recently
# had a valid entry.
WELL_KNOWN_RETRY_ATTEMPTS = 3
logger = logging.getLogger(__name__)
_well_known_cache = TTLCache("well-known")
_had_valid_well_known_cache = TTLCache("had-valid-well-known")
@attr.s(slots=True, frozen=True)
class WellKnownLookupResult(object):
delegated_server = attr.ib()
class WellKnownResolver(object):
"""Handles well-known lookups for matrix servers.
"""
def __init__(
self,
reactor,
agent,
user_agent,
well_known_cache=None,
had_well_known_cache=None,
):
self._reactor = reactor
self._clock = Clock(reactor)
if well_known_cache is None:
well_known_cache = _well_known_cache
if had_well_known_cache is None:
had_well_known_cache = _had_valid_well_known_cache
self._well_known_cache = well_known_cache
self._had_valid_well_known_cache = had_well_known_cache
self._well_known_agent = RedirectAgent(agent)
self.user_agent = user_agent
@defer.inlineCallbacks
def get_well_known(self, server_name):
"""Attempt to fetch and parse a .well-known file for the given server
Args:
server_name (bytes): name of the server, from the requested url
Returns:
Deferred[WellKnownLookupResult]: The result of the lookup
"""
try:
prev_result, expiry, ttl = self._well_known_cache.get_with_expiry(
server_name
)
now = self._clock.time()
if now < expiry - WELL_KNOWN_GRACE_PERIOD_FACTOR * ttl:
return WellKnownLookupResult(delegated_server=prev_result)
except KeyError:
prev_result = None
# TODO: should we linearise so that we don't end up doing two .well-known
# requests for the same server in parallel?
try:
with Measure(self._clock, "get_well_known"):
result, cache_period = yield self._fetch_well_known(server_name)
except _FetchWellKnownFailure as e:
if prev_result and e.temporary:
# This is a temporary failure and we have a still valid cached
# result, so lets return that. Hopefully the next time we ask
# the remote will be back up again.
return WellKnownLookupResult(delegated_server=prev_result)
result = None
if self._had_valid_well_known_cache.get(server_name, False):
# We have recently seen a valid well-known record for this
# server, so we cache the lack of well-known for a shorter time.
cache_period = WELL_KNOWN_DOWN_CACHE_PERIOD
else:
cache_period = WELL_KNOWN_INVALID_CACHE_PERIOD
# add some randomness to the TTL to avoid a stampeding herd
cache_period *= random.uniform(
1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
)
if cache_period > 0:
self._well_known_cache.set(server_name, result, cache_period)
return WellKnownLookupResult(delegated_server=result)
@defer.inlineCallbacks
def _fetch_well_known(self, server_name):
"""Actually fetch and parse a .well-known, without checking the cache
Args:
server_name (bytes): name of the server, from the requested url
Raises:
_FetchWellKnownFailure if we fail to lookup a result
Returns:
Deferred[Tuple[bytes,int]]: The lookup result and cache period.
"""
had_valid_well_known = self._had_valid_well_known_cache.get(server_name, False)
# We do this in two steps to differentiate between possibly transient
        # errors (e.g. can't connect to host, 503 response) and more permanent
# errors (such as getting a 404 response).
response, body = yield self._make_well_known_request(
server_name, retry=had_valid_well_known
)
try:
if response.code != 200:
raise Exception("Non-200 response %s" % (response.code,))
parsed_body = json.loads(body.decode("utf-8"))
logger.info("Response from .well-known: %s", parsed_body)
result = parsed_body["m.server"].encode("ascii")
except defer.CancelledError:
# Bail if we've been cancelled
raise
except Exception as e:
logger.info("Error parsing well-known for %s: %s", server_name, e)
raise _FetchWellKnownFailure(temporary=False)
cache_period = _cache_period_from_headers(
response.headers, time_now=self._reactor.seconds
)
if cache_period is None:
cache_period = WELL_KNOWN_DEFAULT_CACHE_PERIOD
# add some randomness to the TTL to avoid a stampeding herd every 24 hours
# after startup
cache_period *= random.uniform(
1 - WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
1 + WELL_KNOWN_DEFAULT_CACHE_PERIOD_JITTER,
)
else:
cache_period = min(cache_period, WELL_KNOWN_MAX_CACHE_PERIOD)
cache_period = max(cache_period, WELL_KNOWN_MIN_CACHE_PERIOD)
# We got a success, mark as such in the cache
self._had_valid_well_known_cache.set(
server_name,
bool(result),
cache_period + WELL_KNOWN_REMEMBER_DOMAIN_HAD_VALID,
)
return result, cache_period
@defer.inlineCallbacks
def _make_well_known_request(self, server_name, retry):
"""Make the well known request.
This will retry the request if requested and it fails (with unable
to connect or receives a 5xx error).
Args:
server_name (bytes)
retry (bool): Whether to retry the request if it fails.
Returns:
Deferred[tuple[IResponse, bytes]] Returns the response object and
body. Response may be a non-200 response.
"""
uri = b"https://%s/.well-known/matrix/server" % (server_name,)
uri_str = uri.decode("ascii")
headers = {
b"User-Agent": [self.user_agent],
}
i = 0
while True:
i += 1
logger.info("Fetching %s", uri_str)
try:
response = yield make_deferred_yieldable(
self._well_known_agent.request(
b"GET", uri, headers=Headers(headers)
)
)
body = yield make_deferred_yieldable(readBody(response))
if 500 <= response.code < 600:
raise Exception("Non-200 response %s" % (response.code,))
return response, body
except defer.CancelledError:
# Bail if we've been cancelled
raise
except Exception as e:
if not retry or i >= WELL_KNOWN_RETRY_ATTEMPTS:
logger.info("Error fetching %s: %s", uri_str, e)
raise _FetchWellKnownFailure(temporary=True)
logger.info("Error fetching %s: %s. Retrying", uri_str, e)
# Sleep briefly in the hopes that they come back up
yield self._clock.sleep(0.5)
def _cache_period_from_headers(headers, time_now=time.time):
cache_controls = _parse_cache_control(headers)
if b"no-store" in cache_controls:
return 0
if b"max-age" in cache_controls:
try:
max_age = int(cache_controls[b"max-age"])
return max_age
except ValueError:
pass
expires = headers.getRawHeaders(b"expires")
if expires is not None:
try:
expires_date = stringToDatetime(expires[-1])
return expires_date - time_now()
except ValueError:
# RFC7234 says 'A cache recipient MUST interpret invalid date formats,
# especially the value "0", as representing a time in the past (i.e.,
# "already expired").
return 0
return None
def _parse_cache_control(headers):
cache_controls = {}
for hdr in headers.getRawHeaders(b"cache-control", []):
for directive in hdr.split(b","):
splits = [x.strip() for x in directive.split(b"=", 1)]
k = splits[0].lower()
v = splits[1] if len(splits) > 1 else None
cache_controls[k] = v
return cache_controls
@attr.s()
class _FetchWellKnownFailure(Exception):
# True if we didn't get a non-5xx HTTP response, i.e. this may or may not be
# a temporary failure.
temporary = attr.ib()
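# Illustrative sketch (not part of the original module): how the cache-period
# helpers above behave for a typical response. `Headers` is the Twisted
# twisted.web.http_headers.Headers class already used by this module.
if __name__ == "__main__":
    _hdrs = Headers({b"Cache-Control": [b"max-age=3600, public"]})
    # Directive names are lower-cased; values stay as bytes (or None).
    print(_parse_cache_control(_hdrs))        # {b'max-age': b'3600', b'public': None}
    print(_cache_period_from_headers(_hdrs))  # 3600 (seconds)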
| 34.501587
| 87
| 0.644829
|
7b4b2e719e4365fccccc1bf88e377a57146c67f6
| 74
|
py
|
Python
|
tests/asp/gringo/choice.006.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 19
|
2015-12-03T08:53:45.000Z
|
2022-03-31T02:09:43.000Z
|
tests/asp/gringo/choice.006.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 80
|
2017-11-25T07:57:32.000Z
|
2018-06-10T19:03:30.000Z
|
tests/asp/gringo/choice.006.test.py
|
bernardocuteri/wasp
|
05c8f961776dbdbf7afbf905ee00fc262eba51ad
|
[
"Apache-2.0"
] | 6
|
2015-01-15T07:51:48.000Z
|
2020-06-18T14:47:48.000Z
|
input = """
{a,b} :- c.
c | d.
:- a.
"""
output = """
{c}
{c, b}
{d}
"""
| 6.166667
| 12
| 0.283784
|
3741f46a8f86b3be6bfaa8cc31fef2fd51039a7c
| 4,253
|
py
|
Python
|
elliot/evaluation/metrics/fairness/MAD/ItemMADrating.py
|
gategill/elliot
|
113763ba6d595976e14ead2e3d460d9705cd882e
|
[
"Apache-2.0"
] | 175
|
2021-03-04T15:46:25.000Z
|
2022-03-31T05:56:58.000Z
|
elliot/evaluation/metrics/fairness/MAD/ItemMADrating.py
|
gategill/elliot
|
113763ba6d595976e14ead2e3d460d9705cd882e
|
[
"Apache-2.0"
] | 15
|
2021-03-06T17:53:56.000Z
|
2022-03-24T17:02:07.000Z
|
elliot/evaluation/metrics/fairness/MAD/ItemMADrating.py
|
gategill/elliot
|
113763ba6d595976e14ead2e3d460d9705cd882e
|
[
"Apache-2.0"
] | 39
|
2021-03-04T15:46:26.000Z
|
2022-03-09T15:37:12.000Z
|
"""
This is the implementation of the Item MAD rating metric.
It proceeds from a user-wise computation and averages the values over the users.
"""
__version__ = '0.3.1'
__author__ = 'Vito Walter Anelli, Claudio Pomo'
__email__ = 'vitowalter.anelli@poliba.it, claudio.pomo@poliba.it'
import numpy as np
import pandas as pd
from elliot.evaluation.metrics.base_metric import BaseMetric
class ItemMADrating(BaseMetric):
r"""
Item MAD Rating-based
This class represents the implementation of the Item MAD rating recommendation metric.
For further details, please refer to the `paper <https://dl.acm.org/doi/abs/10.1145/3269206.3271795>`_
.. math::
        \mathrm{MAD} = \mathrm{avg}_{i,j}\left(\mathrm{MAD}(R^{(i)}, R^{(j)})\right)
To compute the metric, add it to the config file adopting the following pattern:
.. code:: yaml
complex_metrics:
- metric: ItemMADrating
clustering_name: ItemPopularity
clustering_file: ../data/movielens_1m/i_pop.tsv
"""
def __init__(self, recommendations, config, params, eval_objects, additional_data):
"""
Constructor
:param recommendations: list of recommendations in the form {user: [(item1,value1),...]}
:param config: SimpleNameSpace that represents the configuration of the experiment
:param params: Parameters of the model
:param eval_objects: list of objects that may be useful for the computation of the different metrics
"""
super().__init__(recommendations, config, params, eval_objects, additional_data)
self._cutoff = self._evaluation_objects.cutoff
self._relevance = self._evaluation_objects.relevance.binary_relevance
self._item_clustering_path = self._additional_data.get("clustering_file", False)
self._item_clustering_name = self._additional_data.get("clustering_name", "")
if self._item_clustering_path:
self._item_clustering = pd.read_csv(self._additional_data["clustering_file"], sep="\t", header=None)
self._n_clusters = self._item_clustering[1].nunique()
self._item_clustering = dict(zip(self._item_clustering[0], self._item_clustering[1]))
else:
self._n_clusters = 1
self._item_clustering = {}
self._sum = np.zeros(self._n_clusters)
self._n_items = np.zeros(self._n_clusters)
self._item_count = {}
self._item_gain = {}
def name(self):
"""
Metric Name Getter
:return: returns the public name of the metric
"""
return f"ItemMADrating_{self._item_clustering_name}"
def __item_mad(self, user_recommendations, cutoff, user_relevant_items):
"""
Per User Item MAD rating
:param user_recommendations: list of user recommendation in the form [(item1,value1),...]
:param cutoff: numerical threshold to limit the recommendation list
:param user_relevant_items: list of user relevant items in the form [item1,...]
        :return: None; this method only accumulates the per-item exposure counts and rating gains used by eval()
"""
for i, r in user_recommendations[:cutoff]:
self._item_count[i] = self._item_count.get(i, 0) + 1
self._item_gain[i] = self._item_gain.get(i, 0) + (r if i in user_relevant_items else 0)
def eval(self):
"""
Evaluation function
:return: the overall averaged value of Item MAD rating
"""
for u, u_r in self._recommendations.items():
if len(self._relevance.get_user_rel(u)):
self.__item_mad(u_r, self._cutoff, self._relevance.get_user_rel(u))
for item, gain in self._item_gain.items():
v = gain/self._item_count[item]
cluster = self._item_clustering.get(item, None)
if cluster is not None:
self._sum[cluster] += v
self._n_items[cluster] += 1
avg = [self._sum[i]/self._n_items[i] for i in range(self._n_clusters)]
differences = []
for i in range(self._n_clusters):
for j in range(i+1, self._n_clusters):
differences.append(abs(avg[i] - avg[j]))
return np.average(differences)
def get(self):
return [self]
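# Illustrative sketch (not part of elliot): the pairwise MAD averaging that eval()
# performs once the per-cluster average gained ratings are known. The numbers are
# hypothetical.
if __name__ == "__main__":
    _avg = [3.8, 3.1, 2.5]  # assumed per-cluster average rating gain
    _diffs = [abs(_avg[i] - _avg[j])
              for i in range(len(_avg)) for j in range(i + 1, len(_avg))]
    print(np.average(_diffs))  # mean absolute difference over all cluster pairs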
| 38.315315
| 112
| 0.653186
|
b681b2b2bfe0a3ad29a927cd8edcfead858ae53f
| 17,659
|
py
|
Python
|
pynars/utils/_trash/sparse_lut_v2/branch_list.py
|
AIxer/PyNARS
|
443b6a5e1c9779a1b861df1ca51ce5a190998d2e
|
[
"MIT"
] | null | null | null |
pynars/utils/_trash/sparse_lut_v2/branch_list.py
|
AIxer/PyNARS
|
443b6a5e1c9779a1b861df1ca51ce5a190998d2e
|
[
"MIT"
] | null | null | null |
pynars/utils/_trash/sparse_lut_v2/branch_list.py
|
AIxer/PyNARS
|
443b6a5e1c9779a1b861df1ca51ce5a190998d2e
|
[
"MIT"
] | null | null | null |
from typing import Any, Callable, Dict, List, Tuple, Type, Set, Union
import typing
from collections import OrderedDict
from copy import deepcopy, copy
import marshal
# import matplotlib
import networkx as nx
import matplotlib.pyplot as plt
from ordered_set import OrderedSet
import sty
import cython
# deepcopy = lambda x: marshal.loads(marshal.dumps(x))
deepcopy2 = lambda x: marshal.loads(marshal.dumps(x))
class Node:
next_nodes: Union[typing.OrderedDict[tuple, 'Node'], Set]
last_nodes: typing.OrderedDict[tuple, 'Node']
is_end: bool
def __init__(self, index: set, is_end=False, depth: int=-1, next_nodes: typing.OrderedDict[tuple, 'Node']=None, last_nodes: typing.OrderedDict[tuple, 'Node']=None) -> None:
self.index = index
self.is_end = is_end
self.next_nodes = next_nodes or (None if is_end else OrderedDict())
self.last_nodes= last_nodes or OrderedDict()
self.depth = depth
pass
def append(self, node: Type['Node']):
self.next_nodes[tuple(node.index)] = node
node.last_nodes[(tuple(self.index), id(self))] = self
def duplicate_shallow(self, index: set=None):
node = Node(index or self.index, self.is_end, self.depth)
for next_node in self.next_nodes_list:
node.append(next_node)
return node
def duplicate_deep(self, index:set=None):
node = Node(index or self.index, self.is_end, self.depth)
for next_node in self.next_nodes_list:
next_node = next_node.duplicate_deep()
node.append(next_node)
return node
def remove_next(self, node: Type['Node']):
''''''
self.next_nodes.pop(tuple(node.index), None)
node.last_nodes.pop((tuple(self.index), id(self)), None)
def remove_last(self, node: Type['Node']):
''''''
self.last_nodes.pop((tuple(node.index), id(node)), None)
node.next_nodes.pop(tuple(self.index), None)
def reset_index(self, index):
''''''
# index_old = tuple(self.index)
next_nodes: List[Node] = self.next_nodes_list
last_nodes: List[Node] = self.last_nodes_list
for node in next_nodes: node.remove_last(self)
for node in last_nodes: node.remove_next(self)
self.index = index
for node in next_nodes: self.append(node)
for node in last_nodes: node.append(self)
@property
def is_fan_in(self):
return (self.last_nodes is not None) and (len(self.last_nodes) > 1)
@property
def next_nodes_list(self):
return list(self.next_nodes.values()) if self.next_nodes is not None else []
@property
def last_nodes_list(self):
return list(self.last_nodes.values()) if self.last_nodes is not None else []
def __getitem__(self, i):
if i == 0: return self.index
elif i == 1: return self.next_nodes
else: return None
def __setitem__(self, i, value):
if i == 0: self.index = value
elif i == 1: self.next_nodes = value
else: raise "Invalid case."
def __repr__(self) -> str:
return f'<Node: {repr(self.index)} {repr(self.next_nodes)}>'
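# Illustrative sketch (not part of the module): linking two nodes and inspecting
# the doubly-linked structure that Node.append() builds. Indices are hypothetical.
#   a, b = Node({0}), Node({1}, is_end=True)
#   a.append(b)
#   a.next_nodes[(1,)] is b            -> True
#   b.last_nodes[((0,), id(a))] is a   -> True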
class BranchList:
blists: Node
def __init__(self, shape: tuple) -> None:
self.shape = tuple(shape)
self.blists = Node({})
self.lists = []
self.depth = len(self.shape) - 1
def _normalize(self, indices: list):
indices_norm = []
for i, index in enumerate(indices):
if isinstance(index, int):
indices_norm.append(set((index,)))
elif isinstance(index, list) or isinstance(index, tuple):
indices_norm.append(set(index))
elif index is Any or index is None:
indices_norm.append(set((*range(self.shape[i]), None)))
else:
raise "Invalid case."
return indices_norm
def _merge(self, blists: List[Node], blist_in: List[Node], blist_last: Node=None, blist_in_last: Node=None, is_new_blist: bool=False, depth=0):
'''merge the new indices into `self.blist`
blists: all the blist under the current depth.
blist_in: the new blist. Non-branch should be ensured.
depth: the current depth.
'''
if depth == 0: # OK
if len(blist_in) > 0: blist_in: Node = blist_in[0]
else: return
index_new = blist_in.index
index_new_diff = index_new - set().union(*(blist[0] for blist in blists))
if len(index_new_diff) > 0:
blist_new_diff = blist_in.duplicate_deep(index_new_diff)
blist_last.append(blist_new_diff)
_is_new_blist = is_new_blist
for blist in blists:
''''''
is_new_blist = _is_new_blist
# get index_common and index_old_diff.
index_old = blist[0] # e.g. index_old = {0, 1}
index_common = index_new & index_old
index_old_diff = index_old - index_new
index_new_diff = index_new - index_old
index_new = index_new_diff
if len(index_old_diff) > 0:
# keep the old one
blist.reset_index(index_old_diff)
# build the new one
if len(index_common) > 0:
# BUG
# since the indexes of son-nodes of `blist_last` are all different (orthogonal), the `index_common`s of the `blist`s are also different.
blist_in_common = blist_in.duplicate_deep(index_common)
# blist_in_common = Node(blist_in.index, blist_in.is_end, blist_in.depth)
blist_last.append(blist_in_common)
self._merge(blist.next_nodes_list, blist_in_common.next_nodes_list, blist, blist_in_common, True, depth+1)
else:
if len(index_common) > 0:
                        # no need to build a new link
self._merge(blist.next_nodes_list, blist_in.next_nodes_list, blist, blist_in, False, depth+1)
pass
elif 0 < depth <= self.depth:
# if len(blist_in) > 0: blist_in: Node = blist_in[0]
# else: return
blist_in: Node = blist_in[0]
# blist_in_last.next_nodes = OrderedDict() # TODO: modify the edges.
index_new = blist_in.index
index_new_diff = index_new - set().union(*(blist[0] for blist in blists))
if len(index_new_diff) > 0:
# the new one to be add
blist_new = blist_in.duplicate_deep(index_new_diff)
if not is_new_blist: blist_last.append(blist_new)
else: blist_in_last.append(blist_new)
_is_new_blist = is_new_blist
for blist in blists:
''''''
is_new_blist = _is_new_blist
# get index_common and index_old_diff.
index_old = blist[0] # e.g. index_old = {0, 1}
index_common = index_new & index_old
index_old_diff = index_old - index_new
index_new_diff = index_new - index_old
index_new = index_new_diff
if blist.is_fan_in: # there are multiple input nodes.
# remove the blist from fan-in, and add a copied one.
if len(index_common) > 0:
blist_last.remove_next(blist)
blist = blist.duplicate_shallow()
blist_last.append(blist)
if len(index_old_diff) > 0:
# keep the old one
blist_old_diff = blist
blist_old_diff.reset_index(index_old_diff)
if is_new_blist:
blist_in_last.append(blist_old_diff)
# build the new one
if len(index_common) > 0:
# since the indexes of son-nodes of `blist_last` are all different (orthogonal), the `index_common`s of the `blist`s are also different.
blist_old_common = blist.duplicate_shallow(index_common)
blist_last.append(blist_old_common)
# blist_in_common = blist_in.duplicate_deep(index_common)
blist_in_common = Node(blist_in.index, blist_in.is_end, blist_in.depth)
if is_new_blist: # TODO: check here
blist_in_last.append(blist_in_common) # or blist_last.append(blist_in_common)?
else:
blist_last.append(blist_in_common)
self._merge(blist_old_common.next_nodes_list, blist_in.next_nodes_list, blist_old_common, blist_in_common, True, depth+1)
else: # len(index_old_diff) == 0
# BUG
if len(index_common) > 0:
                            # no need to build a new link
# blist_in_common = blist_in.duplicate_shallow()
blist_in_common = blist_in
if is_new_blist:
blist_in_last.append(blist_in_common)
self._merge(blist.next_nodes_list, blist_in.next_nodes_list, blist, blist_in_common, True, depth+1)
pass
pass
else: # not fan_in
if len(index_old_diff) > 0:
# keep the old one
blist.reset_index(index_old_diff)
if is_new_blist:
blist_in_last.append(blist)
# build the new one
if len(index_common) > 0:
# since the indexes of son-nodes of `blist_last` are all different (orthogonal), the `index_common`s of the `blist`s are also different.
blist_old_common = blist.duplicate_shallow(index_common)
blist_last.append(blist_old_common)
# blist_in_common = blist_in.duplicate_deep(index_common)
blist_in_common = Node(blist_in.index, blist_in.is_end, blist_in.depth)
blist_in_last.append(blist_in_common)
self._merge(blist.next_nodes_list, blist_in.next_nodes_list, blist_old_common, blist_in_common, True, depth+1)
else:
# BUG
if len(index_common) > 0:
                            # no need to build a new link
blist_in_common = blist_in.duplicate_shallow()
if is_new_blist:
blist_in_last.append(blist_in_common)
# if is_new_blist:
# # blist_in_common = blist_in.duplicate_deep(index_common)
# blist_in_common = Node(index_common, blist_in.index, blist_in.depth)
# blist_in_last.append(blist_in_common)
# else:
# blist_in_common = blist_in
# # blist_in_last.append(blist_in_common)
self._merge(blist.next_nodes_list, blist_in.next_nodes_list, blist, blist_in_common, is_new_blist, depth+1)
pass
pass
elif depth == self.depth:
pass
else:
pass
# @cython.cfunc
def _make_blist(self, indices):
blist_original = blist = Node(indices[0], False, 0)
if self.depth == 0: return blist_original
for i_depth, index in enumerate(indices[1:], 1):
if i_depth == self.depth:
blist.append(Node(index, True, i_depth))
else:
blist_new = Node(index, False, i_depth)
blist.append(blist_new)
blist = blist_new
return blist_original
def add(self, indices: list, value):
indices = self._normalize(indices)
self.lists.append((indices, value))
if len(self.blists.next_nodes) == 0:
self.blists.append(self._make_blist(indices))
return
# Now, `self.blist` is not None
blist_index = self._make_blist(indices)
self._merge(self.blists.next_nodes_list, [blist_index], self.blists)
pass
def build(self, value_func: Callable=OrderedSet, add_func: Callable=OrderedSet.add): # list, OrderedSet, etc.
# @cython.cfunc
# @cython.returns(cython.void)
# @cython.locals()
def set_value_by_func(blists: List[Node], indices, value_func: Callable, depth=0):
'''it should be ensured that `indices` is in `blists`'''
index: set = indices[depth]
for blist in blists:
if index.issuperset(blist[0]):
if depth < self.depth:
set_value_by_func(list(blist[1].values()), indices, value_func, depth+1)
else:
blist[1] = value_func()
# @cython.cfunc
# @cython.returns()
# @cython.locals()
def get_value(blists, indices, depth=0, ret=None):
'''it should be ensured that `indices` is in `blists`'''
ret = [] if ret is None else ret
index: set = indices[depth]
for blist in blists:
if index.issuperset(blist[0]):
if depth < self.depth:
get_value(list(blist[1].values()), indices, depth+1, ret)
else:
ret.append(blist[1])
return ret
blists = list(self.blists[1].values())
for indices, _ in self.lists:
set_value_by_func(blists, indices, value_func)
for indices, value in self.lists:
list_values = get_value(blists, indices)
for values in list_values:
assert values is not None
add_func(values, value)
def clear(self):
''''''
blists = list(self.blists[1].values())
if len(self.blists[1]) == 0: return
for blist in blists:
del blist
del blists
self.blists[1] = None
def draw(self, blists: List[Node]=None, show_labels=True):
''''''
# from matplotlib.text import Text
blists = list(self.blists[1].values())
if len(blists) == 0:
print('None BranchList.')
return
g = nx.DiGraph()
def add_nodes(g: nx.DiGraph, node_current, label_node_current, blists_next: List[Node], i_layer=0):
n_node = g.number_of_nodes()
args_next = []
if node_current not in g:
g.add_node(node_current, label=label_node_current, layer=i_layer)
n_node += 1
if i_layer <= self.depth:
# for node, blist_next in enumerate(blists_next, n_node):
for blist_next in blists_next:
label = blist_next[0]
node = id(blist_next)
g.add_node(node, label=label, layer=i_layer+1)
g.add_edge(node_current, node)
if blist_next[1] is not None and len(blist_next[1]) > 0:
args_next.append((node, label, (list(blist_next[1].values()) if i_layer < self.depth else blist_next[1]), i_layer+1))
for arg_next in args_next:
add_nodes(g, *arg_next)
else:
node = id(blists_next)
g.add_node(node, label=blists_next, layer=i_layer+1)
g.add_edge(node_current, node)
pass
add_nodes(g, "root", "root", blists)
plt.clf()
labels = {node: attr['label'] for node, attr in g.nodes.items()}
pos = nx.multipartite_layout(g, subset_key="layer")
if show_labels:
labels = nx.draw_networkx_labels(g,pos, labels)
for t in labels.values():
t.set_rotation(30)
nx.draw(g, pos, with_labels=False, node_size=5)
# nx.draw(g)
plt.show()
pass
if __name__ == '__main__':
blist = BranchList((4, 4, 4, 4))
blist.add([0, 0, 0, 1], "A")
blist.add([0, [1,2], 0, 1], "B")
blist.add([0, 3, 0, 1], "C")
blist.add([0, [0,1,2,3], 0, [1,2]], "D")
blist.draw()
blist.build()
blist.draw()
# blist = BranchList((3,3,3,4,3,3))
# blist.add([0, 0, 1, 0, 2, 3])
# blist.add([0, 0, 1, 2, 0, 1])
# blist.add([0, 0, 2, 0, 2, 3])
# blist.add([0, 0, [1,2], 0, [0,2], 3])
# blist.add([0,0,0,Any,0,0])
# blist.add([0,0,Any,[1,2],0,0])
# all = []
# bl = blist.blist
# for _ in range(blist.depth):
# all.append(bl[0])
# bl = bl[1][0]
pass
# if cython.compiled:
# print(f"{sty.fg.blue}[BranchList]Info{sty.fg.rs}: {sty.fg.green}Cython{sty.fg.rs} version.")
# else:
# print(f"{sty.fg.cyan}[BranchList]Warning{sty.fg.cyan}: {sty.fg.red}Python{sty.fg.red} version.")
| 39.952489
| 176
| 0.541027
|
55a42e169dc80decca130737ef595fa32abc559e
| 17,254
|
py
|
Python
|
crabageprediction/venv/Lib/site-packages/setuptools/_distutils/sysconfig.py
|
13rianlucero/CrabAgePrediction
|
92bc7fbe1040f49e820473e33cc3902a5a7177c7
|
[
"MIT"
] | 3
|
2022-02-21T11:40:21.000Z
|
2022-02-27T07:37:31.000Z
|
crabageprediction/venv/Lib/site-packages/setuptools/_distutils/sysconfig.py
|
13rianlucero/CrabAgePrediction
|
92bc7fbe1040f49e820473e33cc3902a5a7177c7
|
[
"MIT"
] | 9
|
2022-02-21T11:44:01.000Z
|
2022-03-14T15:36:08.000Z
|
crabageprediction/venv/Lib/site-packages/setuptools/_distutils/sysconfig.py
|
13rianlucero/CrabAgePrediction
|
92bc7fbe1040f49e820473e33cc3902a5a7177c7
|
[
"MIT"
] | 1
|
2022-01-05T08:56:59.000Z
|
2022-01-05T08:56:59.000Z
|
"""Provide access to Python's configuration information. The specific
configuration variables available depend heavily on the platform and
configuration. The values may be retrieved using
get_config_var(name), and the list of variables is available via
get_config_vars().keys(). Additional convenience functions are also
available.
Written by: Fred L. Drake, Jr.
Email: <fdrake@acm.org>
"""
import os
import re
import sys
import sysconfig
from .errors import DistutilsPlatformError
from . import py39compat
IS_PYPY = '__pypy__' in sys.builtin_module_names
# These are needed in a couple of spots, so just compute them once.
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
BASE_PREFIX = os.path.normpath(sys.base_prefix)
BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix)
# Path to the base directory of the project. On Windows the binary may
# live in project/PCbuild/win32 or project/PCbuild/amd64.
# set for cross builds
if "_PYTHON_PROJECT_BASE" in os.environ:
project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"])
else:
if sys.executable:
project_base = os.path.dirname(os.path.abspath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
project_base = os.getcwd()
# python_build: (Boolean) if true, we're either building Python or
# building an extension with an un-installed Python, so we use
# different (hard-wired) directories.
def _is_python_source_dir(d):
for fn in ("Setup", "Setup.local"):
if os.path.isfile(os.path.join(d, "Modules", fn)):
return True
return False
_sys_home = getattr(sys, '_home', None)
if os.name == 'nt':
def _fix_pcbuild(d):
if d and os.path.normcase(d).startswith(
os.path.normcase(os.path.join(PREFIX, "PCbuild"))):
return PREFIX
return d
project_base = _fix_pcbuild(project_base)
_sys_home = _fix_pcbuild(_sys_home)
def _python_build():
if _sys_home:
return _is_python_source_dir(_sys_home)
return _is_python_source_dir(project_base)
python_build = _python_build()
# Calculate the build qualifier flags if they are defined. Adding the flags
# to the include and lib directories only makes sense for an installation, not
# an in-source build.
build_flags = ''
try:
if not python_build:
build_flags = sys.abiflags
except AttributeError:
# It's not a configure-based build, so the sys module doesn't have
# this attribute, which is fine.
pass
def get_python_version():
"""Return a string containing the major and minor Python version,
leaving off the patchlevel. Sample return values could be '1.5'
or '2.2'.
"""
return '%d.%d' % sys.version_info[:2]
def get_python_inc(plat_specific=0, prefix=None):
"""Return the directory containing installed Python header files.
If 'plat_specific' is false (the default), this is the path to the
non-platform-specific header files, i.e. Python.h and so on;
otherwise, this is the path to platform-specific header files
(namely pyconfig.h).
If 'prefix' is supplied, use it instead of sys.base_prefix or
sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
if os.name == "posix":
if IS_PYPY and sys.version_info < (3, 8):
return os.path.join(prefix, 'include')
if python_build:
# Assume the executable is in the build directory. The
# pyconfig.h file should be in the same directory. Since
# the build directory may not be the source directory, we
# must use "srcdir" from the makefile to find the "Include"
# directory.
if plat_specific:
return _sys_home or project_base
else:
incdir = os.path.join(get_config_var('srcdir'), 'Include')
return os.path.normpath(incdir)
implementation = 'pypy' if IS_PYPY else 'python'
python_dir = implementation + get_python_version() + build_flags
return os.path.join(prefix, "include", python_dir)
elif os.name == "nt":
if python_build:
# Include both the include and PC dir to ensure we can find
# pyconfig.h
return (os.path.join(prefix, "include") + os.path.pathsep +
os.path.join(prefix, "PC"))
return os.path.join(prefix, "include")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its C header files "
"on platform '%s'" % os.name)
# allow this behavior to be monkey-patched. Ref pypa/distutils#2.
def _posix_lib(standard_lib, libpython, early_prefix, prefix):
if standard_lib:
return libpython
else:
return os.path.join(libpython, "site-packages")
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
"""Return the directory containing the Python library (standard or
site additions).
If 'plat_specific' is true, return the directory containing
platform-specific modules, i.e. any module from a non-pure-Python
module distribution; otherwise, return the platform-shared library
directory. If 'standard_lib' is true, return the directory
containing standard Python library modules; otherwise, return the
directory for site-specific modules.
If 'prefix' is supplied, use it instead of sys.base_prefix or
sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
"""
if IS_PYPY and sys.version_info < (3, 8):
# PyPy-specific schema
if prefix is None:
prefix = PREFIX
if standard_lib:
return os.path.join(prefix, "lib-python", sys.version[0])
return os.path.join(prefix, 'site-packages')
early_prefix = prefix
if prefix is None:
if standard_lib:
prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
else:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
if plat_specific or standard_lib:
# Platform-specific modules (any module from a non-pure-Python
# module distribution) or standard Python library modules.
libdir = getattr(sys, "platlibdir", "lib")
else:
# Pure Python
libdir = "lib"
implementation = 'pypy' if IS_PYPY else 'python'
libpython = os.path.join(prefix, libdir,
implementation + get_python_version())
return _posix_lib(standard_lib, libpython, early_prefix, prefix)
elif os.name == "nt":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
return os.path.join(prefix, "Lib", "site-packages")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its library "
"on platform '%s'" % os.name)
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
Mainly needed on Unix, so we can plug in the information that
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
if sys.platform == "darwin":
# Perform first-time customization of compiler-related
# config vars on OS X now that we know we need a compiler.
# This is primarily to support Pythons from binary
# installers. The kind and paths to build tools on
# the user system may vary significantly from the system
# that Python itself was built on. Also the user OS
# version and build tools may not support the same set
# of CPU architectures for universal builds.
global _config_vars
# Use get_config_var() to ensure _config_vars is initialized.
if not get_config_var('CUSTOMIZED_OSX_COMPILER'):
import _osx_support
_osx_support.customize_compiler(_config_vars)
_config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
(cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
get_config_vars(
'CC', 'CXX', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')
if 'CC' in os.environ:
newcc = os.environ['CC']
if('LDSHARED' not in os.environ
and ldshared.startswith(cc)):
# If CC is overridden, use that as the default
# command for LDSHARED as well
ldshared = newcc + ldshared[len(cc):]
cc = newcc
if 'CXX' in os.environ:
cxx = os.environ['CXX']
if 'LDSHARED' in os.environ:
ldshared = os.environ['LDSHARED']
if 'CPP' in os.environ:
cpp = os.environ['CPP']
else:
cpp = cc + " -E" # not always
if 'LDFLAGS' in os.environ:
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if 'CFLAGS' in os.environ:
cflags = cflags + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if 'CPPFLAGS' in os.environ:
cpp = cpp + ' ' + os.environ['CPPFLAGS']
cflags = cflags + ' ' + os.environ['CPPFLAGS']
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
if 'AR' in os.environ:
ar = os.environ['AR']
if 'ARFLAGS' in os.environ:
archiver = ar + ' ' + os.environ['ARFLAGS']
else:
archiver = ar + ' ' + ar_flags
cc_cmd = cc + ' ' + cflags
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc,
archiver=archiver)
if 'RANLIB' in os.environ and compiler.executables.get('ranlib', None):
compiler.set_executables(ranlib=os.environ['RANLIB'])
compiler.shared_lib_extension = shlib_suffix
def get_config_h_filename():
"""Return full pathname of installed pyconfig.h file."""
if python_build:
if os.name == "nt":
inc_dir = os.path.join(_sys_home or project_base, "PC")
else:
inc_dir = _sys_home or project_base
return os.path.join(inc_dir, 'pyconfig.h')
else:
return sysconfig.get_config_h_filename()
def get_makefile_filename():
"""Return full pathname of installed Makefile from the Python build."""
return sysconfig.get_makefile_filename()
def parse_config_h(fp, g=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
return sysconfig.parse_config_h(fp, vars=g)
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
def parse_makefile(fn, g=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
from distutils.text_file import TextFile
fp = TextFile(
fn, strip_comments=1, skip_blanks=1, join_lines=1,
errors="surrogateescape")
if g is None:
g = {}
done = {}
notdone = {}
while True:
line = fp.readline()
if line is None: # eof
break
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
# do variable interpolation here
while notdone:
for name in list(notdone):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if name.startswith('PY_') and \
name[3:] in renamed_variables:
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try:
value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
del notdone[name]
if name.startswith('PY_') \
and name[3:] in renamed_variables:
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference; just drop it since we can't deal
del notdone[name]
fp.close()
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
g.update(done)
return g
def expand_makefile_vars(s, vars):
"""Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
'string' according to 'vars' (a dictionary mapping variable names to
values). Variables not present in 'vars' are silently expanded to the
empty string. The variable values in 'vars' should not contain further
variable expansions; if 'vars' is the output of 'parse_makefile()',
you're fine. Returns a variable-expanded version of 's'.
"""
# This algorithm does multiple expansion, so if vars['foo'] contains
# "${bar}", it will expand ${foo} to ${bar}, and then expand
# ${bar}... and so forth. This is fine as long as 'vars' comes from
# 'parse_makefile()', which takes care of such expansions eagerly,
# according to make's variable expansion semantics.
while True:
m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
if m:
(beg, end) = m.span()
s = s[0:beg] + vars.get(m.group(1)) + s[end:]
else:
break
return s
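# Hypothetical example of expand_makefile_vars (values invented for illustration):
#   expand_makefile_vars("$(CC) -I${INCDIR}", {"CC": "gcc", "INCDIR": "/usr/include"})
#   -> "gcc -I/usr/include"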
_config_vars = None
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform. Generally this includes
everything needed to build extensions and install both pure modules and
extensions. On Unix, this means every variable defined in Python's
installed Makefile; on Windows it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _config_vars
if _config_vars is None:
_config_vars = sysconfig.get_config_vars().copy()
py39compat.add_ext_suffix(_config_vars)
if args:
vals = []
for name in args:
vals.append(_config_vars.get(name))
return vals
else:
return _config_vars
def get_config_var(name):
"""Return the value of a single variable using the dictionary
returned by 'get_config_vars()'. Equivalent to
get_config_vars().get(name)
"""
if name == 'SO':
import warnings
warnings.warn(
'SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2)
return get_config_vars().get(name)
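# Illustrative sketch (not part of distutils): typical read-only queries against the
# helpers above; the printed values depend on the interpreter and platform.
if __name__ == "__main__":
    print(get_python_version())            # e.g. '3.10'
    print(get_python_inc())                # directory containing Python.h
    print(get_python_lib(standard_lib=1))  # standard-library directory
    print(get_config_var("EXT_SUFFIX"))    # e.g. '.cpython-310-x86_64-linux-gnu.so'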
| 35.871102
| 79
| 0.598818
|
160aef27d6e6330e7a8bdafe4096cecf8927a1d0
| 2,551
|
py
|
Python
|
withings_api/const.py
|
ronaldheft/python_withings_api
|
bb23e973c351808e2287056bad4a900c325f178f
|
[
"MIT"
] | 86
|
2019-10-06T17:20:21.000Z
|
2022-03-28T09:57:35.000Z
|
withings_api/const.py
|
ronaldheft/python_withings_api
|
bb23e973c351808e2287056bad4a900c325f178f
|
[
"MIT"
] | 59
|
2019-10-19T21:14:26.000Z
|
2022-03-31T19:16:33.000Z
|
withings_api/const.py
|
ronaldheft/python_withings_api
|
bb23e973c351808e2287056bad4a900c325f178f
|
[
"MIT"
] | 29
|
2019-10-11T13:55:42.000Z
|
2022-03-23T10:25:52.000Z
|
"""Constant values."""
from typing_extensions import Final
LOG_NAMESPACE: Final = "python_withings_api"
STATUS_SUCCESS: Final = (0,)
STATUS_AUTH_FAILED: Final = (100, 101, 102, 200, 401)
STATUS_INVALID_PARAMS: Final = (
201,
202,
203,
204,
205,
206,
207,
208,
209,
210,
211,
212,
213,
216,
217,
218,
220,
221,
223,
225,
227,
228,
229,
230,
234,
235,
236,
238,
240,
241,
242,
243,
244,
245,
246,
247,
248,
249,
250,
251,
252,
254,
260,
261,
262,
263,
264,
265,
266,
267,
271,
272,
275,
276,
283,
284,
285,
286,
287,
288,
290,
293,
294,
295,
297,
300,
301,
302,
303,
304,
321,
323,
324,
325,
326,
327,
328,
329,
330,
331,
332,
333,
334,
335,
336,
337,
338,
339,
340,
341,
342,
343,
344,
345,
346,
347,
348,
349,
350,
351,
352,
353,
380,
381,
382,
400,
501,
502,
503,
504,
505,
506,
509,
510,
511,
523,
532,
3017,
3018,
3019,
)
STATUS_UNAUTHORIZED: Final = (214, 277, 2553, 2554, 2555)
STATUS_ERROR_OCCURRED: Final = (
215,
219,
222,
224,
226,
231,
233,
237,
253,
255,
256,
257,
258,
259,
268,
269,
270,
273,
274,
278,
279,
280,
281,
282,
289,
291,
292,
296,
298,
305,
306,
308,
309,
310,
311,
312,
313,
314,
315,
316,
317,
318,
319,
320,
322,
370,
371,
372,
373,
374,
375,
383,
391,
402,
516,
517,
518,
519,
520,
521,
525,
526,
527,
528,
529,
530,
531,
533,
602,
700,
1051,
1052,
1053,
1054,
2551,
2552,
2556,
2557,
2558,
2559,
3000,
3001,
3002,
3003,
3004,
3005,
3006,
3007,
3008,
3009,
3010,
3011,
3012,
3013,
3014,
3015,
3016,
3020,
3021,
3022,
3023,
3024,
5000,
5001,
5005,
5006,
6000,
6010,
6011,
9000,
10000,
)
STATUS_TIMEOUT: Final = (522,)
STATUS_BAD_STATE: Final = (524,)
STATUS_TOO_MANY_REQUESTS: Final = (601,)
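# Illustrative sketch (not part of the library): one way to map a raw Withings
# status code onto the groups defined above. The function name is hypothetical.
def _classify_status(code: int) -> str:
    groups = {
        "success": STATUS_SUCCESS,
        "auth_failed": STATUS_AUTH_FAILED,
        "invalid_params": STATUS_INVALID_PARAMS,
        "unauthorized": STATUS_UNAUTHORIZED,
        "error_occurred": STATUS_ERROR_OCCURRED,
        "timeout": STATUS_TIMEOUT,
        "bad_state": STATUS_BAD_STATE,
        "too_many_requests": STATUS_TOO_MANY_REQUESTS,
    }
    for name, codes in groups.items():
        if code in codes:
            return name
    return "unknown"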
| 10.123016
| 57
| 0.411995
|
582894324dfcc54a57b923f6f86dfa483050d0f3
| 6,461
|
py
|
Python
|
qmcpack_fs_reader.py
|
Paul-St-Young/solid_hydrogen
|
dd218cd431a283dc1a371a0af5696074d63b8c6c
|
[
"MIT"
] | 2
|
2020-08-13T23:32:03.000Z
|
2021-03-28T01:14:06.000Z
|
qmcpack_fs_reader.py
|
Paul-St-Young/solid_hydrogen
|
dd218cd431a283dc1a371a0af5696074d63b8c6c
|
[
"MIT"
] | null | null | null |
qmcpack_fs_reader.py
|
Paul-St-Young/solid_hydrogen
|
dd218cd431a283dc1a371a0af5696074d63b8c6c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import numpy as np
import scipy.interpolate as interp
from qharv.reel import ascii_out
def get_dsk_amat(floc):
""" extract A matrix from qmcfinitesize output
k->0 behavior of 3D structure factor S(k) is fitted to a Gaussian
S(k) = k^T A k
Args:
floc (str): location of qmcfinitesize output
Returns:
np.array: A matrix (3x3)
"""
mm = ascii_out.read(floc)
amat = np.zeros([3,3])
# step 1: fill upper triangular part of amat
xyzm = {'x':0,'y':1,'z':2} # map x,y,z to index
keyl = ['a_xx','a_yy','a_zz','a_xy','a_xz','a_yz']
for key in keyl: # order of key matters!
val = ascii_out.name_sep_val(mm,key)
xyz_xyz = key.split('_')[-1]
idx = tuple([xyzm[xyz] for xyz in xyz_xyz])
amat[idx] = val
# end for
# step 2: symmetrize amat
amat[(1,0)] = amat[(0,1)]
amat[(2,1)] = amat[(1,2)]
amat[(2,0)] = amat[(0,2)]
return amat
# end def get_dsk_amat
def get_volume(fout):
mm = ascii_out.read(fout)
omega = ascii_out.name_sep_val(mm, 'Vol', pos=1)
return omega
def get_data_block(floc, name, nhead=0):
start_tag = '#'+name + '_START#'
stop_tag = '#'+name + '_STOP#'
mm = ascii_out.read(floc)
text = ascii_out.block_text(mm,start_tag,stop_tag)
lines= text.split('\n')[nhead:-1] # empty after the last \n
data = np.array(
      [[float(x) for x in line.split()] for line in lines]
,dtype=float)
return data
# end def get_data_block
def add_mixed_vint(df2):
""" add mixed vint (\int vk Sk) column to extrapolated entries
df2 must have columns ['timestep','vint'], there must be a timestep=0
entry, and a timestep > 0 entry.
Args:
df2 (pd.DataFrame): DMC database
Returns:
None
"""
df2['vmixed'] = np.nan
for subdir in df2.subdir.unique():
sel = (df2.subdir==subdir)
ts0_sel = (df2.timestep==0)
# !!!! assume smallest non-zero timestep is best DMC
min_ts = df2.loc[sel&(~ts0_sel),'timestep'].min()
ts1_sel = (df2.timestep==min_ts)
# get mixed vint entry
entry = df2.loc[sel&(ts1_sel),'vint']
assert len(entry) == 1
vmixed = entry.values[0]
# transfer to pure entry
df2.loc[ts0_sel,'vmixed'] = vmixed
# end for
# end def add_mixed_vint
# ================= reproduce QMCPACK implementation ================= #
# step 1: get long-range Coulomb pair potential vk
def get_vk(fout):
""" long-range coulomb pair potential """
data = get_data_block(fout, 'VK')
vkx, vky = data.T
# QMCPACK vk is divided by volume, undo!
omega = get_volume(fout)
vky *= omega
return vkx, vky
def get_fvk(fout):
""" interpolated long-range coulomb pair potential """
vkx, vky = get_vk(fout)
tck = interp.splrep(vkx, vky)
fvk = lambda k:interp.splev(k, tck)
return fvk
# step 2: get raw static structure factor S(k)
def get_dsk(fjson, obs='dsk'):
""" raw structure factor """
import pandas as pd
df = pd.read_json(fjson)
kvecs = np.array(df.loc[0,'kvecs'])
skm = np.array(df.loc[0,'%s_mean'%obs])
ske = np.array(df.loc[0,'%s_error'%obs])
return kvecs, skm, ske
# step 3: get sum
def get_vsum(vk, skm, omega):
"""
skm should contain S(k) values at ALL supercell reciprocal vectors used
vk should be the same length as skm and NOT divided by volume omega
"""
summand = 0.5*vk*skm
vsum = 1/omega* summand.sum()
return vsum
def get_qmcpack_vsum(fjson, fout):
kvecs, skm, ske = get_dsk(fjson)
kmags = np.linalg.norm(kvecs, axis=1)
fvk = get_fvk(fout)
vk = fvk(kmags)
omega = get_volume(fout)
vsum = get_vsum(vk, skm, omega)
return vsum
# step 4: get sphericall averaged Savg(k) spline
def get_fsk(fout):
""" interpolated spherically-averaged structure factor """
data = get_data_block(fout, 'SK_SPLINE')
skx, sky = data.T
tck = interp.splrep(skx, sky)
fsk = lambda k:interp.splev(k, tck)
return fsk
# step 4: get 1D integrand
def get_intx_inty(fout):
fsk = get_fsk(fout)
vkx, vky = get_vk(fout)
myinty = 0.5*vkx**2*vky*fsk(vkx)
return vkx, myinty
# step 5: interpolate 1D integrand
def get_fint(fout):
intx, inty = get_intx_inty(fout)
padx = np.array([0.0])
pady = np.array([0.0]*len(padx))
myx = np.concatenate([padx, intx])
myy = np.concatenate([pady, inty])
tck = interp.splrep(myx, myy)
fint = lambda k:interp.splev(k, tck)
return fint
# step 6: get integral
def get_vint(fout):
from scipy.integrate import quad
vkx, vky = get_vk(fout)
fint = get_fint(fout)
intnorm = 1./(2*np.pi**2)
intval = quad(fint,0,max(vkx))[0]
vint = intnorm * intval
return vint
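# Illustrative sketch (file names are hypothetical): chaining the helpers above to
# obtain the finite-size pieces from a qmcfinitesize run and its S(k) database.
if __name__ == "__main__":
    fout = "qmcfinitesize.out"  # assumed text output location
    fjson = "dsk.json"          # assumed S(k) database location
    print(get_dsk_amat(fout))             # A matrix of the k->0 fit S(k) ~ k^T A k
    print(get_qmcpack_vsum(fjson, fout))  # discrete sum 1/Omega * sum_k 0.5*v(k)*S(k)
    print(get_vint(fout))                 # integral 1/(2*pi^2) * int dk 0.5*k^2*v(k)*S(k)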
| 35.894444
| 80
| 0.456895
|
62e57492647caf43307cfad6671891bbb2b33cd4
| 679
|
py
|
Python
|
smartroads/forms.py
|
30Meridian/RozumneMistoSnapshot
|
67a83b3908674d01992561dfb37596e395b4d482
|
[
"BSD-3-Clause"
] | null | null | null |
smartroads/forms.py
|
30Meridian/RozumneMistoSnapshot
|
67a83b3908674d01992561dfb37596e395b4d482
|
[
"BSD-3-Clause"
] | null | null | null |
smartroads/forms.py
|
30Meridian/RozumneMistoSnapshot
|
67a83b3908674d01992561dfb37596e395b4d482
|
[
"BSD-3-Clause"
] | null | null | null |
from .models import SmartroadsIssues,SmartroadsStatuses
from django import forms
from ckeditor_uploader.widgets import CKEditorUploadingWidget
class Add(forms.ModelForm):
class Meta:
model = SmartroadsIssues
fields = ['title','description','resolution', 'what_to_do', 'cost', 'done_date','lon','lat']
widgets = {
'title': forms.TextInput(attrs={'placeholder': 'Введіть назву зони. Не більше 150 символів. Напр. "Зона 51. Вул. Сагайдачного - набережна Челночна"', 'class': 'form-control input-lg'}, ),
'text': CKEditorUploadingWidget(),
'lon': forms.HiddenInput(),
'lat': forms.HiddenInput()
}
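# Illustrative sketch (not part of the app): how such a ModelForm is typically used
# in a Django view; `request` is a hypothetical HttpRequest.
#   form = Add(request.POST or None)
#   if form.is_valid():
#       issue = form.save()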
| 42.4375
| 199
| 0.659794
|
039f826fa2fe1ab5573356f2c19c31d4f8aa3022
| 2,628
|
py
|
Python
|
src/wechaty_puppet_itchat/config.py
|
wj-Mcat/python-wechaty-puppet-itchat
|
f53071cef855c10e521b79ef9ae9931a1626948d
|
[
"Apache-2.0"
] | 13
|
2021-09-20T12:11:09.000Z
|
2022-02-05T06:04:01.000Z
|
src/wechaty_puppet_itchat/config.py
|
wj-Mcat/python-wechaty-puppet-itchat
|
f53071cef855c10e521b79ef9ae9931a1626948d
|
[
"Apache-2.0"
] | 33
|
2021-11-01T20:23:55.000Z
|
2022-03-31T20:31:50.000Z
|
src/wechaty_puppet_itchat/config.py
|
fangjiyuan/python-wechaty-puppet-itchat
|
b1435e4725645579d562df6fc4717330b6bac901
|
[
"Apache-2.0"
] | 4
|
2021-09-20T10:16:03.000Z
|
2022-03-09T09:44:59.000Z
|
"""
Python Wechaty - https://github.com/wechaty/python-wechaty
Authors: Huan LI (李卓桓) <https://github.com/huan>
Jingjing WU (吴京京) <https://github.com/wj-Mcat>
2020-now @ Copyright Wechaty
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import platform
from wechaty_puppet import get_logger # type: ignore
VERSION = '1.4.1'
BASE_URL = 'https://login.weixin.qq.com'
OS = platform.system() # Windows, Linux, Darwin
DIR = os.getcwd()
DEFAULT_QR = 'QR.png'
TIMEOUT = (10, 60)
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36'
UOS_PATCH_CLIENT_VERSION = '2.0.0'
UOS_PATCH_EXTSPAM = 'Gp8ICJkIEpkICggwMDAwMDAwMRAGGoAI1GiJSIpeO1RZTq9QBKsRbPJdi84ropi16EYI10WB6g74sGmRwSNXjPQnYUKYotKkvLGpshucCaeWZMOylnc6o2AgDX9grhQQx7fm2DJRTyuNhUlwmEoWhjoG3F0ySAWUsEbH3bJMsEBwoB//0qmFJob74ffdaslqL+IrSy7LJ76/G5TkvNC+J0VQkpH1u3iJJs0uUYyLDzdBIQ6Ogd8LDQ3VKnJLm4g/uDLe+G7zzzkOPzCjXL+70naaQ9medzqmh+/SmaQ6uFWLDQLcRln++wBwoEibNpG4uOJvqXy+ql50DjlNchSuqLmeadFoo9/mDT0q3G7o/80P15ostktjb7h9bfNc+nZVSnUEJXbCjTeqS5UYuxn+HTS5nZsPVxJA2O5GdKCYK4x8lTTKShRstqPfbQpplfllx2fwXcSljuYi3YipPyS3GCAqf5A7aYYwJ7AvGqUiR2SsVQ9Nbp8MGHET1GxhifC692APj6SJxZD3i1drSYZPMMsS9rKAJTGz2FEupohtpf2tgXm6c16nDk/cw+C7K7me5j5PLHv55DFCS84b06AytZPdkFZLj7FHOkcFGJXitHkX5cgww7vuf6F3p0yM/W73SoXTx6GX4G6Hg2rYx3O/9VU2Uq8lvURB4qIbD9XQpzmyiFMaytMnqxcZJcoXCtfkTJ6pI7a92JpRUvdSitg967VUDUAQnCXCM/m0snRkR9LtoXAO1FUGpwlp1EfIdCZFPKNnXMeqev0j9W9ZrkEs9ZWcUEexSj5z+dKYQBhIICviYUQHVqBTZSNy22PlUIeDeIs11j7q4t8rD8LPvzAKWVqXE+5lS1JPZkjg4y5hfX1Dod3t96clFfwsvDP6xBSe1NBcoKbkyGxYK0UvPGtKQEE0Se2zAymYDv41klYE9s+rxp8e94/H8XhrL9oGm8KWb2RmYnAE7ry9gd6e8ZuBRIsISlJAE/e8y8xFmP031S6Lnaet6YXPsFpuFsdQs535IjcFd75hh6DNMBYhSfjv456cvhsb99+fRw/KVZLC3yzNSCbLSyo9d9BI45Plma6V8akURQA/qsaAzU0VyTIqZJkPDTzhuCl92vD2AD/QOhx6iwRSVPAxcRFZcWjgc2wCKh+uCYkTVbNQpB9B90YlNmI3fWTuUOUjwOzQRxJZj11NsimjOJ50qQwTTFj6qQvQ1a/I+MkTx5UO+yNHl718JWcR3AXGmv/aa9rD1eNP8ioTGlOZwPgmr2sor2iBpKTOrB83QgZXP+xRYkb4zVC+LoAXEoIa1+zArywlgREer7DLePukkU6wHTkuSaF+ge5Of1bXuU4i938WJHj0t3D8uQxkJvoFi/EYN/7u2P1zGRLV4dHVUsZMGCCtnO6BBigFMAA='
CACHE_DIR = '.wechaty'
LOGIN_TIMEOUT = 60
| 69.157895
| 1,434
| 0.865677
|
58e42161d06dab838e684dc420109713afabc229
| 5,878
|
py
|
Python
|
hs_file_types/utils.py
|
ResearchSoftwareInstitute/MyHPOM
|
2d48fe5ac8d21173b1685eb33059bb391fe24414
|
[
"BSD-3-Clause"
] | 1
|
2018-09-17T13:07:29.000Z
|
2018-09-17T13:07:29.000Z
|
hs_file_types/utils.py
|
ResearchSoftwareInstitute/MyHPOM
|
2d48fe5ac8d21173b1685eb33059bb391fe24414
|
[
"BSD-3-Clause"
] | 100
|
2017-08-01T23:48:04.000Z
|
2018-04-03T13:17:27.000Z
|
hs_file_types/utils.py
|
ResearchSoftwareInstitute/MyHPOM
|
2d48fe5ac8d21173b1685eb33059bb391fe24414
|
[
"BSD-3-Clause"
] | 1
|
2018-06-28T13:19:58.000Z
|
2018-06-28T13:19:58.000Z
|
import json
from dateutil import parser
from operator import lt, gt
def update_resource_coverage_element(resource):
# update resource spatial coverage based on coverage metadata from the
# logical files in the resource
spatial_coverages = [lf.metadata.spatial_coverage for lf in resource.logical_files
if lf.metadata.spatial_coverage is not None]
bbox_limits = {'box': {'northlimit': 'northlimit', 'southlimit': 'southlimit',
'eastlimit': 'eastlimit', 'westlimit': 'westlimit'},
'point': {'northlimit': 'north', 'southlimit': 'north',
'eastlimit': 'east', 'westlimit': 'east'}
}
def set_coverage_data(res_coverage_value, lfo_coverage_element, box_limits):
comparison_operator = {'northlimit': lt, 'southlimit': gt, 'eastlimit': lt,
'westlimit': gt}
for key in comparison_operator.keys():
if comparison_operator[key](res_coverage_value[key],
lfo_coverage_element.value[box_limits[key]]):
res_coverage_value[key] = lfo_coverage_element.value[box_limits[key]]
cov_type = "point"
bbox_value = {'northlimit': -90, 'southlimit': 90, 'eastlimit': -180, 'westlimit': 180,
'projection': 'WGS 84 EPSG:4326', 'units': "Decimal degrees"}
if len(spatial_coverages) > 1:
# check if one of the coverage is of type box
if any(sp_cov.type == 'box' for sp_cov in spatial_coverages):
cov_type = 'box'
else:
# check if the coverages represent different locations
unique_lats = set([sp_cov.value['north'] for sp_cov in spatial_coverages])
unique_lons = set([sp_cov.value['east'] for sp_cov in spatial_coverages])
if len(unique_lats) == 1 and len(unique_lons) == 1:
cov_type = 'point'
else:
cov_type = 'box'
if cov_type == 'point':
sp_cov = spatial_coverages[0]
bbox_value = dict()
bbox_value['projection'] = 'WGS 84 EPSG:4326'
bbox_value['units'] = 'Decimal degrees'
bbox_value['north'] = sp_cov.value['north']
bbox_value['east'] = sp_cov.value['east']
else:
for sp_cov in spatial_coverages:
if sp_cov.type == "box":
box_limits = bbox_limits['box']
set_coverage_data(bbox_value, sp_cov, box_limits)
else:
# point type coverage
box_limits = bbox_limits['point']
set_coverage_data(bbox_value, sp_cov, box_limits)
elif len(spatial_coverages) == 1:
sp_cov = spatial_coverages[0]
if sp_cov.type == "box":
cov_type = 'box'
bbox_value['projection'] = 'WGS 84 EPSG:4326'
bbox_value['units'] = 'Decimal degrees'
bbox_value['northlimit'] = sp_cov.value['northlimit']
bbox_value['eastlimit'] = sp_cov.value['eastlimit']
bbox_value['southlimit'] = sp_cov.value['southlimit']
bbox_value['westlimit'] = sp_cov.value['westlimit']
else:
# point type coverage
cov_type = "point"
bbox_value = dict()
bbox_value['projection'] = 'WGS 84 EPSG:4326'
bbox_value['units'] = 'Decimal degrees'
bbox_value['north'] = sp_cov.value['north']
bbox_value['east'] = sp_cov.value['east']
spatial_cov = resource.metadata.coverages.all().exclude(type='period').first()
if len(spatial_coverages) > 0:
if spatial_cov:
spatial_cov.type = cov_type
place_name = spatial_cov.value.get('name', None)
if place_name is not None:
bbox_value['name'] = place_name
spatial_cov._value = json.dumps(bbox_value)
spatial_cov.save()
else:
resource.metadata.create_element("coverage", type=cov_type, value=bbox_value)
else:
# delete spatial coverage element for the resource since the content files don't have any
# spatial coverage
if spatial_cov:
spatial_cov.delete()
# update resource temporal coverage
temporal_coverages = [lf.metadata.temporal_coverage for lf in resource.logical_files
if lf.metadata.temporal_coverage is not None]
date_data = {'start': None, 'end': None}
def set_date_value(date_data, coverage_element, key):
comparison_operator = gt if key == 'start' else lt
if date_data[key] is None:
date_data[key] = coverage_element.value[key]
else:
if comparison_operator(parser.parse(date_data[key]),
parser.parse(coverage_element.value[key])):
date_data[key] = coverage_element.value[key]
for temp_cov in temporal_coverages:
start_date = parser.parse(temp_cov.value['start'])
end_date = parser.parse(temp_cov.value['end'])
temp_cov.value['start'] = start_date.strftime('%m/%d/%Y')
temp_cov.value['end'] = end_date.strftime('%m/%d/%Y')
set_date_value(date_data, temp_cov, 'start')
set_date_value(date_data, temp_cov, 'end')
temp_cov = resource.metadata.coverages.all().filter(type='period').first()
if date_data['start'] is not None and date_data['end'] is not None:
if temp_cov:
temp_cov._value = json.dumps(date_data)
temp_cov.save()
else:
resource.metadata.create_element("coverage", type='period', value=date_data)
elif temp_cov:
# delete the temporal coverage for the resource since the content files don't have
# temporal coverage
temp_cov.delete()
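# Illustrative standalone sketch (not part of HydroShare): the bounding-box union
# rule that set_coverage_data applies, shown on plain dicts with made-up coordinates.
if __name__ == "__main__":
    bbox = {'northlimit': -90, 'southlimit': 90, 'eastlimit': -180, 'westlimit': 180}
    coverages = [
        {'northlimit': 42.1, 'southlimit': 41.9, 'eastlimit': -71.0, 'westlimit': -71.4},
        {'northlimit': 43.0, 'southlimit': 42.5, 'eastlimit': -70.5, 'westlimit': -70.9},
    ]
    for cov in coverages:
        bbox['northlimit'] = max(bbox['northlimit'], cov['northlimit'])
        bbox['southlimit'] = min(bbox['southlimit'], cov['southlimit'])
        bbox['eastlimit'] = max(bbox['eastlimit'], cov['eastlimit'])
        bbox['westlimit'] = min(bbox['westlimit'], cov['westlimit'])
    print(bbox)  # box covering both coverages, as the resource-level metadata would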
| 45.565891
| 97
| 0.595441
|
0a02680988c8e42a4508d6fdc23e52b1377bbae7
| 715
|
py
|
Python
|
projects/migrations/0005_auto_20190907_1750.py
|
TriedAngle/project_planner
|
2d2ce91bad111ff1f2c192f060099cb7036ee86d
|
[
"Apache-2.0"
] | null | null | null |
projects/migrations/0005_auto_20190907_1750.py
|
TriedAngle/project_planner
|
2d2ce91bad111ff1f2c192f060099cb7036ee86d
|
[
"Apache-2.0"
] | 9
|
2021-03-09T16:58:51.000Z
|
2022-03-02T05:29:41.000Z
|
projects/migrations/0005_auto_20190907_1750.py
|
strobl-net/project_planner
|
2d2ce91bad111ff1f2c192f060099cb7036ee86d
|
[
"Apache-2.0"
] | 1
|
2020-04-07T05:56:06.000Z
|
2020-04-07T05:56:06.000Z
|
# Generated by Django 2.2.5 on 2019-09-07 17:50
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0004_project_members2'),
]
operations = [
migrations.RemoveField(
model_name='project',
name='members',
),
migrations.RemoveField(
model_name='project',
name='members2',
),
migrations.AddField(
model_name='project',
name='member_ids',
field=django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), blank=True, null=True, size=None),
),
]
| 25.535714
| 128
| 0.598601
|
7c8dc0e930700a03e71ece7eb9646df36c18b9d8
| 44
|
py
|
Python
|
lgbn/__init__.py
|
DavideA/lgbn
|
0b2a78da954c5726887759249dbf8be21f5154ea
|
[
"MIT"
] | 1
|
2019-10-05T14:34:54.000Z
|
2019-10-05T14:34:54.000Z
|
lgbn/__init__.py
|
DavideA/lgbn
|
0b2a78da954c5726887759249dbf8be21f5154ea
|
[
"MIT"
] | 1
|
2019-10-07T17:11:51.000Z
|
2019-10-07T17:11:51.000Z
|
lgbn/__init__.py
|
DavideA/lgbn
|
0b2a78da954c5726887759249dbf8be21f5154ea
|
[
"MIT"
] | 1
|
2019-10-07T17:12:21.000Z
|
2019-10-07T17:12:21.000Z
|
from .bayesian_network import BayesianNetwork
| 44
| 44
| 0.931818
|
9c5fa63809d198eb0cb8b8c67833c5fde4771dfc
| 5,777
|
py
|
Python
|
iaws/yolov5/hubconf.py
|
nowage/ImgAugWithSR
|
eed90d462d1171f0be809eaf4d54a78fc2cd3893
|
[
"MIT"
] | null | null | null |
iaws/yolov5/hubconf.py
|
nowage/ImgAugWithSR
|
eed90d462d1171f0be809eaf4d54a78fc2cd3893
|
[
"MIT"
] | null | null | null |
iaws/yolov5/hubconf.py
|
nowage/ImgAugWithSR
|
eed90d462d1171f0be809eaf4d54a78fc2cd3893
|
[
"MIT"
] | null | null | null |
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5/
Usage:
import torch
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
"""
import torch
def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
"""Creates a specified YOLOv5 model
Arguments:
name (str): name of model, i.e. 'yolov5s'
pretrained (bool): load pretrained weights into the model
channels (int): number of input channels
classes (int): number of model classes
autoshape (bool): apply YOLOv5 .autoshape() wrapper to model
verbose (bool): print all information to screen
device (str, torch.device, None): device to use for model parameters
Returns:
YOLOv5 pytorch model
"""
from pathlib import Path
from models.yolo import Model
from models.experimental import attempt_load
from utils.general import check_requirements, set_logging
from utils.downloads import attempt_download
from utils.torch_utils import select_device
file = Path(__file__).resolve()
check_requirements(requirements=file.parent / 'requirements.txt', exclude=('tensorboard', 'thop', 'opencv-python'))
set_logging(verbose=verbose)
save_dir = Path('') if str(name).endswith('.pt') else file.parent
path = (save_dir / name).with_suffix('.pt') # checkpoint path
try:
device = select_device(('0' if torch.cuda.is_available() else 'cpu') if device is None else device)
if pretrained and channels == 3 and classes == 80:
model = attempt_load(path, map_location=device) # download/load FP32 model
else:
cfg = list((Path(__file__).parent / 'models').rglob(f'{name}.yaml'))[0] # model.yaml path
model = Model(cfg, channels, classes) # create model
if pretrained:
ckpt = torch.load(attempt_download(path), map_location=device) # load
msd = model.state_dict() # model state_dict
csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32
csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter
model.load_state_dict(csd, strict=False) # load
if len(ckpt['model'].names) == classes:
model.names = ckpt['model'].names # set class names attribute
if autoshape:
model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS
return model.to(device)
except Exception as e:
help_url = 'https://github.com/ultralytics/yolov5/issues/36'
s = 'Cache may be out of date, try `force_reload=True`. See %s for help.' % help_url
raise Exception(s) from e
def custom(path='path/to/model.pt', autoshape=True, verbose=True, device=None):
# YOLOv5 custom or local model
return _create(path, autoshape=autoshape, verbose=verbose, device=device)
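# A minimal usage sketch for custom(); the checkpoint path below is hypothetical,
# and the torch.hub call assumes network access to the ultralytics/yolov5 repo:
#   model = custom(path='runs/train/exp/weights/best.pt')
#   model = torch.hub.load('ultralytics/yolov5', 'custom', path='runs/train/exp/weights/best.pt')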
def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-small model https://github.com/ultralytics/yolov5
return _create('yolov5s', pretrained, channels, classes, autoshape, verbose, device)
def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-medium model https://github.com/ultralytics/yolov5
return _create('yolov5m', pretrained, channels, classes, autoshape, verbose, device)
def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-large model https://github.com/ultralytics/yolov5
return _create('yolov5l', pretrained, channels, classes, autoshape, verbose, device)
def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-xlarge model https://github.com/ultralytics/yolov5
return _create('yolov5x', pretrained, channels, classes, autoshape, verbose, device)
def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-small-P6 model https://github.com/ultralytics/yolov5
return _create('yolov5s6', pretrained, channels, classes, autoshape, verbose, device)
def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5
return _create('yolov5m6', pretrained, channels, classes, autoshape, verbose, device)
def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-large-P6 model https://github.com/ultralytics/yolov5
return _create('yolov5l6', pretrained, channels, classes, autoshape, verbose, device)
def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None):
# YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5
return _create('yolov5x6', pretrained, channels, classes, autoshape, verbose, device)
if __name__ == '__main__':
model = _create(name='yolov5s', pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) # pretrained
# model = custom(path='path/to/model.pt') # custom
# Verify inference
import cv2
import numpy as np
from PIL import Image
from pathlib import Path
imgs = ['data/images/zidane.jpg', # filename
Path('data/images/zidane.jpg'), # Path
'https://ultralytics.com/images/zidane.jpg', # URI
cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV
Image.open('data/images/bus.jpg'), # PIL
np.zeros((320, 640, 3))] # numpy
results = model(imgs) # batched inference
results.print()
results.save()
| 43.43609
| 120
| 0.682707
|
d2528507e624eff864aa4e531c0a5132e1f1261a
| 5,301
|
py
|
Python
|
tests/commands/gc_test.py
|
katrinleinweber/pre-commit
|
a889f0bfd5d39042db9f834c6259ef57df1807b2
|
[
"MIT"
] | null | null | null |
tests/commands/gc_test.py
|
katrinleinweber/pre-commit
|
a889f0bfd5d39042db9f834c6259ef57df1807b2
|
[
"MIT"
] | null | null | null |
tests/commands/gc_test.py
|
katrinleinweber/pre-commit
|
a889f0bfd5d39042db9f834c6259ef57df1807b2
|
[
"MIT"
] | null | null | null |
import os
import pre_commit.constants as C
from pre_commit import git
from pre_commit.clientlib import load_config
from pre_commit.commands.autoupdate import autoupdate
from pre_commit.commands.gc import gc
from pre_commit.repository import all_hooks
from testing.fixtures import make_config_from_repo
from testing.fixtures import make_repo
from testing.fixtures import modify_config
from testing.fixtures import sample_local_config
from testing.fixtures import sample_meta_config
from testing.fixtures import write_config
from testing.util import git_commit
def _repo_count(store):
return len(store.select_all_repos())
def _config_count(store):
return len(store.select_all_configs())
def _remove_config_assert_cleared(store, cap_out):
os.remove(C.CONFIG_FILE)
assert not gc(store)
assert _config_count(store) == 0
assert _repo_count(store) == 0
assert cap_out.get().splitlines()[-1] == '1 repo(s) removed.'
def test_gc(tempdir_factory, store, in_git_dir, cap_out):
path = make_repo(tempdir_factory, 'script_hooks_repo')
old_rev = git.head_rev(path)
git_commit(cwd=path)
write_config('.', make_config_from_repo(path, rev=old_rev))
store.mark_config_used(C.CONFIG_FILE)
# update will clone both the old and new repo, making the old one gc-able
assert not autoupdate(C.CONFIG_FILE, store, tags_only=False)
assert _config_count(store) == 1
assert _repo_count(store) == 2
assert not gc(store)
assert _config_count(store) == 1
assert _repo_count(store) == 1
assert cap_out.get().splitlines()[-1] == '1 repo(s) removed.'
_remove_config_assert_cleared(store, cap_out)
def test_gc_repo_not_cloned(tempdir_factory, store, in_git_dir, cap_out):
path = make_repo(tempdir_factory, 'script_hooks_repo')
write_config('.', make_config_from_repo(path))
store.mark_config_used(C.CONFIG_FILE)
assert _config_count(store) == 1
assert _repo_count(store) == 0
assert not gc(store)
assert _config_count(store) == 1
assert _repo_count(store) == 0
assert cap_out.get().splitlines()[-1] == '0 repo(s) removed.'
def test_gc_meta_repo_does_not_crash(store, in_git_dir, cap_out):
write_config('.', sample_meta_config())
store.mark_config_used(C.CONFIG_FILE)
assert not gc(store)
assert cap_out.get().splitlines()[-1] == '0 repo(s) removed.'
def test_gc_local_repo_does_not_crash(store, in_git_dir, cap_out):
write_config('.', sample_local_config())
store.mark_config_used(C.CONFIG_FILE)
assert not gc(store)
assert cap_out.get().splitlines()[-1] == '0 repo(s) removed.'
def test_gc_unused_local_repo_with_env(store, in_git_dir, cap_out):
config = {
'repo': 'local',
'hooks': [{
'id': 'flake8', 'name': 'flake8', 'entry': 'flake8',
# a `language: python` local hook will create an environment
'types': ['python'], 'language': 'python',
}],
}
write_config('.', config)
store.mark_config_used(C.CONFIG_FILE)
# this causes the repositories to be created
all_hooks(load_config(C.CONFIG_FILE), store)
assert _config_count(store) == 1
assert _repo_count(store) == 1
assert not gc(store)
assert _config_count(store) == 1
assert _repo_count(store) == 1
assert cap_out.get().splitlines()[-1] == '0 repo(s) removed.'
_remove_config_assert_cleared(store, cap_out)
def test_gc_config_with_missing_hook(
tempdir_factory, store, in_git_dir, cap_out,
):
path = make_repo(tempdir_factory, 'script_hooks_repo')
write_config('.', make_config_from_repo(path))
store.mark_config_used(C.CONFIG_FILE)
# to trigger a clone
all_hooks(load_config(C.CONFIG_FILE), store)
with modify_config() as config:
# add a hook which does not exist, make sure we don't crash
config['repos'][0]['hooks'].append({'id': 'does-not-exist'})
assert _config_count(store) == 1
assert _repo_count(store) == 1
assert not gc(store)
assert _config_count(store) == 1
assert _repo_count(store) == 1
assert cap_out.get().splitlines()[-1] == '0 repo(s) removed.'
_remove_config_assert_cleared(store, cap_out)
def test_gc_deletes_invalid_configs(store, in_git_dir, cap_out):
config = {'i am': 'invalid'}
write_config('.', config)
store.mark_config_used(C.CONFIG_FILE)
assert _config_count(store) == 1
assert not gc(store)
assert _config_count(store) == 0
assert cap_out.get().splitlines()[-1] == '0 repo(s) removed.'
def test_invalid_manifest_gcd(tempdir_factory, store, in_git_dir, cap_out):
# clean up repos from old pre-commit versions
path = make_repo(tempdir_factory, 'script_hooks_repo')
write_config('.', make_config_from_repo(path))
store.mark_config_used(C.CONFIG_FILE)
# trigger a clone
assert not autoupdate(C.CONFIG_FILE, store, tags_only=False)
# we'll "break" the manifest to simulate an old version clone
(_, _, path), = store.select_all_repos()
os.remove(os.path.join(path, C.MANIFEST_FILE))
assert _config_count(store) == 1
assert _repo_count(store) == 1
assert not gc(store)
assert _config_count(store) == 1
assert _repo_count(store) == 0
assert cap_out.get().splitlines()[-1] == '1 repo(s) removed.'
| 33.13125
| 77
| 0.7093
|
3a7b84f6ada759868ecdb5da7f273822bd3c445a
| 23,109
|
py
|
Python
|
nuitka/plugins/PluginBase.py
|
timgates42/Nuitka
|
536f39e37d8657ec750304bb1a93ccb19d1942af
|
[
"Apache-2.0"
] | 1
|
2020-12-20T19:01:50.000Z
|
2020-12-20T19:01:50.000Z
|
nuitka/plugins/PluginBase.py
|
timgates42/Nuitka
|
536f39e37d8657ec750304bb1a93ccb19d1942af
|
[
"Apache-2.0"
] | 1
|
2021-01-05T09:01:31.000Z
|
2021-01-05T09:01:31.000Z
|
nuitka/plugins/PluginBase.py
|
timgates42/Nuitka
|
536f39e37d8657ec750304bb1a93ccb19d1942af
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Plugins: Welcome to Nuitka! This is your shortest way to become part of it.
This is to provide the base class for all plug-ins. Some of which are part of
proper Nuitka, and some of which are waiting to be created and submitted for
inclusion by you.
The base class will serve as documentation. And it will point to examples of
it being used.
"""
import os
import shutil
import sys
from nuitka import Options, OutputDirectories
from nuitka.Errors import NuitkaPluginError
from nuitka.ModuleRegistry import addUsedModule
from nuitka.SourceCodeReferences import fromFilename
from nuitka.Tracing import plugins_logger
from nuitka.utils.FileOperations import putTextFileContents, relpath
from nuitka.utils.ModuleNames import ModuleName
pre_modules = {}
post_modules = {}
warned_unused_plugins = set()
class NuitkaPluginBase(object):
"""Nuitka base class for all plug-ins.
Derive your plugin from "NuitkaPluginBase" please.
For instructions, see https://github.com/Nuitka/Nuitka/blob/orsiris/UserPlugin-Creation.rst
    Plugins allow adapting Nuitka's behaviour in a number of ways, as explained
    below for the individual methods.
It is used to deal with special requirements some packages may have (e.g. PyQt
and tkinter), data files to be included (e.g. certifi), inserting hidden
code, coping with otherwise undetectable needs, or issuing messages in
certain situations.
A plugin in general must be enabled to be used by Nuitka. This happens by
specifying "--plugin-enable" (standard plugins) or by "--user-plugin" (user
plugins) in the Nuitka command line. However, some plugins are always enabled
and invisible to the user.
Nuitka comes with a number of "standard" plugins to be enabled as needed.
What they are can be displayed using "nuitka --plugin-list file.py" (filename
required but ignored).
User plugins may be specified (and implicitly enabled) using their Python
script pathname.
"""
# Standard plugins must provide this as a unique string which Nuitka
# then uses to identify them.
#
# User plugins are identified by their path and implicitly activated.
# They however still need to specify some arbitrary non-blank string here,
# which does not equal the name of an inactivated standard plugin.
# For working with options, user plugins must set this variable to
# the script's path (use __file__, __module__ or __name__).
plugin_name = None
@staticmethod
def isAlwaysEnabled():
"""Request to be always enabled.
Notes:
Setting this to true is only applicable to standard plugins. In
this case, the plugin will be enabled upon Nuitka start-up. Any
plugin detector class will then be ignored. Method isRelevant() may
also be present and can be used to fine-control enabling the
plugin: A to-be-enabled, but irrelevant plugin will still not be
activated.
Returns:
True or False
"""
return False
@classmethod
def isRelevant(cls):
"""Consider if the plugin is relevant.
Notes:
A plugin may only be a needed on a certain OS, or with some options,
but this is only a class method, so you will not have much run time
information.
Returns:
True or False
"""
return True
@classmethod
def addPluginCommandLineOptions(cls, group):
# Call group.add_option() here.
pass
@classmethod
def getPluginDefaultOptionValues(cls):
"""This method is used to get a values to use as defaults.
Since the defaults are in the command line options, we call
that and extract them.
"""
from optparse import OptionGroup, OptionParser
parser = OptionParser()
group = OptionGroup(parser, "Pseudo Target")
cls.addPluginCommandLineOptions(group)
result = {}
for option in group.option_list:
result[option.dest] = option.default
return result
def considerImplicitImports(self, module, signal_change):
"""Provide additional modules to import implicitly when encountering the module.
Notes:
Better do not overload this method.
The standard plugin 'ImplicitImports.py' already contains MANY of these.
If you do have a new candidate, consider a PR to get it included there.
Args:
module: the module object
signal_change: bool
Returns:
None
"""
from nuitka.importing.Importing import getModuleNameAndKindFromFilename
for full_name in self.getImplicitImports(module):
if type(full_name) in (tuple, list):
raise NuitkaPluginError(
"Plugin %s needs to be change to only return modules names, not %r"
% (self, full_name)
)
full_name = ModuleName(full_name)
try:
module_filename = self.locateModule(
importing=module, module_name=full_name
)
except Exception:
self.warning(
"Problem locating '%s' implicit imports '%s'."
% (module.getFullName(), full_name)
)
raise
if module_filename is None:
if Options.isShowInclusion():
self.info(
"Implicit module '%s' suggested by '%s' not found."
% (full_name, module.getFullName())
)
continue
_module_name2, module_kind = getModuleNameAndKindFromFilename(
module_filename
)
# TODO: This should get back to plug-ins, they should be allowed to
# preempt or override the decision.
decision, reason = self.decideRecursion(
module_filename=module_filename,
module_name=full_name,
module_kind=module_kind,
)
if decision:
self.recurseTo(
module_package=full_name.getPackageName(),
module_filename=module_filename,
module_kind=module_kind,
reason=reason,
signal_change=signal_change,
)
def isRequiredImplicitImport(self, module, full_name):
"""Indicate whether an implicitly imported module should be accepted.
Notes:
You may negate importing a module specified as "implicit import",
although this is an unexpected event.
Args:
module: the module object
full_name: of the implicitly import module
Returns:
True or False
"""
# Virtual method, pylint: disable=no-self-use,unused-argument
return True
def getImplicitImports(self, module):
"""Return the implicit imports for a given module (iterator).
Args:
module: the module object
Yields:
implicit imports for the module
"""
# Virtual method, pylint: disable=no-self-use,unused-argument
return ()
# Provide fall-back for failed imports here.
module_aliases = {}
def considerFailedImportReferrals(self, module_name):
"""Provide a dictionary of fallback imports for modules that failed to import.
Args:
module_name: name of module
Returns:
dict
"""
return self.module_aliases.get(module_name, None)
def onModuleSourceCode(self, module_name, source_code):
"""Inspect or modify source code.
Args:
module_name: (str) name of module
source_code: (str) its source code
Returns:
source_code (str)
Notes:
Default implementation forwards to `checkModuleSourceCode` which is
going to allow simply checking the source code without the need to
pass it back.
"""
self.checkModuleSourceCode(module_name, source_code)
return source_code
def checkModuleSourceCode(self, module_name, source_code):
"""Inspect source code.
Args:
module_name: (str) name of module
source_code: (str) its source code
Returns:
None
"""
def onFrozenModuleSourceCode(self, module_name, is_package, source_code):
"""Inspect or modify frozen module source code.
Args:
module_name: (str) full name of module
is_package: (bool) True indicates a package
source_code: (str) its source code
Returns:
source_code (str)
"""
# Virtual method, pylint: disable=no-self-use,unused-argument
return source_code
def onFrozenModuleBytecode(self, module_name, is_package, bytecode):
"""Inspect or modify frozen module byte code.
Args:
module_name: (str) name of module
is_package: (bool) True indicates a package
bytecode: (bytes) byte code
Returns:
bytecode (bytes)
"""
# Virtual method, pylint: disable=no-self-use,unused-argument
return bytecode
@staticmethod
def _createTriggerLoadedModule(module, trigger_name, code):
"""Create a "trigger" for a module to be imported.
Notes:
            The trigger will incorporate the code to be prepended / appended.
Called by @onModuleDiscovered.
Args:
module: the module object (serves as dict key)
trigger_name: string ("-preload"/"-postload")
code: the code string
Returns
trigger_module
"""
from nuitka.nodes.ModuleNodes import CompiledPythonModule
from nuitka.tree.Building import createModuleTree
from .Plugins import Plugins
module_name = ModuleName(module.getFullName() + trigger_name)
source_ref = fromFilename(module.getCompileTimeFilename() + trigger_name)
mode = Plugins.decideCompilation(module_name, source_ref)
trigger_module = CompiledPythonModule(
module_name=module_name,
is_top=False,
mode=mode,
future_spec=None,
source_ref=source_ref,
)
createModuleTree(
module=trigger_module,
source_ref=module.getSourceReference(),
source_code=code,
is_main=False,
)
if mode == "bytecode":
trigger_module.setSourceCode(code)
if Options.is_debug:
source_path = os.path.join(
OutputDirectories.getSourceDirectoryPath(), module_name + ".py"
)
putTextFileContents(filename=source_path, contents=code)
return trigger_module
@staticmethod
def createPreModuleLoadCode(module):
"""Create code to prepend to a module.
Notes:
Called by @onModuleDiscovered.
Args:
module: the module object
Returns:
tuple (code, documentary string)
"""
# Virtual method, pylint: disable=unused-argument
return None, None
@staticmethod
def createPostModuleLoadCode(module):
"""Create code to append to a module.
Notes:
Called by @onModuleDiscovered.
Args:
module: the module object
Returns:
tuple (code, documentary string)
"""
# Virtual method, pylint: disable=unused-argument
return None, None
def onModuleDiscovered(self, module):
"""Called with a module to be loaded.
Notes:
We may specify code to be prepended and/or appended to this module.
This code is stored in the appropriate dict.
For every imported module and each of these two options, only one plugin may do this.
We check this condition here.
Args:
module: the module object
Returns:
None
"""
full_name = module.getFullName()
pre_code, reason = self.createPreModuleLoadCode(module)
if pre_code:
# Note: We could find a way to handle this if needed.
if full_name in pre_modules:
sys.exit("Error, conflicting plug-ins for %s" % full_name)
self.info("Injecting pre-module load code for module '%s':" % full_name)
for line in reason.split("\n"):
self.info(" " + line)
pre_modules[full_name] = self._createTriggerLoadedModule(
module=module, trigger_name="-preLoad", code=pre_code
)
post_code, reason = self.createPostModuleLoadCode(module)
if post_code:
# Note: We could find a way to handle this if needed.
            if full_name in post_modules:
sys.exit("Error, conflicting plug-ins for %s" % full_name)
self.info("Injecting post-module load code for module '%s':" % full_name)
for line in reason.split("\n"):
self.info(" " + line)
post_modules[full_name] = self._createTriggerLoadedModule(
module=module, trigger_name="-postLoad", code=post_code
)
def onModuleEncounter(self, module_filename, module_name, module_kind):
"""Help decide whether to include a module.
Args:
module_filename: filename
module_name: full module name
module_kind: one of "py", "shlib" (shared library)
Returns:
True or False
"""
# Virtual method, pylint: disable=no-self-use,unused-argument
return None
def onModuleInitialSet(self):
"""Provide extra modules to the initial root module set.
Args:
None
Returns:
Iterable of modules, may yield.
"""
# Virtual method, pylint: disable=no-self-use
return ()
@staticmethod
def locateModule(importing, module_name):
"""Provide a filename / -path for a to-be-imported module.
Args:
importing: module object
module_name: (str or ModuleName) full name of module
Returns:
filename for module
"""
from nuitka.importing import Importing
_module_package, module_filename, _finding = Importing.findModule(
importing=importing,
module_name=ModuleName(module_name),
parent_package=None,
level=-1,
warn=False,
)
return module_filename
@staticmethod
def decideRecursion(module_filename, module_name, module_kind):
"""Decide whether Nuitka should recurse down to a given module.
Args:
module_filename: filename
module_name: full module name
module_kind: one of "py" or "shlib" (shared library)
Returns:
(decision, reason) where decision is either a bool or None, and reason is a string message.
"""
from nuitka.importing import Recursion
decision, reason = Recursion.decideRecursion(
module_filename=module_filename,
module_name=module_name,
module_kind=module_kind,
)
return decision, reason
@staticmethod
def recurseTo(module_package, module_filename, module_kind, reason, signal_change):
from nuitka.importing import Recursion
imported_module, added_flag = Recursion.recurseTo(
module_package=module_package,
module_filename=module_filename,
module_relpath=relpath(module_filename),
module_kind=module_kind,
reason=reason,
)
addUsedModule(imported_module)
if added_flag:
signal_change(
"new_code", imported_module.getSourceReference(), "Recursed to module."
)
def considerExtraDlls(self, dist_dir, module):
"""Provide a tuple of names of binaries to be included.
Args:
dist_dir: the distribution folder
module: the module object needing the binaries
Returns:
tuple
"""
# TODO: This should no longer be here, as this API is obsolete, pylint: disable=unused-argument
for included_entry_point in self.getExtraDlls(module):
# Copy to the dist directory.
            target_path = included_entry_point.dest_path
            shutil.copyfile(included_entry_point.source_path, target_path)
yield included_entry_point
def getExtraDlls(self, module):
"""Provide IncludedEntryPoint named tuples describing extra needs of the module.
Args:
module: the module object needing the binaries
Returns:
yields IncludedEntryPoint objects
"""
# Virtual method, pylint: disable=no-self-use,unused-argument
return ()
def removeDllDependencies(self, dll_filename, dll_filenames):
"""Yield any DLLs / shared libraries not to be included in distribution.
Args:
dll_filename: DLL name
dll_filenames: list of DLLs
Yields:
yielded filenames to exclude
"""
# Virtual method, pylint: disable=no-self-use,unused-argument
return ()
def considerDataFiles(self, module):
"""Yield data file names (source|func, target) for inclusion (iterator).
Args:
module: module object that may need extra data files
Yields:
Data file description pairs, either (source, dest) or (func, dest)
where the func will be called to create the content dynamically.
"""
# Virtual method, pylint: disable=no-self-use,unused-argument
return ()
def onStandaloneDistributionFinished(self, dist_dir):
"""Called after successfully finishing a standalone compile.
Note:
It is up to the plugin to take subsequent action. Examples are:
insert additional information (license, copyright, company or
application description), create installation material, further
folder clean-up, start downstream applications etc.
Args:
dist_dir: the created distribution folder
Returns:
None
"""
# Virtual method, pylint: disable=no-self-use,unused-argument
return None
def suppressUnknownImportWarning(self, importing, module_name, source_ref):
"""Suppress import warnings for unknown modules.
Args:
importing: the module object
module_name: name of module
source_ref: ???
Returns:
True or False
"""
# Virtual method, pylint: disable=no-self-use,unused-argument
return False
def decideCompilation(self, module_name, source_ref):
"""Decide whether to compile a module (or just use its bytecode).
Notes:
The first plugin not returning None makes the decision. Thereafter,
no other plugins will be checked. If all plugins return None, the
module will be compiled.
Args:
module_name: name of module
source_ref: ???
Returns:
"compiled" or "bytecode" or None (default)
"""
# Virtual method, pylint: disable=no-self-use,unused-argument
return None
def getPreprocessorSymbols(self):
"""Decide which C defines to be used in compilation.
Notes:
The plugins can each contribute, but are hopefully using
a namespace for their defines.
Returns:
None for no defines, otherwise dictionary of key to be
defined, and non-None values if any, i.e. no "-Dkey" only
"""
# Virtual method, pylint: disable=no-self-use
return None
def getExtraCodeFiles(self):
"""Add extra code files to the compilation.
Notes:
This is generally a bad idea to use unless you absolutely
know what you are doing.
Returns:
None for no extra codes, otherwise dictionary of key to be
filename, and value to be source code.
"""
# Virtual method, pylint: disable=no-self-use
return None
def getExtraLinkLibraries(self):
"""Decide which link library should be added.
Notes:
Names provided multiple times, e.g. by multiple plugins are
only added once.
Returns:
None for no extra link library, otherwise the name as a **str**
or an iterable of names of link libraries.
"""
# Virtual method, pylint: disable=no-self-use
return None
def warnUnusedPlugin(self, message):
"""An inactive plugin may issue a warning if it believes this may be wrong.
Returns:
None
"""
if self.plugin_name not in warned_unused_plugins:
warned_unused_plugins.add(self.plugin_name)
plugins_logger.warning(
"Use '--plugin-enable=%s' for: %s" % (self.plugin_name, message)
)
@classmethod
def warning(cls, message):
plugins_logger.warning(cls.plugin_name + ": " + message)
@classmethod
def info(cls, message):
plugins_logger.info(cls.plugin_name + ": " + message)
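# A minimal sketch of a plugin built on NuitkaPluginBase; the class name and the
# "mypackage" module names are hypothetical, and a real user plugin would live in
# its own script passed via "--user-plugin".
class NuitkaPluginExample(NuitkaPluginBase):
    plugin_name = __file__  # user plugins identify themselves by their script path
    def getImplicitImports(self, module):
        # Pretend that importing "mypackage" always drags in a hidden native helper.
        if module.getFullName() == "mypackage":
            yield "mypackage._native"
    def createPostModuleLoadCode(self, module):
        # Append a harmless marker statement after "mypackage" has been loaded.
        if module.getFullName() == "mypackage":
            return "import os", "Example post-load code."
        return None, None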
def isTriggerModule(module):
return module in pre_modules.values() or module in post_modules.values()
def replaceTriggerModule(old, new):
found = None
for key, value in pre_modules.items():
if value is old:
found = key
break
if found is not None:
pre_modules[found] = new
found = None
for key, value in post_modules.items():
if value is old:
found = key
break
if found is not None:
post_modules[found] = new
| 32.593794
| 103
| 0.616167
|
d6c1f635ac83a9b0d67e76c1de9103c7ed000f73
| 3,041
|
py
|
Python
|
btrsync/cli.py
|
zeroxoneb/arch-mirror-archive
|
b94f311876d8bff8524a4bce6acd0c5d33c602c1
|
[
"MIT"
] | 2
|
2017-04-18T09:05:27.000Z
|
2018-01-06T02:43:21.000Z
|
btrsync/cli.py
|
zeroxoneb/arch-mirror-archive
|
b94f311876d8bff8524a4bce6acd0c5d33c602c1
|
[
"MIT"
] | null | null | null |
btrsync/cli.py
|
zeroxoneb/arch-mirror-archive
|
b94f311876d8bff8524a4bce6acd0c5d33c602c1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
btrsync.cli
--------------
Main `btrsync.cli` CLI.
"""
import click
import contextlib
import logging
import os
from datetime import datetime
log = logging.getLogger('btrsync')
@contextlib.contextmanager
def cd(path):
old_path = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(old_path)
def make_snapshot(data_dir, version):
"""
Make a snapshot of the previous version to sync against.
"""
log.debug('Finding last sync')
versions = [
path for path in os.listdir(data_dir) if (
os.path.isdir(os.path.join(data_dir, path)) and
not os.path.islink(os.path.join(data_dir, path)) and
not version == path
)
]
versions.sort()
if len(versions):
latest = os.path.join(data_dir, versions[-1])
log.debug('latest: {0}'.format(latest))
new_version = os.path.join(data_dir, version)
if not os.path.exists(new_version):
cmd = "btrfs subvol snapshot {} {}".format(latest, new_version)
log.debug(cmd)
os.system(cmd)
else:
log.info('Version {} already exists: {}'.format(version, new_version))
def repo_sync(sync_url, data_dir, version, excludes, delete):
log.info(
'repo_sync(pkg_sync_url: {}, data_dir: {}, version: {}, excludes: {}, delete: {})'.format(
sync_url, data_dir, version, excludes, delete
)
)
target = '{}/'.format(
os.path.join(data_dir, version)
)
log.debug('target: {}'.format(target))
if not os.path.exists(target):
raise RuntimeError('Target directory {} does not exist'.format(target))
cmd = (
"rsync -rltvH "
"{delete} "
"{excludes} "
"{url} "
"{target} ".format(
delete='--delete' if delete else '',
excludes=''.join([" --exclude '{}' ".format(exclude) for exclude in excludes]),
url=sync_url,
target=target
)
)
log.debug(cmd)
os.system(cmd)
def make_current(data_dir, version):
"""
Link the new version as the current
"""
current = os.path.join(data_dir, 'current')
if os.path.exists(current) and os.path.islink(current):
os.remove(current)
os.symlink(os.path.join(data_dir, version), current)
@click.command()
@click.option('--sync-url', required=True)
@click.option('--data-dir', required=True)
@click.option('--version', default=datetime.now().strftime('%Y.%m.%d'), show_default=True)
@click.option('--exclude', multiple=True)
@click.option('--delete/--no-delete', default=True, show_default=True)
@click.option('--debug', is_flag=True, show_default=True)
def sync(sync_url, data_dir, version, exclude, delete, debug):
if debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
make_snapshot(data_dir, version)
repo_sync(sync_url, data_dir, version, exclude, delete)
make_current(data_dir, version)
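# A usage sketch, assuming the `sync` command is wired up as a console script
# (the entry-point name, mirror URL and paths below are hypothetical):
#   btrsync --sync-url rsync://mirror.example.org/archlinux/ \
#       --data-dir /srv/archive --exclude 'iso' --debug
# This snapshots the most recent btrfs subvolume as today's version, rsyncs into
# it, then repoints the "current" symlink.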
| 26.675439
| 98
| 0.616573
|
32312b0040e93c1dd011a49368700b03bca1fe3a
| 1,450
|
py
|
Python
|
examples/flow_routing/test_script_for_route_flow_dn.py
|
cctrunz/landlab
|
4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939
|
[
"MIT"
] | null | null | null |
examples/flow_routing/test_script_for_route_flow_dn.py
|
cctrunz/landlab
|
4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939
|
[
"MIT"
] | 1
|
2016-03-16T02:34:08.000Z
|
2016-04-20T19:31:30.000Z
|
examples/flow_routing/test_script_for_route_flow_dn.py
|
cctrunz/landlab
|
4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939
|
[
"MIT"
] | null | null | null |
#! /usr/env/python
"""
test_script_for_route_flow_dn.py:
Tests and illustrates use of route_flow_dn component.
"""
import os
import pylab
from landlab.components import FlowAccumulator
from landlab.io import read_esri_ascii
dem_name = "../../../io/tests/data/west_bijou_gully.asc"
outlet_row = 82
outlet_column = 38
# Read in a DEM and set its boundaries
DATA_FILE = os.path.join(os.path.dirname(__file__), dem_name)
(grid, z) = read_esri_ascii(DATA_FILE)
grid.set_nodata_nodes_to_inactive(z, 0) # set nodata nodes to inactive bounds
outlet_node = grid.grid_coords_to_node_id(outlet_row, outlet_column)
# Route flow
flow_router = FlowAccumulator(grid, flow_director="D8")
flow_router.route_flow()
# Get a 2D array version of the elevations
ar = grid.node_vector_to_raster(grid["node"]["drainage_area"])
numcols = grid.number_of_node_columns
numrows = grid.number_of_node_rows
dx = grid.dx
# Create a shaded image
pylab.close() # clear any pre-existing plot
im = pylab.imshow(ar, cmap=pylab.cm.RdBu, extent=[0, numcols * dx, 0, numrows * dx])
# add contour lines with labels
cset = pylab.contour(ar, extent=[0, numcols * dx, numrows * dx, 0])
pylab.clabel(cset, inline=True, fmt="%1.1f", fontsize=10)
# add a color bar on the side
cb = pylab.colorbar(im)
cb.set_label("Drainage area, sq meters")
# add a title and axis labels
pylab.title("DEM")
pylab.xlabel("Distance (m)")
pylab.ylabel("Distance (m)")
# Display the plot
pylab.show()
| 26.363636
| 84
| 0.751724
|
bc2540f717328e30d1afd92dc2f211fbc106ea37
| 1,320
|
py
|
Python
|
test/modules/core/conftest.py
|
Beeee11/httpd
|
547873051555fc0d4faf1a38daaa850a830401f6
|
[
"Apache-2.0"
] | null | null | null |
test/modules/core/conftest.py
|
Beeee11/httpd
|
547873051555fc0d4faf1a38daaa850a830401f6
|
[
"Apache-2.0"
] | null | null | null |
test/modules/core/conftest.py
|
Beeee11/httpd
|
547873051555fc0d4faf1a38daaa850a830401f6
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import pytest
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
from .env import CoreTestEnv
def pytest_report_header(config, startdir):
env = CoreTestEnv(setup_dirs=False)
return f"core [apache: {env.get_httpd_version()}, mpm: {env.mpm_module}, {env.prefix}]"
@pytest.fixture(scope="package")
def env(pytestconfig) -> CoreTestEnv:
level = logging.INFO
console = logging.StreamHandler()
console.setLevel(level)
console.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
logging.getLogger('').addHandler(console)
logging.getLogger('').setLevel(level=level)
env = CoreTestEnv(pytestconfig=pytestconfig)
env.apache_access_log_clear()
env.httpd_error_log.clear_log()
return env
@pytest.fixture(autouse=True, scope="package")
def _session_scope(env):
env.httpd_error_log.set_ignored_lognos([
'AH10244', # core: invalid URI path
'AH01264', # mod_cgid script not found
])
yield
assert env.apache_stop() == 0
errors, warnings = env.httpd_error_log.get_missed()
assert (len(errors), len(warnings)) == (0, 0),\
f"apache logged {len(errors)} errors and {len(warnings)} warnings: \n"\
"{0}\n{1}\n".format("\n".join(errors), "\n".join(warnings))
| 30
| 91
| 0.683333
|
b00156ccc10fdbf098c90cd2cb124cebb1206893
| 4,208
|
py
|
Python
|
nipyapi/nifi/models/banner_dto.py
|
iMajna/nipyapi
|
5480af8fe8c6b470249837835cb1a067abb6678e
|
[
"Apache-2.0"
] | null | null | null |
nipyapi/nifi/models/banner_dto.py
|
iMajna/nipyapi
|
5480af8fe8c6b470249837835cb1a067abb6678e
|
[
"Apache-2.0"
] | 1
|
2020-03-16T10:02:46.000Z
|
2020-03-16T13:37:42.000Z
|
nipyapi/nifi/models/banner_dto.py
|
iMajna/nipyapi
|
5480af8fe8c6b470249837835cb1a067abb6678e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.12.1
Contact: dev@nifi.apache.org
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class BannerDTO(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'header_text': 'str',
'footer_text': 'str'
}
attribute_map = {
'header_text': 'headerText',
'footer_text': 'footerText'
}
def __init__(self, header_text=None, footer_text=None):
"""
BannerDTO - a model defined in Swagger
"""
self._header_text = None
self._footer_text = None
if header_text is not None:
self.header_text = header_text
if footer_text is not None:
self.footer_text = footer_text
@property
def header_text(self):
"""
Gets the header_text of this BannerDTO.
The header text.
:return: The header_text of this BannerDTO.
:rtype: str
"""
return self._header_text
@header_text.setter
def header_text(self, header_text):
"""
Sets the header_text of this BannerDTO.
The header text.
:param header_text: The header_text of this BannerDTO.
:type: str
"""
self._header_text = header_text
@property
def footer_text(self):
"""
Gets the footer_text of this BannerDTO.
The footer text.
:return: The footer_text of this BannerDTO.
:rtype: str
"""
return self._footer_text
@footer_text.setter
def footer_text(self, footer_text):
"""
Sets the footer_text of this BannerDTO.
The footer text.
:param footer_text: The footer_text of this BannerDTO.
:type: str
"""
self._footer_text = footer_text
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, BannerDTO):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
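# A minimal usage sketch of the generated model; the banner texts are made up:
#   banner = BannerDTO(header_text='Staging cluster', footer_text='Authorized use only')
#   banner.to_dict()  # {'header_text': 'Staging cluster', 'footer_text': 'Authorized use only'}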
| 27.324675
| 479
| 0.549905
|
3e52626411b01ff2d4c5e1e06d593a7e285c8b6f
| 539
|
py
|
Python
|
csm_web/scheduler/migrations/0009_course_section_start.py
|
shadaj/csm_web
|
aab7920987bf3212b6da7fa26bac24cca77c6d03
|
[
"MIT"
] | 8
|
2019-08-18T16:04:01.000Z
|
2022-02-09T05:01:49.000Z
|
csm_web/scheduler/migrations/0009_course_section_start.py
|
shadaj/csm_web
|
aab7920987bf3212b6da7fa26bac24cca77c6d03
|
[
"MIT"
] | 69
|
2019-03-20T00:20:04.000Z
|
2022-03-27T11:21:55.000Z
|
csm_web/scheduler/migrations/0009_course_section_start.py
|
shadaj/csm_web
|
aab7920987bf3212b6da7fa26bac24cca77c6d03
|
[
"MIT"
] | 12
|
2019-03-17T18:31:06.000Z
|
2021-12-28T21:27:00.000Z
|
# Generated by Django 3.0.3 on 2020-02-20 01:40
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('scheduler', '0008_auto_20200112_2051'),
]
operations = [
migrations.AddField(
model_name='course',
name='section_start',
field=models.DateField(default=datetime.datetime(2020, 2, 20, 1, 40, 43, 642165, tzinfo=utc)),
preserve_default=False,
),
]
| 24.5
| 106
| 0.636364
|
e22e4d8bf2116d8f4311fde9412a20fbc31fac5c
| 3,041
|
py
|
Python
|
Filters/General/Testing/Python/TestGroupTimeStepsFilter.py
|
cclauss/VTK
|
f62a52cce9044159efb4adb7cc0cfd7ec0bc8b6d
|
[
"BSD-3-Clause"
] | 1,755
|
2015-01-03T06:55:00.000Z
|
2022-03-29T05:23:26.000Z
|
Filters/General/Testing/Python/TestGroupTimeStepsFilter.py
|
cclauss/VTK
|
f62a52cce9044159efb4adb7cc0cfd7ec0bc8b6d
|
[
"BSD-3-Clause"
] | 29
|
2015-04-23T20:58:30.000Z
|
2022-03-02T16:16:42.000Z
|
Filters/General/Testing/Python/TestGroupTimeStepsFilter.py
|
cclauss/VTK
|
f62a52cce9044159efb4adb7cc0cfd7ec0bc8b6d
|
[
"BSD-3-Clause"
] | 1,044
|
2015-01-05T22:48:27.000Z
|
2022-03-31T02:38:26.000Z
|
import vtk
from vtk.util.vtkAlgorithm import VTKPythonAlgorithmBase
class MovingSphereSource(VTKPythonAlgorithmBase):
def __init__(self):
VTKPythonAlgorithmBase.__init__(self,
nInputPorts=0,
nOutputPorts=1, outputType='vtkPolyData')
def RequestInformation(self, request, inInfo, outInfo):
info = outInfo.GetInformationObject(0)
t = range(0, 10)
info.Set(vtk.vtkStreamingDemandDrivenPipeline.TIME_STEPS(), t, len(t))
info.Set(vtk.vtkStreamingDemandDrivenPipeline.TIME_RANGE(), [t[0], t[-1]], 2)
return 1
def RequestData(self, request, inInfo, outInfo):
info = outInfo.GetInformationObject(0)
output = vtk.vtkPolyData.GetData(outInfo)
t = info.Get(vtk.vtkStreamingDemandDrivenPipeline.UPDATE_TIME_STEP())
sphere = vtk.vtkSphereSource()
sphere.SetCenter(0, t * 2, 0)
sphere.Update()
output.ShallowCopy(sphere.GetOutput())
return 1
class MovingPDC(VTKPythonAlgorithmBase):
def __init__(self):
VTKPythonAlgorithmBase.__init__(self,
nInputPorts=0,
nOutputPorts=1, outputType='vtkPartitionedDataSetCollection')
def RequestInformation(self, request, inInfo, outInfo):
info = outInfo.GetInformationObject(0)
t = range(0, 10)
info.Set(vtk.vtkStreamingDemandDrivenPipeline.TIME_STEPS(), t, len(t))
info.Set(vtk.vtkStreamingDemandDrivenPipeline.TIME_RANGE(), [t[0], t[-1]], 2)
return 1
def RequestData(self, request, inInfo, outInfo):
info = outInfo.GetInformationObject(0)
output = vtk.vtkDataObject.GetData(outInfo)
t = info.Get(vtk.vtkStreamingDemandDrivenPipeline.UPDATE_TIME_STEP())
source = vtk.vtkPartitionedDataSetCollectionSource()
source.SetNumberOfShapes(int(1 + t % 3))
transform = vtk.vtkTransform()
transform.Identity()
transform.Translate(2, t*2, 0)
xform = vtk.vtkTransformPolyDataFilter()
xform.SetTransform(transform)
xform.SetInputConnection(source.GetOutputPort())
xform.Update()
output.ShallowCopy(xform.GetOutputDataObject(0))
return 1
source1 = MovingSphereSource()
group1 = vtk.vtkGroupTimeStepsFilter()
group1.SetInputConnection(source1.GetOutputPort())
mapper1 = vtk.vtkCompositePolyDataMapper2()
mapper1.SetInputConnection(group1.GetOutputPort())
actor1 = vtk.vtkActor()
actor1.SetMapper(mapper1)
source2 = MovingPDC()
group2 = vtk.vtkGroupTimeStepsFilter()
group2.SetInputConnection(source2.GetOutputPort())
mapper2 = vtk.vtkCompositePolyDataMapper2()
mapper2.SetInputConnection(group2.GetOutputPort())
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(actor1)
ren1.AddActor(actor2)
ren1.ResetCamera()
renWin.SetSize(300, 300)
renWin.Render()
# render the image
#
iren.Initialize()
# iren.Start()
| 31.030612
| 85
| 0.709964
|
35ac67adeee27bb97ba364dcd0dde85c503fea5c
| 319
|
py
|
Python
|
filters/filter_news_text.py
|
ircashem/iem-telegram-bot
|
512a9502dbd53b6b1e5f41f57b6df05797ba4932
|
[
"MIT"
] | 1
|
2020-07-27T06:54:42.000Z
|
2020-07-27T06:54:42.000Z
|
filters/filter_news_text.py
|
ircashem/iem-telegram-bot
|
512a9502dbd53b6b1e5f41f57b6df05797ba4932
|
[
"MIT"
] | null | null | null |
filters/filter_news_text.py
|
ircashem/iem-telegram-bot
|
512a9502dbd53b6b1e5f41f57b6df05797ba4932
|
[
"MIT"
] | null | null | null |
import re
def extract_news_text(text):
text = re.sub('<a.*">', "", text)
text = re.sub('</a>', '', text)
text = re.sub('</h1>', '', text)
text = re.sub('<hr/>', '', text)
text = re.sub('</p>', '</b>', text)
text = re.sub('<p>', '<b>', text)
text = re.sub('</div>', '', text)
return text
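# Illustrative example (the HTML fragment is hypothetical):
#   extract_news_text('<p>Hello <a href="https://example.org">world</a></p>')
#   returns '<b>Hello world</b>': the anchor markup is stripped and <p> tags are
#   rewritten to <b> so the text can be posted as simple Telegram-style HTML.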
| 29
| 39
| 0.45768
|
87e3d0c93bdd5437ae1c14ca69105a6081987587
| 5,581
|
py
|
Python
|
dashboard/dashboard/main.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 1
|
2019-05-18T02:43:02.000Z
|
2019-05-18T02:43:02.000Z
|
dashboard/dashboard/main.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 6
|
2020-07-19T21:51:44.000Z
|
2022-02-13T08:22:58.000Z
|
dashboard/dashboard/main.py
|
tingshao/catapult
|
a8fe19e0c492472a8ed5710be9077e24cc517c5c
|
[
"BSD-3-Clause"
] | 1
|
2020-07-24T18:22:03.000Z
|
2020-07-24T18:22:03.000Z
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""URL endpoint for the main page which lists recent anomalies."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import datetime
import logging
from google.appengine.ext import ndb
from dashboard import email_template
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.models import anomaly
_ANOMALY_FETCH_LIMIT = 1000
_DEFAULT_DAYS_TO_SHOW = 7
_DEFAULT_CHANGES_TO_SHOW = 10
_DEFAULT_SHERIFF_NAME = 'Chromium Perf Sheriff'
class MainHandler(request_handler.RequestHandler):
"""Displays the main overview page."""
def get(self):
"""Renders the UI for the main overview page.
Request parameters:
days: Number of days to show anomalies for (optional).
sheriff: Sheriff to show anomalies for (optional)
num_changes: The number of improvements/regressions to list.
Outputs:
      An HTML page that shows recent regressions and improvements.
"""
days = int(self.request.get('days', _DEFAULT_DAYS_TO_SHOW))
num_changes = int(self.request.get('num_changes', _DEFAULT_CHANGES_TO_SHOW))
sheriff_name = self.request.get('sheriff', _DEFAULT_SHERIFF_NAME)
sheriff = ndb.Key('Sheriff', sheriff_name)
anomalies = _GetRecentAnomalies(days, sheriff)
top_improvements = _TopImprovements(anomalies, num_changes)
top_regressions = _TopRegressions(anomalies, num_changes)
tests = _GetKeyToTestDict(top_improvements + top_regressions)
template_dict = {
'num_days': days,
'num_changes': num_changes,
'sheriff_name': sheriff_name,
'improvements': _AnomalyInfoDicts(top_improvements, tests),
'regressions': _AnomalyInfoDicts(top_regressions, tests),
}
self.RenderHtml('main.html', template_dict)
def _GetRecentAnomalies(days, sheriff):
"""Fetches recent Anomalies from the datastore.
Args:
days: Number of days old of the oldest Anomalies to fetch.
sheriff: The ndb.Key of the Sheriff to fetch Anomalies for.
Returns:
A list of Anomaly entities sorted from large to small relative change.
"""
anomalies, _, _ = anomaly.Anomaly.QueryAsync(
min_timestamp=datetime.datetime.now() - datetime.timedelta(days=days),
sheriff=sheriff.id(),
limit=_ANOMALY_FETCH_LIMIT).get_result()
# We only want to list alerts that aren't marked invalid or ignored.
anomalies = [a for a in anomalies if a.bug_id is None or a.bug_id > 0]
anomalies.sort(key=lambda a: abs(a.percent_changed), reverse=True)
return anomalies
def _GetKeyToTestDict(anomalies):
"""Returns a map of TestMetadata keys to entities for the given anomalies."""
test_keys = {a.GetTestMetadataKey() for a in anomalies}
tests = utils.GetMulti(test_keys)
return {t.key: t for t in tests}
def _GetColorClass(percent_changed):
"""Returns a CSS class name for the anomaly, based on percent changed."""
if percent_changed > 50:
return 'over-50'
if percent_changed > 40:
return 'over-40'
if percent_changed > 30:
return 'over-30'
if percent_changed > 20:
return 'over-20'
if percent_changed > 10:
return 'over-10'
return 'under-10'
def _AnomalyInfoDicts(anomalies, tests):
"""Returns information info about the given anomalies.
Args:
anomalies: A list of anomalies.
tests: A dictionary mapping TestMetadata keys to TestMetadata entities.
Returns:
A list of dictionaries with information about the given anomalies.
"""
anomaly_list = []
for anomaly_entity in anomalies:
test = tests.get(anomaly_entity.GetTestMetadataKey())
if not test:
logging.warning(
'No TestMetadata entity for key: %s.',
anomaly_entity.GetTestMetadataKey())
continue
subtest_path = '/'.join(test.test_path.split('/')[3:])
graph_link = email_template.GetGroupReportPageLink(anomaly_entity)
anomaly_list.append({
'key': anomaly_entity.key.urlsafe(),
'bug_id': anomaly_entity.bug_id or '',
'start_revision': anomaly_entity.start_revision,
'end_revision': anomaly_entity.end_revision,
'master': test.master_name,
'bot': test.bot_name,
'testsuite': test.suite_name,
'test': subtest_path,
'percent_changed': anomaly_entity.GetDisplayPercentChanged(),
'color_class': _GetColorClass(abs(anomaly_entity.percent_changed)),
'improvement': anomaly_entity.is_improvement,
'dashboard_link': graph_link,
})
return anomaly_list
def _TopImprovements(recent_anomalies, num_to_show):
"""Fills in the given template dictionary with top improvements.
Args:
recent_anomalies: A list of Anomaly entities sorted from large to small.
num_to_show: The number of improvements to return.
Returns:
A list of top improvement Anomaly entities, in decreasing order.
"""
improvements = [a for a in recent_anomalies if a.is_improvement]
return improvements[:num_to_show]
def _TopRegressions(recent_anomalies, num_to_show):
"""Fills in the given template dictionary with top regressions.
Args:
recent_anomalies: A list of Anomaly entities sorted from large to small.
num_to_show: The number of regressions to return.
Returns:
A list of top regression Anomaly entities, in decreasing order.
"""
regressions = [a for a in recent_anomalies if not a.is_improvement]
return regressions[:num_to_show]
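# Example request against MainHandler (the query values are illustrative):
#   /?days=14&num_changes=25&sheriff=Chromium%20Perf%20Sheriff
# renders main.html with the top 25 improvements and regressions reported for
# that sheriff over the last 14 days.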
| 33.620482
| 80
| 0.730514
|
5b1146de770536640fce31ff52a7a1c355a59fed
| 1,369
|
py
|
Python
|
vnftest/tests/unit/core/test_testcase.py
|
onap/vnfsdk-dovetail-integration
|
2720441e7c03bdb57aefba16a262f1eef1ce2cbd
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
vnftest/tests/unit/core/test_testcase.py
|
onap/vnfsdk-dovetail-integration
|
2720441e7c03bdb57aefba16a262f1eef1ce2cbd
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
vnftest/tests/unit/core/test_testcase.py
|
onap/vnfsdk-dovetail-integration
|
2720441e7c03bdb57aefba16a262f1eef1ce2cbd
|
[
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null |
##############################################################################
# Copyright 2018 EuropeanSoftwareMarketingLtd.
# ===================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under
# the License
##############################################################################
# vnftest comment: this is a modified copy of
# yardstick/tests/unit/benchmark/core/test_testcase.py
from __future__ import absolute_import
import unittest
from vnftest.core import testcase
class Arg(object):
def __init__(self):
self.casename = ('onap_vnftest_onboard-v1',)
class TestcaseUT(unittest.TestCase):
def test_list_all(self):
t = testcase.Testcase()
result = t.list_all("")
self.assertIsInstance(result, list)
def test_show(self):
t = testcase.Testcase()
casename = Arg()
result = t.show(casename)
self.assertTrue(result)
| 33.390244
| 87
| 0.604091
|
bf43284b9d02e8df9b1b5cfbef3b1bb27b1b63b4
| 12,190
|
py
|
Python
|
utils/AmazonDataset/sequentially_split_train_test_data_with_random_query.py
|
ronaldseoh/Explainable-Product-Search-with-a-Dynamic-Relation-Embedding-Model
|
aef67aa8ebc085ed2c673da4a5d451e68136eebf
|
[
"Apache-2.0"
] | null | null | null |
utils/AmazonDataset/sequentially_split_train_test_data_with_random_query.py
|
ronaldseoh/Explainable-Product-Search-with-a-Dynamic-Relation-Embedding-Model
|
aef67aa8ebc085ed2c673da4a5d451e68136eebf
|
[
"Apache-2.0"
] | null | null | null |
utils/AmazonDataset/sequentially_split_train_test_data_with_random_query.py
|
ronaldseoh/Explainable-Product-Search-with-a-Dynamic-Relation-Embedding-Model
|
aef67aa8ebc085ed2c673da4a5d451e68136eebf
|
[
"Apache-2.0"
] | null | null | null |
import os,sys
import gzip
import random
import numpy as np
import json
data_path = sys.argv[1]
review_sample_rate = float(sys.argv[2]) # Percentage of reviews used for test+valid for each user
#query_sample_rate = float(sys.argv[3]) # Percentage of queries that are unique in testing+validation
output_path = data_path + 'seq_query_split/'
if not os.path.exists(output_path):
os.makedirs(output_path)
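# Usage sketch (the dataset directory and sample rate below are hypothetical):
#   python sequentially_split_train_test_data_with_random_query.py ./amazon_cellphones/ 0.3
# data_path must already contain the gzipped intermediate files read below
# (product_query.txt.gz, review_u_p.txt.gz, u_r_seq.txt.gz, review_text.txt.gz, ...).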
# Read all queries
product_query_idxs = []
query_level_count = []
all_queries = {}
all_query_idx = []
with gzip.open(data_path + 'product_query.txt.gz', 'rt') as cat_fin:
for line in cat_fin:
query_arr = line.strip().split(';')
product_query_idxs.append([])
for query_line in query_arr:
if len(query_line) < 1:
continue
arr = query_line.split('\t')
cat_num = int(arr[0][1:])
cat_query = arr[1]
if cat_query not in all_queries:
all_queries[cat_query] = len(all_queries)
all_query_idx.append(cat_query)
query_level_count.append(cat_num)
product_query_idxs[-1].append(all_queries[cat_query])
# Build user-review map
user_review_map = {}
review_product_list = []
with gzip.open(data_path + 'review_u_p.txt.gz', 'rt') as fin:
index = 0
for line in fin:
arr = line.strip().split(' ')
user = arr[0]
product = int(arr[1])
if user not in user_review_map:
user_review_map[user] = []
user_review_map[user].append(index)
review_product_list.append(product)
index += 1
# Read user-review sequence
user_review_seq = []
with gzip.open(data_path + 'u_r_seq.txt.gz', 'rt') as fin:
for line in fin:
arr = line.strip().split(' ')
user_review_seq.append([int(x) for x in arr])
# Generate train/test sets
test_review_idx = set()
test_product_set = set()
valid_review_idx = set()
valid_product_set = set()
for review_seq in user_review_seq:
sample_num = round(review_sample_rate * len(review_seq))
test_sample_num = int(sample_num / 2.0 + random.random())
valid_sample_num = sample_num - test_sample_num
for i in range(test_sample_num):
test_review_idx.add(review_seq[-1-i])
test_product_set.add(review_product_list[review_seq[-1-i]])
for i in range(valid_sample_num):
valid_review_idx.add(review_seq[-1-test_sample_num-i])
valid_product_set.add(review_product_list[review_seq[-1-test_sample_num-i]])
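# Worked example (illustrative): with review_sample_rate = 0.2 and a user whose
# sequence holds 10 reviews, sample_num = 2, so the most recent review goes to
# test and the one before it to validation; the random term only shifts the
# test/valid balance when sample_num is odd. All earlier reviews stay in train.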
test_query_idx = set()
valid_query_idx = set()
train_query_idx = set()
'''
additional_train_query_idx = set()
#sample_number = int(len(all_query_idx) * query_sample_rate)
#test_query_idx = set(np.random.choice([i for i in range(len(all_query_idx))], sample_number , replace=False))
# refine the train query set so that every item has at least one query
for product_id in test_product_set:
query_idxs = product_query_idxs[product_id]
tmp_test = random.choices(query_idxs, k=int(max(1, len(query_idxs) * query_sample_rate/2)))
tmp_valid = random.choices(query_idxs, k=int(max(1, len(query_idxs) * query_sample_rate/2)))
for q in query_idxs:
if q in tmp_test:
test_query_idx.add(q)
elif q in tmp_valid:
valid_query_idx.add(q)
else:
additional_train_query_idx.add(q)
unique_test_query = test_query_idx.union(valid_query_idx) - additional_train_query_idx
for product_id in test_product_set:
query_idxs = product_query_idxs[product_id]
tmp = set(query_idxs) - unique_test_query
if len(tmp) < 1:
pick_i = int(random.random()*len(query_idxs))
additional_train_query_idx.add(pick_i)
#for query_idxs in product_query_idxs:
# tmp = set(query_idxs) - test_query_idx
# if len(tmp) < 1:
# pick_i = int(random.random()*len(query_idxs))
# test_query_idx.remove(query_idxs[pick_i])
'''
#output train/test review data
train_user_product_map = {}
test_user_product_map = {}
valid_user_product_map = {}
with gzip.open(output_path + 'train.txt.gz', 'wt') as train_fout, gzip.open(output_path + 'test.txt.gz', 'wt') as test_fout, gzip.open(output_path + 'valid.txt.gz', 'wt') as valid_fout:
with gzip.open(data_path + 'review_u_p.txt.gz', 'rt') as info_fin, gzip.open(data_path + 'review_text.txt.gz', 'rt') as text_fin:
info_line = info_fin.readline()
text_line = text_fin.readline()
index = 0
while info_line:
arr = info_line.strip().split(' ')
if index in test_review_idx:
test_fout.write(arr[0] + '\t' + arr[1] + '\t' + text_line.strip() + '\n')
if int(arr[0]) not in test_user_product_map:
test_user_product_map[int(arr[0])] = set()
test_user_product_map[int(arr[0])].add(int(arr[1]))
elif index in valid_review_idx:
valid_fout.write(arr[0] + '\t' + arr[1] + '\t' + text_line.strip() + '\n')
if int(arr[0]) not in valid_user_product_map:
valid_user_product_map[int(arr[0])] = set()
valid_user_product_map[int(arr[0])].add(int(arr[1]))
else:
train_fout.write(arr[0] + '\t' + arr[1] + '\t' + text_line.strip() + '\n')
if int(arr[0]) not in train_user_product_map:
train_user_product_map[int(arr[0])] = set()
train_user_product_map[int(arr[0])].add(int(arr[1]))
index += 1
info_line = info_fin.readline()
text_line = text_fin.readline()
#read review_u_p and construct train/test id sets
with gzip.open(output_path + 'train_id.txt.gz', 'wt') as train_fout, gzip.open(output_path + 'test_id.txt.gz', 'wt') as test_fout, gzip.open(output_path + 'valid_id.txt.gz', 'wt') as valid_fout:
with gzip.open(data_path + 'review_u_p.txt.gz', 'rt') as info_fin, gzip.open(data_path + 'review_id.txt.gz', 'rt') as id_fin:
info_line = info_fin.readline()
id_line = id_fin.readline()
index = 0
while info_line:
arr = info_line.strip().split(' ')
user_idx = int(arr[0])
product_idx = int(arr[1])
query_idx = random.choice(product_query_idxs[product_idx])
if index in test_review_idx:
test_fout.write(arr[0] + '\t' + arr[1] + '\t' + str(id_line.strip()) + '\t' + str(query_idx) + '\t' + str(index) + '\n')
test_query_idx.add(query_idx)
elif index in valid_review_idx:
valid_fout.write(arr[0] + '\t' + arr[1] + '\t' + str(id_line.strip()) + '\t' + str(query_idx) + '\t' + str(index) + '\n')
valid_query_idx.add(query_idx)
else:
train_fout.write(arr[0] + '\t' + arr[1] + '\t' + str(id_line.strip()) + '\t' + str(query_idx) + '\t' + str(index) + '\n')
train_query_idx.add(query_idx)
index += 1
info_line = info_fin.readline()
id_line = id_fin.readline()
#output train/test queries
query_max_length = 0
with gzip.open(output_path + 'query.txt.gz', 'wt' ) as query_fout:
for cat_query in all_query_idx:
query_fout.write(cat_query + '\n')
query_length = len(cat_query.strip().split(' '))
if query_length > query_max_length:
query_max_length = query_length
train_product_query_idxs = []
test_product_query_idxs = []
valid_product_query_idxs = []
#mentioned_query_idx = test_query_idx.union(valid_query_idx).union(additional_train_query_idx)
with gzip.open(output_path + 'train_query_idx.txt.gz', 'wt') as train_fout, gzip.open(output_path + 'test_query_idx.txt.gz','wt') as test_fout, gzip.open(output_path + 'valid_query_idx.txt.gz','wt') as valid_fout:
for query_idxs in product_query_idxs:
train_product_query_idxs.append([])
valid_product_query_idxs.append([])
test_product_query_idxs.append([])
for q_idx in query_idxs:
# All queries could appear in the test set
if q_idx in test_query_idx:
test_fout.write(str(q_idx) + ' ')
test_product_query_idxs[-1].append(q_idx)
if q_idx in valid_query_idx:
valid_fout.write(str(q_idx) + ' ')
valid_product_query_idxs[-1].append(q_idx)
# Output training query
if q_idx in train_query_idx:
train_fout.write(str(q_idx) + ' ')
train_product_query_idxs[-1].append(q_idx)
#if q_idx not in mentioned_query_idx:
# train_fout.write(str(q_idx) + ' ')
# train_product_query_idxs[-1].append(q_idx)
# There are also some queries unique in the test set
#if q_idx not in test_query_idx:
# train_fout.write(str(q_idx) + ' ')
# train_product_query_idxs[-1].append(q_idx)
valid_fout.write('\n')
train_fout.write('\n')
test_fout.write('\n')
#generate qrels and json queries
product_ids = []
with gzip.open(data_path + 'product.txt.gz', 'rt') as fin:
for line in fin:
product_ids.append(line.strip())
user_ids = []
with gzip.open(data_path + 'users.txt.gz', 'rt') as fin:
for line in fin:
user_ids.append(line.strip())
vocab = []
with gzip.open(data_path + 'vocab.txt.gz', 'rt') as fin:
for line in fin:
vocab.append(line.strip())
def output_qrels_jsonQuery(user_product_map, product_query, qrel_file, jsonQuery_file):
json_queries = []
appeared_qrels = {}
with open(qrel_file, 'wt') as fout:
for u_idx in user_product_map:
user_id = user_ids[u_idx]
if user_id not in appeared_qrels:
appeared_qrels[user_id] = {}
for product_idx in user_product_map[u_idx]:
product_id = product_ids[product_idx]
if product_id not in appeared_qrels[user_id]:
appeared_qrels[user_id][product_id] = set()
#check if has query
for q_idx in product_query[product_idx]:
if q_idx in appeared_qrels[user_id][product_id]:
continue
appeared_qrels[user_id][product_id].add(q_idx)
fout.write(user_id + '_' + str(q_idx) + ' 0 ' + product_id + ' 1 ' + '\n')
json_q = {'number' : user_id + '_' + str(q_idx), 'text' : []}
json_q['text'].append('#combine(')
for v_i in all_query_idx[q_idx].strip().split(' '):
if len(v_i) > 0:
json_q['text'].append(vocab[int(v_i)])
json_q['text'].append(')')
json_q['text'] = ' '.join(json_q['text'])
json_queries.append(json_q)
with open(jsonQuery_file,'wt') as fout:
output_json = {'mu' : 1000, 'queries' : json_queries}
json.dump(output_json, fout, sort_keys = True, indent = 4)
output_qrels_jsonQuery(test_user_product_map, test_product_query_idxs,
output_path + 'test.qrels', output_path + 'test_query.json')
output_qrels_jsonQuery(train_user_product_map, train_product_query_idxs,
output_path + 'train.qrels', output_path + 'train_query.json')
output_qrels_jsonQuery(valid_user_product_map, valid_product_query_idxs,
output_path + 'valid.qrels', output_path + 'valid_query.json')
    # output statistics
with open(output_path + 'statistic.txt', 'wt') as fout:
fout.write('Total User ' + str(len(user_ids)) + '\n')
fout.write('Total Product ' + str(len(product_ids)) + '\n')
fout.write('Total Review ' + str(len(review_product_list)) + '\n')
fout.write('Total Vocab ' + str(len(vocab)) + '\n')
fout.write('Total Queries ' + str(len(all_query_idx)) + '\n')
fout.write('Max Query Length ' + str(query_max_length) + '\n')
fout.write('Test Review ' + str(len(test_review_idx)) + '\n')
fout.write('Test Queries ' + str(len(test_query_idx)) + '\n')
fout.write('Valid Review ' + str(len(valid_review_idx)) + '\n')
fout.write('Valid Queries ' + str(len(valid_query_idx)) + '\n')
| 45.148148
| 213
| 0.622395
|
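The split script above finishes by writing TREC-style qrels files and JSON queries in a #combine(...) retrieval-toolkit format. As a quick illustration of the qrels lines it emits (one "<user_id>_<query_idx> 0 <product_id> 1" judgement per line), here is a minimal, hypothetical reader; the path is a placeholder for wherever output_path points.

def read_qrels(path):
    """Map '<user_id>_<query_idx>' topics to the set of relevant product ids."""
    relevant = {}
    with open(path, 'rt') as fin:
        for line in fin:
            parts = line.split()
            if len(parts) < 4:
                continue
            topic, _zero, product_id, _rel = parts[:4]
            relevant.setdefault(topic, set()).add(product_id)
    return relevant

# e.g. rels = read_qrels('<output_path>/test.qrels')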
871c385280ade3328e27b0fc3a4d24dd3458f20b
| 3,333
|
py
|
Python
|
scripts/tar2db.py
|
rzswh/FirmAE
|
a590e11a63eb5bec5a0568ec298538daf0ba1816
|
[
"MIT"
] | null | null | null |
scripts/tar2db.py
|
rzswh/FirmAE
|
a590e11a63eb5bec5a0568ec298538daf0ba1816
|
[
"MIT"
] | null | null | null |
scripts/tar2db.py
|
rzswh/FirmAE
|
a590e11a63eb5bec5a0568ec298538daf0ba1816
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import tarfile
import getopt
import sys
import re
import hashlib
import psycopg2
psql_ip = '127.0.0.1'
def getFileHashes(infile):
t = tarfile.open(infile)
files = list()
links = list()
for f in t.getmembers():
if f.isfile():
# we use f.name[1:] to get rid of the . at the beginning of the path
files.append((f.name[1:], hashlib.md5(t.extractfile(f).read()).hexdigest(),
f.uid, f.gid, f.mode))
elif f.issym():
links.append((f.name[1:], f.linkpath))
return (files, links)
def getOids(objs, cur):
# hashes ... all the hashes in the tar file
hashes = [x[1] for x in objs]
hashes_str = ",".join(["""'%s'""" % x for x in hashes])
query = """SELECT id,hash FROM object WHERE hash IN (%s)"""
cur.execute(query % hashes_str)
res = [(int(x), y) for (x, y) in cur.fetchall()]
existingHashes = [x[1] for x in res]
missingHashes = set(hashes).difference(set(existingHashes))
newObjs = createObjects(missingHashes, cur)
res += newObjs
result = dict([(y, x) for (x, y) in res])
return result
def createObjects(hashes, cur):
query = """INSERT INTO object (hash) VALUES (%(hash)s) RETURNING id"""
res = list()
for h in set(hashes):
try:
cur.execute(query, {'hash':h})
oid = int(cur.fetchone()[0])
res.append((oid, h))
except:
continue
return res
def insertObjectToImage(iid, files2oids, links, cur):
query = """INSERT INTO object_to_image (iid, oid, filename, regular_file, uid, gid, permissions) VALUES (%(iid)s, %(oid)s, %(filename)s, %(regular_file)s, %(uid)s, %(gid)s, %(mode)s)"""
try:
cur.executemany(query, [{'iid': iid, 'oid' : x[1], 'filename' : x[0][0],
'regular_file' : True, 'uid' : x[0][1],
'gid' : x[0][2], 'mode' : x[0][3]} \
for x in files2oids])
cur.executemany(query, [{'iid': iid, 'oid' : 1, 'filename' : x[0],
'regular_file' : False, 'uid' : None,
'gid' : None, 'mode' : None} \
for x in links])
except:
return
def process(iid, infile):
global psql_ip
dbh = psycopg2.connect(database="firmware",
user="firmadyne",
password="firmadyne",
host=psql_ip)
cur = dbh.cursor()
(files, links) = getFileHashes(infile)
oids = getOids(files, cur)
fdict = dict([(h, (filename, uid, gid, mode)) \
for (filename, h, uid, gid, mode) in files])
file2oid = [(fdict[h], oid) for (h, oid) in oids.items()]
insertObjectToImage(iid, file2oid, links, cur)
dbh.commit()
dbh.close()
def main():
global psql_ip
infile = iid = None
opts, argv = getopt.getopt(sys.argv[1:], "f:i:h:")
for k, v in opts:
if k == '-i':
iid = int(v)
if k == '-f':
infile = v
if k == '-h':
psql_ip = v
if infile and not iid:
m = re.search(r"(\d+)\.tar\.gz", infile)
if m:
            iid = int(m.group(1))
process(iid, infile)
if __name__ == "__main__":
main()
| 28.982609
| 189
| 0.519952
|
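A hedged usage sketch for tar2db.py above: the firmware id and tarball path are made up, and the database insert path additionally assumes a reachable PostgreSQL "firmware" database as configured in FirmAE. The command line mirrors the getopt flags the script parses; the tar-hashing helper needs no database at all and can be reused directly.

# Command-line form (placeholder values):
#   python3 tar2db.py -i 42 -f ./images/42.tar.gz -h 127.0.0.1
# Importing the module only defines functions, so getFileHashes can be called on its own:
from tar2db import getFileHashes

files, links = getFileHashes("./images/42.tar.gz")   # hypothetical tarball
print(len(files), "regular files,", len(links), "symlinks")
print(files[0])   # (path, md5, uid, gid, mode)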
3086cf016589ffa49ed24ae16f0882f788d4ca40
| 5,724
|
py
|
Python
|
airflow/config_templates/airflow_local_settings.py
|
MAliNaqvi/incubator-airflow
|
63e761614869bd7cb93106f6235c50899ace3013
|
[
"Apache-2.0"
] | null | null | null |
airflow/config_templates/airflow_local_settings.py
|
MAliNaqvi/incubator-airflow
|
63e761614869bd7cb93106f6235c50899ace3013
|
[
"Apache-2.0"
] | null | null | null |
airflow/config_templates/airflow_local_settings.py
|
MAliNaqvi/incubator-airflow
|
63e761614869bd7cb93106f6235c50899ace3013
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from airflow import configuration as conf
# TODO: Logging format and level should be configured
# in this file instead of from airflow.cfg. Currently
# there are other log format and level configurations in
# settings.py and cli.py. Please see AIRFLOW-1455.
LOG_LEVEL = conf.get('core', 'LOGGING_LEVEL').upper()
# Flask appbuilder's info level log is very verbose,
# so it's set to 'WARN' by default.
FAB_LOG_LEVEL = 'WARN'
LOG_FORMAT = conf.get('core', 'LOG_FORMAT')
BASE_LOG_FOLDER = conf.get('core', 'BASE_LOG_FOLDER')
PROCESSOR_LOG_FOLDER = conf.get('scheduler', 'CHILD_PROCESS_LOG_DIRECTORY')
FILENAME_TEMPLATE = '{{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log'
PROCESSOR_FILENAME_TEMPLATE = '{{ filename }}.log'
# Storage bucket url for remote logging
# s3 buckets should start with "s3://"
# gcs buckets should start with "gs://"
# wasb buckets should start with "wasb" just to help Airflow select correct handler
REMOTE_BASE_LOG_FOLDER = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')
DEFAULT_LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'airflow': {
'format': LOG_FORMAT,
},
},
'handlers': {
'console': {
'class': 'airflow.utils.log.logging_mixin.RedirectStdHandler',
'formatter': 'airflow',
'stream': 'sys.stdout'
},
'task': {
'class': 'airflow.utils.log.file_task_handler.FileTaskHandler',
'formatter': 'airflow',
'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
'filename_template': FILENAME_TEMPLATE,
},
'processor': {
'class': 'airflow.utils.log.file_processor_handler.FileProcessorHandler',
'formatter': 'airflow',
'base_log_folder': os.path.expanduser(PROCESSOR_LOG_FOLDER),
'filename_template': PROCESSOR_FILENAME_TEMPLATE,
},
},
'loggers': {
'airflow.processor': {
'handlers': ['processor'],
'level': LOG_LEVEL,
'propagate': False,
},
'airflow.task': {
'handlers': ['task'],
'level': LOG_LEVEL,
'propagate': False,
},
'flask_appbuilder': {
            'handlers': ['console'],
'level': FAB_LOG_LEVEL,
'propagate': True,
}
},
'root': {
'handlers': ['console'],
'level': LOG_LEVEL,
}
}
REMOTE_HANDLERS = {
's3': {
'task': {
'class': 'airflow.utils.log.s3_task_handler.S3TaskHandler',
'formatter': 'airflow',
'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
's3_log_folder': REMOTE_BASE_LOG_FOLDER,
'filename_template': FILENAME_TEMPLATE,
},
'processor': {
'class': 'airflow.utils.log.s3_task_handler.S3TaskHandler',
'formatter': 'airflow',
'base_log_folder': os.path.expanduser(PROCESSOR_LOG_FOLDER),
's3_log_folder': REMOTE_BASE_LOG_FOLDER,
'filename_template': PROCESSOR_FILENAME_TEMPLATE,
},
},
'gcs': {
'task': {
'class': 'airflow.utils.log.gcs_task_handler.GCSTaskHandler',
'formatter': 'airflow',
'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
'gcs_log_folder': REMOTE_BASE_LOG_FOLDER,
'filename_template': FILENAME_TEMPLATE,
},
'processor': {
'class': 'airflow.utils.log.gcs_task_handler.GCSTaskHandler',
'formatter': 'airflow',
'base_log_folder': os.path.expanduser(PROCESSOR_LOG_FOLDER),
'gcs_log_folder': REMOTE_BASE_LOG_FOLDER,
'filename_template': PROCESSOR_FILENAME_TEMPLATE,
},
},
'wasb': {
'task': {
'class': 'airflow.utils.log.wasb_task_handler.WasbTaskHandler',
'formatter': 'airflow',
'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
'wasb_log_folder': REMOTE_BASE_LOG_FOLDER,
'wasb_container': 'airflow-logs',
'filename_template': FILENAME_TEMPLATE,
'delete_local_copy': False,
},
'processor': {
'class': 'airflow.utils.log.wasb_task_handler.WasbTaskHandler',
'formatter': 'airflow',
'base_log_folder': os.path.expanduser(PROCESSOR_LOG_FOLDER),
'wasb_log_folder': REMOTE_BASE_LOG_FOLDER,
'wasb_container': 'airflow-logs',
'filename_template': PROCESSOR_FILENAME_TEMPLATE,
'delete_local_copy': False,
},
}
}
REMOTE_LOGGING = conf.get('core', 'remote_logging')
if REMOTE_LOGGING and REMOTE_BASE_LOG_FOLDER.startswith('s3://'):
DEFAULT_LOGGING_CONFIG['handlers'].update(REMOTE_HANDLERS['s3'])
elif REMOTE_LOGGING and REMOTE_BASE_LOG_FOLDER.startswith('gs://'):
DEFAULT_LOGGING_CONFIG['handlers'].update(REMOTE_HANDLERS['gcs'])
elif REMOTE_LOGGING and REMOTE_BASE_LOG_FOLDER.startswith('wasb'):
DEFAULT_LOGGING_CONFIG['handlers'].update(REMOTE_HANDLERS['wasb'])
| 36
| 85
| 0.625961
|
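A minimal sketch, not taken from Airflow itself, of how a settings module like the one above is typically consumed: the dictionary is handed to the standard library's dictConfig and loggers are then looked up under the names it configures. The module name is assumed to be importable and a valid airflow.cfg is assumed to exist on the machine.

import logging
import logging.config

from airflow_local_settings import DEFAULT_LOGGING_CONFIG  # assumed module name

logging.config.dictConfig(DEFAULT_LOGGING_CONFIG)
task_logger = logging.getLogger("airflow.task")   # handled by the FileTaskHandler defined above
task_logger.info("task logging configured")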
b64bf3da0f469c08e0d10f3e9e8e450dc036c4fc
| 619
|
py
|
Python
|
unit_tests/lib/test_etcd_lib.py
|
hemanthnakkina/layer-etcd
|
ab21eb62984c0f84088f11881e5ad2783388557b
|
[
"ECL-2.0",
"Apache-2.0"
] | 4
|
2016-05-30T02:58:43.000Z
|
2019-03-07T09:55:04.000Z
|
unit_tests/lib/test_etcd_lib.py
|
hemanthnakkina/layer-etcd
|
ab21eb62984c0f84088f11881e5ad2783388557b
|
[
"ECL-2.0",
"Apache-2.0"
] | 122
|
2016-04-18T15:31:24.000Z
|
2019-01-29T14:00:58.000Z
|
unit_tests/lib/test_etcd_lib.py
|
hemanthnakkina/layer-etcd
|
ab21eb62984c0f84088f11881e5ad2783388557b
|
[
"ECL-2.0",
"Apache-2.0"
] | 15
|
2016-03-21T10:32:15.000Z
|
2019-01-29T08:03:00.000Z
|
from charmhelpers.contrib.templating import jinja
from etcd_lib import render_grafana_dashboard
def test_render_grafana_dashboard():
"""Test loading of Grafana dashboard."""
datasource = "prometheus"
raw_template = (
'{{"panels": [{{"datasource": "{} - '
'Juju generated source"}}]}}'.format(datasource)
)
expected_dashboard = {
"panels": [{"datasource": "{} - Juju generated source".format(datasource)}]
}
jinja.render.return_value = raw_template
rendered_dashboard = render_grafana_dashboard(datasource)
assert rendered_dashboard == expected_dashboard
| 29.47619
| 83
| 0.691438
|
857600d2eb41422c26da1e62da3f99f5e01a3e87
| 973
|
py
|
Python
|
zipkin/api.py
|
MoiTux/python-zipkin
|
4c69a28a43176fe24a2aeac932c153258ff7e60a
|
[
"Apache-2.0"
] | null | null | null |
zipkin/api.py
|
MoiTux/python-zipkin
|
4c69a28a43176fe24a2aeac932c153258ff7e60a
|
[
"Apache-2.0"
] | null | null | null |
zipkin/api.py
|
MoiTux/python-zipkin
|
4c69a28a43176fe24a2aeac932c153258ff7e60a
|
[
"Apache-2.0"
] | null | null | null |
from .models import Annotation
from .thread import local
__all__ = ['trace', 'get_current_trace']
def trace(name):
""" A decorator that trace the decorated function """
if hasattr(name, '__call__'):
return trace(name.__name__)(name)
def func_decorator(func):
def wrapper(*args, **kwargs):
try:
try:
recording = True
trace = local().child(name)
annotation = Annotation.server_recv()
trace.record(annotation)
except Exception:
recording = False
return func(*args, **kwargs)
finally:
if recording:
trace.record(Annotation.server_send())
local().pop()
return wrapper
return func_decorator
def get_current_trace():
return local().current
def stack_trace(trace):
return local().append(trace)
| 24.325
| 58
| 0.539568
|
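A short usage sketch for the trace decorator above; the function and span names are illustrative only. Both forms shown are supported because the decorator accepts either a name or the function itself, and recording silently degrades to a no-op when no trace context has been set up.

from zipkin.api import trace, get_current_trace

@trace("fetch-user")        # explicit span name
def fetch_user(user_id):
    return {"id": user_id}

@trace                      # bare form: the function name becomes the span name
def ping():
    return "pong"

fetch_user(42)
ping()
print(get_current_trace())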
1b13b423c3dfa188fbbcd18d322fdde56b91c9b9
| 9,476
|
py
|
Python
|
stumpy/aamp.py
|
tylerwmarrs/stumpy
|
150d8e2af00dd199821179ca6a7d71f7adbaf726
|
[
"BSD-3-Clause"
] | null | null | null |
stumpy/aamp.py
|
tylerwmarrs/stumpy
|
150d8e2af00dd199821179ca6a7d71f7adbaf726
|
[
"BSD-3-Clause"
] | null | null | null |
stumpy/aamp.py
|
tylerwmarrs/stumpy
|
150d8e2af00dd199821179ca6a7d71f7adbaf726
|
[
"BSD-3-Clause"
] | null | null | null |
# STUMPY
# Copyright 2019 TD Ameritrade. Released under the terms of the 3-Clause BSD license.
# STUMPY is a trademark of TD Ameritrade IP Company, Inc. All rights reserved.
import logging
import numpy as np
from numba import njit, prange, config
from . import core
from stumpy.config import STUMPY_D_SQUARED_THRESHOLD
logger = logging.getLogger(__name__)
@njit(fastmath=True)
def _compute_diagonal(
T_A,
T_B,
m,
T_A_subseq_isfinite,
T_B_subseq_isfinite,
diags,
diags_start_idx,
diags_stop_idx,
thread_idx,
P,
I,
ignore_trivial,
):
"""
Compute (Numba JIT-compiled) and update P, I along a single diagonal using a single
thread and avoiding race conditions
Parameters
----------
T_A : ndarray
The time series or sequence for which to compute the matrix profile
T_B : ndarray
The time series or sequence that will be used to annotate T_A. For every
subsequence in T_A, its nearest neighbor in T_B will be recorded.
m : int
Window size
P : ndarray
Matrix profile
I : ndarray
Matrix profile indices
T_A_subseq_isfinite : ndarray
A boolean array that indicates whether a subsequence in `T_A` contains a
`np.nan`/`np.inf` value (False)
T_B_subseq_isfinite : ndarray
A boolean array that indicates whether a subsequence in `T_B` contains a
`np.nan`/`np.inf` value (False)
diags : ndarray
        The indices of the diagonals to process and compute
diags_start_idx : int
The start index for a range of diagonal diag to process and compute
diags_stop_idx : int
The (exclusive) stop index for a range of diagonal diag to process and compute
thread_idx : int
The thread index
ignore_trivial : bool
Set to `True` if this is a self-join. Otherwise, for AB-join, set this to
`False`. Default is `True`.
Returns
-------
None
"""
n_A = T_A.shape[0]
n_B = T_B.shape[0]
for diag_idx in range(diags_start_idx, diags_stop_idx):
k = diags[diag_idx]
if k >= 0:
iter_range = range(0, min(n_A - m + 1, n_B - m + 1 - k))
else:
iter_range = range(-k, min(n_A - m + 1, n_B - m + 1 - k))
for i in iter_range:
if i == 0 or i == k or (k < 0 and i == -k):
D_squared = np.linalg.norm(T_B[i + k : i + k + m] - T_A[i : i + m]) ** 2
else:
D_squared = np.abs(
D_squared
- (T_B[i + k - 1] - T_A[i - 1]) ** 2
+ (T_B[i + k + m - 1] - T_A[i + m - 1]) ** 2
)
if D_squared < STUMPY_D_SQUARED_THRESHOLD:
D_squared = 0.0
if T_A_subseq_isfinite[i] and T_B_subseq_isfinite[i + k]:
# Neither subsequence contains NaNs
if D_squared < P[thread_idx, i, 0]:
P[thread_idx, i, 0] = D_squared
I[thread_idx, i, 0] = i + k
if ignore_trivial:
if D_squared < P[thread_idx, i + k, 0]:
P[thread_idx, i + k, 0] = D_squared
I[thread_idx, i + k, 0] = i
if i < i + k:
# left matrix profile and left matrix profile index
if D_squared < P[thread_idx, i + k, 1]:
P[thread_idx, i + k, 1] = D_squared
I[thread_idx, i + k, 1] = i
# right matrix profile and right matrix profile index
if D_squared < P[thread_idx, i, 2]:
P[thread_idx, i, 2] = D_squared
I[thread_idx, i, 2] = i + k
return
@njit(parallel=True, fastmath=True)
def _aamp(
T_A,
T_B,
m,
T_A_subseq_isfinite,
T_B_subseq_isfinite,
diags,
ignore_trivial,
):
"""
A Numba JIT-compiled version of AAMP for parallel computation of the matrix
profile and matrix profile indices.
Parameters
----------
T_A : ndarray
The time series or sequence for which to compute the matrix profile
T_B : ndarray
The time series or sequence that will be used to annotate T_A. For every
subsequence in T_A, its nearest neighbor in T_B will be recorded.
m : int
Window size
T_A_subseq_isfinite : ndarray
A boolean array that indicates whether a subsequence in `T_A` contains a
`np.nan`/`np.inf` value (False)
T_B_subseq_isfinite : ndarray
A boolean array that indicates whether a subsequence in `T_B` contains a
`np.nan`/`np.inf` value (False)
diags : ndarray
        The indices of the diagonals to process and compute
ignore_trivial : bool
Set to `True` if this is a self-join. Otherwise, for AB-join, set this to
`False`. Default is `True`.
Returns
-------
P : ndarray
Matrix profile
I : ndarray
Matrix profile indices
Notes
-----
`DOI: 10.1109/ICDM.2018.00099 \
<https://www.cs.ucr.edu/~eamonn/SCRIMP_ICDM_camera_ready_updated.pdf>`__
See Algorithm 1
"""
n_A = T_A.shape[0]
n_B = T_B.shape[0]
l = n_A - m + 1
n_threads = config.NUMBA_NUM_THREADS
P = np.full((n_threads, l, 3), np.inf)
I = np.full((n_threads, l, 3), -1, np.int64)
ndist_counts = core._count_diagonal_ndist(diags, m, n_A, n_B)
diags_ranges = core._get_array_ranges(ndist_counts, n_threads)
for thread_idx in prange(n_threads):
# Compute and update P, I within a single thread while avoiding race conditions
_compute_diagonal(
T_A,
T_B,
m,
T_A_subseq_isfinite,
T_B_subseq_isfinite,
diags,
diags_ranges[thread_idx, 0],
diags_ranges[thread_idx, 1],
thread_idx,
P,
I,
ignore_trivial,
)
# Reduction of results from all threads
for thread_idx in range(1, n_threads):
for i in prange(l):
if P[0, i, 0] > P[thread_idx, i, 0]:
P[0, i, 0] = P[thread_idx, i, 0]
I[0, i, 0] = I[thread_idx, i, 0]
# left matrix profile and left matrix profile indices
if P[0, i, 1] > P[thread_idx, i, 1]:
P[0, i, 1] = P[thread_idx, i, 1]
I[0, i, 1] = I[thread_idx, i, 1]
# right matrix profile and right matrix profile indices
if P[0, i, 2] > P[thread_idx, i, 2]:
P[0, i, 2] = P[thread_idx, i, 2]
I[0, i, 2] = I[thread_idx, i, 2]
return np.sqrt(P[0, :, :]), I[0, :, :]
def aamp(T_A, m, T_B=None, ignore_trivial=True):
"""
Compute the non-normalized (i.e., without z-normalization) matrix profile
This is a convenience wrapper around the Numba JIT-compiled parallelized
`_aamp` function which computes the matrix profile according to AAMP.
Parameters
----------
T_A : ndarray
The time series or sequence for which to compute the matrix profile
m : int
Window size
T_B : ndarray, default None
The time series or sequence that will be used to annotate T_A. For every
subsequence in T_A, its nearest neighbor in T_B will be recorded. Default is
`None` which corresponds to a self-join.
ignore_trivial : bool, default True
Set to `True` if this is a self-join. Otherwise, for AB-join, set this
to `False`. Default is `True`.
Returns
-------
out : ndarray
The first column consists of the matrix profile, the second column
consists of the matrix profile indices.
Notes
-----
`arXiv:1901.05708 \
<https://arxiv.org/pdf/1901.05708.pdf>`__
See Algorithm 1
Note that we have extended this algorithm for AB-joins as well.
"""
if T_B is None:
T_B = T_A.copy()
ignore_trivial = True
T_A, T_A_subseq_isfinite = core.preprocess_non_normalized(T_A, m)
T_B, T_B_subseq_isfinite = core.preprocess_non_normalized(T_B, m)
if T_A.ndim != 1: # pragma: no cover
raise ValueError(f"T_A is {T_A.ndim}-dimensional and must be 1-dimensional. ")
if T_B.ndim != 1: # pragma: no cover
raise ValueError(f"T_B is {T_B.ndim}-dimensional and must be 1-dimensional. ")
core.check_window_size(m)
if ignore_trivial is False and core.are_arrays_equal(T_A, T_B): # pragma: no cover
logger.warning("Arrays T_A, T_B are equal, which implies a self-join.")
logger.warning("Try setting `ignore_trivial = True`.")
if ignore_trivial and core.are_arrays_equal(T_A, T_B) is False: # pragma: no cover
logger.warning("Arrays T_A, T_B are not equal, which implies an AB-join.")
logger.warning("Try setting `ignore_trivial = False`.")
n_A = T_A.shape[0]
n_B = T_B.shape[0]
l = n_A - m + 1
excl_zone = int(np.ceil(m / 4))
out = np.empty((l, 4), dtype=object)
if ignore_trivial:
diags = np.arange(excl_zone + 1, n_A - m + 1)
else:
diags = np.arange(-(n_A - m + 1) + 1, n_B - m + 1)
P, I = _aamp(
T_A,
T_B,
m,
T_A_subseq_isfinite,
T_B_subseq_isfinite,
diags,
ignore_trivial,
)
out[:, 0] = P[:, 0]
out[:, 1:] = I[:, :]
return out
| 29.705329
| 88
| 0.576931
|
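A usage sketch for aamp() above on random data; it relies only on the call signature and the column layout documented in the file (column 0 holds the non-normalized matrix profile, column 1 the nearest-neighbour indices).

import numpy as np
from stumpy.aamp import aamp

T = np.random.rand(1000)
m = 50                                   # subsequence window size
out = aamp(T, m)                         # self-join
profile = out[:, 0].astype(np.float64)   # matrix profile values
nn_idx = out[:, 1].astype(np.int64)      # nearest-neighbour index per subsequence
print(profile.min(), nn_idx[:5])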
4dd541e9bdbb57b9f73aaa5803c4999b63d00302
| 11,947
|
py
|
Python
|
python/tvm/tir/ir_builder.py
|
retamia/tvm
|
5d25dc54d874bf2ddf0e8cf34c4748e9e2656fd8
|
[
"Apache-2.0"
] | 5
|
2020-06-19T03:22:24.000Z
|
2021-03-17T22:16:48.000Z
|
python/tvm/tir/ir_builder.py
|
retamia/tvm
|
5d25dc54d874bf2ddf0e8cf34c4748e9e2656fd8
|
[
"Apache-2.0"
] | 2
|
2020-07-08T12:34:59.000Z
|
2020-07-11T15:54:47.000Z
|
python/tvm/tir/ir_builder.py
|
retamia/tvm
|
5d25dc54d874bf2ddf0e8cf34c4748e9e2656fd8
|
[
"Apache-2.0"
] | 1
|
2020-03-10T17:11:57.000Z
|
2020-03-10T17:11:57.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Developer API of IR node builder make function."""
from tvm._ffi.base import string_types
from tvm.runtime import ObjectGeneric, DataType, convert, const
from tvm.ir import container as _container
from . import stmt as _stmt
from . import expr as _expr
class WithScope(object):
"""Auxiliary scope with"""
def __init__(self, enter_value, exit_cb):
self._enter_value = enter_value
self._exit_cb = exit_cb
def __enter__(self):
return self._enter_value
def __exit__(self, ptype, value, trace):
self._exit_cb()
class BufferVar(ObjectGeneric):
"""Buffer variable with content type, makes load store easily.
Do not create it directly, create use IRBuilder.
Examples
--------
In the follow example, x is BufferVar.
:code:`x[0] = ...` directly emit a store to the IRBuilder,
:code:`x[10]` translates to Load.
.. code-block:: python
        # The following code generates IR for x[0] = x[10] + 1
ib = tvm.tir.ir_builder.create()
x = ib.pointer("float32")
x[0] = x[10] + 1
See Also
--------
IRBuilder.pointer
IRBuilder.buffer_ptr
IRBuilder.allocate
"""
def __init__(self, builder, buffer_var, content_type):
self._builder = builder
self._buffer_var = buffer_var
self._content_type = content_type
def asobject(self):
return self._buffer_var
@property
def dtype(self):
return self._content_type
def __getitem__(self, index):
t = DataType(self._content_type)
if t.lanes > 1:
base = index * t.lanes
index = _expr.Ramp(base, const(1, base.dtype), t.lanes)
return _expr.Load(self._content_type, self._buffer_var, index)
def __setitem__(self, index, value):
value = convert(value)
if value.dtype != self._content_type:
raise ValueError(
"data type does not match content type %s vs %s" % (
value.dtype, self._content_type))
t = DataType(self._content_type)
if t.lanes > 1:
base = index * t.lanes
index = _expr.Ramp(base, const(1, base.dtype), t.lanes)
self._builder.emit(_stmt.Store(self._buffer_var, value, index))
class IRBuilder(object):
"""Auxiliary builder to build IR for testing and dev.
Examples
--------
.. code-block:: python
ib = tvm.tir.ir_builder.create()
n = te.var("n")
A = ib.allocate("float32", n, name="A")
with ib.for_range(0, n, name="i") as i:
with ib.if_scope((i % 2) == 0):
A[i] = A[i] + 1
# The result stmt.
stmt = ib.get()
"""
def __init__(self):
self._seq_stack = [[]]
self.nidx = 0
def _pop_seq(self):
"""Pop sequence from stack"""
seq = self._seq_stack.pop()
if not seq or callable(seq[-1]):
seq.append(_stmt.Evaluate(0))
seqwrap = lambda x: x[0] if len(x) == 1 else _stmt.SeqStmt(list(reversed(x)))
ret_seq = [seq[-1]]
for s in reversed(seq[:-1]):
if callable(s):
ret_seq = [s(seqwrap(ret_seq))]
else:
assert isinstance(s, _stmt.Stmt)
ret_seq.append(s)
return seqwrap(ret_seq)
def emit(self, stmt):
"""Emit a statement to the end of current scope.
Parameters
----------
stmt : Stmt or callable.
The statement to be emitted or callable that build stmt given body.
"""
if isinstance(stmt, _expr.Call):
stmt = _stmt.Evaluate(stmt)
assert isinstance(stmt, _stmt.Stmt) or callable(stmt)
self._seq_stack[-1].append(stmt)
def scope_attr(self, node, attr_key, value):
"""Create an AttrStmt at current scope.
Parameters
----------
attr_key : str
The key of the attribute type.
node : Node
            The attribute node to annotate on.
value : Expr
Attribute value.
Examples
--------
.. code-block:: python
ib = tvm.tir.ir_builder.create()
i = te.var("i")
x = ib.pointer("float32")
ib.scope_attr(x, "storage_scope", "global")
x[i] = x[i - 1] + 1
"""
if isinstance(node, string_types):
node = _expr.StringImm(node)
if isinstance(value, string_types):
value = _expr.StringImm(value)
self.emit(lambda x: _stmt.AttrStmt(node, attr_key, value, x))
def for_range(self, begin, end, name="i", dtype="int32", for_type="serial"):
"""Create a for iteration scope.
Parameters
----------
begin : Expr
The min iteration scope.
end : Expr
The end iteration scope
name : str, optional
The name of iteration variable, if no input names,
using typical index names i, j, k, then i_nidx
dtype : str, optional
The data type of iteration variable.
for_type : str, optional
The special tag on the for loop.
Returns
-------
loop_scope : With.Scope of Var
The for scope, when enters returns loop_var
Examples
--------
.. code-block:: python
ib = tvm.tir.ir_builder.create()
x = ib.pointer("float32")
with ib.for_range(1, 10, name="i") as i:
x[i] = x[i - 1] + 1
"""
if name == 'i':
name = chr(ord(name) + self.nidx) if self.nidx < 3 else name + "_" + str(self.nidx - 3)
self.nidx += 1
self._seq_stack.append([])
loop_var = _expr.Var(name, dtype=dtype)
extent = end if begin == 0 else (end - begin)
def _exit_cb():
if for_type == "serial":
for_type_id = 0
elif for_type == "parallel":
for_type_id = 1
elif for_type == "vectorize":
for_type_id = 2
elif for_type == "unroll":
for_type_id = 3
else:
raise ValueError("Unknown for_type")
self.emit(_stmt.For(
loop_var, begin, extent, for_type_id, 0, self._pop_seq()))
return WithScope(loop_var, _exit_cb)
def if_scope(self, cond):
"""Create an if scope.
Parameters
----------
cond : Expr
The condition.
Returns
-------
if_scope : WithScope
The result if scope.
Examples
--------
.. code-block:: python
ib = tvm.tir.ir_builder.create()
i = te.var("i")
x = ib.pointer("float32")
with ib.if_scope((i % 2) == 0):
x[i] = x[i - 1] + 1
"""
self._seq_stack.append([])
def _exit_cb():
self.emit(_stmt.IfThenElse(cond, self._pop_seq(), None))
return WithScope(None, _exit_cb)
def else_scope(self):
"""Create an else scope.
This can only be used right after an if scope.
Returns
-------
else_scope : WithScope
The result else scope.
Examples
--------
.. code-block:: python
ib = tvm.tir.ir_builder.create()
i = te.var("i")
x = ib.pointer("float32")
with ib.if_scope((i % 2) == 0):
x[i] = x[i - 1] + 1
with ib.else_scope():
x[i] = x[i - 1] + 2
"""
if not self._seq_stack[-1]:
raise RuntimeError("else_scope can only follow an if_scope")
prev = self._seq_stack[-1][-1]
if not isinstance(prev, _stmt.IfThenElse) or prev.else_case:
raise RuntimeError("else_scope can only follow an if_scope")
self._seq_stack[-1].pop()
self._seq_stack.append([])
def _exit_cb():
self.emit(_stmt.IfThenElse(prev.condition, prev.then_case, self._pop_seq()))
return WithScope(None, _exit_cb)
def new_scope(self):
"""Create new scope,
this is useful to set boundary of attr and allocate.
Returns
-------
new_scope : WithScope
The result new scope.
"""
self._seq_stack.append([])
def _exit_cb():
self.emit(self._pop_seq())
return WithScope(None, _exit_cb)
def allocate(self, dtype, shape, name="buf", scope=None):
"""Create a allocate statement.
Parameters
----------
dtype : str
The content data type.
shape : tuple of Expr
The shape of array to be allocated.
name : str, optional
The name of the buffer.
scope : str, optional
The scope of the buffer.
Returns
-------
buffer : BufferVar
The buffer var representing the buffer.
"""
buffer_var = _expr.Var(name, dtype="handle")
if not isinstance(shape, (list, tuple, _container.Array)):
shape = [shape]
if scope:
self.scope_attr(buffer_var, "storage_scope", scope)
self.emit(lambda x: _stmt.Allocate(
buffer_var, dtype, shape, const(1, dtype="uint1"), x))
return BufferVar(self, buffer_var, dtype)
def pointer(self, content_type, name="ptr"):
"""Create pointer variable with content type.
Parameters
----------
content_type : str
The content data type.
name : str, optional
The name of the pointer.
Returns
-------
ptr : BufferVar
The buffer var representing the buffer.
"""
buffer_var = _expr.Var(name, dtype="handle")
return BufferVar(self, buffer_var, content_type)
def buffer_ptr(self, buf):
"""Create pointer variable corresponds to buffer ptr.
Parameters
----------
buf : Buffer
The buffer to be extracted.
Returns
-------
ptr : BufferVar
The buffer var representing the buffer.
"""
return BufferVar(self, buf.data, buf.dtype)
def likely(self, expr):
"""Add likely tag for expression.
Parameters
----------
expr : Expr
The expression. Usually a condition expression.
Returns
-------
expr : Expr
            The expression annotated with the likely tag.
"""
return _expr.Call(expr.dtype, "likely", [expr],
_expr.Call.PureIntrinsic, None, 0)
def get(self):
"""Return the builded IR.
Returns
-------
stmt : Stmt
The result statement.
"""
seq = self._pop_seq()
if self._seq_stack:
raise RuntimeError("cannot call get inside construction scope")
return seq
def create():
"""Create a new IRBuilder
Returns
-------
builder : IRBuilder
The created IRBuilder
"""
return IRBuilder()
| 29.281863
| 99
| 0.549929
|
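A combined usage sketch assembled from the docstring examples above; it only builds a TIR statement and prints it, nothing is lowered or executed.

import tvm
from tvm import te

ib = tvm.tir.ir_builder.create()
n = te.var("n")
A = ib.allocate("float32", n, name="A")
with ib.for_range(0, n, name="i") as i:
    with ib.if_scope((i % 2) == 0):
        A[i] = A[i] + 1.0
stmt = ib.get()
print(stmt)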
f578098550e6f794fc9125c46f5c1258931ac891
| 1,073
|
py
|
Python
|
sample_code/Python_NLP/nlp-6-1-context.py
|
ice-melt/python-lib
|
345e34fff7386d91acbb03a01fd4127c5dfed037
|
[
"MIT"
] | null | null | null |
sample_code/Python_NLP/nlp-6-1-context.py
|
ice-melt/python-lib
|
345e34fff7386d91acbb03a01fd4127c5dfed037
|
[
"MIT"
] | null | null | null |
sample_code/Python_NLP/nlp-6-1-context.py
|
ice-melt/python-lib
|
345e34fff7386d91acbb03a01fd4127c5dfed037
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@Author : ice-melt@outlook.com
@File : nlp-6-1-context.py
@Time : 2019/5/6 18:17
@Version : 1.0
@Desc : None
"""
from nltk.corpus import brown
import nltk
def pos_features(sentence, i):
features = {'suffix(1)': sentence[i][-1:],
'suffix(2)': sentence[i][-2:],
'suffix(3)': sentence[i][-3:]}
if i == 0:
features["prev-word"] = "<START>"
else:
features['prev-word'] = sentence[i - 1]
return features
print(brown.sents()[0])
pos_features(brown.sents()[0], 8)
tagged_sents = brown.tagged_sents(categories='news')
featuresets = []
for tagged_sent in tagged_sents:
untagged_sent = nltk.tag.untag(tagged_sent)
for i, (word, tag) in enumerate(tagged_sent):
featuresets.append((pos_features(untagged_sent, i), tag))
size = int(len(featuresets) * 0.1)
train_set, test_set = featuresets[size:], featuresets[:size]
classifier = nltk.NaiveBayesClassifier.train(train_set)
print(nltk.classify.accuracy(classifier, test_set))
| 27.512821
| 65
| 0.635601
|
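A small follow-on sketch (not part of the original script) showing how the trained NaiveBayes tagger can be applied to an unseen sentence; run it after the code above so that classifier and pos_features are in scope.

sentence = ['The', 'company', 'reported', 'strong', 'results', 'yesterday', '.']
predicted = [classifier.classify(pos_features(sentence, i)) for i in range(len(sentence))]
print(list(zip(sentence, predicted)))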
00df1d5e3b22c14ae453468eff5b8dfe62853639
| 1,745
|
py
|
Python
|
setup.py
|
amane-katagiri/maruberu
|
dec81e6ac97022e0f99090a0b9919c60736e2445
|
[
"MIT"
] | null | null | null |
setup.py
|
amane-katagiri/maruberu
|
dec81e6ac97022e0f99090a0b9919c60736e2445
|
[
"MIT"
] | 1
|
2019-10-04T02:13:04.000Z
|
2019-10-04T02:13:04.000Z
|
setup.py
|
amane-katagiri/maruberu
|
dec81e6ac97022e0f99090a0b9919c60736e2445
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Setup file for Distutils."""
from logging import DEBUG
from logging import getLogger
from logging import StreamHandler
import os
import maruberu
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
try:
from setuptools import find_packages
from setuptools import setup
except ImportError:
logger.critical("Please install setuptools.")
def main() -> None:
"""Setup package after read meta information from files."""
long_description = "You can ring my round bell."
if os.path.exists("README.rst"):
with open("README.rst") as f:
long_description = f.read()
install_requires = []
if os.path.exists("requirements.txt"):
with open("requirements.txt") as f:
install_requires = f.read().split()
setup(name="maruberu",
version=maruberu.__version__,
description="You can ring my round bell.",
long_description=long_description,
license="MIT",
author="Amane Katagiri",
author_email="amane@ama.ne.jp",
url="https://github.com/amane-katagiri/maruberu",
keywords="server bell alert",
install_requires=[
] + install_requires,
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.7",
"Framework :: Tornado",
],
packages=find_packages(),
entry_points="""
[console_scripts]
maruberu = maruberu.main:main
""", )
if __name__ == "__main__":
main()
| 27.698413
| 63
| 0.6149
|
e608727db0b7328d763feb5a9b9ef1db828b6a99
| 898
|
py
|
Python
|
tests/conftest.py
|
pylucknow/open-community-api
|
cf7cbdb977bfdd4ca047f7efc6eb895c22417146
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
pylucknow/open-community-api
|
cf7cbdb977bfdd4ca047f7efc6eb895c22417146
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
pylucknow/open-community-api
|
cf7cbdb977bfdd4ca047f7efc6eb895c22417146
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Defines fixtures available to all tests."""
import pytest
from webtest import TestApp
from oca.app import create_app
from oca.database import db as _db
from .factories import UserFactory
@pytest.fixture
def app():
"""Create application for the tests."""
_app = create_app("tests.settings")
ctx = _app.test_request_context()
ctx.push()
yield _app
ctx.pop()
@pytest.fixture
def testapp(app):
"""Create Webtest app."""
return TestApp(app)
@pytest.fixture
def db(app):
"""Create database for the tests."""
_db.app = app
with app.app_context():
_db.create_all()
yield _db
# Explicitly close DB connection
_db.session.close()
_db.drop_all()
@pytest.fixture
def user(db):
"""Create user for the tests."""
user = UserFactory(password="myprecious")
db.session.commit()
return user
| 17.607843
| 46
| 0.660356
|
7eba8235654766e7076e934e38b5ab73627887e1
| 219
|
py
|
Python
|
tests/test_day13.py
|
TobiasRoeding/advent-of-code-2021
|
3db16d52ad9f4f04ac7f43087f6f504dca41cc43
|
[
"Unlicense"
] | null | null | null |
tests/test_day13.py
|
TobiasRoeding/advent-of-code-2021
|
3db16d52ad9f4f04ac7f43087f6f504dca41cc43
|
[
"Unlicense"
] | null | null | null |
tests/test_day13.py
|
TobiasRoeding/advent-of-code-2021
|
3db16d52ad9f4f04ac7f43087f6f504dca41cc43
|
[
"Unlicense"
] | null | null | null |
from src.day13 import Day13 as DayX
INPUT = "tests/inputs/day13.txt"
def test_init():
DayX(INPUT)
def test_part1():
assert DayX(INPUT).part1() == 17
def test_part2():
assert DayX(INPUT).part2() == 16
| 13.6875
| 36
| 0.657534
|
0229e86d0b082143909d65d92b8fa8623c88bb08
| 2,295
|
py
|
Python
|
python/test/test_shuttle_api.py
|
openlattice/api-clients
|
1d5be9861785b295089b732f37464e31bf80c8ca
|
[
"Apache-2.0"
] | null | null | null |
python/test/test_shuttle_api.py
|
openlattice/api-clients
|
1d5be9861785b295089b732f37464e31bf80c8ca
|
[
"Apache-2.0"
] | 1
|
2021-01-20T00:20:01.000Z
|
2021-01-20T00:20:01.000Z
|
python/test/test_shuttle_api.py
|
openlattice/api-clients
|
1d5be9861785b295089b732f37464e31bf80c8ca
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
OpenLattice API
OpenLattice API # noqa: E501
The version of the OpenAPI document: 0.0.1
Contact: support@openlattice.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import openlattice
from openlattice.api.shuttle_api import ShuttleApi # noqa: E501
from openlattice.rest import ApiException
class TestShuttleApi(unittest.TestCase):
"""ShuttleApi unit test stubs"""
def setUp(self):
self.api = openlattice.api.shuttle_api.ShuttleApi() # noqa: E501
def tearDown(self):
pass
def test_create_integration_definition(self):
"""Test case for create_integration_definition
Creates a new integration definition for running recurring integrations # noqa: E501
"""
pass
def test_delete_integration_definition(self):
"""Test case for delete_integration_definition
Replaces any number of fields within an existing integration definition # noqa: E501
"""
pass
def test_delete_integration_job_status(self):
"""Test case for delete_integration_job_status
Deletes an integration job status from the integrationJobs map # noqa: E501
"""
pass
def test_enqueue_integration(self):
"""Test case for enqueue_integration
Enqueues an integration on Shuttle Server for a given integration # noqa: E501
"""
pass
def test_poll_all_integrations(self):
"""Test case for poll_all_integrations
Polls the statuses of all running integrations # noqa: E501
"""
pass
def test_poll_integration(self):
"""Test case for poll_integration
Polls the status of an integration # noqa: E501
"""
pass
def test_read_integration_definition(self):
"""Test case for read_integration_definition
Gets an existing integration definition # noqa: E501
"""
pass
def test_update_integration_definition(self):
"""Test case for update_integration_definition
Replaces any number of fields within an existing integration definition # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| 25.21978
| 93
| 0.674074
|
d9a9afa2e7bed21f15be6d08d662ef312cb77aa2
| 2,669
|
py
|
Python
|
runtime/databricks/automl_runtime/forecast/utils.py
|
wenfeiy-db/automl
|
f63e259b0db0a19ca87c3ea851ab749e3ab57c3b
|
[
"Apache-2.0"
] | null | null | null |
runtime/databricks/automl_runtime/forecast/utils.py
|
wenfeiy-db/automl
|
f63e259b0db0a19ca87c3ea851ab749e3ab57c3b
|
[
"Apache-2.0"
] | null | null | null |
runtime/databricks/automl_runtime/forecast/utils.py
|
wenfeiy-db/automl
|
f63e259b0db0a19ca87c3ea851ab749e3ab57c3b
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (C) 2022 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import List
import pandas as pd
def generate_cutoffs(df: pd.DataFrame, horizon: int, unit: str,
seasonal_period: int, num_folds: int) -> List[pd.Timestamp]:
"""
Generate cutoff times for cross validation with the control of number of folds.
:param df: pd.DataFrame of the historical data
    :param horizon: int number of time units into the future for forecasting.
:param unit: frequency of the timeseries, which must be a pandas offset alias.
:param seasonal_period: length of the seasonality period in days
:param num_folds: int number of cutoffs for cross validation.
    :return: list of pd.Timestamp cutoffs for cross-validation.
"""
period = max(0.5 * horizon, 1) # avoid empty cutoff buckets
period_timedelta = pd.to_timedelta(period, unit=unit)
horizon_timedelta = pd.to_timedelta(horizon, unit=unit)
seasonality_timedelta = pd.to_timedelta(seasonal_period, unit=unit)
initial = max(3 * horizon_timedelta, seasonality_timedelta)
# Last cutoff is "latest date in data - horizon_timedelta" date
cutoff = df["ds"].max() - horizon_timedelta
if cutoff < df["ds"].min():
raise ValueError("Less data than horizon.")
result = [cutoff]
while result[-1] >= min(df["ds"]) + initial and len(result) < num_folds:
cutoff -= period_timedelta
# If data does not exist in data range (cutoff, cutoff + horizon_timedelta]
if not (((df["ds"] > cutoff) & (df["ds"] <= cutoff + horizon_timedelta)).any()):
# Next cutoff point is "last date before cutoff in data - horizon_timedelta"
if cutoff > df["ds"].min():
closest_date = df[df["ds"] <= cutoff].max()["ds"]
cutoff = closest_date - horizon_timedelta
# else no data left, leave cutoff as is, it will be dropped.
result.append(cutoff)
result = result[:-1]
if len(result) == 0:
raise ValueError(
"Less data than horizon after initial window. Make horizon shorter."
)
return list(reversed(result))
| 43.048387
| 88
| 0.68003
|
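A usage sketch for generate_cutoffs() above on a toy daily series; the "ds" column name is what the function expects, the other values are arbitrary, and the import path is inferred from the repository layout.

import pandas as pd
from databricks.automl_runtime.forecast.utils import generate_cutoffs

df = pd.DataFrame({
    "ds": pd.date_range("2021-01-01", periods=120, freq="D"),
    "y": range(120),
})
cutoffs = generate_cutoffs(df, horizon=10, unit="D", seasonal_period=7, num_folds=5)
print(cutoffs)   # list of cutoff timestamps, oldest first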
5a5e41a0281c6ca8c88f6aff71a3165575ba152b
| 2,159
|
py
|
Python
|
Aggregation-Network/models/AttentionModel/rnn.py
|
King-HAW/DC-MT
|
16678dd8284d074527fe1f7ddd042739aece4ed1
|
[
"MIT"
] | 1
|
2022-02-17T07:26:18.000Z
|
2022-02-17T07:26:18.000Z
|
Aggregation-Network/models/AttentionModel/rnn.py
|
King-HAW/DC-MT
|
16678dd8284d074527fe1f7ddd042739aece4ed1
|
[
"MIT"
] | null | null | null |
Aggregation-Network/models/AttentionModel/rnn.py
|
King-HAW/DC-MT
|
16678dd8284d074527fe1f7ddd042739aece4ed1
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
class SimpleRNN(nn.Module):
def __init__(self, in_dim=128, hidden_dim=96, n_layers=2, drop_out=0.5, num_classes=1000):
super(SimpleRNN, self).__init__()
self.n_layers = n_layers
self.hidden_dim = hidden_dim
self.fea_conv = nn.Sequential(nn.Dropout2d(drop_out),
nn.Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Dropout2d(drop_out),
nn.Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=False),
nn.BatchNorm2d(128),
nn.ReLU(),
nn.Dropout2d(drop_out),
)
self.fea_lstm = nn.GRU(in_dim, hidden_dim, num_layers=self.n_layers, batch_first=True, bidirectional=True)
self.fea_first_final = nn.Sequential(nn.Conv2d(128, hidden_dim * 2, kernel_size=(24, 1), stride=(1, 1), padding=(0, 0), bias=True))
self.fea_secon_final = nn.Sequential(nn.Conv2d(hidden_dim * 2, hidden_dim * 2, kernel_size=(24, 1), stride=(1, 1), padding=(0, 0), bias=True))
self.classifier = nn.Linear(hidden_dim * 2, num_classes, bias=False)
def forward(self, data):
x, pro = data
bs, seq_len, fea_len = x.shape
x = x.permute(0, 2, 1).contiguous()
x = x.view(bs, fea_len, seq_len, 1).contiguous()
x_out = self.fea_conv(x)
x0 = self.fea_first_final(x_out)
x0 = x0.view(bs, -1).contiguous()
x_out = x_out.view(bs, 128, seq_len).contiguous()
x1 = x_out.permute(0, 2, 1).contiguous()
x1, _ = self.fea_lstm(x1)
x1 = x1.view(bs, 1, seq_len, self.hidden_dim * 2).contiguous()
x1 = x1.permute(0, 3, 2, 1).contiguous()
x1 = self.fea_secon_final(x1)
x1 = x1.view(bs, -1).contiguous()
out0 = x0 + x1
out = self.classifier(out0)
return out
| 46.934783
| 150
| 0.528022
|
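A shape-checking sketch for SimpleRNN above, inferred from its forward pass: the first element of the input tuple must have shape (batch, 24, 512), since the 1x1 convolution stack expects 512 input channels and the final convolutions use a kernel height of 24, while the second tuple element is unpacked but unused. The import path is assumed from the repository layout.

import torch
from models.AttentionModel.rnn import SimpleRNN   # assumed import path

model = SimpleRNN(in_dim=128, hidden_dim=96, n_layers=2, drop_out=0.5, num_classes=3)
model.eval()
x = torch.randn(4, 24, 512)          # (batch, seq_len=24, fea_len=512)
with torch.no_grad():
    out = model((x, None))           # second tuple element is ignored by forward()
print(out.shape)                     # torch.Size([4, 3])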
1bc8ea4dcddcc42e46c1ddf066dc0565212b4cd1
| 1,398
|
py
|
Python
|
oarepo_sitemaps/version.py
|
Alzpeta/oarepo-sitemaps
|
9dbf119c1cd12d3f1ff707a411bf0a601a4a4ff6
|
[
"MIT"
] | null | null | null |
oarepo_sitemaps/version.py
|
Alzpeta/oarepo-sitemaps
|
9dbf119c1cd12d3f1ff707a411bf0a601a4a4ff6
|
[
"MIT"
] | null | null | null |
oarepo_sitemaps/version.py
|
Alzpeta/oarepo-sitemaps
|
9dbf119c1cd12d3f1ff707a411bf0a601a4a4ff6
|
[
"MIT"
] | 1
|
2021-05-24T08:11:07.000Z
|
2021-05-24T08:11:07.000Z
|
#
# Copyright (c) 2020 CESNET Prague.
#
# version.py is part of OAREPO repository
# (see https://github.com/oarepo).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""Version information for oarepo_sitemaps repository.
This file is imported by ``oarepo_sitemaps.__init__``,
and parsed by ``setup.py``.
"""
from __future__ import absolute_import, print_function
__version__ = '1.0.0'
| 41.117647
| 80
| 0.771102
|
fc2db3d58b50ef884bf24935a50e47ec56962536
| 11,550
|
py
|
Python
|
ebs_clone_decrypted/cli.py
|
koiker/ebs-clone-decrypted
|
0bfe25f56743ac4ee31a6ef3f12b1a742a9a8637
|
[
"Apache-2.0"
] | null | null | null |
ebs_clone_decrypted/cli.py
|
koiker/ebs-clone-decrypted
|
0bfe25f56743ac4ee31a6ef3f12b1a742a9a8637
|
[
"Apache-2.0"
] | null | null | null |
ebs_clone_decrypted/cli.py
|
koiker/ebs-clone-decrypted
|
0bfe25f56743ac4ee31a6ef3f12b1a742a9a8637
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Amazon AWS Clone an Encrypted EBS to a Decrypted one
Inputs:
- instance-id
- vol-id
1 - Start aux instance => Lambda
2 - Stop source instance => OK
3 - Snapshot source EBS => OK
4 - Mount source EBS => OK
5 - Create new EBS => OK
6 - Mount new EBS => OK
7 - Prepare source EBS to be copied
8 - Copy data
9 - Unmount source EBS
10 - Delete source EBS
11 - Unmount new EBS
12 - Attach new EBS to source instance
13 - Start source instance
14 - Stop aux instance
"""
import subprocess
import boto3
import click
import requests
class CloneEbsDecrypted:
def __init__(self, source_instance_id, source_volume_id, new_size):
# This class is a refactor of the original procedural code
# TODO: pass _snapshotON, _snapshot and other properties as parameters
# Main variables
self.source_instance_id = source_instance_id
self.source_volume_id = source_volume_id
self.new_size = new_size
self.local_instance_id = None
# Initialize Boto3 AWS
self.ec2_client = boto3.client('ec2')
self.ec2_resource = boto3.resource('ec2')
# Turn on/off backups
self._snapshotON = False
# Devices to be attached on this AUX instance
self.aux_source_device = '/dev/sds'
self.aux_target_device = '/dev/sdt'
# Generated snapshot
self._snapshot = None
self.source_device = None
self.new_volume_id = None
# Stop instance
def stop_instance(self, instance_id):
print("Instance %s Stopping..." % instance_id)
self.ec2_client.stop_instances(InstanceIds=[instance_id])
stop_waiter = self.ec2_client.get_waiter('instance_stopped')
stop_waiter.wait(InstanceIds=[instance_id])
print("Instance %s Stopped." % instance_id)
# Generate Snapshot for source volume
def snapshot(self, volume_id):
source_volume = self.ec2_resource.Volume(volume_id)
if self._snapshotON:
print("Snapshot started... %s" % source_volume)
self._snapshot = source_volume.create_snapshot(
Description='EBS-CLONE',
TagSpecifications=[
{'ResourceType': 'snapshot', 'Tags': [{'Key': 'EBS-CLONE', 'Value': 'EBS-CLONE-BKP'}]}
]
)
snapshot_waiter = self.ec2_client.get_waiter('snapshot_completed')
snapshot_waiter.wait(Filters=[{'Name': 'volume-id', 'Values': [volume_id]}])
print("Snapshot completed: %s" % self._snapshot.id)
# Detach source EBS from source Instance
def detach_volume(self, detach_volume_id, detach_instance_id):
source_volume = self.ec2_resource.Volume(detach_volume_id)
        # filter() returns an iterator on Python 3, so materialise it before using len()/pop()
        attached_volumes = list(filter(
            lambda attach: attach['State'] == 'attached' and attach['InstanceId'] == detach_instance_id,
            source_volume.attachments))
        source_device = None
        if len(attached_volumes) > 0:
            source_device = attached_volumes.pop()['Device']
print("Volume %s is being detached..." % detach_volume_id)
source_volume.detach_from_instance(Device=source_device, InstanceId=detach_instance_id)
waiter_detach = self.ec2_client.get_waiter('volume_available')
waiter_detach.wait(VolumeIds=[detach_volume_id])
print("Volume %s detached." % detach_volume_id)
return source_device
# Attach source EBS to Instance
def attach_volume(self, attach_volume_id, attach_instance_id, attach_device):
source_volume = self.ec2_resource.Volume(attach_volume_id)
        attached_volumes = list(filter(lambda attach: attach['State'] == 'attached', source_volume.attachments))
if len(attached_volumes) == 0:
print("Volume %s is being attached to instance %s at device %s..." % (
attach_volume_id, attach_instance_id, attach_device))
response_attach = source_volume.attach_to_instance(Device=attach_device, InstanceId=attach_instance_id)
waiter_attach = self.ec2_client.get_waiter('volume_in_use')
waiter_attach.wait(VolumeIds=[attach_volume_id],
Filters=[{'Name': 'attachment.status', 'Values': ['attached']}])
print("Volume %s attached to instance %s at device %s." % (
attach_volume_id, attach_instance_id, attach_device))
# Create EBS From Volume or Snapshot
def create_volume_from_existing_volume(self, volume_id, new_size=None, snapshot_id=None):
source_volume = self.ec2_resource.Volume(volume_id)
if new_size is None:
new_size = source_volume.size
# ST1 and SC1 min size is 500
if source_volume.volume_type in ('sc1', 'st1') and new_size < 500:
new_size = 500
new_volume_dict = {
'AvailabilityZone': source_volume.availability_zone,
'Encrypted': None,
'Iops': source_volume.iops,
'KmsKeyId': None,
'Size': new_size,
'VolumeType': source_volume.volume_type,
'TagSpecifications': [{'ResourceType': 'volume', 'Tags': self.create_tag_specifications(volume_id)}]
}
# Remove None attributes
new_volume_dict = dict(filter(lambda item: item[1] is not None, new_volume_dict.items()))
# Remove iops attribute if creating GP2
is_gp2 = False
if source_volume.volume_type == 'gp2':
is_gp2 = True
        new_volume_dict = dict((k, v) for k, v in new_volume_dict.items() if k != 'Iops' or not is_gp2)
# Remove Size and add Snapshot if from snapshot
if snapshot_id is not None:
del new_volume_dict['Size']
new_volume_dict['SnapshotId'] = snapshot_id
print("Creating new EBS volume... %s" % new_volume_dict)
response_create_volume = self.ec2_client.create_volume(**new_volume_dict)
new_volume_id = response_create_volume['VolumeId']
waiter_create_volume = self.ec2_client.get_waiter('volume_available')
waiter_create_volume.wait(VolumeIds=[new_volume_id])
print("New EBS created: \n%s" % response_create_volume)
return new_volume_id
# Delete source Volume
def delete_volume(self, volume_id):
print("Volume %s is being deleted..." % volume_id)
response_delete_volume = self.ec2_client.delete_volume(VolumeId=volume_id)
print("Volume %s deleted." % volume_id)
# Create new TAG set for volume with EBS-CLONE as key
def create_tag_specifications(self, local_source_volume_id, new_tag_value="EBS-CLONE-CREATED"):
local_source_volume = self.ec2_resource.Volume(local_source_volume_id)
local_new_tags = None
# Add a new tag
if local_source_volume.tags is not None and any(d['Key'] == 'EBS-CLONE' for d in local_source_volume.tags):
local_new_tags = local_source_volume.tags
else:
local_new_tags = [{'Value': new_tag_value, 'Key': 'EBS-CLONE'}]
if local_source_volume.tags:
local_new_tags = local_source_volume.tags + local_new_tags
return local_new_tags
# Prepare and Copy source volume
def prepare_and_copy_volume(self, source_device, target_device):
output = None
# Copy the volume (clone partitions)
try:
print("Start to copy device %s to %s ..." % (source_device, target_device))
output = subprocess.check_output(["sudo", "dd", "bs=16M", "if=" + source_device, "of=" + target_device,
"status=progress", "oflag=direct"])
print(output)
output = subprocess.check_output(["sync"])
print(output)
except subprocess.CalledProcessError as e:
output = e.output
print(output)
self.rollback()
exit(-1)
# Start source instance
def start_instance(self, instance_id):
print("Source instance %s Starting..." % instance_id)
self.ec2_client.start_instances(InstanceIds=[instance_id])
stop_waiter = self.ec2_client.get_waiter('instance_running')
stop_waiter.wait(InstanceIds=[instance_id])
print("Source instance %s Started." % instance_id)
# Rollback: recover snapshot and start instance
def rollback(self):
print("Rollback started...")
source_volume = self.ec2_resource.Volume(self.source_volume_id)
# Create new EBS volume from backup
# newVolumeId = create_volume_from_existing_volume(volumeId = _source_volume_id, snapshotId = self._snapshot.id)
# Detach and attach source_volume
if self.source_volume_id:
self.detach_volume(self.source_volume_id, self.local_instance_id)
# Attach new EBS to original Instance
self.attach_volume(attach_volume_id=self.source_volume_id,
attach_instance_id=self.source_instance_id,
attach_device=self.source_device)
self.start_instance(self.source_instance_id)
# Detach and delete generated volume
if self.new_volume_id:
self.detach_volume(self.new_volume_id, self.local_instance_id)
self.delete_volume(self.new_volume_id)
print("Rollback finished.")
exit(-1)
def run(self):
# Retrieve AUX instance info
        local_instance_id = requests.get('http://169.254.169.254/latest/meta-data/instance-id').text
        self.local_instance_id = local_instance_id  # keep a copy so rollback() can detach from this instance
# Stop source instance
self.stop_instance(instance_id=self.source_instance_id)
# Generate Snapshot for source volume
self.snapshot(self.source_volume_id)
# Detach source EBS from source Instance
source_device = self.detach_volume(self.source_volume_id, self.source_instance_id)
# Attach source EBS to AUX Instance
self.attach_volume(self.source_volume_id, local_instance_id, self.aux_source_device)
# Create new reduced EBS volume
new_volume_id = self.create_volume_from_existing_volume(volume_id=self.source_volume_id)
# Attach new EBS to AUX Instance
self.attach_volume(new_volume_id, local_instance_id, self.aux_target_device)
# Prepare and Copy source volume to new reduced one
self.prepare_and_copy_volume(self.aux_source_device, self.aux_target_device)
# Detach new EBS from AUX Instance
self.detach_volume(new_volume_id, local_instance_id)
# Attach new EBS to original Instance
self.attach_volume(new_volume_id, self.source_instance_id, source_device)
# Start source instance
self.start_instance(self.source_instance_id)
# Detach source EBS from AUX Instance
self.detach_volume(self.source_volume_id, local_instance_id)
# Delete source Volume
self.delete_volume(self.source_volume_id)
# Inputs
@click.command()
@click.option('-si', '--instance-id', help='Source Instance Id. Ex: i-095c3fa3d1688eaa3 ')
@click.option('-sv', '--volume-id', help='Source Volume Id. Ex: vol-0a49b7a908e747385')
@click.option('-ns', '--new-size', help='New Size. Default will use the source size')
def main(**kwargs):
source_instance_id = kwargs.pop('instance_id')
source_volume_id = kwargs.pop('volume_id')
new_size = kwargs.pop('new_size')
decrypt_ebs = CloneEbsDecrypted(source_instance_id, source_volume_id, new_size)
decrypt_ebs.run()
if __name__ == "__main__":
main()
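# Usage sketch (illustrative; the script name and IDs below are placeholders): run the
# tool from the AUX instance that has the spare device slots available, e.g.
#   python clone_ebs.py -si i-0123456789abcdef0 -sv vol-0123456789abcdef0 -ns 20
# It stops the source instance, snapshots and detaches its volume, dd-copies the data
# onto a newly created volume, and finally swaps the new volume back onto the instance.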
| 39.827586
| 120
| 0.661385
|
72185f288a9ad15f18fb64a38bcde4ec58ea0fd1
| 13,476
|
py
|
Python
|
neutron/services/network_segment_range/plugin.py
|
knodir/neutron
|
ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8
|
[
"Apache-2.0"
] | null | null | null |
neutron/services/network_segment_range/plugin.py
|
knodir/neutron
|
ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8
|
[
"Apache-2.0"
] | null | null | null |
neutron/services/network_segment_range/plugin.py
|
knodir/neutron
|
ac4e28478ac8a8a0c9f5c5785f6a6bcf532c66b8
|
[
"Apache-2.0"
] | 1
|
2019-03-13T17:05:02.000Z
|
2019-03-13T17:05:02.000Z
|
# Copyright (c) 2019 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import network_segment_range as range_def
from neutron_lib import constants as const
from neutron_lib.db import api as db_api
from neutron_lib import exceptions as lib_exc
from neutron_lib.exceptions import network_segment_range as range_exc
from neutron_lib.plugins import directory
from neutron_lib.plugins import utils as plugin_utils
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log
import six
from neutron._i18n import _
from neutron.db import segments_db
from neutron.extensions import network_segment_range as ext_range
from neutron.objects import base as base_obj
from neutron.objects import network_segment_range as obj_network_segment_range
LOG = log.getLogger(__name__)
def is_network_segment_range_enabled():
network_segment_range_class = ('neutron.services.network_segment_range.'
'plugin.NetworkSegmentRangePlugin')
return any(p in cfg.CONF.service_plugins
for p in ['network_segment_range', network_segment_range_class])
class NetworkSegmentRangePlugin(ext_range.NetworkSegmentRangePluginBase):
"""Implements Neutron Network Segment Range Service plugin."""
supported_extension_aliases = [range_def.ALIAS]
__native_pagination_support = True
__native_sorting_support = True
__filter_validation_support = True
def __init__(self):
super(NetworkSegmentRangePlugin, self).__init__()
self.type_manager = directory.get_plugin().type_manager
self.type_manager.initialize_network_segment_range_support()
def _get_network_segment_range(self, context, id):
obj = obj_network_segment_range.NetworkSegmentRange.get_object(
context, id=id)
if obj is None:
raise range_exc.NetworkSegmentRangeNotFound(range_id=id)
return obj
def _validate_network_segment_range_eligible(self, network_segment_range):
range_data = (network_segment_range.get('minimum'),
network_segment_range.get('maximum'))
# Currently, network segment range only supports VLAN, VxLAN,
# GRE and Geneve.
if network_segment_range.get('network_type') == const.TYPE_VLAN:
plugin_utils.verify_vlan_range(range_data)
else:
plugin_utils.verify_tunnel_range(
range_data, network_segment_range.get('network_type'))
def _validate_network_segment_range_overlap(self, context,
network_segment_range):
filters = {
'default': False,
'network_type': network_segment_range['network_type'],
'physical_network': (network_segment_range['physical_network']
if network_segment_range['network_type'] ==
const.TYPE_VLAN else None),
}
range_objs = obj_network_segment_range.NetworkSegmentRange.get_objects(
context, **filters)
overlapped_range_id = [
obj.id for obj in range_objs if
(network_segment_range['minimum'] <= obj.maximum and
network_segment_range['maximum'] >= obj.minimum)]
if overlapped_range_id:
raise range_exc.NetworkSegmentRangeOverlaps(
range_id=', '.join(overlapped_range_id))
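    # Illustrative note (not part of the original source): the check above is the standard
    # closed-interval overlap test -- the new range conflicts with an existing one when
    # new.minimum <= existing.maximum and new.maximum >= existing.minimum. For example, a
    # new VLAN range 100-200 overlaps an existing 150-300 (100 <= 300 and 200 >= 150),
    # while 100-200 and 201-300 do not overlap.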
def _add_unchanged_range_attributes(self, updates, existing):
"""Adds data for unspecified fields on incoming update requests."""
for key, value in six.iteritems(existing):
updates.setdefault(key, value)
return updates
def _is_network_segment_range_referenced(self, context,
network_segment_range):
return segments_db.network_segments_exist_in_range(
context, network_segment_range['network_type'],
network_segment_range.get('physical_network'),
network_segment_range)
def _is_network_segment_range_type_supported(self, network_type):
if not (self.type_manager.network_type_supported(network_type) and
network_type in const.NETWORK_SEGMENT_RANGE_TYPES):
# TODO(kailun): To use
# range_exc.NetworkSegmentRangeNetTypeNotSupported when the
# neutron-lib patch https://review.openstack.org/640777 is merged
# and released.
message = _("Network type %s does not support "
"network segment ranges.") % network_type
raise lib_exc.BadRequest(resource=range_def.RESOURCE_NAME,
msg=message)
return True
def _are_allocated_segments_in_range_impacted(self, context,
existing_range,
updated_range):
updated_range_min = updated_range.get('minimum',
existing_range['minimum'])
updated_range_max = updated_range.get('maximum',
existing_range['maximum'])
existing_range_min, existing_range_max = (
segments_db.min_max_actual_segments_in_range(
context, existing_range['network_type'],
existing_range.get('physical_network'), existing_range))
if existing_range_min and existing_range_max:
return bool(updated_range_min >= existing_range_min or
updated_range_max <= existing_range_max)
return False
@log_helpers.log_method_call
def create_network_segment_range(self, context, network_segment_range):
"""Check network types supported on network segment range creation."""
range_data = network_segment_range['network_segment_range']
if self._is_network_segment_range_type_supported(
range_data['network_type']):
with db_api.CONTEXT_WRITER.using(context):
self._validate_network_segment_range_eligible(range_data)
self._validate_network_segment_range_overlap(context,
range_data)
network_segment_range = (
obj_network_segment_range.NetworkSegmentRange(
context, name=range_data['name'],
description=range_data.get('description'),
default=False,
shared=range_data['shared'],
project_id=(range_data['project_id']
if not range_data['shared'] else None),
network_type=range_data['network_type'],
physical_network=(range_data['physical_network']
if range_data['network_type'] ==
const.TYPE_VLAN else None),
minimum=range_data['minimum'],
maximum=range_data['maximum'])
)
network_segment_range.create()
self.type_manager.update_network_segment_range_allocations(
network_segment_range['network_type'])
return network_segment_range.to_dict()
@log_helpers.log_method_call
def get_network_segment_range(self, context, id, fields=None):
network_segment_range = self._get_network_segment_range(
context, id)
return network_segment_range.to_dict(fields=fields)
@log_helpers.log_method_call
def get_network_segment_ranges(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
# TODO(kailun): Based on the current spec:
# https://review.openstack.org/599980, this method call may
# possibly return a large amount of data since ``available``
# segment list and ``used`` segment/project mapping will be also
# returned and they can be large sometimes. Considering that this
# API is admin-only and list operations won't be called often based
# on the use cases, we'll keep this open for now and evaluate the
# potential impacts. An alternative is to return the ``available``
# and ``used`` segment number or percentage.
pager = base_obj.Pager(sorts, limit, page_reverse, marker)
filters = filters or {}
network_segment_ranges = (
obj_network_segment_range.NetworkSegmentRange.get_objects(
context, _pager=pager, **filters))
return [
network_segment_range.to_dict(fields=fields)
for network_segment_range in network_segment_ranges
]
@log_helpers.log_method_call
def update_network_segment_range(self, context, id, network_segment_range):
"""Check existing network segment range impact on range updates."""
updated_range_data = network_segment_range['network_segment_range']
with db_api.CONTEXT_WRITER.using(context):
network_segment_range = self._get_network_segment_range(context,
id)
existing_range_data = network_segment_range.to_dict()
if existing_range_data['default']:
# TODO(kailun): To use
# range_exc.NetworkSegmentRangeDefaultReadOnly when the
# neutron-lib patch https://review.openstack.org/640777 is
# merged and released.
message = _("Network Segment Range %s is a "
"default segment range which could not be "
"updated or deleted.") % id
raise lib_exc.BadRequest(resource=range_def.RESOURCE_NAME,
msg=message)
if self._are_allocated_segments_in_range_impacted(
context,
existing_range=existing_range_data,
updated_range=updated_range_data):
# TODO(kailun): To use
# range_exc.NetworkSegmentRangeReferencedByProject when the
# neutron-lib patch https://review.openstack.org/640777 is
# merged and released.
message = _("Network Segment Range %s is referenced by "
"one or more tenant networks.") % id
raise lib_exc.InUse(resource=range_def.RESOURCE_NAME,
msg=message)
new_range_data = self._add_unchanged_range_attributes(
updated_range_data, existing_range_data)
self._validate_network_segment_range_eligible(new_range_data)
network_segment_range.update_fields(new_range_data)
network_segment_range.update()
self.type_manager.update_network_segment_range_allocations(
network_segment_range['network_type'])
return network_segment_range.to_dict()
@log_helpers.log_method_call
def delete_network_segment_range(self, context, id):
"""Check segment reference on network segment range deletion."""
with db_api.CONTEXT_WRITER.using(context):
network_segment_range = self._get_network_segment_range(context,
id)
range_data = network_segment_range.to_dict()
if range_data['default']:
# TODO(kailun): To use
# range_exc.NetworkSegmentRangeDefaultReadOnly when the
# neutron-lib patch https://review.openstack.org/640777 is
# merged and released.
message = _("Network Segment Range %s is a "
"default segment range which could not be "
"updated or deleted.") % id
raise lib_exc.BadRequest(resource=range_def.RESOURCE_NAME,
msg=message)
if self._is_network_segment_range_referenced(
context, range_data):
# TODO(kailun): To use
# range_exc.NetworkSegmentRangeReferencedByProject when the
# neutron-lib patch https://review.openstack.org/640777 is
# merged and released.
message = _("Network Segment Range %s is referenced by "
"one or more tenant networks.") % id
raise lib_exc.InUse(resource=range_def.RESOURCE_NAME,
msg=message)
network_segment_range.delete()
self.type_manager.update_network_segment_range_allocations(
network_segment_range['network_type'])
| 49.003636
| 79
| 0.625631
|
f9768c5539f99760bf4d518402a5f499011f22f6
| 153
|
py
|
Python
|
problemsets/Codeforces/Python/A1252.py
|
juarezpaulino/coderemite
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
[
"Apache-2.0"
] | null | null | null |
problemsets/Codeforces/Python/A1252.py
|
juarezpaulino/coderemite
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
[
"Apache-2.0"
] | null | null | null |
problemsets/Codeforces/Python/A1252.py
|
juarezpaulino/coderemite
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
[
"Apache-2.0"
] | null | null | null |
"""
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
n=int(input())
print(*[x%n+1 for x in map(int,input().split())])
| 19.125
| 49
| 0.614379
|
128ac6788d9ce7c632a2da841258015e89838f0e
| 13,417
|
py
|
Python
|
main.py
|
bknyaz/s
|
4a4a0e651856625e730e8fe83ffe5c15edacc3fa
|
[
"MIT"
] | 65
|
2020-05-19T01:45:52.000Z
|
2022-03-26T09:22:15.000Z
|
main.py
|
bknyaz/s
|
4a4a0e651856625e730e8fe83ffe5c15edacc3fa
|
[
"MIT"
] | 7
|
2020-05-25T21:50:17.000Z
|
2021-06-29T07:32:46.000Z
|
main.py
|
bknyaz/s
|
4a4a0e651856625e730e8fe83ffe5c15edacc3fa
|
[
"MIT"
] | 12
|
2020-05-22T01:51:25.000Z
|
2021-11-21T19:53:45.000Z
|
"""
Training script for Scene Graph Generation (or Scene Graph Prediction).
The script allows to reproduce the main experiments from our two papers:
[1] Boris Knyazev, Harm de Vries, Cătălina Cangea, Graham W. Taylor, Aaron Courville, Eugene Belilovsky.
Graph Density-Aware Losses for Novel Compositions in Scene Graph Generation. BMVC 2020. https://arxiv.org/abs/2005.08230
[2] Boris Knyazev, Harm de Vries, Cătălina Cangea, Graham W. Taylor, Aaron Courville, Eugene Belilovsky.
Generative Compositional Augmentations for Scene Graph Prediction. ICCV 2021. https://arxiv.org/abs/2007.05756
A large portion of this repo is based on https://github.com/rowanz/neural-motifs (MIT License).
For the paper [2], some GAN layers are based on https://github.com/google/sg2im (Apache-2.0 License).
Example to train IMP++ with GAN and GraphN scene graph perturbations:
python main.py -ckpt ./data/VG/vg-faster-rcnn.tar -gan -largeD -loss dnorm -perturb graphn -vis_cond ./data/VG/features.hdf5
"""
from config import *
from dataloaders.visual_genome import VGDataLoader, VG
conf = ModelConfig()
VG.split = conf.split # set VG, GQA or VTE split here to use as a global variable
from os.path import join
import pandas as pd
import time
import pickle
from sgg_models.rel_model_stanford import RelModelStanford
from lib.pytorch_misc import *
from lib.losses import node_losses, edge_losses
from lib.eval import val_epoch
from augment.gan import GAN
from augment.sg_perturb import SceneGraphPerturb
# Load VG data
train_loader, eval_loaders = VGDataLoader.splits(data_dir=conf.data,
batch_size=conf.batch_size,
num_workers=conf.num_workers,
num_gpus=conf.num_gpus,
is_cuda=conf.device=='cuda',
backbone=conf.backbone,
square_pad=True,
num_val_im=conf.val_size,
filter_non_overlap=conf.mode=='sgdet',
exclude_left_right=conf.exclude_left_right,
min_graph_size=conf.min_graph_size,
max_graph_size=conf.max_graph_size)
# Define SGG model
sgg_model = RelModelStanford(train_data=train_loader.dataset,
mode=conf.mode,
use_bias=conf.use_bias,
test_bias=conf.test_bias,
backbone=conf.backbone,
RELS_PER_IMG=conf.rels_per_img,
edge_model=conf.edge_model)
# Freeze the detector
for n, param in sgg_model.detector.named_parameters():
param.requires_grad = False
gan = GAN(train_loader.dataset.ind_to_classes,
train_loader.dataset.ind_to_predicates,
n_ch=sgg_model.edge_dim,
pool_sz=sgg_model.pool_sz,
fmap_sz=sgg_model.fmap_sz,
vis_cond=conf.vis_cond,
losses=conf.ganlosses,
init_embed=conf.init_embed,
largeD=conf.largeD,
device=conf.device,
data_dir=train_loader.dataset.root) if conf.gan else None
checkpoint_path = None if conf.save_dir is None else join(conf.save_dir, 'vgrel.pth')
start_epoch, ckpt = load_checkpoint(conf, sgg_model, checkpoint_path, gan)
sgg_model.to(conf.device)
if conf.gan:
gan.to(conf.device)
if conf.perturb:
set_seed(start_epoch + 1) # to avoid repeating the same perturbations when reloaded from the checkpoint
sgp = SceneGraphPerturb(method=conf.perturb,
embed_objs=gan.embed_objs,
subj_pred_obj_pairs=(train_loader.dataset.subj_pred_pairs,
train_loader.dataset.pred_obj_pairs),
obj_classes=train_loader.dataset.ind_to_classes,
triplet2str=train_loader.dataset.triplet2str,
L=conf.L, topk=conf.topk, alpha=conf.graphn_a,
uniform=conf.uniform, degree_smoothing=conf.degree_smoothing)
if conf.wandb_log:
wandb.watch(gan, log="all", log_freq=100 if conf.debug else 2000)
if conf.wandb_log:
wandb.watch(sgg_model, log="all", log_freq=100 if conf.debug else 2000)
def train_batch(batch, verbose=False):
set_mode(sgg_model, mode=conf.mode, is_train=True)
res = sgg_model(batch.scatter()) # forward pass through an object detector and an SGG model
# 1. Main SGG model object and relationship classification losses (L_cls)----------------------------------------------
    losses = node_losses(res.rm_obj_dists,   # predicted node label distributions (objects)
                         res.rm_obj_labels)  # ground truth node labels (objects)
loss, edges_fg, edges_bg = edge_losses(res.rel_dists, # predicted edge labels (predicates)
res.rel_labels[:, -1], # ground truth edge labels (predicates)
conf.loss,
return_idx=True,
loss_weights=(conf.alpha, conf.beta, conf.gamma))
losses.update(loss)
optimizer.zero_grad()
loss = sum(losses.values())
loss.backward()
grad_clip(sgg_model, conf.clip, verbose)
optimizer.step()
# ------------------------------------------------------------------------------------------------------------------
# 2. GAN-based updates----------------------------------------------------------------------------------------------
if conf.gan:
gan.train()
# assume a single gpu!
gt_boxes, gt_objects, gt_rels = batch[0][3].clone(), batch[0][4].clone(), batch[0][5].clone()
if conf.perturb:
# Scene Graph perturbations
gt_objects_fake = sgp.perturb(gt_objects.clone(), gt_rels.clone()).clone()
else:
gt_objects_fake = gt_objects.clone()
# Generate visual features conditioned on the SG
fmaps = gan(gt_objects_fake,
sgg_model.get_scaled_boxes(gt_boxes, res.im_inds, res.im_sizes_org),
gt_rels)
# Extract node,edge features from fmaps
nodes_fake, edges_fake = sgg_model.node_edge_features(fmaps, res.rois, res.rel_inds[:, 1:], res.im_sizes)
# Make SGG predictions for the node,edge features
# In case of G update, detach generated features to avoid collaboration between the SGG model and G
obj_dists_fake, rel_dists_fake = sgg_model.predict(nodes_fake if conf.attachG else nodes_fake.detach(),
edges_fake if conf.attachG else edges_fake.detach(),
res.rel_inds,
rois=res.rois,
im_sizes=res.im_sizes)
# 2.1. Generator losses
optimizer.zero_grad()
G_optimizer.zero_grad()
losses_G = {}
losses_G.update(gan.loss(features_fake=nodes_fake, is_nodes=True, labels_fake=gt_objects_fake[:, -1]))
losses_G.update(gan.loss(features_fake=edges_fake, labels_fake=res.rel_labels[:, -1]))
losses_G.update(gan.loss(features_fake=fmaps, is_fmaps=True))
for key in losses_G:
losses_G[key] = conf.ganw * losses_G[key]
if 'rec' in conf.ganlosses:
sfx = '_rec'
losses_G.update(node_losses(obj_dists_fake, gt_objects_fake[:, -1], sfx=sfx))
losses_G.update(edge_losses(rel_dists_fake,
res.rel_labels[:, -1],
conf.loss,
edges_fg, edges_bg,
loss_weights=(conf.alpha, conf.beta, conf.gamma),
sfx=sfx))
if len(losses_G) > 0:
loss = sum(losses_G.values())
loss.backward()
if 'rec' in conf.ganlosses:
grad_clip(sgg_model, conf.clip, verbose)
optimizer.step()
G_optimizer.step()
losses.update(losses_G)
        # 2.2. Discriminator losses
D_optimizer.zero_grad()
losses_D = {}
losses_D.update(gan.loss(res.node_feat, nodes_fake, is_nodes=True, updateD=True, labels_fake=gt_objects_fake[:, -1],
labels_real=gt_objects[:, -1]))
losses_D.update(gan.loss(res.edge_feat, edges_fake, updateD=True, labels_fake=res.rel_labels[:, -1]))
losses_D.update(gan.loss(res.fmap, fmaps, updateD=True, is_fmaps=True))
for key in losses_D:
losses_D[key] = conf.ganw * losses_D[key]
if len(losses_D) > 0:
loss = sum(losses_D.values())
loss.backward()
D_optimizer.step()
losses.update(losses_D)
# ------------------------------------------------------------------------------------------------------------------
# Compute for debugging purpose (not used for backprop)
losses['total'] = sum(losses.values()).detach().data
return pd.Series({x: tensor_item(y) for x, y in losses.items()})
def train_epoch(epoch_num):
print('\nepoch %d, smallest lr %.3e\n' % (epoch_num, get_smallest_lr(optimizer)))
sgg_model.train()
tr = []
start = time.time()
for b, batch in enumerate(train_loader):
tr.append(train_batch(batch, verbose=False))
if conf.wandb_log:
conf.wandb_log(tr[-1], step=sgg_model.global_batch_iter, prefix='loss/')
if b % conf.print_interval == 0 and b >= conf.print_interval:
mn = pd.concat(tr[-conf.print_interval:], axis=1, sort=True).mean(1)
time_per_batch = (time.time() - start) / conf.print_interval
print(mn)
time_eval_batch = time_per_batch
print("\ne{:2d}b{:5d}/{:5d} {:.3f}s/batch, {:.1f}m/epoch ({:.1f}m/epoch including eval)\n".
format(epoch_num, b, len(train_loader),
time_per_batch,
len(train_loader) * time_per_batch / 60,
len(train_loader) * time_eval_batch / 60))
print('-----------', flush=True)
start = time.time()
sgg_model.global_batch_iter += 1
return
optimizer, scheduler = get_optim(sgg_model, conf.lr * conf.num_gpus * conf.batch_size, conf, start_epoch, ckpt)
if conf.gan:
G_optimizer, D_optimizer = get_optim_gan(gan, conf, start_epoch, ckpt)
print("\nTraining %s starts now!" % conf.mode.upper())
for epoch in range(start_epoch + 1, conf.num_epochs):
scheduler.step(epoch) # keep here for consistency with the paper
train_epoch(epoch)
other_states = {'epoch': epoch, 'global_batch_iter': sgg_model.global_batch_iter}
if conf.gan:
other_states.update({'gan': gan.state_dict(),
'G_optimizer': G_optimizer.state_dict(),
'D_optimizer': D_optimizer.state_dict() })
save_checkpoint(sgg_model, optimizer, checkpoint_path, other_states)
if epoch == start_epoch + 1 or (epoch % 5 == 0 and epoch < start_epoch + conf.num_epochs - 1):
# evaluate only once in every 5 epochs since it's time consuming and evaluation is noisy
for name, loader in eval_loaders.items():
if name.startswith('val_'):
val_epoch(conf.mode, sgg_model, loader, name,
train_loader.dataset.triplet_counts,
train_loader.dataset.triplet2str,
save_scores=conf.save_scores,
predicate_weight=conf.pred_weight,
train=train_loader.dataset,
wandb_log=conf.wandb_log)
# Evaluation on the test set here to make the pipeline complete
if conf.notest:
print('evaluation on the test set is skipped due to the notest flag')
else:
all_pred_entries = {}
for name, loader in eval_loaders.items():
if name.startswith('test_'):
all_pred_entries[name] = val_epoch(conf.mode, sgg_model, loader, name,
train_loader.dataset.triplet_counts,
train_loader.dataset.triplet2str,
is_test=True,
save_scores=conf.save_scores,
predicate_weight=conf.pred_weight,
train=train_loader.dataset,
wandb_log=conf.wandb_log)
if conf.save_scores and conf.save_dir is not None:
test_pred_f = join(conf.save_dir, 'test_predictions_%s.pkl' % conf.mode)
print('saving test predictions to %s' % test_pred_f)
with open(test_pred_f, 'wb') as f:
pickle.dump(all_pred_entries, f)
print('done!')
| 46.106529
| 128
| 0.56488
|
877ea0647b455af403bbb9ac9c239f8a5d263a87
| 35,375
|
py
|
Python
|
Smart Cab RL/Smart Cab SARSA/smart_cab_adapted.py
|
MohammadWasil/Flappy-Bird-with-Qlearning-and-SARSA
|
abd57a0617b54ef491cff31ec899ba866995cc63
|
[
"MIT"
] | null | null | null |
Smart Cab RL/Smart Cab SARSA/smart_cab_adapted.py
|
MohammadWasil/Flappy-Bird-with-Qlearning-and-SARSA
|
abd57a0617b54ef491cff31ec899ba866995cc63
|
[
"MIT"
] | null | null | null |
Smart Cab RL/Smart Cab SARSA/smart_cab_adapted.py
|
MohammadWasil/Flappy-Bird-with-Qlearning-and-SARSA
|
abd57a0617b54ef491cff31ec899ba866995cc63
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 22 21:00:43 2021
@author: wasil
"""
# Import the pygame library and initialise the game engine
import pygame
from pygame.locals import *
import random
import pickle
pygame.init()
# to display the text
font = pygame.font.Font(pygame.font.get_default_font(), 36)
font2 = pygame.font.Font(pygame.font.get_default_font(), 18)
IMAGE = pygame.image.load('car4.png')
IMAGE = pygame.transform.scale(IMAGE, (60, 60))
PASSENGER = pygame.image.load('passenger.png')
PASSENGER = pygame.transform.scale(PASSENGER, (60, 60))
DROPOFF = pygame.image.load('loca.png')
DROPOFF = pygame.transform.scale(DROPOFF, (60, 60))
GRAY = (192, 192, 192)
RED = (255, 0, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
LIGHT_BLUE = (173, 216, 230 )
DARK_GRAY = (88,88,88)
LIGHT_GRAY = (169,169,169)
# spaces around the board
SPACING = 20
# spaces between the lines
LINE_SPACING = 92
# The clock will be used to control how fast the screen updates
clock = pygame.time.Clock()
# Open a new window
size = (500, 700)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Smart Cab")
PASSENGER_POS = [[0, 0], [4, 0], [0, 4], [4, 3]]
PASSENGER_PICKED = False
PASSENGER_DROPPED = False
pickup_position = 0
dropoff_position = 0
REWARD = 0
NEW_GAME = False
ACTION = None
CUMULATIVE_REWARD = 0
CUMULATIVE_REWARD_LIST = []
# COUNTER TO SAVE CUMULATIVE_REWARD_LIST
C = 0
C_limit = 10_000
def GameBoard(I=None, J=None):
# Display the Actions / coordinates.
# x=0, y=500, w=500, h=200
action_rect = Rect(0, 500, 500, 200)
# Lower rectangle.
pygame.draw.rect(screen, GRAY, action_rect)
# The main screen
# For game screen
# x=0, y=0, w=500, h=500
game_rect = Rect(0, 0, 500, 500)
# uppper rectangle.
pygame.draw.rect(screen, DARK_GRAY, game_rect)
# The Game Board
# For game screen
# x=0, y=0, w=500, h=500
game_rect = Rect(SPACING, SPACING, 500-(SPACING*2), 500-(SPACING*2))
# game rectangle.
pygame.draw.rect(screen, LIGHT_GRAY, game_rect)
# RED BOUNDARY
pygame.draw.rect(screen, RED, game_rect, 3)
# drop off and pick up location.
# with color light blue
x_imd_point = (LINE_SPACING*(1-1))+20 + ((LINE_SPACING*1)+20 - (LINE_SPACING*(1-1))+20 )/2
y_imd_point = (LINE_SPACING*(5-1))+20 + ((LINE_SPACING*5)+20 - (LINE_SPACING*(5-1))+20 )/2
pygame.draw.rect(screen, LIGHT_BLUE, (x_imd_point-65, y_imd_point-65, 90, 90) )
x_imd_point = (LINE_SPACING*(1-1))+20 + ((LINE_SPACING*1)+20 - (LINE_SPACING*(1-1))+20 )/2
y_imd_point = (LINE_SPACING*(1-1))+20 + ((LINE_SPACING*1)+20 - (LINE_SPACING*(1-1))+20 )/2
pygame.draw.rect(screen, LIGHT_BLUE, (x_imd_point-65, y_imd_point-65, 90, 90) )
x_imd_point = (LINE_SPACING*(5-1))+20 + ((LINE_SPACING*5)+20 - (LINE_SPACING*(5-1))+20 )/2
y_imd_point = (LINE_SPACING*(1-1))+20 + ((LINE_SPACING*1)+20 - (LINE_SPACING*(1-1))+20 )/2
pygame.draw.rect(screen, LIGHT_BLUE, (x_imd_point-65, y_imd_point-65, 90, 90) )
x_imd_point = (LINE_SPACING*(4-1))+20 + ((LINE_SPACING*4)+20 - (LINE_SPACING*(4-1))+20 )/2
y_imd_point = (LINE_SPACING*(5-1))+20 + ((LINE_SPACING*5)+20 - (LINE_SPACING*(5-1))+20 )/2
pygame.draw.rect(screen, LIGHT_BLUE, (x_imd_point-65, y_imd_point-65, 90, 90) )
# lines
for i in range(1,5):
pygame.draw.line(screen, WHITE, ((LINE_SPACING*i)+20, 20), ( (LINE_SPACING*i)+20, 480), 3)
pygame.draw.line(screen, WHITE, (20, (LINE_SPACING*i)+20), ( 480, (LINE_SPACING*i)+20), 3)
# create Walls
pygame.draw.line(screen, RED, ((LINE_SPACING*1)+20, 296), ( (LINE_SPACING*1)+20, 480), 10)
pygame.draw.line(screen, RED, ((LINE_SPACING*3)+20, 296), ( (LINE_SPACING*3)+20, 480), 10)
pygame.draw.line(screen, RED, ((LINE_SPACING*2)+20, 20), ( (LINE_SPACING*2)+20, (LINE_SPACING*1)+20), 10)
if(I != None and J != None):
x, y = coordinate(I, J)
screen.blit(IMAGE, (x-30, y-30) )
def states(x, y):
"""
Input states. Output coordinates.
"""
# i here represent column
for i in range(1, 6):
#if(x > (i*20) and x < (LINE_SPACING*i)+20):
firstline_vertical = (LINE_SPACING*(i-1))+20
secondline_vertical = (LINE_SPACING*i)+20
if(x > firstline_vertical and x < secondline_vertical):
# j here represent row
for j in range(1, 6):
firstline_horizontal = (LINE_SPACING*(j-1))+20
secondline_horizontal = (LINE_SPACING*j)+20
if (y > firstline_horizontal and y < secondline_horizontal):
return j-1, i-1
def coordinate(I, J):
"""
Input coordinates. Output states.
"""
cumulative_linespacing_Y = 0
cumulative_linespacing_X = 0
for i in range(1, 6):
cumulative_linespacing_Y += LINE_SPACING
if (i-1 == I):
            # here, 26 is a hardcoded number
# Changes the position Left or Right.
y = (cumulative_linespacing_Y)-26#+(LINE_SPACING*(i-1))
for j in range(1, 6):
cumulative_linespacing_X += LINE_SPACING
if (j-1 == J):
x = (cumulative_linespacing_X)-26#+(LINE_SPACING*(i-1))
return x, y
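# Illustrative round trip (derived from the two helpers above, with LINE_SPACING = 92):
# coordinate(0, 0) returns the pixel position (66, 66) inside the top-left cell and
# states(66, 66) maps it back to the grid state (0, 0); likewise coordinate(4, 0) gives
# (66, 434) and states(66, 434) returns (4, 0).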
def rot_center(image, angle):
"""rotate an image while keeping its center and size"""
orig_rect = image.get_rect()
rot_image = pygame.transform.rotate(image, angle)
rot_rect = orig_rect.copy()
rot_rect.center = rot_image.get_rect().center
rot_image = rot_image.subsurface(rot_rect).copy()
return rot_image
def cell_position_key(action, x, y):
if action == "RIGHT":
x = x+92
rot_image = rot_center(IMAGE, -90)
screen.blit(rot_image, (x-30, y-30))
if action == "LEFT":
x = x-92
rot_image = rot_center(IMAGE, 90)
screen.blit(rot_image, (x-30, y-30))
if action == "UP":
y = y-92
screen.blit(IMAGE, (x-30, y-30))
if action == "DOWN":
y = y+92
rot_image = rot_center(IMAGE, 180)
screen.blit(rot_image, (x-30, y-30))
return int(x), int(y)
def passenger_update(pickup_position):
global PASSENGER_PICKED
if(PASSENGER_PICKED == False):
# get the passenger states
passenger_states = PASSENGER_POS[pickup_position]
# display the sprite
passenger_sprite(passenger_states)
def dropoff_update(dropoff_position):
global PASSENGER_DROPPED
if PASSENGER_DROPPED == False:
# dropoff location
dropoff_states = PASSENGER_POS[dropoff_position]
# display the sprite
dropoff_sprite(dropoff_states)
def passenger_sprite(passenger_states):
"""
To display the passenger sprite
"""
# converts states into coordinates
x, y = coordinate(passenger_states[0], passenger_states[1])
screen.blit(PASSENGER, ((x-30, y-30)))
def dropoff_sprite(dropoff_states):
"""
To display the dropoff sprite
"""
# converts states into coordinates
x, y = coordinate(dropoff_states[0], dropoff_states[1])
screen.blit(DROPOFF, ((x-30, y-30)))
def game_logic(I, J, p_I, p_J, pickup_position, dropoff_position):
global PASSENGER_PICKED
global PASSENGER_DROPPED
global REWARD
global ACTION
global CUMULATIVE_REWARD
# dropoff location, I and J
d_I = PASSENGER_POS[dropoff_position][0]
d_J = PASSENGER_POS[dropoff_position][1]
# and then update the text
text_surface = font2.render(f'Drop off location - {d_I}{d_J}'.format(d_I, d_J), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,625))
# if we havent pick up the passenger yet
if PASSENGER_PICKED == False:
# at every step
REWARD = -1
# if the cab tries to pick the passenger from wrong location.
wrong_pick_up = [i for i in [0, 1, 2, 3] if i != pickup_position]
if ( (PASSENGER_POS[wrong_pick_up[0]][0] == I) and (PASSENGER_POS[wrong_pick_up[0]][1] == J)):
REWARD = -10
elif ( (PASSENGER_POS[wrong_pick_up[1]][0] == I) and (PASSENGER_POS[wrong_pick_up[1]][1] == J)):
REWARD = -10
elif ( (PASSENGER_POS[wrong_pick_up[2]][0] == I) and (PASSENGER_POS[wrong_pick_up[2]][1] == J)):
REWARD = -10
# first check the car and the passenger positions
elif( I == p_I and J == p_J):
p_I = I
p_J = J
REWARD = 30
PASSENGER_PICKED = True
# now the passenger location is same as car's location
return p_I, p_J, REWARD, PASSENGER_PICKED
    # if the passenger is picked up, the passenger's location is the same as the cab's location.
if(PASSENGER_PICKED == True):
# the cab's location is now same as passenger's location.
p_I = I
p_J = J
# if the cab tries to DROP OFF the passenger TO wrong location.
wrong_drop_off = [i for i in [0, 1, 2, 3] if (i != dropoff_position) ] # and (i != pickup_position)
if ( (PASSENGER_POS[wrong_drop_off[0]][0] == I) and (PASSENGER_POS[wrong_drop_off[0]][1] == J)):
REWARD = -10
elif ( (PASSENGER_POS[wrong_drop_off[1]][0] == I) and (PASSENGER_POS[wrong_drop_off[1]][1] == J)):
REWARD = -10
elif ( (PASSENGER_POS[wrong_drop_off[2]][0] == I) and (PASSENGER_POS[wrong_drop_off[2]][1] == J)):
REWARD = -10
else:
REWARD = -1
return p_I, p_J, REWARD, PASSENGER_PICKED
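# Reward recap (derived from game_logic above and the main loop below): each move costs -1,
# stepping onto a marked cell other than the current pickup/dropoff target costs -10,
# a successful pickup gives +30, and a successful dropoff (awarded in the main loop) gives
# +20 and starts a new episode.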
def restart(I, J, p_I, p_J):
global PASSENGER_PICKED
global PASSENGER_DROPPED
global pickup_position
global dropoff_position
global NEW_GAME
# pickup location of the passenger, a new location
pickup_position = random.randint(0, 3)
# new p_I and p_J
p_I = PASSENGER_POS[pickup_position][0]
p_J = PASSENGER_POS[pickup_position][1]
    # the dropoff position should be different from the pickup location
    dropoff_position = random.choice([i for i in [0, 1, 2, 3] if i != pickup_position])
PASSENGER_PICKED = False
NEW_GAME = False
passenger_update(pickup_position)
dropoff_update(dropoff_position)
    # check the game logic again, because the newly placed passenger
    # might already be at the cab's current position
p_I, p_J, REWARD, PASSENGER_PICKED = game_logic(I, J, p_I, p_J, pickup_position, dropoff_position)
return NEW_GAME, p_I, p_J, REWARD, PASSENGER_PICKED
def mainGame(get_actions, counter, run, input_):
global pickup_position
global dropoff_position
global PASSENGER_PICKED
global ACTION
global CUMULATIVE_REWARD
global REWARD
global NEW_GAME
global CUMULATIVE_REWARD_LIST, C
GameBoard()
# place the car
# To find the mid point
initial_x = 2
initial_y = 5
x = x_imd_point = ((LINE_SPACING*(initial_x-1))+20) + (((LINE_SPACING*initial_x)+20) - ((LINE_SPACING*(initial_x-1))+20) )/2
y = y_imd_point = ((LINE_SPACING*(initial_y-1))+20) + (((LINE_SPACING*initial_y)+20) - ((LINE_SPACING*(initial_y-1))+20) )/2
screen.blit(IMAGE, (x_imd_point-30, y_imd_point-30))
I, J = states(x, y)
# pickup location of the passenger
pickup_position = random.randint(0, 3)
    # the dropoff position should be different from the pickup location
    dropoff_position = random.choice([i for i in [0, 1, 2, 3] if i != pickup_position])
passenger_update(pickup_position)
dropoff_update(dropoff_position)
p_I = PASSENGER_POS[pickup_position][0]
p_J = PASSENGER_POS[pickup_position][1]
# to display the States
# lower rectangle.
# --- Go ahead and update the screen with what we've drawn.
text_surface = font.render(f'State: {I}, {J}'.format(I, J), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,550))
# update cab's location
text_surface = font2.render(f'Passenger location - {p_I}{p_J}'.format(p_I, p_J), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,600))
# and then update the text
d_I = PASSENGER_POS[dropoff_position][0]
d_J = PASSENGER_POS[dropoff_position][1]
text_surface = font2.render(f'Drop off location - {d_I}{d_J}'.format(d_I, d_J), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,625))
# random initial policy
ACTION = "UP"
# -------- Main Program Loop -----------
game = True
while game:
for event in pygame.event.get(): # User did something
if event.type == pygame.QUIT: # If user clicked close
game = False # Flag that we are done so we exit this loop
"""
elif event.type == pygame.KEYDOWN :
if event.key == pygame.K_RIGHT:
if (J < 4):
if ( (I != 3 or J != 2) and (I != 4 or J != 2) and (I != 3 or J != 0) and (I != 4 or J != 0) and (I != 0 or J != 1) ):
GameBoard()
x, y = cell_position_key("RIGHT", x, y)
I, J = states(x, y)
# check of we dropped the passenger or not.
p_I, p_J, REWARD, PASSENGER_PICKED = game_logic(I, J, p_I, p_J, pickup_position, dropoff_position)
# only drop off when you are at drop off location and already picked up the passenger.
if((I == PASSENGER_POS[dropoff_position][0] and J == PASSENGER_POS[dropoff_position][1]) and (PASSENGER_PICKED == True)):
REWARD = 20
NEW_GAME = True
print(REWARD)
CUMULATIVE_REWARD += REWARD
#ACTION =
#get_actions(I, J, p_I, p_J, dropoff_position, REWARD)
#ACTION = run(I, J, p_I, p_J, dropoff_position)
# display reward
#print("the cumulative reward is: ", CUMULATIVE_REWARD)
text_surface = font2.render(f'Reward-> {REWARD}'.format(REWARD), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,650))
if(NEW_GAME == True):
print("the cumulative reward is: ", CUMULATIVE_REWARD)
counter(input_)
# first remove the previous dropped location
# cumulation of rewards at each episode.
CUMULATIVE_REWARD = 0
GameBoard(I, J)
NEW_GAME, p_I, p_J, REWARD, PASSENGER_PICKED = restart(I, J, p_I, p_J)
# update cab's location
text_surface = font2.render(f'Passenger location - {p_I}{p_J}'.format(p_I, p_J), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,600))
else:
REWARD = -1
else:
REWARD = -1
if event.key == pygame.K_LEFT:
if (J > 0):
if ( (I != 4 or J != 1) and (I != 3 or J != 1) and (I != 3 or J != 3) and (I != 4 or J != 3) and (I != 0 or J != 2) ):
GameBoard()
x, y = cell_position_key("LEFT", x, y)
#print("Coordinate:", x, y)
I, J = states(x, y)
# check of we dropped the passenger or not.
p_I, p_J, REWARD, PASSENGER_PICKED = game_logic(I, J, p_I, p_J, pickup_position, dropoff_position)
# only drop off when you are at drop off location and already picked up the passenger.
if((I == PASSENGER_POS[dropoff_position][0] and J == PASSENGER_POS[dropoff_position][1]) and (PASSENGER_PICKED == True)):
REWARD = 20
NEW_GAME = True
print(REWARD)
CUMULATIVE_REWARD += REWARD
#ACTION =
#get_actions(I, J, p_I, p_J, dropoff_position, REWARD)
#ACTION = run(I, J, p_I, p_J, dropoff_position)
# display reward
#print("the cumulative reward is: ", CUMULATIVE_REWARD)
text_surface = font2.render(f'Reward-> {REWARD}'.format(REWARD), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,650))
if(NEW_GAME == True):
print("the cumulative reward is: ", CUMULATIVE_REWARD)
counter(input_)
# first remove the previous dropped location
# cumulation of rewards at each episode.
CUMULATIVE_REWARD = 0
GameBoard(I, J)
NEW_GAME, p_I, p_J, REWARD, PASSENGER_PICKED = restart(I, J, p_I, p_J)
# update cab's location
text_surface = font2.render(f'Passenger location - {p_I}{p_J}'.format(p_I, p_J), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,600))
else:
REWARD = -1
else:
REWARD = -1
if event.key == pygame.K_UP:
if (I > 0):
GameBoard()
x, y = cell_position_key("UP", x, y)
I, J = states(x, y)
# check of we dropped the passenger or not.
p_I, p_J, REWARD, PASSENGER_PICKED = game_logic(I, J, p_I, p_J, pickup_position, dropoff_position)
# only drop off when you are at drop off location and already picked up the passenger.
if((I == PASSENGER_POS[dropoff_position][0] and J == PASSENGER_POS[dropoff_position][1]) and (PASSENGER_PICKED == True)):
REWARD = 20
NEW_GAME = True
print(REWARD)
CUMULATIVE_REWARD += REWARD
#ACTION =
#get_actions(I, J, p_I, p_J, dropoff_position, REWARD)
#ACTION = run(I, J, p_I, p_J, dropoff_position)
# display reward
#print("the cumulative reward is: ", CUMULATIVE_REWARD)
text_surface = font2.render(f'Reward-> {REWARD}'.format(REWARD), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,650))
if(NEW_GAME == True):
print("the cumulative reward is: ", CUMULATIVE_REWARD)
counter(input_)
# first remove the previous dropped location
# cumulation of rewards at each episode.
CUMULATIVE_REWARD = 0
GameBoard(I, J)
NEW_GAME, p_I, p_J, REWARD, PASSENGER_PICKED = restart(I, J, p_I, p_J)
# update cab's location
text_surface = font2.render(f'Passenger location - {p_I}{p_J}'.format(p_I, p_J), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,600))
else:
REWARD = -1
if event.key == pygame.K_DOWN:
if(I < 4):
GameBoard()
x, y = cell_position_key("DOWN", x, y)
I, J = states(x, y)
# check of we dropped the passenger or not.
p_I, p_J, REWARD, PASSENGER_PICKED = game_logic(I, J, p_I, p_J, pickup_position, dropoff_position)
# only drop off when you are at drop off location and already picked up the passenger.
if((I == PASSENGER_POS[dropoff_position][0] and J == PASSENGER_POS[dropoff_position][1]) and (PASSENGER_PICKED == True)):
REWARD = 20
NEW_GAME = True
print(REWARD)
CUMULATIVE_REWARD += REWARD
#ACTION =
#get_actions(I, J, p_I, p_J, dropoff_position, REWARD)
#ACTION = run(I, J, p_I, p_J, dropoff_position)
# display reward
#print("the cumulative reward is: ", CUMULATIVE_REWARD)
text_surface = font2.render(f'Reward-> {REWARD}'.format(REWARD), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,650))
if(NEW_GAME == True):
print("the cumulative reward is: ", CUMULATIVE_REWARD)
counter(input_)
# first remove the previous dropped location
# cumulation of rewards at each episode.
CUMULATIVE_REWARD = 0
GameBoard(I, J)
NEW_GAME, p_I, p_J, REWARD, PASSENGER_PICKED = restart(I, J, p_I, p_J)
# update cab's location
text_surface = font2.render(f'Passenger location - {p_I}{p_J}'.format(p_I, p_J), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,600))
else:
REWARD = -1
"""
# from RL task
if ACTION == "RIGHT":
if (J < 4):
if ( (I != 3 or J != 2) and (I != 4 or J != 2) and (I != 3 or J != 0) and (I != 4 or J != 0) and (I != 0 or J != 1) ):
GameBoard()
x, y = cell_position_key("RIGHT", x, y)
I, J = states(x, y)
# check If we dropped the passenger or not.
p_I, p_J, REWARD, PASSENGER_PICKED = game_logic(I, J, p_I, p_J, pickup_position, dropoff_position)
# only drop off when you are at drop off location and already picked up the passenger.
if((I == PASSENGER_POS[dropoff_position][0] and J == PASSENGER_POS[dropoff_position][1]) and (PASSENGER_PICKED == True)):
REWARD = 20
NEW_GAME = True
CUMULATIVE_REWARD += REWARD
if input_ == "TRAIN":
ACTION = get_actions(I, J, p_I, p_J, dropoff_position, REWARD)
elif input_ == "RUN":
ACTION = run(I, J, p_I, p_J, dropoff_position)
# display reward
text_surface = font2.render(f'Reward-> {REWARD}'.format(REWARD), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,670))
if(NEW_GAME == True):
CUMULATIVE_REWARD_LIST.append(CUMULATIVE_REWARD)
C+=1
if C % C_limit == 0:
with open(f"cumulative_reward_{C}.txt".format(C), "wb") as fp: #Pickling
pickle.dump(CUMULATIVE_REWARD_LIST, fp)
CUMULATIVE_REWARD_LIST=[]
print("the cumulative reward is: ", CUMULATIVE_REWARD)
counter(input_)
# first remove the previous dropped location
# cumulation of rewards at each episode.
CUMULATIVE_REWARD = 0
GameBoard(I, J)
NEW_GAME, p_I, p_J, REWARD, PASSENGER_PICKED = restart(I, J, p_I, p_J)
# update cab's location
text_surface = font2.render(f'Passenger location - {p_I}{p_J}'.format(p_I, p_J), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,600))
else:
REWARD = -1
if input_ == "TRAIN":
ACTION = get_actions(I, J, p_I, p_J, dropoff_position, REWARD)
elif input_ == "RUN":
ACTION = run(I, J, p_I, p_J, dropoff_position)
else:
REWARD = -1
if input_ == "TRAIN":
ACTION = get_actions(I, J, p_I, p_J, dropoff_position, REWARD)
elif input_ == "RUN":
ACTION = run(I, J, p_I, p_J, dropoff_position)
if ACTION == "LEFT":
if (J > 0):
if ( (I != 4 or J != 1) and (I != 3 or J != 1) and (I != 3 or J != 3) and (I != 4 or J != 3) and (I != 0 or J != 2) ):
GameBoard()
x, y = cell_position_key("LEFT", x, y)
I, J = states(x, y)
# check of we dropped the passenger or not.
p_I, p_J, REWARD, PASSENGER_PICKED = game_logic(I, J, p_I, p_J, pickup_position, dropoff_position)
# only drop off when you are at drop off location and already picked up the passenger.
if((I == PASSENGER_POS[dropoff_position][0] and J == PASSENGER_POS[dropoff_position][1]) and (PASSENGER_PICKED == True)):
REWARD = 20
NEW_GAME = True
CUMULATIVE_REWARD += REWARD
if input_ == "TRAIN":
ACTION = get_actions(I, J, p_I, p_J, dropoff_position, REWARD)
elif input_ == "RUN":
ACTION = run(I, J, p_I, p_J, dropoff_position)
# display reward
text_surface = font2.render(f'Reward-> {REWARD}'.format(REWARD), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,670))
if(NEW_GAME == True):
CUMULATIVE_REWARD_LIST.append(CUMULATIVE_REWARD)
C+=1
if C % C_limit == 0:
with open(f"cumulative_reward_{C}.txt".format(C), "wb") as fp: #Pickling
pickle.dump(CUMULATIVE_REWARD_LIST, fp)
CUMULATIVE_REWARD_LIST=[]
print("the cumulative reward is: ", CUMULATIVE_REWARD)
counter(input_)
# first remove the previous dropped location
# cumulation of rewards at each episode.
CUMULATIVE_REWARD = 0
GameBoard(I, J)
NEW_GAME, p_I, p_J, REWARD, PASSENGER_PICKED = restart(I, J, p_I, p_J)
# update cab's location
text_surface = font2.render(f'Passenger location - {p_I}{p_J}'.format(p_I, p_J), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,600))
else:
REWARD = -1
if input_ == "TRAIN":
ACTION = get_actions(I, J, p_I, p_J, dropoff_position, REWARD)
elif input_ == "RUN":
ACTION = run(I, J, p_I, p_J, dropoff_position)
else:
REWARD = -1
if input_ == "TRAIN":
ACTION = get_actions(I, J, p_I, p_J, dropoff_position, REWARD)
elif input_ == "RUN":
ACTION = run(I, J, p_I, p_J, dropoff_position)
if ACTION == "UP":
if (I > 0):
GameBoard()
x, y = cell_position_key("UP", x, y)
I, J = states(x, y)
# check of we dropped the passenger or not.
p_I, p_J, REWARD, PASSENGER_PICKED = game_logic(I, J, p_I, p_J, pickup_position, dropoff_position)
# only drop off when you are at drop off location and already picked up the passenger.
if((I == PASSENGER_POS[dropoff_position][0] and J == PASSENGER_POS[dropoff_position][1]) and (PASSENGER_PICKED == True)):
REWARD = 20
NEW_GAME = True
CUMULATIVE_REWARD += REWARD
if input_ == "TRAIN":
ACTION = get_actions(I, J, p_I, p_J, dropoff_position, REWARD)
elif input_ == "RUN":
ACTION = run(I, J, p_I, p_J, dropoff_position)
# display reward
text_surface = font2.render(f'Reward-> {REWARD}'.format(REWARD), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,670))
if(NEW_GAME == True):
CUMULATIVE_REWARD_LIST.append(CUMULATIVE_REWARD)
C+=1
if C % C_limit == 0:
with open(f"cumulative_reward_{C}.txt".format(C), "wb") as fp: #Pickling
pickle.dump(CUMULATIVE_REWARD_LIST, fp)
CUMULATIVE_REWARD_LIST=[]
print("the cumulative reward is: ", CUMULATIVE_REWARD)
counter(input_)
# first remove the previous dropped location
# cumulation of rewards at each episode.
CUMULATIVE_REWARD = 0
GameBoard(I, J)
NEW_GAME, p_I, p_J, REWARD, PASSENGER_PICKED = restart(I, J, p_I, p_J)
# update cab's location
text_surface = font2.render(f'Passenger location - {p_I}{p_J}'.format(p_I, p_J), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,600))
else:
REWARD = -1
if input_ == "TRAIN":
ACTION = get_actions(I, J, p_I, p_J, dropoff_position, REWARD)
elif input_ == "RUN":
ACTION = run(I, J, p_I, p_J, dropoff_position)
if ACTION == "DOWN":
if(I < 4):
GameBoard()
x, y = cell_position_key("DOWN", x, y)
I, J = states(x, y)
# check of we dropped the passenger or not.
p_I, p_J, REWARD, PASSENGER_PICKED = game_logic(I, J, p_I, p_J, pickup_position, dropoff_position)
# only drop off when you are at drop off location and already picked up the passenger.
if((I == PASSENGER_POS[dropoff_position][0] and J == PASSENGER_POS[dropoff_position][1]) and (PASSENGER_PICKED == True)):
REWARD = 20
NEW_GAME = True
CUMULATIVE_REWARD += REWARD
if input_ == "TRAIN":
ACTION = get_actions(I, J, p_I, p_J, dropoff_position, REWARD)
elif input_ == "RUN":
ACTION = run(I, J, p_I, p_J, dropoff_position)
# display reward
text_surface = font2.render(f'Reward-> {REWARD}'.format(REWARD), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,670))
if(NEW_GAME == True):
CUMULATIVE_REWARD_LIST.append(CUMULATIVE_REWARD)
C+=1
if C % C_limit == 0:
with open(f"cumulative_reward_{C}.txt".format(C), "wb") as fp: #Pickling
pickle.dump(CUMULATIVE_REWARD_LIST, fp)
CUMULATIVE_REWARD_LIST=[]
print("the cumulative reward is: ", CUMULATIVE_REWARD)
counter(input_)
# first remove the previous dropped location
# cumulation of rewards at each episode.
CUMULATIVE_REWARD = 0
GameBoard(I, J)
NEW_GAME, p_I, p_J, REWARD, PASSENGER_PICKED = restart(I, J, p_I, p_J)
# update cab's location
text_surface = font2.render(f'Passenger location - {p_I}{p_J}'.format(p_I, p_J), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,600))
else:
REWARD = -1
if input_ == "TRAIN":
ACTION = get_actions(I, J, p_I, p_J, dropoff_position, REWARD)
elif input_ == "RUN":
ACTION = run(I, J, p_I, p_J, dropoff_position)
# pick from here
passenger_update(pickup_position)
# drop here
dropoff_update(dropoff_position)
# to display the States
# lower rectangle.
# --- Go ahead and update the screen with what we've drawn.
text_surface = font.render(f'State: {I}, {J}'.format(I, J), False, (255, 255, 255))
screen.blit(text_surface, dest=(150,550))
# --- Limit to 60 frames per second
if input_ == "TRAIN":
clock.tick(0)
#pygame.display.update()
elif input_ == "RUN":
clock.tick(10)
pygame.display.update()
#Once we have exited the main program loop we can stop the game engine:
pygame.quit()
#if __name__ == "__main__":
# create the board
#mainGame()
| 43.67284
| 149
| 0.498544
|
8408087ad7f3f74405e025aba03814d25db3d6b4
| 4,754
|
py
|
Python
|
sources/VS/ThirdParty/wxWidgets/misc/gdb/print.py
|
Sasha7b9Work/S8-53M2
|
fdc9cb5e3feb8055fd3f7885a6f6362f62ff6b6e
|
[
"MIT"
] | 86
|
2015-08-06T11:30:01.000Z
|
2022-02-28T04:50:22.000Z
|
sources/VS/ThirdParty/wxWidgets/misc/gdb/print.py
|
Sasha7b9Work/S8-53M2
|
fdc9cb5e3feb8055fd3f7885a6f6362f62ff6b6e
|
[
"MIT"
] | 6
|
2016-01-04T19:36:22.000Z
|
2021-08-08T02:43:48.000Z
|
sources/VS/ThirdParty/wxWidgets/misc/gdb/print.py
|
Sasha7b9Work/S8-53M2
|
fdc9cb5e3feb8055fd3f7885a6f6362f62ff6b6e
|
[
"MIT"
] | 22
|
2015-11-04T04:04:54.000Z
|
2022-02-28T04:50:24.000Z
|
###############################################################################
# Name: misc/gdb/print.py
# Purpose: pretty-printers for wx data structures: this file is meant to
# be sourced from gdb using "source -p" (or, better, autoloaded
# in the future...)
# Author: Vadim Zeitlin
# Created: 2009-01-04
# Copyright: (c) 2009 Vadim Zeitlin
# Licence: wxWindows licence
###############################################################################
# Define wxFooPrinter class implementing (at least) to_string() method for each
# wxFoo class we want to pretty print. Then just add wxFoo to the types array
# in wxLookupFunction at the bottom of this file.
import datetime
import gdb
import itertools
import sys
if sys.version_info[0] > 2:
# Python 3
Iterator = object
long = int
else:
# Python 2, we need to make an adaptor, so we can use Python 3 iterator implementations.
class Iterator:
def next(self):
return self.__next__()
# shamelessly stolen from std::string example
class wxStringPrinter:
def __init__(self, val):
self.val = val
def to_string(self):
return self.val['m_impl']['_M_dataplus']['_M_p']
def display_hint(self):
return 'string'
class wxArrayStringPrinter:
class _iterator(Iterator):
def __init__ (self, firstItem, count):
self.item = firstItem
self.count = count
self.current = 0
def __iter__(self):
return self
def __next__(self):
current = self.current
self.current = self.current + 1
if current == self.count:
raise StopIteration
elt = self.item.dereference()
self.item = self.item + 1
return ('[%d]' % current, elt)
def __init__(self, val):
self.val = val
def children(self):
return self._iterator(self.val['m_pItems'], self.val['m_nCount'])
def to_string(self):
count = self.val['m_nCount']
capacity = self.val['m_nSize']
return ('length %d, capacity %d' % (int (count), int (capacity)))
def display_hint(self):
return 'array'
class wxDateTimePrinter:
def __init__(self, val):
self.val = val
def to_string(self):
# A value of type wxLongLong can't be used in Python arithmetic
# expressions directly so we need to convert it to long long first and
# then cast to int explicitly to be able to use it as a timestamp.
msec = self.val['m_time'].cast(gdb.lookup_type('long long'))
if msec == 0x8000000000000000:
return 'NONE'
sec = int(msec / 1000)
return datetime.datetime.fromtimestamp(sec).isoformat(' ')
class wxFileNamePrinter:
def __init__(self, val):
self.val = val
def to_string(self):
# It is simpler to just call the internal function here than to iterate
# over m_dirs array ourselves. The disadvantage of this approach is
# that it requires a live inferior process and so doesn't work when
# debugging using only a core file. If this ever becomes a serious
# problem, this should be rewritten to use m_dirs and m_name and m_ext.
return gdb.parse_and_eval('((wxFileName*)%s)->GetFullPath(0)' %
self.val.address)
class wxXYPrinterBase:
def __init__(self, val):
self.x = val['x']
self.y = val['y']
class wxPointPrinter(wxXYPrinterBase):
def to_string(self):
return '(%d, %d)' % (self.x, self.y)
class wxSizePrinter(wxXYPrinterBase):
def to_string(self):
return '%d*%d' % (self.x, self.y)
class wxRectPrinter(wxXYPrinterBase):
def __init__(self, val):
wxXYPrinterBase.__init__(self, val)
self.width = val['width']
self.height = val['height']
def to_string(self):
return '(%d, %d) %d*%d' % (self.x, self.y, self.width, self.height)
# The function looking up the pretty-printer to use for the given value.
def wxLookupFunction(val):
# Using a list is probably ok for so few items but consider switching to a
# set (or a dict and cache class types as the keys in it?) if needed later.
types = ['wxString',
'wxArrayString',
'wxDateTime',
'wxFileName',
'wxPoint',
'wxSize',
'wxRect']
for t in types:
if val.type.tag == t:
# Not sure if this is the best name to create the object of a class
# by name but at least it beats eval()
return globals()[t + 'Printer'](val)
return None
gdb.pretty_printers.append(wxLookupFunction)
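# Extension sketch (illustrative, not part of the original file): adding support for
# another type follows the same pattern -- define a printer class and list its type name
# in wxLookupFunction. For a hypothetical wxFoo with members m_a and m_b:
#
#   class wxFooPrinter:
#       def __init__(self, val):
#           self.val = val
#       def to_string(self):
#           return 'wxFoo(%s, %s)' % (self.val['m_a'], self.val['m_b'])
#
# and then append 'wxFoo' to the types list above.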
| 31.90604
| 92
| 0.592764
|
b61b484e7a4f4718a67610e441dd3bf9848e942e
| 899
|
py
|
Python
|
Trie.py
|
simonluoUW/boggle-solver-flask
|
d6d533fea6ded670e6496b9f64790e2e9149223e
|
[
"MIT"
] | null | null | null |
Trie.py
|
simonluoUW/boggle-solver-flask
|
d6d533fea6ded670e6496b9f64790e2e9149223e
|
[
"MIT"
] | null | null | null |
Trie.py
|
simonluoUW/boggle-solver-flask
|
d6d533fea6ded670e6496b9f64790e2e9149223e
|
[
"MIT"
] | null | null | null |
'''
represents a Trie at root
- contains method to add words to the trie
'''
class Trie:
def __init__(self):
self.root = TrieNode()
def add_word(self,word):
cur = self.root
for character in word:
if character not in cur.children:
cur.children[character] = TrieNode()
cur = cur.children[character]
cur.word = word
'''
node in Trie
- word is not None if node represents a word
- children dictionary
key is next letter in word
value is the next TrieNode for that letter
'''
class TrieNode:
def __init__(self):
self.word = None
self.children = dict()
# Returns a trie of words in boggle_dict.txt
def build_trie():
trie = Trie()
with open("boggle_dict.txt", "r") as file:
for line in file:
word = line.rstrip('\n')
trie.add_word(word)
return trie
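# Usage sketch (illustrative; assumes boggle_dict.txt is present and contains "cat"):
#   trie = build_trie()
#   node = trie.root
#   for ch in "cat":
#       node = node.children.get(ch)
#       if node is None:
#           break
#   is_word = node is not None and node.word == "cat"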
| 23.051282
| 52
| 0.605117
|
f7b55829eac409b58b4bffb4985438a8dc55689e
| 455
|
py
|
Python
|
model/project.py
|
OIGurev/python_training_mantis
|
0f410fc046185e6f8372a2eaeccee2b9397df107
|
[
"Apache-2.0"
] | null | null | null |
model/project.py
|
OIGurev/python_training_mantis
|
0f410fc046185e6f8372a2eaeccee2b9397df107
|
[
"Apache-2.0"
] | null | null | null |
model/project.py
|
OIGurev/python_training_mantis
|
0f410fc046185e6f8372a2eaeccee2b9397df107
|
[
"Apache-2.0"
] | null | null | null |
from sys import maxsize
class Project:
def __init__(self, id=None, name=None):
self.id = id
self.name = name
def __repr__(self):
return "%s:%s" % (self.id, self.name)
def __eq__(self, other):
return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
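# Illustrative note (an inference, not part of the original file): id_or_max is intended
# as a sort key so that projects without an id sort last, e.g.
#   sorted(project_list, key=Project.id_or_max)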
| 22.75
| 103
| 0.571429
|
582d38d9b838eea540019de571fc733f8833b9e7
| 123
|
py
|
Python
|
archive/apps.py
|
n2o/dpb
|
9e44ef91dc25782a12150e1001983aeee62bc566
|
[
"MIT"
] | 3
|
2020-11-05T10:09:04.000Z
|
2021-03-13T11:27:05.000Z
|
archive/apps.py
|
n2o/dpb
|
9e44ef91dc25782a12150e1001983aeee62bc566
|
[
"MIT"
] | 31
|
2015-07-26T13:53:26.000Z
|
2020-09-28T06:08:03.000Z
|
archive/apps.py
|
n2o/dpb
|
9e44ef91dc25782a12150e1001983aeee62bc566
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class ArchiveConfig(AppConfig):
name = "archive"
verbose_name = "Bundesarchiv"
| 17.571429
| 33
| 0.739837
|
e695a3a6a818a524d4d7a18acf73826635bafd22
| 8,363
|
py
|
Python
|
infra/build/functions/project_sync.py
|
xuri/oss-fuzz
|
be9a96b09cac719f0fd43a2ff7dfee09ae27c849
|
[
"Apache-2.0"
] | null | null | null |
infra/build/functions/project_sync.py
|
xuri/oss-fuzz
|
be9a96b09cac719f0fd43a2ff7dfee09ae27c849
|
[
"Apache-2.0"
] | null | null | null |
infra/build/functions/project_sync.py
|
xuri/oss-fuzz
|
be9a96b09cac719f0fd43a2ff7dfee09ae27c849
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Cloud functions for build scheduling."""
from collections import namedtuple
import logging
import os
import re
import yaml
from github import Github
from google.api_core import exceptions
from google.cloud import ndb
from google.cloud import scheduler_v1
import build_and_run_coverage
import build_project
from datastore_entities import GithubCreds
from datastore_entities import Project
VALID_PROJECT_NAME = re.compile(r'^[a-zA-Z0-9_-]+$')
DEFAULT_BUILDS_PER_DAY = 1
MAX_BUILDS_PER_DAY = 4
COVERAGE_SCHEDULE = '0 6 * * *'
FUZZING_BUILD_TOPIC = 'request-build'
COVERAGE_BUILD_TOPIC = 'request-coverage-build'
ProjectMetadata = namedtuple(
'ProjectMetadata', 'schedule project_yaml_contents dockerfile_contents')
class ProjectYamlError(Exception):
"""Error in project.yaml format."""
def create_scheduler(cloud_scheduler_client, project_name, schedule, tag,
topic):
"""Creates schedulers for new projects."""
project_id = os.environ.get('GCP_PROJECT')
location_id = os.environ.get('FUNCTION_REGION')
parent = cloud_scheduler_client.location_path(project_id, location_id)
job = {
'name': parent + '/jobs/' + project_name + '-scheduler-' + tag,
'pubsub_target': {
'topic_name': 'projects/' + project_id + '/topics/' + topic,
'data': project_name.encode()
},
'schedule': schedule
}
cloud_scheduler_client.create_job(parent, job)
def delete_scheduler(cloud_scheduler_client, project_name):
"""Deletes schedulers for projects that were removed."""
project_id = os.environ.get('GCP_PROJECT')
location_id = os.environ.get('FUNCTION_REGION')
name = cloud_scheduler_client.job_path(project_id, location_id,
project_name + '-scheduler')
cloud_scheduler_client.delete_job(name)
def update_scheduler(cloud_scheduler_client, project, schedule):
"""Updates schedule in case schedule was changed."""
project_id = os.environ.get('GCP_PROJECT')
location_id = os.environ.get('FUNCTION_REGION')
parent = cloud_scheduler_client.location_path(project_id, location_id)
job = {
'name': parent + '/jobs/' + project.name + '-scheduler',
'pubsub_target': {
'topic_name': 'projects/' + project_id + '/topics/request-build',
'data': project.name.encode()
},
'schedule': project.schedule
}
update_mask = {'schedule': schedule}
cloud_scheduler_client.update(job, update_mask)
# pylint: disable=too-many-branches
def sync_projects(cloud_scheduler_client, projects):
"""Sync projects with cloud datastore."""
for project in Project.query():
if project.name in projects:
continue
logging.info('Deleting project %s', project.name)
try:
delete_scheduler(cloud_scheduler_client, project.name)
project.key.delete()
except exceptions.GoogleAPICallError as error:
logging.error('Scheduler deletion for %s failed with %s', project.name,
error)
existing_projects = {project.name for project in Project.query()}
for project_name in projects:
if project_name in existing_projects:
continue
try:
create_scheduler(cloud_scheduler_client, project_name,
projects[project_name].schedule,
build_project.FUZZING_BUILD_TAG, FUZZING_BUILD_TOPIC)
create_scheduler(cloud_scheduler_client, project_name, COVERAGE_SCHEDULE,
build_and_run_coverage.COVERAGE_BUILD_TAG,
COVERAGE_BUILD_TOPIC)
project_metadata = projects[project_name]
Project(name=project_name,
schedule=project_metadata.schedule,
project_yaml_contents=project_metadata.project_yaml_contents,
dockerfile_contents=project_metadata.dockerfile_contents).put()
except exceptions.GoogleAPICallError as error:
logging.error('Scheduler creation for %s failed with %s', project_name,
error)
for project in Project.query():
if project.name not in projects:
continue
logging.info('Setting up project %s', project.name)
project_metadata = projects[project.name]
project_changed = False
if project.schedule != project_metadata.schedule:
try:
logging.info('Schedule changed.')
update_scheduler(cloud_scheduler_client, project,
projects[project.name].schedule)
project.schedule = project_metadata.schedule
project_changed = True
except exceptions.GoogleAPICallError as error:
logging.error('Updating scheduler for %s failed with %s', project.name,
error)
if project.project_yaml_contents != project_metadata.project_yaml_contents:
project.project_yaml_contents = project_metadata.project_yaml_contents
project_changed = True
if project.dockerfile_contents != project_metadata.dockerfile_contents:
project.dockerfile_contents = project_metadata.dockerfile_contents
project_changed = True
if project_changed:
project.put()
def _has_docker_file(project_contents):
"""Checks if project has a Dockerfile."""
return any(
content_file.name == 'Dockerfile' for content_file in project_contents)
def get_project_metadata(project_contents):
"""Checks for schedule parameter in yaml file else uses DEFAULT_SCHEDULE."""
for content_file in project_contents:
if content_file.name == 'project.yaml':
project_yaml_contents = content_file.decoded_content.decode('utf-8')
if content_file.name == 'Dockerfile':
dockerfile_contents = content_file.decoded_content.decode('utf-8')
project_yaml = yaml.safe_load(project_yaml_contents)
builds_per_day = project_yaml.get('builds_per_day', DEFAULT_BUILDS_PER_DAY)
if not isinstance(builds_per_day, int) or builds_per_day not in range(
1, MAX_BUILDS_PER_DAY + 1):
raise ProjectYamlError('Parameter is not an integer in range [1-4]')
# Starting at 6:00 am, next build schedules are added at 'interval' slots
# Example for interval 2, hours = [6, 18] and schedule = '0 6,18 * * *'
interval = 24 // builds_per_day
hours = []
for hour in range(6, 30, interval):
hours.append(hour % 24)
schedule = '0 ' + ','.join(str(hour) for hour in hours) + ' * * *'
return ProjectMetadata(schedule, project_yaml_contents, dockerfile_contents)
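# Editor's note (illustrative sketch, not part of the original module): the schedule
# string built above is a plain cron expression. For example, builds_per_day = 2 gives
# interval = 12, hour slots [6, 18], and the schedule '0 6,18 * * *'.
def _example_schedule(builds_per_day=2):
  interval = 24 // builds_per_day
  hours = [hour % 24 for hour in range(6, 30, interval)]
  return '0 ' + ','.join(str(hour) for hour in hours) + ' * * *'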
def get_projects(repo):
"""Get project list from git repository."""
projects = {}
contents = repo.get_contents('projects')
for content_file in contents:
if content_file.type != 'dir' or not VALID_PROJECT_NAME.match(
content_file.name):
continue
project_contents = repo.get_contents(content_file.path)
if not _has_docker_file(project_contents):
continue
try:
projects[content_file.name] = get_project_metadata(project_contents)
except ProjectYamlError as error:
logging.error(
'Incorrect format for project.yaml file of %s with error %s',
content_file.name, error)
return projects
def get_github_creds():
"""Retrieves GitHub client credentials."""
git_creds = GithubCreds.query().get()
if git_creds is None:
raise RuntimeError('Git credentials not available.')
return git_creds
def sync(event, context):
"""Sync projects with cloud datastore."""
del event, context #unused
with ndb.Client().context():
git_creds = get_github_creds()
github_client = Github(git_creds.client_id, git_creds.client_secret)
repo = github_client.get_repo('google/oss-fuzz')
projects = get_projects(repo)
cloud_scheduler_client = scheduler_v1.CloudSchedulerClient()
sync_projects(cloud_scheduler_client, projects)
| 35.892704
| 80
| 0.708956
|
70fe581de4ea704b057b8cdbe2c6196bc21ac832
| 7,765
|
py
|
Python
|
status/views.py
|
riptano/statuspage
|
eb5b3c983f2e23b9ec2e1f30c8cfe5cc624699d4
|
[
"BSD-3-Clause"
] | null | null | null |
status/views.py
|
riptano/statuspage
|
eb5b3c983f2e23b9ec2e1f30c8cfe5cc624699d4
|
[
"BSD-3-Clause"
] | null | null | null |
status/views.py
|
riptano/statuspage
|
eb5b3c983f2e23b9ec2e1f30c8cfe5cc624699d4
|
[
"BSD-3-Clause"
] | null | null | null |
from datetime import date, timedelta
from django.conf import settings
from django.contrib.sites.shortcuts import get_current_site
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render
from django.template import RequestContext, Template
from django.template.loader import get_template
from django.utils.decorators import method_decorator
from django.views.generic import (
MonthArchiveView, YearArchiveView, CreateView, DeleteView, DetailView, ListView, TemplateView
)
from stronghold.decorators import public
from status.models import Incident, IncidentUpdate
from status.forms import IncidentCreateForm, IncidentUpdateCreateForm
import slack
import slack.chat
import logging
logger = logging.getLogger(__name__)
def send_to_slack(message, channel='engineering', username='statusbot', emoji=':statusbot:', override_debug=False):
slack.api_token = settings.SLACK_TOKEN
if settings.DEBUG and not override_debug:
logger.info('Diverting from %s to dev while in debug mode as %s: %s' % (channel, username, message))
slack.chat.post_message('dev', 'DEBUG: ' + message, username=username, icon_emoji=emoji)
else:
logger.info('Sending to channel %s as %s: %s' % (channel, username, message))
slack.chat.post_message(channel, message, username=username, icon_emoji=emoji)
def create_incident(request):
if request.method == 'POST':
form = IncidentCreateForm(request.POST)
form2 = IncidentUpdateCreateForm(request.POST)
if form.is_valid() and form2.is_valid():
i = form.save(commit=False)
i.user = request.user
            print(i)
i.save()
f = form2.save(commit=False)
f.incident = i
f.user = request.user
f.save()
if settings.SLACK_CHANNEL and settings.SLACK_TOKEN:
if len(f.description) > 50:
description = f.description[:50] + '...'
else:
description = f.description
try:
message = "<https://%s%s|%s> (%s): %s" % (
get_current_site(request),
reverse('status:incident_detail', args=[i.pk, ]),
i.name,
f.status.name,
description
)
send_to_slack(message, username=settings.SLACK_USERNAME, channel=settings.SLACK_CHANNEL)
except Exception as e:
logger.warn('Unable to send to slack: %s' % (e))
return HttpResponseRedirect('/')
else:
form = IncidentCreateForm()
form2 = IncidentUpdateCreateForm()
request_context = RequestContext(request)
request_context.push({'form': form, 'form2': form2})
t = get_template('status/incident_create_form.html')
rendered_template = t.render(request_context.flatten(), request)
return HttpResponse(rendered_template)
#return get_template('status/incident_create_form.html').render(request_context.flatten(), request)
#return render(request, template_name='status/incident_create_form.html', context=request_context)
class DashboardView(ListView):
model = Incident
def get_queryset(self):
return Incident.objects.exclude(hidden=True)
class HiddenDashboardView(ListView):
model = Incident
class IncidentHideView(DeleteView):
model = Incident
template_name = 'status/incident_hide.html'
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.hidden = True
self.object.save()
return HttpResponseRedirect(self.get_success_url())
def get_success_url(self):
return reverse('status:dashboard')
class IncidentDeleteView(DeleteView):
model = Incident
def get_success_url(self):
return reverse('status:dashboard')
class IncidentUpdateUpdateView(CreateView):
model = IncidentUpdate
form_class = IncidentUpdateCreateForm
template_name = 'status/incident_form.html'
def get_success_url(self):
return reverse('status:incident_detail', args=[self.kwargs['pk']])
def form_valid(self, form):
iu = form.save(commit=False)
i = Incident.objects.get(pk=self.kwargs['pk'])
i.hidden = False
i.save()
iu.incident = i
iu.incident.hidden = False
iu.incident.save()
iu.user = self.request.user
iu.save()
return HttpResponseRedirect(self.get_success_url())
class IncidentDetailView(DetailView):
model = Incident
@method_decorator(public)
def dispatch(self, *args, **kwargs):
return super(IncidentDetailView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(IncidentDetailView, self).get_context_data(**kwargs)
context.update({
'form': IncidentUpdateCreateForm(),
})
return context
class IncidentArchiveYearView(YearArchiveView):
make_object_list = True
queryset = Incident.objects.all()
date_field = 'updated'
@method_decorator(public)
def dispatch(self, *args, **kwargs):
return super(IncidentArchiveYearView, self).dispatch(*args, **kwargs)
class IncidentArchiveMonthView(MonthArchiveView):
make_object_list = True
queryset = Incident.objects.all()
date_field = 'updated'
month_format = '%m'
@method_decorator(public)
def dispatch(self, *args, **kwargs):
return super(IncidentArchiveMonthView, self).dispatch(*args, **kwargs)
class HomeView(TemplateView):
http_method_names = ['get', ]
template_name = 'status/home.html'
@method_decorator(public)
def dispatch(self, *args, **kwargs):
return super(HomeView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(HomeView, self).get_context_data(**kwargs)
incident_list = Incident.objects.filter(hidden=False).order_by('-updated')
context.update({
'incident_list': incident_list
})
if hasattr(settings, 'STATUS_TICKET_URL'):
context.update({'STATUS_TICKET_URL': settings.STATUS_TICKET_URL})
if hasattr(settings, 'STATUS_LOGO_URL'):
context.update({'STATUS_LOGO_URL': settings.STATUS_LOGO_URL})
if hasattr(settings, 'STATUS_TITLE'):
context.update({'STATUS_TITLE': settings.STATUS_TITLE})
status_level = 'success'
active_list = []
completed_list = []
for incident in incident_list:
try:
if incident.get_latest_update().status.type == 'danger':
status_level = 'danger'
active_list.append(incident)
elif incident.get_latest_update().status.type == 'warning':
if status_level != 'danger':
status_level = 'warning'
active_list.append(incident)
elif incident.get_latest_update().status.type == 'info':
if status_level not in ('warning', 'danger'):
status_level = 'info'
active_list.append(incident)
elif incident.get_latest_update().status.type == 'success':
completed_list.append(incident)
except AttributeError:
# Unable to get_latest_update(), 'None' has no .status
pass
context.update({
'status_level': status_level,
'active_list': active_list,
'completed_list': completed_list,
})
return context
| 35.135747
| 115
| 0.642112
|
1d946e3243fa8ec71be0b4cf173afac4faccb4bc
| 429
|
py
|
Python
|
twisted/plugins/lala_plugin.py
|
mineo/lala
|
dc5b516c6817187135b0371e45bd8e14bca0f6fc
|
[
"MIT"
] | null | null | null |
twisted/plugins/lala_plugin.py
|
mineo/lala
|
dc5b516c6817187135b0371e45bd8e14bca0f6fc
|
[
"MIT"
] | 5
|
2015-04-21T11:29:26.000Z
|
2018-08-08T21:07:38.000Z
|
twisted/plugins/lala_plugin.py
|
mineo/lala
|
dc5b516c6817187135b0371e45bd8e14bca0f6fc
|
[
"MIT"
] | null | null | null |
from zope.interface import implementer
from twisted.plugin import IPlugin
from twisted.application.service import IServiceMaker
from lala.main import LalaOptions, getService
@implementer(IServiceMaker, IPlugin)
class LalaServiceMaker(object):
tapname = "lala"
description = "IRC Bot"
options = LalaOptions
def makeService(self, options):
return getService(options)
serviceMaker = LalaServiceMaker()
| 22.578947
| 53
| 0.771562
|
de4b4efd6f4531f204ea573bbf2bbeb6743e8432
| 14,174
|
py
|
Python
|
src/gluonts/model/wavenet/_estimator.py
|
RingoIngo/gluon-ts
|
62fb20c36025fc969653accaffaa783671709564
|
[
"Apache-2.0"
] | 7
|
2021-07-20T21:46:28.000Z
|
2022-01-12T04:18:14.000Z
|
src/gluonts/model/wavenet/_estimator.py
|
RingoIngo/gluon-ts
|
62fb20c36025fc969653accaffaa783671709564
|
[
"Apache-2.0"
] | null | null | null |
src/gluonts/model/wavenet/_estimator.py
|
RingoIngo/gluon-ts
|
62fb20c36025fc969653accaffaa783671709564
|
[
"Apache-2.0"
] | 3
|
2021-08-28T06:01:27.000Z
|
2022-01-12T04:18:13.000Z
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
from functools import partial
from typing import List, Optional
import mxnet as mx
import numpy as np
from gluonts import transform
from gluonts.core.component import validated
from gluonts.dataset.common import DataEntry, Dataset
from gluonts.dataset.field_names import FieldName
from gluonts.dataset.loader import (
DataLoader,
TrainDataLoader,
ValidationDataLoader,
)
from gluonts.env import env
from gluonts.model.predictor import Predictor
from gluonts.model.wavenet._network import (
WaveNet,
WaveNetSampler,
WaveNetTraining,
)
from gluonts.mx.batchify import as_in_context, batchify
from gluonts.mx.model.estimator import GluonEstimator
from gluonts.mx.model.predictor import RepresentableBlockPredictor
from gluonts.mx.trainer import Trainer
from gluonts.mx.util import copy_parameters, get_hybrid_forward_input_names
from gluonts.itertools import maybe_len
from gluonts.time_feature import (
get_seasonality,
time_features_from_frequency_str,
)
from gluonts.transform import (
AddAgeFeature,
AddObservedValuesIndicator,
AddTimeFeatures,
AsNumpyArray,
Chain,
ExpectedNumInstanceSampler,
InstanceSampler,
InstanceSplitter,
SelectFields,
SetFieldIfNotPresent,
SimpleTransformation,
TestSplitSampler,
ValidationSplitSampler,
VstackFeatures,
)
class QuantizeScaled(SimpleTransformation):
"""
Rescale and quantize the target variable.
Requires
past_target and future_target fields.
The mean absolute value of the past_target is used to rescale past_target and future_target.
Then the bin_edges are used to quantize the rescaled target.
The calculated scale is included as a new field "scale"
"""
@validated()
def __init__(
self,
bin_edges: List[float],
past_target: str,
future_target: str,
scale: str = "scale",
):
self.bin_edges = np.array(bin_edges)
self.future_target = future_target
self.past_target = past_target
self.scale = scale
def transform(self, data: DataEntry) -> DataEntry:
p = data[self.past_target]
m = np.mean(np.abs(p))
scale = m if m > 0 else 1.0
data[self.future_target] = np.digitize(
data[self.future_target] / scale, bins=self.bin_edges, right=False
)
data[self.past_target] = np.digitize(
data[self.past_target] / scale, bins=self.bin_edges, right=False
)
data[self.scale] = np.array([scale])
return data
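# Editor's sketch (not part of GluonTS): a NumPy-only illustration of the rescale-and-
# digitize step performed by QuantizeScaled above, on a made-up target and bin edges.
# Relies only on the module-level numpy import.
def _quantize_scaled_demo():
    bin_edges = np.array([-1e20, 0.5, 1.5, 1e20])   # 3 bins -> indices 1, 2, 3
    past = np.array([1.0, 5.0])
    m = np.mean(np.abs(past))                       # 3.0
    scale = m if m > 0 else 1.0
    return np.digitize(past / scale, bins=bin_edges, right=False)  # array([1, 3])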
class WaveNetEstimator(GluonEstimator):
"""
Model with Wavenet architecture and quantized target.
Parameters
----------
freq
Frequency of the data to train on and predict
prediction_length
Length of the prediction horizon
trainer
Trainer object to be used (default: Trainer())
cardinality
Number of values of the each categorical feature (default: [1])
embedding_dimension
Dimension of the embeddings for categorical features (the same
dimension is used for all embeddings, default: 5)
num_bins
Number of bins used for quantization of signal (default: 1024)
hybridize_prediction_net
Boolean (default: False)
n_residue
Number of residual channels in wavenet architecture (default: 24)
n_skip
Number of skip channels in wavenet architecture (default: 32)
dilation_depth
Number of dilation layers in wavenet architecture.
        If set to None (default), dilation_depth is set such that the receptive field length is at
        least as long as the typical seasonality for the frequency and at least 2 * prediction_length.
n_stacks
Number of dilation stacks in wavenet architecture (default: 1)
temperature
        Temperature used for sampling from the softmax distribution.
For temperature = 1.0 (default) sampling is according to estimated probability.
act_type
        Activation type used before the output layer (default: "elu").
Can be any of 'elu', 'relu', 'sigmoid', 'tanh', 'softrelu', 'softsign'.
num_parallel_samples
Number of evaluation samples per time series to increase parallelism during inference.
This is a model optimization that does not affect the accuracy (default: 200)
train_sampler
Controls the sampling of windows during training.
validation_sampler
Controls the sampling of windows during validation.
batch_size
The size of the batches to be used training and prediction.
"""
@validated()
def __init__(
self,
freq: str,
prediction_length: int,
trainer: Trainer = Trainer(
learning_rate=0.01,
epochs=200,
num_batches_per_epoch=50,
hybridize=False,
),
cardinality: List[int] = [1],
seasonality: Optional[int] = None,
embedding_dimension: int = 5,
num_bins: int = 1024,
hybridize_prediction_net: bool = False,
n_residue=24,
n_skip=32,
dilation_depth: Optional[int] = None,
n_stacks: int = 1,
train_window_length: Optional[int] = None,
temperature: float = 1.0,
act_type: str = "elu",
num_parallel_samples: int = 200,
train_sampler: Optional[InstanceSampler] = None,
validation_sampler: Optional[InstanceSampler] = None,
batch_size: int = 32,
negative_data: bool = False,
) -> None:
super().__init__(trainer=trainer, batch_size=batch_size)
self.freq = freq
self.prediction_length = prediction_length
self.cardinality = cardinality
self.embedding_dimension = embedding_dimension
self.num_bins = num_bins
self.hybridize_prediction_net = hybridize_prediction_net
self.n_residue = n_residue
self.n_skip = n_skip
self.n_stacks = n_stacks
self.train_window_length = (
train_window_length
if train_window_length is not None
else prediction_length
)
self.temperature = temperature
self.act_type = act_type
self.num_parallel_samples = num_parallel_samples
self.train_sampler = (
train_sampler
if train_sampler is not None
else ExpectedNumInstanceSampler(
num_instances=1.0, min_future=self.train_window_length
)
)
self.validation_sampler = (
validation_sampler
if validation_sampler is not None
else ValidationSplitSampler(min_future=self.train_window_length)
)
self.negative_data = negative_data
low = -10.0 if self.negative_data else 0
high = 10.0
bin_centers = np.linspace(low, high, self.num_bins)
bin_edges = np.concatenate(
[[-1e20], (bin_centers[1:] + bin_centers[:-1]) / 2.0, [1e20]]
)
self.bin_centers = bin_centers.tolist()
self.bin_edges = bin_edges.tolist()
seasonality = (
get_seasonality(
self.freq,
{
"H": 7 * 24,
"D": 7,
"W": 52,
"M": 12,
"B": 7 * 5,
"min": 24 * 60,
},
)
if seasonality is None
else seasonality
)
goal_receptive_length = max(
2 * seasonality, 2 * self.prediction_length
)
if dilation_depth is None:
d = 1
while (
WaveNet.get_receptive_field(
dilation_depth=d, n_stacks=n_stacks
)
< goal_receptive_length
):
d += 1
self.dilation_depth = d
else:
self.dilation_depth = dilation_depth
self.context_length = WaveNet.get_receptive_field(
dilation_depth=self.dilation_depth, n_stacks=n_stacks
)
self.logger = logging.getLogger(__name__)
self.logger.info(
"Using dilation depth %d and receptive field length %d",
self.dilation_depth,
self.context_length,
)
def create_transformation(self) -> transform.Transformation:
return Chain(
[
AsNumpyArray(field=FieldName.TARGET, expected_ndim=1),
AddObservedValuesIndicator(
target_field=FieldName.TARGET,
output_field=FieldName.OBSERVED_VALUES,
),
AddTimeFeatures(
start_field=FieldName.START,
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_TIME,
time_features=time_features_from_frequency_str(self.freq),
pred_length=self.prediction_length,
),
AddAgeFeature(
target_field=FieldName.TARGET,
output_field=FieldName.FEAT_AGE,
pred_length=self.prediction_length,
),
VstackFeatures(
output_field=FieldName.FEAT_TIME,
input_fields=[FieldName.FEAT_TIME, FieldName.FEAT_AGE],
),
SetFieldIfNotPresent(
field=FieldName.FEAT_STATIC_CAT, value=[0.0]
),
AsNumpyArray(field=FieldName.FEAT_STATIC_CAT, expected_ndim=1),
]
)
def _create_instance_splitter(self, mode: str):
assert mode in ["training", "validation", "test"]
instance_sampler = {
"training": self.train_sampler,
"validation": self.validation_sampler,
"test": TestSplitSampler(),
}[mode]
return InstanceSplitter(
target_field=FieldName.TARGET,
is_pad_field=FieldName.IS_PAD,
start_field=FieldName.START,
forecast_start_field=FieldName.FORECAST_START,
instance_sampler=instance_sampler,
past_length=self.context_length,
future_length=self.prediction_length
if mode == "test"
else self.train_window_length,
output_NTC=False,
time_series_fields=[
FieldName.FEAT_TIME,
FieldName.OBSERVED_VALUES,
],
) + QuantizeScaled(
bin_edges=self.bin_edges,
future_target="future_target",
past_target="past_target",
)
def create_training_data_loader(
self,
data: Dataset,
**kwargs,
) -> DataLoader:
input_names = get_hybrid_forward_input_names(WaveNetTraining)
with env._let(max_idle_transforms=maybe_len(data) or 0):
instance_splitter = self._create_instance_splitter("training")
return TrainDataLoader(
dataset=data,
transform=instance_splitter + SelectFields(input_names),
batch_size=self.batch_size,
stack_fn=partial(batchify, ctx=self.trainer.ctx, dtype=self.dtype),
decode_fn=partial(as_in_context, ctx=self.trainer.ctx),
**kwargs,
)
def create_validation_data_loader(
self,
data: Dataset,
**kwargs,
) -> DataLoader:
input_names = get_hybrid_forward_input_names(WaveNetTraining)
with env._let(max_idle_transforms=maybe_len(data) or 0):
instance_splitter = self._create_instance_splitter("validation")
return ValidationDataLoader(
dataset=data,
transform=instance_splitter + SelectFields(input_names),
batch_size=self.batch_size,
stack_fn=partial(batchify, ctx=self.trainer.ctx, dtype=self.dtype),
)
def _get_wavenet_args(self):
return dict(
n_residue=self.n_residue,
n_skip=self.n_skip,
dilation_depth=self.dilation_depth,
n_stacks=self.n_stacks,
act_type=self.act_type,
cardinality=self.cardinality,
embedding_dimension=self.embedding_dimension,
bin_values=self.bin_centers,
pred_length=self.prediction_length,
)
def create_training_network(self) -> WaveNetTraining:
params = self._get_wavenet_args()
params.update(pred_length=self.train_window_length)
return WaveNetTraining(**params)
def create_predictor(
self,
transformation: transform.Transformation,
trained_network: mx.gluon.HybridBlock,
) -> Predictor:
prediction_splitter = self._create_instance_splitter("test")
prediction_network = WaveNetSampler(
num_samples=self.num_parallel_samples,
temperature=self.temperature,
**self._get_wavenet_args(),
)
# The lookup layer is specific to the sampling network here
# we make sure it is initialized.
prediction_network.initialize()
copy_parameters(
net_source=trained_network,
net_dest=prediction_network,
allow_missing=True,
)
return RepresentableBlockPredictor(
input_transform=transformation + prediction_splitter,
prediction_net=prediction_network,
batch_size=self.batch_size,
freq=self.freq,
prediction_length=self.prediction_length,
ctx=self.trainer.ctx,
)
| 34.740196
| 99
| 0.627487
|
648c6e19f5538a069ce1143f6b7732db580f9fd3
| 12,900
|
py
|
Python
|
vase/insilico_filter.py
|
gantzgraf/vape
|
f939cb527d72d852cb0919a57332110c15c5fd4a
|
[
"MIT"
] | 4
|
2020-03-25T06:09:39.000Z
|
2021-03-23T11:22:00.000Z
|
vase/insilico_filter.py
|
gantzgraf/vape
|
f939cb527d72d852cb0919a57332110c15c5fd4a
|
[
"MIT"
] | 1
|
2020-10-02T14:50:30.000Z
|
2020-10-12T15:24:24.000Z
|
vase/insilico_filter.py
|
gantzgraf/vape
|
f939cb527d72d852cb0919a57332110c15c5fd4a
|
[
"MIT"
] | 1
|
2021-02-20T11:32:34.000Z
|
2021-02-20T11:32:34.000Z
|
import re
import os
vep_internal_pred_re = re.compile(r'\(\d(\.\d+)?\)')
# for removing numbers in brackets at end of PolyPhen, SIFT and Condel VEP
# annotations
class InSilicoFilter(object):
'''
Stores in silico prediction formats for VEP and indicates
whether a given variant consequence should be filtered on the
basis of the options provided on initialization. Data on the in
silico formats recognised are stored in the text file
"data/vep_insilico_pred.tsv"
'''
def __init__(self, programs, filter_unpredicted=False,
keep_if_any_damaging=False, pred_file=None):
'''
Initialize with a list of program names to use as filters.
Args:
programs: A list of in silico prediction programs to
use for filtering. These must be present in
the VEP annotations of a VcfRecord as added
either directly by VEP or via a VEP plugin
such as dbNSFP. You may also optionally
specify score criteria for filtering as in
                      the following examples:
FATHMM_pred=D
MutationTaster_pred=A
MetaSVM_rankscore=0.8
Or you may just provide the program names
and the default 'damaging' prediction values
will be used, as listed in the prediction
data file (see pred_file argument).
The filter() function, which must be
provided with dict of VEP consequences to
values, will return False if ALL of the
programs provided here contain appropriate
prediction values or have no prediction.
This behaviour can be modified with the
keep_if_any_damaging/filter_unpredicted
arguments.
filter_unpredicted:
The default behaviour is to ignore a program
if there is no prediction given (i.e. the
score/pred is empty). That is, if there are
no predictions for any of the programs
filter() will return False, while if
predictions are missing for only some,
filtering will proceed as normal, ignoring
those programs with missing predictions. If
this argument is set to True, filter() will
return True if any program does not have a
prediction/score.
keep_if_any_damaging:
If set to True, filter() will return False
if ANY of the given programs has a
matching prediction/score unless
'filter_unpredicted' is True and a
prediction/score is missing for any program.
pred_file:
TSV file with a column for prediction
program names, valid/default filtering
values and type. Type can be 'valid',
'default' or 'regex' to indicate
acceptable and default filtering values for
string based comparisons or 'score' to
indicate that values are numeric (in this
case an optional fourth column may specify
'lower=damaging' if lower values are more
damaging than higher values). The 'regex'
type can be used to indicate acceptable
patterns but not default patterns. See the
default file for examples. A default value
for any annotation should always be
provided.
Default="data/vep_insilico_pred.tsv".
'''
self.filter_unpredicted = filter_unpredicted
self.keep_if_any_damaging = keep_if_any_damaging
self.pred_filters = {}
self.score_filters = {}
default_progs = {}
case_insensitive = {}
self.lower_more_damaging = set()
if pred_file is None:
pred_file = os.path.join(os.path.dirname(__file__), "data",
"vep_insilico_pred.tsv")
with open(pred_file, encoding='UTF-8') as insilico_d:
for line in insilico_d:
if line.startswith('#'):
continue
cols = line.rstrip().split('\t')
case_insensitive[cols[0].lower()] = cols[0]
if cols[0] in default_progs:
if cols[2] == 'regex':
value = re.compile(cols[1])
if 'valid' not in default_progs[cols[0]]:
default_progs[cols[0]]['valid'] = []
else:
value = cols[1]
if cols[2] not in default_progs[cols[0]]:
default_progs[cols[0]][cols[2]] = [value]
elif cols[2] != 'score' :
default_progs[cols[0]][cols[2]].append(value)
else:
raise RuntimeError("Error in {}:".format(pred_file) +
" Should only have one entry for " +
"score prediction '{}'"
.format(cols[0]))
else:
if cols[2] == 'score':
default_progs[cols[0]] = {'type' : 'score',
'default' : float(cols[1])}
else:
default_progs[cols[0]] = {'type' : 'pred'}
                        if cols[2] == 'regex':
                            # compile the pattern column (cols[1]); keep it in a list so
                            # later rows for the same program can append more patterns
                            default_progs[cols[0]]['regex'] = [re.compile(cols[1])]
if 'valid' not in default_progs[cols[0]]:
default_progs[cols[0]]['valid'] = []
else:
default_progs[cols[0]][cols[2]] = [cols[1]]
if len(cols) >= 4:
if cols[3] == 'lower=damaging':
self.lower_more_damaging.add(cols[0])
for prog in programs:
split = prog.split('=')
pred = None
if len(split) > 1:
prog = split[0]
pred = split[1]
if prog.lower() in case_insensitive:
prog = case_insensitive[prog.lower()]
else:
raise RuntimeError("ERROR: in silico prediction program '{}' "
.format(prog) + "not recognised.")
if pred is not None:
if default_progs[prog]['type'] == 'score':
try:
score = float(pred)
self.score_filters[prog] = score
except ValueError:
raise RuntimeError("ERROR: {} score must be numeric. "
.format(prog) + "Could not " +
"convert value '{}' to a number."
.format(pred))
elif (pred in default_progs[prog]['default'] or
pred in default_progs[prog]['valid']):
if prog in self.pred_filters:
self.pred_filters[prog].add(pred)
else:
self.pred_filters[prog] = set([pred])
elif 'regex' in default_progs[prog]:
re_matched = False
for regex in default_progs[prog]['regex']:
if regex.match(pred):
if prog in self.pred_filters:
self.pred_filters[prog].add(pred)
else:
self.pred_filters[prog] = set([pred])
re_matched = True
break
if not re_matched:
raise RuntimeError("ERROR: score '{}' ".format(pred) +
"not recognised as valid for in " +
"silico prediction program '{}' "
.format(prog))
else:
raise RuntimeError("ERROR: score '{}' not " .format(pred) +
"recognised as valid for in silico " +
"prediction program '{}' ".format(prog))
else:
if default_progs[prog]['type'] == 'score':
score = float(default_progs[prog]['default'])
self.score_filters[prog] = score
else:
self.pred_filters[prog] = default_progs[prog]['default']
self._n_prog = len(self.pred_filters) + len(self.score_filters)
def filter(self, csq):
'''
Returns False if prediction matches filters for given
consequence, otherwise returns True.
Args:
csq: dict of VEP consequence fields to values, as
provided by the CSQ property of a VcfRecord object.
'''
unpredicted = 0
for prog in self.pred_filters:
try:
if csq[prog] != '':
do_filter = True
for p in csq[prog].split('&'):
p = vep_internal_pred_re.sub('', p)
if p in self.pred_filters[prog]: #matches, don't filter
do_filter = False
break
if self.keep_if_any_damaging:
if not do_filter: #matched
return False
elif do_filter: #haven't matched - filter
return True
else:
if self.filter_unpredicted:
return True
unpredicted += 1
except KeyError:
raise RuntimeError(self._get_prog_missing_string(prog))
for prog in self.score_filters:
try:
if csq[prog] == '':
if self.filter_unpredicted:
return True
unpredicted += 1
else:
do_filter = True
for p in csq[prog].split('&'):
try:
score = float(p)
except ValueError:
continue
if prog in self.lower_more_damaging:
if score <= self.score_filters[prog]: #
do_filter = False
break
else:
if score >= self.score_filters[prog]: #
do_filter = False
break
if self.keep_if_any_damaging:
if not do_filter:
return False
elif do_filter:
return True #score not over threshold - filter
except KeyError:
raise RuntimeError(self._get_prog_missing_string(prog))
if self.keep_if_any_damaging:
#would have already returned False if anything passed filters
if not self.filter_unpredicted and unpredicted == self._n_prog:
#everything unpredicted - do not filter
return False
#filter - no damaging pred & at least 1 prog with non-missing score
return True
return False
def _get_prog_missing_string(self, prog):
return ("'{}' in silico filter program is not present in".format(prog) +
" CSQ field of input VCF - please ensure your input was " +
"annotated with the relevant program by VEP.")
| 48.679245
| 80
| 0.445349
|
0d73993b1bcb99b2fed97a72827c53886cc2c7a2
| 596
|
py
|
Python
|
discord_bot_eternal_dice/discord_bot_eternal_dice/model/discord_member.py
|
lesteenman/discord-bot-eternal-dice
|
717d7ac10b8dbc3bef5d96dec417fd74154241e3
|
[
"MIT"
] | null | null | null |
discord_bot_eternal_dice/discord_bot_eternal_dice/model/discord_member.py
|
lesteenman/discord-bot-eternal-dice
|
717d7ac10b8dbc3bef5d96dec417fd74154241e3
|
[
"MIT"
] | null | null | null |
discord_bot_eternal_dice/discord_bot_eternal_dice/model/discord_member.py
|
lesteenman/discord-bot-eternal-dice
|
717d7ac10b8dbc3bef5d96dec417fd74154241e3
|
[
"MIT"
] | null | null | null |
from typing import Dict
class DiscordMember:
def __init__(self, username: str = None, user_id: int = None, nickname: str = None):
self.username = username
self.user_id = user_id
self.nickname = nickname
@property
def name(self):
if self.nickname is not None:
return self.nickname
return self.username
def member_from_data(member_data: Dict) -> DiscordMember:
return DiscordMember(
username=member_data['user']['username'],
user_id=int(member_data['user']['id']),
nickname=member_data['nick'],
)
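# Editor's illustrative sketch (not part of the original module): the minimal member
# payload shape this parser expects; the values below are made up for illustration.
if __name__ == "__main__":
    data = {"user": {"username": "alice", "id": "42"}, "nick": "Al"}
    member = member_from_data(data)
    print(member.name)  # -> "Al": the nickname takes precedence over the username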
| 25.913043
| 88
| 0.637584
|
96a610116a38c1c36fadc724ab01898fe2887136
| 1,088
|
py
|
Python
|
tensor__cpu/matplot/try_fft.py
|
Zhang-O/small
|
bfb41b2267159bd5e408dba524713d3bc0b28074
|
[
"MIT"
] | 1
|
2017-09-25T03:16:00.000Z
|
2017-09-25T03:16:00.000Z
|
tensor__cpu/matplot/try_fft.py
|
Zhang-O/small
|
bfb41b2267159bd5e408dba524713d3bc0b28074
|
[
"MIT"
] | null | null | null |
tensor__cpu/matplot/try_fft.py
|
Zhang-O/small
|
bfb41b2267159bd5e408dba524713d3bc0b28074
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.fftpack import fft,ifft
import matplotlib.pyplot as plt
import seaborn
# Use 1400 sample points: the highest signal frequency component is 600 Hz, and by the sampling theorem the sampling rate must exceed twice the signal frequency, so it is set to 1400 Hz (i.e. 1400 samples per second)
x=np.linspace(0,1,1400)
# Signal to be sampled, with frequency components at 180, 390 and 600 Hz
y=7*np.sin(2*np.pi*180*x) + 2.8*np.sin(2*np.pi*390*x)+5.1*np.sin(2*np.pi*600*x)
yy=fft(y) # fast Fourier transform
yreal = yy.real # real part
yimag = yy.imag # imaginary part
yf=abs(fft(y)) # take the absolute value (magnitude)
yf1=abs(fft(y))/len(x) # normalize
yf2 = yf1[range(int(len(x)/2))] # keep only half the spectrum, by symmetry
xf = np.arange(len(y)) # frequency axis
xf1 = xf
xf2 = xf[range(int(len(x)/2))] # half the interval
plt.subplot(221)
plt.plot(x[0:50],y[0:50])
plt.title('Original wave')
plt.subplot(222)
plt.plot(xf,yf,'r')
plt.title('FFT of Mixed wave(two sides frequency range)',fontsize=7,color='#7A378B') # note: hex color codes can be looked up in a color-code table
plt.subplot(223)
plt.plot(xf1,yf1,'g')
plt.title('FFT of Mixed wave(normalization)',fontsize=9,color='r')
plt.subplot(224)
plt.plot(xf2,yf2,'b')
plt.title('FFT of Mixed wave)',fontsize=10,color='#F08080')
plt.show()
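# Editor's note (illustrative): in the normalized one-sided spectrum (bottom-right panel)
# the peaks sit near 180, 390 and 600 Hz with heights of roughly half the original
# amplitudes (about 3.5, 1.4 and 2.55), because each real sinusoid's energy is split
# between the positive- and negative-frequency halves of the two-sided spectrum.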
| 25.302326
| 103
| 0.659007
|
c45558c6b4bd35a8045ab10b61397e634b5497f5
| 790
|
py
|
Python
|
backend/images/migrations/0012_auto_20180412_2155.py
|
vframeio/vcat
|
554e94a819818131ddf5ec3db9f0ca4d2795313e
|
[
"MIT"
] | 5
|
2018-11-26T07:43:39.000Z
|
2020-03-09T08:01:39.000Z
|
backend/images/migrations/0012_auto_20180412_2155.py
|
vframeio/_vcat_archived
|
554e94a819818131ddf5ec3db9f0ca4d2795313e
|
[
"MIT"
] | null | null | null |
backend/images/migrations/0012_auto_20180412_2155.py
|
vframeio/_vcat_archived
|
554e94a819818131ddf5ec3db9f0ca4d2795313e
|
[
"MIT"
] | 1
|
2020-04-30T11:16:04.000Z
|
2020-04-30T11:16:04.000Z
|
# Generated by Django 2.0.1 on 2018-04-12 19:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('images', '0011_auto_20180409_1811'),
]
operations = [
migrations.AlterField(
model_name='image',
name='image_group',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='images', to='images.ImageGroup'),
),
migrations.AlterField(
model_name='imagegroup',
name='assigned_to',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
]
| 30.384615
| 139
| 0.655696
|
188144d9a7c99fdd8c528c63524feb1c027d83da
| 11,909
|
py
|
Python
|
manim/utils/color.py
|
janLuke/manim
|
18bab2075f7c9987cbba7b173c3b02971b78a560
|
[
"MIT"
] | 1
|
2021-07-03T14:18:38.000Z
|
2021-07-03T14:18:38.000Z
|
manim/utils/color.py
|
janLuke/manim
|
18bab2075f7c9987cbba7b173c3b02971b78a560
|
[
"MIT"
] | 3
|
2020-07-14T02:46:11.000Z
|
2020-09-09T15:15:55.000Z
|
manim/utils/color.py
|
janLuke/manim
|
18bab2075f7c9987cbba7b173c3b02971b78a560
|
[
"MIT"
] | null | null | null |
"""Colors and utility functions for conversion between different color models."""
__all__ = [
"color_to_rgb",
"color_to_rgba",
"rgb_to_color",
"rgba_to_color",
"rgb_to_hex",
"hex_to_rgb",
"invert_color",
"color_to_int_rgb",
"color_to_int_rgba",
"color_gradient",
"interpolate_color",
"average_color",
"random_bright_color",
"random_color",
"get_shaded_rgb",
]
import random
from enum import Enum
from typing import Iterable, List, Union
import numpy as np
from colour import Color
from ..utils.bezier import interpolate
from ..utils.simple_functions import clip_in_place
from ..utils.space_ops import normalize
class Colors(Enum):
"""A list of pre-defined colors.
Examples
--------
.. manim:: ColorsOverview
:save_last_frame:
:hide_source:
from manim.utils.color import Colors
class ColorsOverview(Scene):
def construct(self):
def color_group(color):
group = VGroup(
*[
Line(ORIGIN, RIGHT * 1.5, stroke_width=35, color=Colors[name].value)
for name in subnames(color)
]
).arrange_submobjects(buff=0.4, direction=DOWN)
name = Text(color).scale(0.6).next_to(group, UP, buff=0.3)
                    if any(descender in color for descender in "gjpqy"):
name.shift(DOWN * 0.08)
group.add(name)
return group
def subnames(name):
return [name + "_" + char for char in "abcde"]
color_groups = VGroup(
*[
color_group(color)
for color in [
"blue",
"teal",
"green",
"yellow",
"gold",
"red",
"maroon",
"purple",
]
]
).arrange_submobjects(buff=0.2, aligned_edge=DOWN)
for line, char in zip(color_groups[0], "abcde"):
color_groups.add(Text(char).scale(0.6).next_to(line, LEFT, buff=0.2))
def named_lines_group(length, colors, names, text_colors, align_to_block):
lines = VGroup(
*[
Line(
ORIGIN,
RIGHT * length,
stroke_width=55,
color=Colors[color].value,
)
for color in colors
]
).arrange_submobjects(buff=0.6, direction=DOWN)
for line, name, color in zip(lines, names, text_colors):
line.add(Text(name, color=color).scale(0.6).move_to(line))
lines.next_to(color_groups, DOWN, buff=0.5).align_to(
color_groups[align_to_block], LEFT
)
return lines
other_colors = (
"pink",
"light_pink",
"orange",
"light_brown",
"dark_brown",
"gray_brown",
)
other_lines = named_lines_group(
3.2,
other_colors,
other_colors,
[BLACK] * 4 + [WHITE] * 2,
0,
)
gray_lines = named_lines_group(
6.6,
["white"] + subnames("gray") + ["black"],
[
"white",
"lighter_gray / gray_a",
"light_gray / gray_b",
"gray / gray_c",
"dark_gray / gray_d",
"darker_gray / gray_e",
"black",
],
[BLACK] * 3 + [WHITE] * 4,
2,
)
pure_colors = (
"pure_red",
"pure_green",
"pure_blue",
)
pure_lines = named_lines_group(
3.2,
pure_colors,
pure_colors,
[BLACK, BLACK, WHITE],
6,
)
self.add(color_groups, other_lines, gray_lines, pure_lines)
VGroup(*self.mobjects).move_to(ORIGIN)
The preferred way of using these colors is by importing their constants from manim:
.. code-block:: pycon
>>> from manim import RED, GREEN, BLUE
>>> RED
'#FC6255'
Note this way uses the name of the colors in UPPERCASE.
Alternatively, you can also import this Enum directly and use its members
directly, through the use of :code:`color.value`. Note this way uses the
name of the colors in lowercase.
.. code-block:: pycon
>>> from manim.utils.color import Colors
>>> Colors.red.value
'#FC6255'
.. note::
The colors of type "C" have an alias equal to the colorname without a letter,
e.g. GREEN = GREEN_C
"""
white = "#FFFFFF"
gray_a = "#DDDDDD"
gray_b = "#BBBBBB"
gray_c = "#888888"
gray_d = "#444444"
gray_e = "#222222"
black = "#000000"
lighter_gray = gray_a
light_gray = gray_b
gray = gray_c
dark_gray = gray_d
darker_gray = gray_e
blue_a = "#C7E9F1"
blue_b = "#9CDCEB"
blue_c = "#58C4DD"
blue_d = "#29ABCA"
blue_e = "#236B8E"
pure_blue = "#0000FF"
blue = blue_c
teal_a = "#ACEAD7"
teal_b = "#76DDC0"
teal_c = "#5CD0B3"
teal_d = "#55C1A7"
teal_e = "#49A88F"
teal = teal_c
green_a = "#C9E2AE"
green_b = "#A6CF8C"
green_c = "#83C167"
green_d = "#77B05D"
green_e = "#699C52"
pure_green = "#00FF00"
green = green_c
yellow_a = "#FFF1B6"
yellow_b = "#FFEA94"
yellow_c = "#FFFF00"
yellow_d = "#F4D345"
yellow_e = "#E8C11C"
yellow = yellow_c
gold_a = "#F7C797"
gold_b = "#F9B775"
gold_c = "#F0AC5F"
gold_d = "#E1A158"
gold_e = "#C78D46"
gold = gold_c
red_a = "#F7A1A3"
red_b = "#FF8080"
red_c = "#FC6255"
red_d = "#E65A4C"
red_e = "#CF5044"
pure_red = "#FF0000"
red = red_c
maroon_a = "#ECABC1"
maroon_b = "#EC92AB"
maroon_c = "#C55F73"
maroon_d = "#A24D61"
maroon_e = "#94424F"
maroon = maroon_c
purple_a = "#CAA3E8"
purple_b = "#B189C6"
purple_c = "#9A72AC"
purple_d = "#715582"
purple_e = "#644172"
purple = purple_c
pink = "#D147BD"
light_pink = "#DC75CD"
orange = "#FF862F"
light_brown = "#CD853F"
dark_brown = "#8B4513"
gray_brown = "#736357"
# Create constants from Colors enum
constants_names = []
for name, value in Colors.__members__.items():
name = name.upper()
value = value.value
constants_names.append(name)
locals()[name] = value
if "GRAY" in name:
name = name.replace("GRAY", "GREY")
locals()[name] = value
constants_names.append(name)
# Add constants to module exports. Simply adding constants_names would work fine, but
# would make it hard for IDEs to understand that colors are exported. Therefore the
# result of the following print statement is added instead.
# print(constants_names)
__all__ += [ # noqa: F822 # used to stop flake8 from complaining about undefined vars
"WHITE",
"GRAY_A",
"GREY_A",
"GRAY_B",
"GREY_B",
"GRAY_C",
"GREY_C",
"GRAY_D",
"GREY_D",
"GRAY_E",
"GREY_E",
"BLACK",
"LIGHTER_GRAY",
"LIGHTER_GREY",
"LIGHT_GRAY",
"LIGHT_GREY",
"GRAY",
"GREY",
"DARK_GRAY",
"DARK_GREY",
"DARKER_GRAY",
"DARKER_GREY",
"BLUE_A",
"BLUE_B",
"BLUE_C",
"BLUE_D",
"BLUE_E",
"PURE_BLUE",
"BLUE",
"TEAL_A",
"TEAL_B",
"TEAL_C",
"TEAL_D",
"TEAL_E",
"TEAL",
"GREEN_A",
"GREEN_B",
"GREEN_C",
"GREEN_D",
"GREEN_E",
"PURE_GREEN",
"GREEN",
"YELLOW_A",
"YELLOW_B",
"YELLOW_C",
"YELLOW_E",
"YELLOW_D",
"YELLOW",
"GOLD_A",
"GOLD_B",
"GOLD_C",
"GOLD_D",
"GOLD_E",
"GOLD",
"RED_A",
"RED_B",
"RED_C",
"RED_D",
"RED_E",
"PURE_RED",
"RED",
"MAROON_A",
"MAROON_B",
"MAROON_C",
"MAROON_D",
"MAROON_E",
"MAROON",
"PURPLE_A",
"PURPLE_B",
"PURPLE_C",
"PURPLE_D",
"PURPLE_E",
"PURPLE",
"PINK",
"LIGHT_PINK",
"ORANGE",
"LIGHT_BROWN",
"DARK_BROWN",
"GRAY_BROWN",
"GREY_BROWN",
]
def color_to_rgb(color: Union[Color, str]) -> np.ndarray:
if isinstance(color, str):
return hex_to_rgb(color)
elif isinstance(color, Color):
return np.array(color.get_rgb())
else:
raise ValueError("Invalid color type")
def color_to_rgba(color: Union[Color, str], alpha: float = 1) -> np.ndarray:
return np.array([*color_to_rgb(color), alpha])
def rgb_to_color(rgb: Iterable[float]) -> Color:
return Color(rgb=rgb)
def rgba_to_color(rgba: Iterable[float]) -> Color:
return rgb_to_color(rgba[:3])
def rgb_to_hex(rgb: Iterable[float]) -> str:
return "#" + "".join("%02x" % int(255 * x) for x in rgb)
def hex_to_rgb(hex_code: str) -> np.ndarray:
hex_part = hex_code[1:]
if len(hex_part) == 3:
hex_part = "".join([2 * c for c in hex_part])
return np.array([int(hex_part[i : i + 2], 16) / 255 for i in range(0, 6, 2)])
def invert_color(color: Color) -> Color:
return rgb_to_color(1.0 - color_to_rgb(color))
def color_to_int_rgb(color: Color) -> np.ndarray:
return (255 * color_to_rgb(color)).astype("uint8")
def color_to_int_rgba(color: Color, opacity: float = 1.0) -> np.ndarray:
alpha = int(255 * opacity)
return np.append(color_to_int_rgb(color), alpha)
def color_gradient(
reference_colors: Iterable[Color], length_of_output: int
) -> List[Color]:
if length_of_output == 0:
return reference_colors[0]
rgbs = list(map(color_to_rgb, reference_colors))
alphas = np.linspace(0, (len(rgbs) - 1), length_of_output)
floors = alphas.astype("int")
alphas_mod1 = alphas % 1
# End edge case
alphas_mod1[-1] = 1
floors[-1] = len(rgbs) - 2
return [
rgb_to_color(interpolate(rgbs[i], rgbs[i + 1], alpha))
for i, alpha in zip(floors, alphas_mod1)
]
def interpolate_color(color1: Color, color2: Color, alpha: float) -> Color:
rgb = interpolate(color_to_rgb(color1), color_to_rgb(color2), alpha)
return rgb_to_color(rgb)
def average_color(*colors: Color) -> Color:
rgbs = np.array(list(map(color_to_rgb, colors)))
mean_rgb = np.apply_along_axis(np.mean, 0, rgbs)
return rgb_to_color(mean_rgb)
def random_bright_color() -> Color:
color = random_color()
curr_rgb = color_to_rgb(color)
new_rgb = interpolate(curr_rgb, np.ones(len(curr_rgb)), 0.5)
return Color(rgb=new_rgb)
def random_color() -> Color:
return random.choice([c.value for c in list(Colors)])
def get_shaded_rgb(
rgb: np.ndarray,
point: np.ndarray,
unit_normal_vect: np.ndarray,
light_source: np.ndarray,
) -> np.ndarray:
to_sun = normalize(light_source - point)
factor = 0.5 * np.dot(unit_normal_vect, to_sun) ** 3
if factor < 0:
factor *= 0.5
    result = rgb + factor
    # clip the array we actually return (in place), keeping components within [0, 1]
    clip_in_place(result, 0, 1)
    return result
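# Editor's illustrative sketch (not part of the manim source): a quick round trip through
# the conversion helpers above, using only names defined in this module.
def _color_demo():
    print(hex_to_rgb("#FC6255"))                          # RED as floats in [0, 1]
    print(rgb_to_hex(color_to_rgb(RED)))                  # back to '#fc6255'
    print([rgb_to_hex(color_to_rgb(c)) for c in color_gradient([RED, BLUE], 3)])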
| 25.777056
| 96
| 0.522882
|
d81ef189225a2b4c6ac29b0b4b10a9863dd869d4
| 2,833
|
py
|
Python
|
lex_parser.py
|
hanshiyi/POSBERT
|
4d290945371b0043b3370459b9436364311320f8
|
[
"MIT"
] | 3
|
2019-12-24T20:06:17.000Z
|
2020-01-15T19:13:29.000Z
|
lex_parser.py
|
hanshiyi/POSBERT
|
4d290945371b0043b3370459b9436364311320f8
|
[
"MIT"
] | null | null | null |
lex_parser.py
|
hanshiyi/POSBERT
|
4d290945371b0043b3370459b9436364311320f8
|
[
"MIT"
] | null | null | null |
from nltk.parse.stanford import StanfordParser
# parser = StanfordParser()
from nltk.parse.corenlp import CoreNLPParser
from bert_tokenization import BasicTokenizer
# If you want to parse the id
# open terminal and
# cd POSBERT/
# wget http://nlp.stanford.edu/software/stanford-corenlp-full-2018-10-05.zip
# unzip stanford-corenlp-full-2018-10-05.zip
# cd stanford-corenlp-full-2018-10-05
# java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -preload tokenize,ssplit,pos,lemma,ner,parse,depparse -status_port 9000 -port 9000 -timeout 15000 &
# Then run this script
from typing import Union
class Lex_parser:
def __init__(self, tag_id_initialized=False, tag_id=None, uncased=True):
self.uncased=uncased
self.tag_id_initialized = tag_id_initialized
if tag_id_initialized:
self.tag_to_id = tag_id
else:
self.tag_to_id = {"CLSSEP": 0, "UNKNOWN": 1}
self.parser = CoreNLPParser(url='http://localhost:9000', tagtype='pos')
self.basic_tokenizer = BasicTokenizer()
def tokenize(self, sentence):
return list(self.parser.tokenize(sentence))
def convert_sentence_to_tags(self, sentence: Union[str, list]):
if type(sentence) == str:
if self.uncased:
sentence = sentence.lower()
else:
sentence = " ".join(sentence)
if self.uncased:
sentence = sentence.lower()
sentence = self.basic_tokenizer.tokenize(sentence)
# print("sentence here,", sentence)
sentence = list(map(lambda x: x.upper() if x == 'i' else x, sentence))
tags = self.parser.tag(sentence)
# print("sentence here,", sentence)
# print("tags here", tags)
# exit(-2)
if not self.tag_id_initialized:
for tag in tags:
if tag[1] not in self.tag_to_id:
self.tag_to_id[tag[1]] = len(self.tag_to_id)
return tags
def convert_tags_to_ids(self, tags):
res = list(map(lambda x: self.tag_to_id[x[1]], tags))
# print("to ids ==")
# print(len(tags), tags)
# print(len(res), res)
return res
def convert_sentence_to_ids(self, sentence: Union[str, list]):
if not self.parser:
self.parser = CoreNLPParser(url='http://localhost:9000', tagtype='pos')
tags = self.convert_sentence_to_tags(sentence)
ids = self.convert_tags_to_ids(tags)
print(type(sentence), len(sentence), len(tags), len(ids))
return list(ids)
if __name__ == "__main__":
lex_parser = Lex_parser()
    print(list(lex_parser.convert_sentence_to_tags("The price of car is N, which is unaffordable. <eos>")))
print(list(lex_parser.convert_sentence_to_ids("The price of car is N, which is unaffordable.")))
| 35.4125
| 168
| 0.646664
|
ee0d15d52aec93278d38c8a20d55f26676463221
| 1,708
|
py
|
Python
|
Multitask-CNN-RNN/torchsampler/imbalanced_VA.py
|
wtomin/Multitask-Emotion-Recognition-with-Incomplete-Labels
|
e6df7ffc9b0318fdce405e40993c79785b47c785
|
[
"MIT"
] | 74
|
2020-03-08T15:29:00.000Z
|
2022-03-05T14:57:33.000Z
|
Multitask-CNN/torchsampler/imbalanced_VA.py
|
HKUST-NISL/Multitask-Emotion-Recognition-with-Incomplete-Labels
|
ac5152b7a4b9c6c54d13c06c9302270350e8ce3f
|
[
"MIT"
] | 19
|
2020-03-06T08:56:51.000Z
|
2022-03-27T05:07:35.000Z
|
Multitask-CNN/torchsampler/imbalanced_VA.py
|
wtomin/Multitask-Emotion-Recognition-with-Incomplete-Labels
|
e6df7ffc9b0318fdce405e40993c79785b47c785
|
[
"MIT"
] | 23
|
2020-03-20T08:19:55.000Z
|
2022-03-16T17:40:09.000Z
|
import torch
import torch.utils.data
import torchvision
from tqdm import tqdm
import numpy as np
import random
class ImbalancedDatasetSampler_VA(torch.utils.data.sampler.Sampler):
"""Samples elements randomly from a given list of indices for imbalanced dataset
Arguments:
indices (list, optional): a list of indices
num_samples (int, optional): number of samples to draw
callback_get_label func: a callback-like function which takes two arguments - dataset and index
"""
def __init__(self, dataset, indices=None, num_samples=None):
# if indices is not provided,
# all elements in the dataset will be considered
self.indices = list(range(len(dataset))) \
if indices is None else indices
# if num_samples is not provided,
# draw `len(indices)` samples in each iteration
self.num_samples = len(self.indices) \
if num_samples is None else num_samples
all_labels = dataset._get_all_label()
N, C = all_labels.shape
assert C == 2
hist, x_edges, y_edges = np.histogram2d(all_labels[:, 0], all_labels[:, 1], bins=[20, 20])
x_bin_id = np.digitize( all_labels[:, 0], bins = x_edges) - 1
y_bin_id = np.digitize( all_labels[:, 1], bins = y_edges) - 1
        # for values on or beyond the last edge, np.digitize returns len(edges); after the -1 shift that index is 20, so clamp it to the last valid bin (19)
x_bin_id[x_bin_id==20] = 20-1
y_bin_id[y_bin_id==20] = 20-1
weights = []
for x, y in zip(x_bin_id, y_bin_id):
assert hist[x, y]!=0
weights += [1 / hist[x, y]]
self.weights = torch.DoubleTensor(weights)
def __iter__(self):
return (self.indices[i] for i in torch.multinomial(
self.weights, self.num_samples, replacement=True))
def __len__(self):
return self.num_samples
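# Editor's illustrative sketch (not part of the original module): the inverse-frequency
# weighting idea used above, in plain NumPy on made-up valence/arousal labels, so that
# rare (valence, arousal) cells receive proportionally larger sampling weights.
def _va_weight_demo(n=1000, bins=20):
    labels = np.random.uniform(-1, 1, size=(n, 2))
    hist, x_edges, y_edges = np.histogram2d(labels[:, 0], labels[:, 1], bins=[bins, bins])
    x_bin = np.clip(np.digitize(labels[:, 0], bins=x_edges) - 1, 0, bins - 1)
    y_bin = np.clip(np.digitize(labels[:, 1], bins=y_edges) - 1, 0, bins - 1)
    return 1.0 / hist[x_bin, y_bin]   # one weight per sample, larger for sparse cells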
| 33.490196
| 115
| 0.719555
|
00ce4c4cc328216664fa63037689900cd2646d55
| 9,328
|
py
|
Python
|
tensorflow_probability/python/bijectors/gev_cdf.py
|
gisilvs/probability
|
fd8be3ca1243f956578bf1b1280f9d3ed13541f0
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/bijectors/gev_cdf.py
|
gisilvs/probability
|
fd8be3ca1243f956578bf1b1280f9d3ed13541f0
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/bijectors/gev_cdf.py
|
gisilvs/probability
|
fd8be3ca1243f956578bf1b1280f9d3ed13541f0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""GeneralizedExtremeValue bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector
from tensorflow_probability.python.bijectors import softplus as softplus_bijector
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import tensor_util
__all__ = [
'GeneralizedExtremeValueCDF',
]
@bijector.auto_composite_tensor_bijector
class GeneralizedExtremeValueCDF(bijector.AutoCompositeTensorBijector):
"""Compute the GeneralizedExtremeValue CDF.
Compute `Y = g(X) = exp(-t(X))`,
where `t(x)` is defined to be:
  * `(1 + conc * (x - loc) / scale) ** (-1 / conc)` when `conc != 0`;
  * `exp(-(x - loc) / scale)` when `conc = 0`.
This bijector maps inputs from the domain to `[0, 1]`, where the domain is
* [loc - scale/conc, inf) when conc > 0;
* (-inf, loc - scale/conc] when conc < 0;
* (-inf, inf) when conc = 0;
The inverse of the bijector applied to a uniform random variable
`X ~ U(0, 1)` gives back a random variable with the
[Generalized extreme value distribution](
  https://en.wikipedia.org/wiki/Generalized_extreme_value_distribution):
When `concentration -> +-inf`, the probability mass concentrates near `loc`.
```none
Y ~ GeneralizedExtremeValueCDF(loc, scale, conc)
pdf(y; loc, scale, conc) = t(y; loc, scale, conc) ** (1 + conc) * exp(
- t(y; loc, scale, conc) ) / scale
where t(x) =
  * (1 + conc * (x - loc) / scale) ** (-1 / conc) when conc != 0;
* exp(-(x - loc) / scale) when conc = 0.
```
"""
def __init__(self,
loc=0.,
scale=1.,
concentration=0,
validate_args=False,
name='generalizedextremevalue_cdf'):
"""Instantiates the `GeneralizedExtremeValueCDF` bijector.
Args:
loc: Float-like `Tensor` that is the same dtype and is broadcastable with
`scale` and `concentration`.
scale: Positive Float-like `Tensor` that is the same dtype and is
broadcastable with `loc` and `concentration`.
concentration: Nonzero float-like `Tensor` that is the same dtype and is
broadcastable with `loc` and `scale`.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([loc, scale, concentration],
dtype_hint=tf.float32)
self._loc = tensor_util.convert_nonref_to_tensor(
loc, dtype=dtype, name='loc')
self._scale = tensor_util.convert_nonref_to_tensor(
scale, dtype=dtype, name='scale')
self._concentration = tensor_util.convert_nonref_to_tensor(
concentration, dtype=dtype, name='concentration')
super(GeneralizedExtremeValueCDF, self).__init__(
validate_args=validate_args,
forward_min_event_ndims=0,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype):
return dict(
loc=parameter_properties.ParameterProperties(),
scale=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))),
concentration=parameter_properties.ParameterProperties(
default_constraining_bijector_fn=(
lambda: softplus_bijector.Softplus(low=dtype_util.eps(dtype)))))
@property
def loc(self):
"""The location parameter in the Generalized Extreme Value CDF."""
return self._loc
@property
def scale(self):
"""The scale parameter in the Generalized Extreme Value CDF."""
return self._scale
@property
def concentration(self):
"""The concentration parameter in the Generalized Extreme Value CDF."""
return self._concentration
@classmethod
def _is_increasing(cls):
return True
def _forward(self, x):
loc = tf.convert_to_tensor(self.loc)
scale = tf.convert_to_tensor(self.scale)
conc = tf.convert_to_tensor(self.concentration)
with tf.control_dependencies(
self._maybe_assert_valid_x(
x, loc=loc, scale=scale, concentration=conc)):
z = (x - loc) / scale
equal_zero = tf.equal(conc, 0.)
      # Deal with the case where the gradient is undefined when conc = 0.
safe_conc = tf.where(equal_zero, tf.ones_like(conc), conc)
t = tf.where(
equal_zero, tf.math.exp(-z),
tf.math.exp(-tf.math.log1p(z * safe_conc) / safe_conc))
return tf.exp(-t)
def _inverse(self, y):
with tf.control_dependencies(self._maybe_assert_valid_y(y)):
t = -tf.math.log(y)
conc = tf.convert_to_tensor(self.concentration)
equal_zero = tf.equal(conc, 0.)
      # Deal with the case where the gradient is undefined when conc = 0.
safe_conc = tf.where(equal_zero, tf.ones_like(conc), conc)
z = tf.where(
equal_zero, -tf.math.log(t),
tf.math.expm1(-tf.math.log(t) * safe_conc) / safe_conc)
return self.loc + self.scale * z
def _forward_log_det_jacobian(self, x):
loc = tf.convert_to_tensor(self.loc)
scale = tf.convert_to_tensor(self.scale)
conc = tf.convert_to_tensor(self.concentration)
with tf.control_dependencies(
self._maybe_assert_valid_x(
x, loc=loc, scale=scale, concentration=conc)):
z = (x - loc) / scale
equal_zero = tf.equal(conc, 0.)
      # Deal with the case where the gradient is undefined when conc = 0.
safe_conc = tf.where(equal_zero, tf.ones_like(conc), conc)
log_t = tf.where(
equal_zero, -z,
-tf.math.log1p(z * safe_conc) / safe_conc)
return (tf.math.multiply_no_nan(conc + 1., log_t) -
tf.math.exp(log_t) - tf.math.log(scale))
def _inverse_log_det_jacobian(self, y):
with tf.control_dependencies(self._maybe_assert_valid_y(y)):
t = -tf.math.log(y)
log_dt = tf.math.xlogy(-self.concentration - 1., t)
return tf.math.log(self.scale / y) + log_dt
def _maybe_assert_valid_x(self, x, loc=None, scale=None, concentration=None):
if not self.validate_args:
return []
loc = tf.convert_to_tensor(self.loc) if loc is None else loc
scale = tf.convert_to_tensor(self.scale) if scale is None else scale
concentration = (
tf.convert_to_tensor(self.concentration) if concentration is None else
concentration)
# We intentionally compute the boundary with (1.0 / concentration) * scale
# instead of just scale / concentration.
# Why? The sampler returns loc + (foo / concentration) * scale,
# and at high-ish values of concentration, foo has a decent
# probability of being numerically exactly -1. We therefore mimic
# the pattern of round-off that occurs in the sampler to make sure
# that samples emitted from this distribution will pass its own
# validations. This is sometimes necessary: in TF's float32,
# 0.69314826 / 37.50019 < (1.0 / 37.50019) * 0.69314826
boundary = loc - (1.0 / concentration) * scale
# The support of this bijector depends on the sign of concentration.
is_in_bounds = tf.where(concentration > 0., x >= boundary, x <= boundary)
# For concentration 0, the domain is the whole line.
is_in_bounds = is_in_bounds | tf.math.equal(concentration, 0.)
return [
assert_util.assert_equal(
is_in_bounds,
True,
message='Forward transformation input must be inside domain.')
]
def _maybe_assert_valid_y(self, y):
if not self.validate_args:
return []
is_positive = assert_util.assert_non_negative(
y, message='Inverse transformation input must be greater than 0.')
less_than_one = assert_util.assert_less_equal(
y,
tf.constant(1., y.dtype),
message='Inverse transformation input must be less than or equal to 1.')
return [is_positive, less_than_one]
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
assertions = []
if is_init != tensor_util.is_ref(self.scale):
assertions.append(
assert_util.assert_positive(
self.scale, message='Argument `scale` must be positive.'))
return assertions
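# A minimal usage sketch (not part of the library): the parameter values below are
# illustrative only. It maps a batch of points into [0, 1] and inverts the mapping.
if __name__ == '__main__':
  gev_cdf = GeneralizedExtremeValueCDF(loc=0., scale=1., concentration=0.5)
  x = tf.constant([0.5, 1.0, 2.0])
  y = gev_cdf.forward(x)       # values in [0, 1]
  x_back = gev_cdf.inverse(y)  # recovers x up to numerical error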
| 38.386831
| 81
| 0.671741
|
5e073be4f36a531e88a4d01b213cc1e5cb4fe8d6
| 2,347
|
py
|
Python
|
gpx_csv_converter/__init__.py
|
oth-aw/gpx-csv-converter
|
b340eb7f4837ae277481b0f1b17f2190545a813a
|
[
"MIT"
] | null | null | null |
gpx_csv_converter/__init__.py
|
oth-aw/gpx-csv-converter
|
b340eb7f4837ae277481b0f1b17f2190545a813a
|
[
"MIT"
] | null | null | null |
gpx_csv_converter/__init__.py
|
oth-aw/gpx-csv-converter
|
b340eb7f4837ae277481b0f1b17f2190545a813a
|
[
"MIT"
] | null | null | null |
from xml.dom import minidom
import calendar
import dateutil.parser
import pandas as pd
test = "gpx_csv_converter"
def iso_to_epoch(iso_time):
return calendar.timegm(dateutil.parser.parse(iso_time).timetuple())
class Converter:
def __init__(self, string, name):
if name[-4:] != '.csv':
name = name + '.csv'
        # parse the GPX XML from the provided string
mydoc = minidom.parseString(string)
trkpt = mydoc.getElementsByTagName('trkpt')
time = mydoc.getElementsByTagName('time')
ele = mydoc.getElementsByTagName('ele')
#hr = mydoc.getElementsByTagName('gpxtpx:hr')
hr = mydoc.getElementsByTagName('ns3:hr')
cad = mydoc.getElementsByTagName('ns3:cad')
lats = []
longs = []
times = []
eles = []
hrs = []
dates = []
parsed_times = []
cads = []
for elem in trkpt:
lats.append(elem.attributes['lat'].value)
longs.append(elem.attributes['lon'].value)
for elem in time:
times.append(elem.firstChild.data)
for elem in hr:
hrs.append(elem.firstChild.data)
times.pop(0)
base_time = iso_to_epoch(times[0])
time_differences = []
for item in times:
time_differences.append(iso_to_epoch(item) - base_time)
date_obj = (dateutil.parser.parse(item))
dates.append(str(date_obj.year) + "-" + str(date_obj.month) + "-" + str(date_obj.day))
parsed_times.append(str(date_obj.hour) + ":" + str(date_obj.minute) + ":" + str(date_obj.second))
for elem in ele:
eles.append(elem.firstChild.data)
for elem in cad:
cads.append(elem.firstChild.data)
hrs.append(0)
data = {'date': dates,
'time': parsed_times,
'latitude': lats,
'longitude': longs,
'elevation': eles,
'heart_rate': hrs,
'cadence': cads}
print(len(dates), len(parsed_times), len(lats), len(longs), len(eles), len(hrs), len(cads))
df = pd.DataFrame(data=data)
df = df[['date', 'time', 'latitude', 'longitude', 'elevation', 'heart_rate', 'cadence']]
df.to_csv(name, encoding='utf-8', index=False)
print("Done!")
| 27.290698
| 109
| 0.559864
|
e8555181c464dc40c908dbe2e39c325747e6b04a
| 30,770
|
py
|
Python
|
test/unit/test_objectstore.py
|
Tomasz69/galaxy
|
ee7e632ed677bf7d8a8de57745fce6ddc373dc3e
|
[
"CC-BY-3.0"
] | 1
|
2021-02-28T18:59:16.000Z
|
2021-02-28T18:59:16.000Z
|
test/unit/test_objectstore.py
|
Tomasz69/galaxy
|
ee7e632ed677bf7d8a8de57745fce6ddc373dc3e
|
[
"CC-BY-3.0"
] | 12
|
2020-07-24T23:55:19.000Z
|
2021-12-19T11:40:06.000Z
|
test/unit/test_objectstore.py
|
Tomasz69/galaxy
|
ee7e632ed677bf7d8a8de57745fce6ddc373dc3e
|
[
"CC-BY-3.0"
] | 1
|
2021-09-02T03:44:15.000Z
|
2021-09-02T03:44:15.000Z
|
import os
from contextlib import contextmanager
from shutil import rmtree
from string import Template
from tempfile import mkdtemp
from uuid import uuid4
import yaml
from six import StringIO
from galaxy import objectstore
from galaxy.exceptions import ObjectInvalid
from galaxy.objectstore.azure_blob import AzureBlobObjectStore
from galaxy.objectstore.cloud import Cloud
from galaxy.objectstore.pithos import PithosObjectStore
from galaxy.objectstore.s3 import S3ObjectStore
from galaxy.util import (
directory_hash_id,
XML,
)
DISK_TEST_CONFIG = """<?xml version="1.0"?>
<object_store type="disk">
<files_dir path="${temp_directory}/files1"/>
<extra_dir type="temp" path="${temp_directory}/tmp1"/>
<extra_dir type="job_work" path="${temp_directory}/job_working_directory1"/>
</object_store>
"""
DISK_TEST_CONFIG_YAML = """
type: disk
files_dir: "${temp_directory}/files1"
extra_dirs:
- type: temp
path: "${temp_directory}/tmp1"
- type: job_work
path: "${temp_directory}/job_working_directory1"
"""
def test_disk_store():
for config_str in [DISK_TEST_CONFIG, DISK_TEST_CONFIG_YAML]:
with TestConfig(config_str) as (directory, object_store):
# Test no dataset with id 1 exists.
absent_dataset = MockDataset(1)
assert not object_store.exists(absent_dataset)
            # Write empty dataset 2, ensure it is empty and
            # exists.
empty_dataset = MockDataset(2)
directory.write("", "files1/000/dataset_2.dat")
assert object_store.exists(empty_dataset)
assert object_store.empty(empty_dataset)
            # Write non-empty dataset 3, test it is not empty & exists.
hello_world_dataset = MockDataset(3)
directory.write("Hello World!", "files1/000/dataset_3.dat")
assert object_store.exists(hello_world_dataset)
assert not object_store.empty(hello_world_dataset)
# Test get_data
data = object_store.get_data(hello_world_dataset)
assert data == "Hello World!"
data = object_store.get_data(hello_world_dataset, start=1, count=6)
assert data == "ello W"
# Test Size
# Test absent and empty datasets yield size of 0.
assert object_store.size(absent_dataset) == 0
assert object_store.size(empty_dataset) == 0
            # Otherwise
assert object_store.size(hello_world_dataset) > 0 # Should this always be the number of bytes?
# Test percent used (to some degree)
percent_store_used = object_store.get_store_usage_percent()
assert percent_store_used > 0.0
assert percent_store_used < 100.0
# Test update_from_file test
output_dataset = MockDataset(4)
output_real_path = os.path.join(directory.temp_directory, "files1", "000", "dataset_4.dat")
assert not os.path.exists(output_real_path)
output_working_path = directory.write("NEW CONTENTS", "job_working_directory1/example_output")
object_store.update_from_file(output_dataset, file_name=output_working_path, create=True)
assert os.path.exists(output_real_path)
# Test delete
to_delete_dataset = MockDataset(5)
to_delete_real_path = directory.write("content to be deleted!", "files1/000/dataset_5.dat")
assert object_store.exists(to_delete_dataset)
assert object_store.delete(to_delete_dataset)
assert not object_store.exists(to_delete_dataset)
assert not os.path.exists(to_delete_real_path)
DISK_TEST_CONFIG_BY_UUID_YAML = """
type: disk
files_dir: "${temp_directory}/files1"
store_by: uuid
extra_dirs:
- type: temp
path: "${temp_directory}/tmp1"
- type: job_work
path: "${temp_directory}/job_working_directory1"
"""
def test_disk_store_by_uuid():
for config_str in [DISK_TEST_CONFIG_BY_UUID_YAML]:
with TestConfig(config_str) as (directory, object_store):
# Test no dataset with id 1 exists.
absent_dataset = MockDataset(1)
assert not object_store.exists(absent_dataset)
            # Write empty dataset 2, ensure it is empty and
            # exists.
empty_dataset = MockDataset(2)
directory.write("", "files1/%s/dataset_%s.dat" % (empty_dataset.rel_path_for_uuid_test(), empty_dataset.uuid))
assert object_store.exists(empty_dataset)
assert object_store.empty(empty_dataset)
            # Write non-empty dataset 3, test it is not empty & exists.
hello_world_dataset = MockDataset(3)
directory.write("Hello World!", "files1/%s/dataset_%s.dat" % (hello_world_dataset.rel_path_for_uuid_test(), hello_world_dataset.uuid))
assert object_store.exists(hello_world_dataset)
assert not object_store.empty(hello_world_dataset)
# Test get_data
data = object_store.get_data(hello_world_dataset)
assert data == "Hello World!"
data = object_store.get_data(hello_world_dataset, start=1, count=6)
assert data == "ello W"
# Test Size
# Test absent and empty datasets yield size of 0.
assert object_store.size(absent_dataset) == 0
assert object_store.size(empty_dataset) == 0
            # Otherwise
assert object_store.size(hello_world_dataset) > 0 # Should this always be the number of bytes?
# Test percent used (to some degree)
percent_store_used = object_store.get_store_usage_percent()
assert percent_store_used > 0.0
assert percent_store_used < 100.0
# Test update_from_file test
output_dataset = MockDataset(4)
output_real_path = os.path.join(directory.temp_directory, "files1", output_dataset.rel_path_for_uuid_test(), "dataset_%s.dat" % output_dataset.uuid)
assert not os.path.exists(output_real_path)
output_working_path = directory.write("NEW CONTENTS", "job_working_directory1/example_output")
object_store.update_from_file(output_dataset, file_name=output_working_path, create=True)
assert os.path.exists(output_real_path)
# Test delete
to_delete_dataset = MockDataset(5)
to_delete_real_path = directory.write("content to be deleted!", "files1/%s/dataset_%s.dat" % (to_delete_dataset.rel_path_for_uuid_test(), to_delete_dataset.uuid))
assert object_store.exists(to_delete_dataset)
assert object_store.delete(to_delete_dataset)
assert not object_store.exists(to_delete_dataset)
assert not os.path.exists(to_delete_real_path)
def test_disk_store_alt_name_relpath():
""" Test that alt_name cannot be used to access arbitrary paths using a
relative path
"""
with TestConfig(DISK_TEST_CONFIG) as (directory, object_store):
empty_dataset = MockDataset(1)
directory.write("", "files1/000/dataset_1.dat")
directory.write("foo", "foo.txt")
try:
assert object_store.get_data(
empty_dataset,
extra_dir='dataset_1_files',
alt_name='../../../foo.txt') != 'foo'
except ObjectInvalid:
pass
def test_disk_store_alt_name_abspath():
""" Test that alt_name cannot be used to access arbitrary paths using a
absolute path
"""
with TestConfig(DISK_TEST_CONFIG) as (directory, object_store):
empty_dataset = MockDataset(1)
directory.write("", "files1/000/dataset_1.dat")
absfoo = os.path.abspath(os.path.join(directory.temp_directory, "foo.txt"))
with open(absfoo, 'w') as f:
f.write("foo")
try:
assert object_store.get_data(
empty_dataset,
extra_dir='dataset_1_files',
alt_name=absfoo) != 'foo'
except ObjectInvalid:
pass
MIXED_STORE_BY_HIERARCHICAL_TEST_CONFIG = """<?xml version="1.0"?>
<object_store type="hierarchical">
<backends>
<backend id="files1" type="disk" weight="1" order="0" store_by="id">
<files_dir path="${temp_directory}/files1"/>
<extra_dir type="temp" path="${temp_directory}/tmp1"/>
<extra_dir type="job_work" path="${temp_directory}/job_working_directory1"/>
</backend>
<backend id="files2" type="disk" weight="1" order="1" store_by="uuid">
<files_dir path="${temp_directory}/files2"/>
<extra_dir type="temp" path="${temp_directory}/tmp2"/>
<extra_dir type="job_work" path="${temp_directory}/job_working_directory2"/>
</backend>
</backends>
</object_store>
"""
HIERARCHICAL_TEST_CONFIG = """<?xml version="1.0"?>
<object_store type="hierarchical">
<backends>
<backend id="files1" type="disk" weight="1" order="0">
<files_dir path="${temp_directory}/files1"/>
<extra_dir type="temp" path="${temp_directory}/tmp1"/>
<extra_dir type="job_work" path="${temp_directory}/job_working_directory1"/>
</backend>
<backend id="files2" type="disk" weight="1" order="1">
<files_dir path="${temp_directory}/files2"/>
<extra_dir type="temp" path="${temp_directory}/tmp2"/>
<extra_dir type="job_work" path="${temp_directory}/job_working_directory2"/>
</backend>
</backends>
</object_store>
"""
HIERARCHICAL_TEST_CONFIG_YAML = """
type: hierarchical
backends:
- id: files1
type: disk
weight: 1
files_dir: "${temp_directory}/files1"
extra_dirs:
- type: temp
path: "${temp_directory}/tmp1"
- type: job_work
path: "${temp_directory}/job_working_directory1"
- id: files2
type: disk
weight: 1
files_dir: "${temp_directory}/files2"
extra_dirs:
- type: temp
path: "${temp_directory}/tmp2"
- type: job_work
path: "${temp_directory}/job_working_directory2"
"""
def test_hierarchical_store():
for config_str in [HIERARCHICAL_TEST_CONFIG, HIERARCHICAL_TEST_CONFIG_YAML]:
with TestConfig(config_str) as (directory, object_store):
# Test no dataset with id 1 exists.
assert not object_store.exists(MockDataset(1))
# Write empty dataset 2 in second backend, ensure it is empty and
# exists.
directory.write("", "files2/000/dataset_2.dat")
assert object_store.exists(MockDataset(2))
assert object_store.empty(MockDataset(2))
            # Write non-empty dataset in backend 1, test it is not empty & exists.
directory.write("Hello World!", "files1/000/dataset_3.dat")
assert object_store.exists(MockDataset(3))
assert not object_store.empty(MockDataset(3))
# Assert creation always happens in first backend.
for i in range(100):
dataset = MockDataset(100 + i)
object_store.create(dataset)
assert object_store.get_filename(dataset).find("files1") > 0
as_dict = object_store.to_dict()
_assert_has_keys(as_dict, ["backends", "extra_dirs", "type"])
_assert_key_has_value(as_dict, "type", "hierarchical")
def test_mixed_store_by():
with TestConfig(MIXED_STORE_BY_HIERARCHICAL_TEST_CONFIG) as (directory, object_store):
as_dict = object_store.to_dict()
assert as_dict["backends"][0]["store_by"] == "id"
assert as_dict["backends"][1]["store_by"] == "uuid"
DISTRIBUTED_TEST_CONFIG = """<?xml version="1.0"?>
<object_store type="distributed">
<backends>
<backend id="files1" type="disk" weight="2">
<files_dir path="${temp_directory}/files1"/>
<extra_dir type="temp" path="${temp_directory}/tmp1"/>
<extra_dir type="job_work" path="${temp_directory}/job_working_directory1"/>
</backend>
<backend id="files2" type="disk" weight="1">
<files_dir path="${temp_directory}/files2"/>
<extra_dir type="temp" path="${temp_directory}/tmp2"/>
<extra_dir type="job_work" path="${temp_directory}/job_working_directory2"/>
</backend>
</backends>
</object_store>
"""
DISTRIBUTED_TEST_CONFIG_YAML = """
type: distributed
backends:
- id: files1
type: disk
weight: 2
files_dir: "${temp_directory}/files1"
extra_dirs:
- type: temp
path: "${temp_directory}/tmp1"
- type: job_work
path: "${temp_directory}/job_working_directory1"
- id: files2
type: disk
weight: 1
files_dir: "${temp_directory}/files2"
extra_dirs:
- type: temp
path: "${temp_directory}/tmp2"
- type: job_work
path: "${temp_directory}/job_working_directory2"
"""
def test_distributed_store():
for config_str in [DISTRIBUTED_TEST_CONFIG, DISTRIBUTED_TEST_CONFIG_YAML]:
with TestConfig(config_str) as (directory, object_store):
with __stubbed_persistence() as persisted_ids:
for i in range(100):
dataset = MockDataset(100 + i)
object_store.create(dataset)
# Test distributes datasets between backends according to weights
backend_1_count = len([v for v in persisted_ids.values() if v == "files1"])
backend_2_count = len([v for v in persisted_ids.values() if v == "files2"])
assert backend_1_count > 0
assert backend_2_count > 0
assert backend_1_count > backend_2_count
as_dict = object_store.to_dict()
_assert_has_keys(as_dict, ["backends", "extra_dirs", "type"])
_assert_key_has_value(as_dict, "type", "distributed")
extra_dirs = as_dict["extra_dirs"]
assert len(extra_dirs) == 2
# Unit testing the cloud and advanced infrastructure object stores is difficult, but
# we can at least stub out initializing and test the configuration of these things from
# XML and dicts.
class UnitializedPithosObjectStore(PithosObjectStore):
def _initialize(self):
pass
class UnitializeS3ObjectStore(S3ObjectStore):
def _initialize(self):
pass
class UnitializedAzureBlobObjectStore(AzureBlobObjectStore):
def _initialize(self):
pass
class UnitializedCloudObjectStore(Cloud):
def _initialize(self):
pass
PITHOS_TEST_CONFIG = """<?xml version="1.0"?>
<object_store type="pithos">
<auth url="http://example.org/" token="extoken123" />
<container name="foo" project="cow" />
<extra_dir type="temp" path="database/tmp_pithos"/>
<extra_dir type="job_work" path="database/working_pithos"/>
</object_store>
"""
PITHOS_TEST_CONFIG_YAML = """
type: pithos
auth:
url: http://example.org/
token: extoken123
container:
name: foo
project: cow
extra_dirs:
- type: temp
path: database/tmp_pithos
- type: job_work
path: database/working_pithos
"""
def test_config_parse_pithos():
for config_str in [PITHOS_TEST_CONFIG, PITHOS_TEST_CONFIG_YAML]:
with TestConfig(config_str, clazz=UnitializedPithosObjectStore) as (directory, object_store):
configured_config_dict = object_store.config_dict
_assert_has_keys(configured_config_dict, ["auth", "container", "extra_dirs"])
auth_dict = configured_config_dict["auth"]
_assert_key_has_value(auth_dict, "url", "http://example.org/")
_assert_key_has_value(auth_dict, "token", "extoken123")
container_dict = configured_config_dict["container"]
_assert_key_has_value(container_dict, "name", "foo")
_assert_key_has_value(container_dict, "project", "cow")
assert object_store.extra_dirs["job_work"] == "database/working_pithos"
assert object_store.extra_dirs["temp"] == "database/tmp_pithos"
as_dict = object_store.to_dict()
_assert_has_keys(as_dict, ["auth", "container", "extra_dirs", "type"])
_assert_key_has_value(as_dict, "type", "pithos")
auth_dict = as_dict["auth"]
_assert_key_has_value(auth_dict, "url", "http://example.org/")
_assert_key_has_value(auth_dict, "token", "extoken123")
container_dict = as_dict["container"]
_assert_key_has_value(container_dict, "name", "foo")
_assert_key_has_value(container_dict, "project", "cow")
extra_dirs = as_dict["extra_dirs"]
assert len(extra_dirs) == 2
S3_TEST_CONFIG = """<object_store type="s3">
<auth access_key="access_moo" secret_key="secret_cow" />
<bucket name="unique_bucket_name_all_lowercase" use_reduced_redundancy="False" />
<cache path="database/object_store_cache" size="1000" />
<extra_dir type="job_work" path="database/job_working_directory_s3"/>
<extra_dir type="temp" path="database/tmp_s3"/>
</object_store>
"""
S3_TEST_CONFIG_YAML = """
type: s3
auth:
access_key: access_moo
secret_key: secret_cow
bucket:
name: unique_bucket_name_all_lowercase
use_reduced_redundancy: false
cache:
path: database/object_store_cache
size: 1000
extra_dirs:
- type: job_work
path: database/job_working_directory_s3
- type: temp
path: database/tmp_s3
"""
def test_config_parse_s3():
for config_str in [S3_TEST_CONFIG, S3_TEST_CONFIG_YAML]:
with TestConfig(config_str, clazz=UnitializeS3ObjectStore) as (directory, object_store):
assert object_store.access_key == "access_moo"
assert object_store.secret_key == "secret_cow"
assert object_store.bucket == "unique_bucket_name_all_lowercase"
assert object_store.use_rr is False
assert object_store.host is None
assert object_store.port == 6000
assert object_store.multipart is True
assert object_store.is_secure is True
assert object_store.conn_path == "/"
assert object_store.cache_size == 1000
assert object_store.staging_path == "database/object_store_cache"
assert object_store.extra_dirs["job_work"] == "database/job_working_directory_s3"
assert object_store.extra_dirs["temp"] == "database/tmp_s3"
as_dict = object_store.to_dict()
_assert_has_keys(as_dict, ["auth", "bucket", "connection", "cache", "extra_dirs", "type"])
_assert_key_has_value(as_dict, "type", "s3")
auth_dict = as_dict["auth"]
bucket_dict = as_dict["bucket"]
connection_dict = as_dict["connection"]
cache_dict = as_dict["cache"]
_assert_key_has_value(auth_dict, "access_key", "access_moo")
_assert_key_has_value(auth_dict, "secret_key", "secret_cow")
_assert_key_has_value(bucket_dict, "name", "unique_bucket_name_all_lowercase")
_assert_key_has_value(bucket_dict, "use_reduced_redundancy", False)
_assert_key_has_value(connection_dict, "host", None)
_assert_key_has_value(connection_dict, "port", 6000)
_assert_key_has_value(connection_dict, "multipart", True)
_assert_key_has_value(connection_dict, "is_secure", True)
_assert_key_has_value(cache_dict, "size", 1000)
_assert_key_has_value(cache_dict, "path", "database/object_store_cache")
extra_dirs = as_dict["extra_dirs"]
assert len(extra_dirs) == 2
CLOUD_AWS_TEST_CONFIG = """<object_store type="cloud" provider="aws">
<auth access_key="access_moo" secret_key="secret_cow" />
<bucket name="unique_bucket_name_all_lowercase" use_reduced_redundancy="False" />
<cache path="database/object_store_cache" size="1000" />
<extra_dir type="job_work" path="database/job_working_directory_cloud"/>
<extra_dir type="temp" path="database/tmp_cloud"/>
</object_store>
"""
CLOUD_AWS_TEST_CONFIG_YAML = """
type: cloud
provider: aws
auth:
access_key: access_moo
secret_key: secret_cow
bucket:
name: unique_bucket_name_all_lowercase
use_reduced_redundancy: false
cache:
path: database/object_store_cache
size: 1000
extra_dirs:
- type: job_work
path: database/job_working_directory_cloud
- type: temp
path: database/tmp_cloud
"""
CLOUD_AZURE_TEST_CONFIG = """<object_store type="cloud" provider="azure">
<auth subscription_id="a_sub_id" client_id="and_a_client_id" secret="and_a_secret_key"
tenant="and_some_tenant_info" />
<bucket name="unique_bucket_name_all_lowercase" use_reduced_redundancy="False" />
<cache path="database/object_store_cache" size="1000" />
<extra_dir type="job_work" path="database/job_working_directory_cloud"/>
<extra_dir type="temp" path="database/tmp_cloud"/>
</object_store>
"""
CLOUD_AZURE_TEST_CONFIG_YAML = """
type: cloud
provider: azure
auth:
subscription_id: a_sub_id
client_id: and_a_client_id
secret: and_a_secret_key
tenant: and_some_tenant_info
bucket:
name: unique_bucket_name_all_lowercase
use_reduced_redundancy: false
cache:
path: database/object_store_cache
size: 1000
extra_dirs:
- type: job_work
path: database/job_working_directory_cloud
- type: temp
path: database/tmp_cloud
"""
CLOUD_GOOGLE_TEST_CONFIG = """<object_store type="cloud" provider="google">
<auth credentials_file="gcp.config" />
<bucket name="unique_bucket_name_all_lowercase" use_reduced_redundancy="False" />
<cache path="database/object_store_cache" size="1000" />
<extra_dir type="job_work" path="database/job_working_directory_cloud"/>
<extra_dir type="temp" path="database/tmp_cloud"/>
</object_store>
"""
CLOUD_GOOGLE_TEST_CONFIG_YAML = """
type: cloud
provider: google
auth:
credentials_file: gcp.config
bucket:
name: unique_bucket_name_all_lowercase
use_reduced_redundancy: false
cache:
path: database/object_store_cache
size: 1000
extra_dirs:
- type: job_work
path: database/job_working_directory_cloud
- type: temp
path: database/tmp_cloud
"""
def test_config_parse_cloud():
for config_str in [CLOUD_AWS_TEST_CONFIG, CLOUD_AWS_TEST_CONFIG_YAML,
CLOUD_AZURE_TEST_CONFIG, CLOUD_AZURE_TEST_CONFIG_YAML,
CLOUD_GOOGLE_TEST_CONFIG, CLOUD_GOOGLE_TEST_CONFIG_YAML]:
if "google" in config_str:
tmpdir = mkdtemp()
if not os.path.exists(tmpdir):
os.makedirs(tmpdir)
path = os.path.join(tmpdir, "gcp.config")
open(path, "w").write("some_gcp_config")
config_str = config_str.replace("gcp.config", path)
with TestConfig(config_str, clazz=UnitializedCloudObjectStore) as (directory, object_store):
assert object_store.bucket_name == "unique_bucket_name_all_lowercase"
assert object_store.use_rr is False
assert object_store.host is None
assert object_store.port == 6000
assert object_store.multipart is True
assert object_store.is_secure is True
assert object_store.conn_path == "/"
assert object_store.cache_size == 1000.0
assert object_store.staging_path == "database/object_store_cache"
assert object_store.extra_dirs["job_work"] == "database/job_working_directory_cloud"
assert object_store.extra_dirs["temp"] == "database/tmp_cloud"
as_dict = object_store.to_dict()
_assert_has_keys(as_dict, ["provider", "auth", "bucket", "connection", "cache", "extra_dirs", "type"])
_assert_key_has_value(as_dict, "type", "cloud")
auth_dict = as_dict["auth"]
bucket_dict = as_dict["bucket"]
connection_dict = as_dict["connection"]
cache_dict = as_dict["cache"]
provider = as_dict["provider"]
if provider == "aws":
_assert_key_has_value(auth_dict, "access_key", "access_moo")
_assert_key_has_value(auth_dict, "secret_key", "secret_cow")
elif provider == "azure":
_assert_key_has_value(auth_dict, "subscription_id", "a_sub_id")
_assert_key_has_value(auth_dict, "client_id", "and_a_client_id")
_assert_key_has_value(auth_dict, "secret", "and_a_secret_key")
_assert_key_has_value(auth_dict, "tenant", "and_some_tenant_info")
elif provider == "google":
_assert_key_has_value(auth_dict, "credentials_file", path)
_assert_key_has_value(bucket_dict, "name", "unique_bucket_name_all_lowercase")
_assert_key_has_value(bucket_dict, "use_reduced_redundancy", False)
_assert_key_has_value(connection_dict, "host", None)
_assert_key_has_value(connection_dict, "port", 6000)
_assert_key_has_value(connection_dict, "multipart", True)
_assert_key_has_value(connection_dict, "is_secure", True)
_assert_key_has_value(cache_dict, "size", 1000.0)
_assert_key_has_value(cache_dict, "path", "database/object_store_cache")
extra_dirs = as_dict["extra_dirs"]
assert len(extra_dirs) == 2
AZURE_BLOB_TEST_CONFIG = """<object_store type="azure_blob">
<auth account_name="azureact" account_key="password123" />
<container name="unique_container_name" max_chunk_size="250"/>
<cache path="database/object_store_cache" size="100" />
<extra_dir type="job_work" path="database/job_working_directory_azure"/>
<extra_dir type="temp" path="database/tmp_azure"/>
</object_store>
"""
AZURE_BLOB_TEST_CONFIG_YAML = """
type: azure_blob
auth:
account_name: azureact
account_key: password123
container:
name: unique_container_name
max_chunk_size: 250
cache:
path: database/object_store_cache
size: 100
extra_dirs:
- type: job_work
path: database/job_working_directory_azure
- type: temp
path: database/tmp_azure
"""
def test_config_parse_azure():
for config_str in [AZURE_BLOB_TEST_CONFIG, AZURE_BLOB_TEST_CONFIG_YAML]:
with TestConfig(config_str, clazz=UnitializedAzureBlobObjectStore) as (directory, object_store):
assert object_store.account_name == "azureact"
assert object_store.account_key == "password123"
assert object_store.container_name == "unique_container_name"
assert object_store.max_chunk_size == 250
assert object_store.cache_size == 100
assert object_store.staging_path == "database/object_store_cache"
assert object_store.extra_dirs["job_work"] == "database/job_working_directory_azure"
assert object_store.extra_dirs["temp"] == "database/tmp_azure"
as_dict = object_store.to_dict()
_assert_has_keys(as_dict, ["auth", "container", "cache", "extra_dirs", "type"])
_assert_key_has_value(as_dict, "type", "azure_blob")
auth_dict = as_dict["auth"]
container_dict = as_dict["container"]
cache_dict = as_dict["cache"]
_assert_key_has_value(auth_dict, "account_name", "azureact")
_assert_key_has_value(auth_dict, "account_key", "password123")
_assert_key_has_value(container_dict, "name", "unique_container_name")
_assert_key_has_value(container_dict, "max_chunk_size", 250)
_assert_key_has_value(cache_dict, "size", 100)
_assert_key_has_value(cache_dict, "path", "database/object_store_cache")
extra_dirs = as_dict["extra_dirs"]
assert len(extra_dirs) == 2
class TestConfig(object):
def __init__(self, config_str=DISK_TEST_CONFIG, clazz=None, store_by="id"):
self.temp_directory = mkdtemp()
if config_str.startswith("<"):
config_file = "store.xml"
else:
config_file = "store.yaml"
self.write(config_str, config_file)
config = MockConfig(self.temp_directory, config_file, store_by=store_by)
if clazz is None:
self.object_store = objectstore.build_object_store_from_config(config)
elif config_file == "store.xml":
self.object_store = clazz.from_xml(config, XML(config_str))
else:
self.object_store = clazz(config, yaml.safe_load(StringIO(config_str)))
def __enter__(self):
return self, self.object_store
def __exit__(self, type, value, tb):
rmtree(self.temp_directory)
def write(self, contents, name):
path = os.path.join(self.temp_directory, name)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
contents_template = Template(contents)
expanded_contents = contents_template.safe_substitute(temp_directory=self.temp_directory)
open(path, "w").write(expanded_contents)
return path
class MockConfig(object):
def __init__(self, temp_directory, config_file, store_by="id"):
self.file_path = temp_directory
self.object_store_config_file = os.path.join(temp_directory, config_file)
self.object_store_check_old_style = False
self.object_store_cache_path = os.path.join(temp_directory, "staging")
self.object_store_store_by = store_by
self.jobs_directory = temp_directory
self.new_file_path = temp_directory
self.umask = 0000
self.gid = 1000
class MockDataset(object):
def __init__(self, id):
self.id = id
self.object_store_id = None
self.uuid = uuid4()
self.tags = []
def rel_path_for_uuid_test(self):
rel_path = os.path.join(*directory_hash_id(self.uuid))
return rel_path
# Poor man's mocking. Need to get a real mocking library as a real Galaxy development
# dependency.
PERSIST_METHOD_NAME = "_create_object_in_session"
@contextmanager
def __stubbed_persistence():
real_method = getattr(objectstore, PERSIST_METHOD_NAME)
try:
persisted_ids = {}
def persist(object):
persisted_ids[object.id] = object.object_store_id
setattr(objectstore, PERSIST_METHOD_NAME, persist)
yield persisted_ids
finally:
setattr(objectstore, PERSIST_METHOD_NAME, real_method)
def _assert_has_keys(the_dict, keys):
for key in keys:
assert key in the_dict, "key [%s] not in [%s]" % (key, the_dict)
def _assert_key_has_value(the_dict, key, value):
    assert key in the_dict, "dict [%s] doesn't contain expected key [%s]" % (the_dict, key)
assert the_dict[key] == value, "%s != %s" % (the_dict[key], value)
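# Illustrative sketch (not an actual test in this module): the helpers above can be
# combined like this to exercise an object store built from an inline config.
def _sketch_object_store_roundtrip():
    with TestConfig(DISK_TEST_CONFIG_YAML) as (directory, object_store):
        dataset = MockDataset(42)
        directory.write("hello", "files1/000/dataset_42.dat")
        assert object_store.exists(dataset)
        assert object_store.get_data(dataset) == "hello"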
| 35.988304
| 174
| 0.667436
|
f05f1f9077cbc912b5172ebb4245c2a91830ff3e
| 8,572
|
py
|
Python
|
src/server_agent.py
|
cletusajibade/PrivacyFL
|
59de52d985ef972d66af6c3ff8f1a3e8612cca0e
|
[
"MIT"
] | null | null | null |
src/server_agent.py
|
cletusajibade/PrivacyFL
|
59de52d985ef972d66af6c3ff8f1a3e8612cca0e
|
[
"MIT"
] | null | null | null |
src/server_agent.py
|
cletusajibade/PrivacyFL
|
59de52d985ef972d66af6c3ff8f1a3e8612cca0e
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append('..')
import numpy as np
import config
from datetime import datetime
import multiprocessing
from multiprocessing.pool import ThreadPool
from utils.latency_helper import find_slowest_time
from agent import Agent
from message import Message
def client_computation_caller(inp):
client_instance, message = inp
return_message = client_instance.produce_weights(message=message)
return return_message
def client_weights_returner(inp):
client_instance, message = inp
converged = client_instance.receive_weights(message)
return converged
def client_agent_dropout_caller(inp):
client_instance, message = inp
__ = client_instance.remove_active_clients(message)
return None
class ServerAgent(Agent):
""" Server agent that averages (federated) weights and returns them to clients"""
def __init__(self, agent_number, simulation_output_view):
super(ServerAgent, self).__init__(agent_number=agent_number, agent_type='server_agent')
self.averaged_weights = {}
self.averaged_intercepts = {}
self.simulation_output_view = simulation_output_view
def request_values(self, num_iterations):
"""
        Method invoked to start the simulation. Prints out which clients converged and on which iteration.
        Also prints out the accuracy for each client on each iteration (what the weights would be if not for the simulation) and the federated accuracy.
        :param num_iterations: number of iterations to run
"""
converged = {} # maps client names to iteration of convergence. Contains all inactive clients
active_clients = set(self.directory.clients.keys())
for i in range(1, num_iterations + 1):
weights = {}
intercepts = {}
m = multiprocessing.Manager()
lock = m.Lock()
with ThreadPool(len(active_clients)) as calling_pool:
args = []
for client_name in active_clients:
client_instance = self.directory.clients[client_name]
body = {'iteration': i, 'lock': lock, 'simulated_time': config.LATENCY_DICT[self.name][client_name]}
arg = Message(sender_name=self.name, recipient_name=client_name, body=body)
args.append((client_instance, arg))
messages = calling_pool.map(client_computation_caller, args)
server_logic_start = datetime.now()
vals = {message.sender: (message.body['weights'], message.body['intercepts']) for message in messages}
simulated_time = find_slowest_time(messages)
# add them to the weights_dictionary
for client_name, return_vals in vals.items():
client_weights, client_intercepts = return_vals
weights[client_name] = np.array(client_weights)
intercepts[client_name] = np.array(client_intercepts)
weights_np = list(weights.values()) # the weights for this iteration!
intercepts_np = list(intercepts.values())
try:
averaged_weights = np.average(weights_np, axis=0) # gets rid of security offsets
except:
raise ValueError('''DATA INSUFFICIENT: Some client does not have a sample from each class so dimension of weights is incorrect. Make
train length per iteration larger for each client to avoid this issue''')
averaged_intercepts = np.average(intercepts_np, axis=0)
self.averaged_weights[i] = averaged_weights ## averaged weights for this iteration!!
self.averaged_intercepts[i] = averaged_intercepts
# add time server logic takes
server_logic_end = datetime.now()
server_logic_time = server_logic_end - server_logic_start
simulated_time += server_logic_time
with ThreadPool(len(active_clients)) as returning_pool:
args = []
for client_name in active_clients:
client_instance = self.directory.clients[client_name]
body = {'iteration': i, 'return_weights': averaged_weights,
'return_intercepts': averaged_intercepts,
'simulated_time': simulated_time + config.LATENCY_DICT[self.name][client_name]}
message = Message(sender_name=self.name, recipient_name=client_name, body=body)
args.append((client_instance, message))
return_messages = returning_pool.map(client_weights_returner, args)
simulated_time = find_slowest_time(return_messages)
server_logic_start = datetime.now()
clients_to_remove = set()
for message in return_messages:
if message.body['converged'] == True and message.sender not in converged: # converging
converged[message.sender] = i # iteration of convergence
clients_to_remove.add(message.sender)
server_logic_end = datetime.now()
server_logic_time = server_logic_end - server_logic_start
simulated_time += server_logic_time
if config.CLIENT_DROPOUT:
# tell the clients which other clients have dropped out
active_clients -= clients_to_remove
                if len(active_clients) < 2:  # no point in continuing if we don't have at least 2 clients
self.print_convergences(converged)
return
with ThreadPool(len(active_clients)) as calling_pool:
args = []
for client_name in active_clients:
client_instance = self.directory.clients[client_name]
body = {'clients_to_remove': clients_to_remove,
'simulated_time': simulated_time + config.LATENCY_DICT[self.name][client_name],
'iteration': i}
message = Message(sender_name=self.name, recipient_name=client_name, body=body)
args.append((client_instance, message))
__ = calling_pool.map(client_agent_dropout_caller, args)
# at end of all iterations
self.print_convergences(converged)
def print_convergences(self, converged):
"""
Used to print out all the clients that have converged at the end of request values
:param converged: dict of converged clients containing iteration of convergence
:type converged: dict
"""
for client_name in self.directory.clients.keys():
if client_name in converged:
print('Client {} converged on iteration {}'.format(client_name, converged[client_name]))
self.simulation_output_view.appendPlainText(
'Client {} converged on iteration {}'.format(client_name, converged[client_name]))
if client_name not in converged:
print('Client {} never converged'.format(client_name))
self.simulation_output_view.appendPlainText('Client {} never converged'.format(client_name))
self.simulation_output_view.appendPlainText('\n')
def final_statistics(self, simulation_output_view):
"""
USED FOR RESEARCH PURPOSES.
"""
# for research purposes
client_accs = []
fed_acc = []
for client_name, client_instance in self.directory.clients.items():
fed_acc.append(list(client_instance.federated_accuracy.values()))
client_accs.append(list(client_instance.personal_accuracy.values()))
if config.CLIENT_DROPOUT:
print('Federated accuracies are {}'.format(dict(zip(self.directory.clients, fed_acc))))
simulation_output_view.appendPlainText(
'Federated accuracies are {}'.format(dict(zip(self.directory.clients, fed_acc))))
else:
client_accs = list(np.mean(client_accs, axis=0))
fed_acc = list(np.mean(fed_acc, axis=0))
print('Personal accuracy on final iteration is {}'.format(client_accs))
print('Federated accuracy on final iteration is {}'.format(fed_acc)) # should all be the same if no dropout
simulation_output_view.appendPlainText('Personal accuracy on final iteration is {}'.format(client_accs))
simulation_output_view.appendPlainText('Federated accuracy on final iteration is {}'.format(fed_acc))
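# Minimal standalone sketch of the federated averaging step performed in
# request_values above; the arrays here are made-up values for illustration only.
def _sketch_federated_average():
    client_weights = [np.array([[0.1, 0.2]]), np.array([[0.3, 0.4]])]
    client_intercepts = [np.array([0.0]), np.array([1.0])]
    averaged_weights = np.average(client_weights, axis=0)        # [[0.2, 0.3]]
    averaged_intercepts = np.average(client_intercepts, axis=0)  # [0.5]
    return averaged_weights, averaged_intercepts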
| 47.888268
| 148
| 0.64314
|
409b055e52d98f372aadf020af9209ac8e6aa733
| 339
|
py
|
Python
|
src/app/profile/mixins.py
|
serious-notreally/cappa
|
993a8df35ca6c3b22f3ca811937fd29c07fc71aa
|
[
"MIT"
] | 9
|
2020-04-05T07:35:55.000Z
|
2021-08-03T05:50:05.000Z
|
src/app/profile/mixins.py
|
serious-notreally/cappa
|
993a8df35ca6c3b22f3ca811937fd29c07fc71aa
|
[
"MIT"
] | 89
|
2020-01-26T11:50:06.000Z
|
2022-03-31T07:14:18.000Z
|
src/app/profile/mixins.py
|
serious-notreally/cappa
|
993a8df35ca6c3b22f3ca811937fd29c07fc71aa
|
[
"MIT"
] | 13
|
2020-03-10T14:45:07.000Z
|
2021-07-31T02:43:40.000Z
|
from urllib import parse
class NextPathMixin:
HOME_PATH = '/'
def _get_next_path(self, request) -> str:
next_path = request.GET.get('next')
if next_path is None:
next_path = parse.urlparse(
request.META.get('HTTP_REFERER', self.HOME_PATH)
).path
return next_path
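# Illustrative sketch (not used by the app): a minimal stand-in request showing the
# fallback order implemented above: explicit ?next=, then the referer path, then '/'.
class _ExampleRequest:
    def __init__(self, get, referer=None):
        self.GET = get
        self.META = {'HTTP_REFERER': referer} if referer is not None else {}
# NextPathMixin()._get_next_path(_ExampleRequest({'next': '/profile/'})) -> '/profile/'
# NextPathMixin()._get_next_path(_ExampleRequest({}, 'https://example.org/courses/')) -> '/courses/'
# NextPathMixin()._get_next_path(_ExampleRequest({})) -> '/'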
| 22.6
| 64
| 0.59587
|
06881951773d444e59f24d31f2f6cd0c8b718852
| 10,832
|
py
|
Python
|
ib_tws_server/codegen/graphql_schema_generator.py
|
ajmal017/ib_tws_server_py
|
768dfb77432ed83700066e1720ff71129b4b7fd6
|
[
"MIT"
] | null | null | null |
ib_tws_server/codegen/graphql_schema_generator.py
|
ajmal017/ib_tws_server_py
|
768dfb77432ed83700066e1720ff71129b4b7fd6
|
[
"MIT"
] | null | null | null |
ib_tws_server/codegen/graphql_schema_generator.py
|
ajmal017/ib_tws_server_py
|
768dfb77432ed83700066e1720ff71129b4b7fd6
|
[
"MIT"
] | null | null | null |
from typing import Generator
from ib_tws_server.api_definition import *
from ib_tws_server.codegen.generator_utils import *
from ib_tws_server.ib_imports import *
from ib_tws_server.util.type_util import *
import inspect
from inspect import *
import logging
import os
import re
logger = logging.getLogger()
class GraphQLSchemaGenerator:
@staticmethod
def generate(filename):
builtin_type_mappings = {
'str': 'String',
'bool': 'Boolean',
'int': "Int",
'float': 'Float'
}
callback_types: Set[str] = set()
unprocessed_types: Set[(str,bool)] = set()
processed_types: Dict[str, str] = {}
scalars: Set[str] = set()
enums: Set[str] = set()
union_types: Set[str] = set()
container_re = re.compile("(?:typing\.)?(Set|List)\[([^\]]+)\]")
dict_re = re.compile("(?:typing\.)?Dict\[[\s]*([a-zA-Z\.]+)[\s]*,[\s]*([a-zA-Z\.]+)\]")
# TODO: revisit
enums.add('ibapi.order_condition.OrderCondition')
def graphql_type_name(s:str, is_input: bool):
return f"{GeneratorUtils.unqualified_type_name(s)}{'Input' if is_input else ''}"
def check_if_processed(s:str, is_input: bool):
u = graphql_type_name(s, is_input)
if u in processed_types:
if processed_types[u] != s:
if (not s.startswith('ibapi')) or (not processed_types[u].startswith('ibapi')):
raise RuntimeError(f"Duplicate unqualified type {processed_types[u]} != {s}")
return True
return False
def add_type_for_processing(s: str, is_input: bool):
if s in builtin_type_mappings or s in callback_types or s in union_types or check_if_processed(s, is_input) or (s,is_input) in unprocessed_types:
return
m = container_re.match(s)
if m is not None:
type_to_add = m.group(2)
else:
type_to_add = s
logger.log(logging.DEBUG, f"Adding {type_to_add}")
unprocessed_types.add((type_to_add, is_input))
def add_scalar(s: str, reason: str, warn: bool):
if s in scalars:
return
logger.log(logging.WARN if warn else logging.DEBUG, f"Adding scalar {s} because {reason}")
            if s == 'str':
                raise RuntimeError("Built-in type 'str' should never be added as a scalar")
if s in builtin_type_mappings:
raise RuntimeError(f"Should not have added {s} as scalar")
scalars.add(s)
def add_enum(e: str):
if e in enums:
return
logger.log(logging.DEBUG, f"Adding enum {e}")
enums.add(e)
def graphql_type(t: str, is_input: bool):
m = container_re.match(t)
if m is not None and len(m.groups()) == 2:
return f"[{graphql_type(m.group(2), is_input)}]"
elif t in builtin_type_mappings:
return builtin_type_mappings[t]
elif t in callback_types or t in union_types:
return t
elif t in ENUM_ALIASES:
return graphql_type(ENUM_ALIASES[t], is_input)
else:
dict_m = dict_re.match(t)
if dict_m is not None:
type_name = f"{GeneratorUtils.unqualified_type_name(dict_m.group(2))}Map"
add_scalar(type_name, "Dictionary", False)
return type_name
resolved_type = find_sym_from_full_name_or_module(t, ibapi)
ret_type_str = GeneratorUtils.unqualified_type_name(t)
if resolved_type is not None:
if isinstance(resolved_type, ibapi.common.Enum):
add_enum(t)
return ret_type_str
elif t in scalars or t in enums:
return ret_type_str
else:
add_type_for_processing(t, is_input)
return graphql_type_name(t, is_input)
else:
raise RuntimeError(f"Could not determine type for {t}")
def object_member_type(obj:object, member_name: str, val: any, is_input: bool):
if obj.__class__.__name__ in OVERRIDDEN_MEMBER_TYPE_HINTS :
hints = OVERRIDDEN_MEMBER_TYPE_HINTS[obj.__class__.__name__]
if member_name in hints:
return graphql_type(hints[member_name], is_input)
if val is not None:
return graphql_type(full_class_name(type(val)), is_input)
else:
raise RuntimeError(f"Could not determine type {obj.__class__.__name__} for member {member_name}")
def generate_global_type(type_name: str, is_input: bool):
if check_if_processed(type_name, is_input) or type_name in enums:
return ""
logger.log(logging.DEBUG, f"Generating {type_name}")
unqualified_name = graphql_type_name(type_name, is_input)
cls = find_sym_from_full_name_or_module(type_name, ibapi)
if cls is None:
raise RuntimeError(f"Could not find symbol for {type_name}")
cls_dict = cls.__dict__
processed_types[unqualified_name] = type_name
if ('__annotations__' in cls_dict):
                members = [ (k, graphql_type(v, is_input)) for k, v in cls_dict['__annotations__'].items() ]
else:
obj = cls()
members = [ (n, object_member_type(obj, n, t, is_input)) for n,t in inspect.getmembers(obj) if not n.startswith("__") ]
for m,t in members:
if t is None:
add_scalar(cls.__name__, f"Could not find type for member '{m}'' for class '{cls.__name__}'", True)
return ""
code = f"""
{'input' if is_input else 'type'} {unqualified_name} {{"""
for p,t in members:
code += f"""
{p}: {t}"""
code = code + """
}"""
return code
def generate_callback_type(d: ApiDefinition, m: Callable):
type_name,is_wrapper = GeneratorUtils.callback_type(d, m)
if not is_wrapper:
return ""
if type_name in processed_types:
return ""
callback_types.add(type_name)
logger.log(logging.DEBUG, f"Generating {type_name}")
params = GeneratorUtils.data_class_members(d, [m], d.is_subscription)
for m in params:
f = graphql_type(m.annotation, False)
if f is None:
add_scalar(type_name, f"Could not find type for {m.annotation}", True)
return ""
processed_types[type_name] = type_name
code = f"""
type {type_name} {{"""
for p in params:
code += f"""
{p.name}: {graphql_type(p.annotation, False)}"""
code += """
}"""
return code
def generate_enum(e: str):
resolved_type = find_sym_from_full_name(e)
short_name = GeneratorUtils.unqualified_type_name(resolved_type.__name__)
code = f"""
enum {short_name} {{"""
for k,v in inspect.getmembers(resolved_type):
if isinstance(v, int):
code += f"""
{k.replace("/", "")}"""
code += """
}"""
return code
def generate_union_type(d: ApiDefinition):
if not GeneratorUtils.query_return_item_type_is_union(d):
return ""
else:
union_type = GeneratorUtils.query_return_item_type(d)
union_types.add(union_type)
if union_type in processed_types:
raise RuntimeError(f"Union type {union_type} already processed")
processed_types[union_type] = union_type
return f"""
union {union_type} = {"|".join([graphql_type(c, False) for c in callback_types])}
"""
def generate_query_or_subscription(d: ApiDefinition, is_subscription: bool):
name = d.request_method.__name__
members = list(GeneratorUtils.request_signature(d, False).parameters.values())
del members[0] # remove 'self'
annotations = []
for m in members:
gql = graphql_type(m.annotation, True)
annotations.append(gql)
members = [ f"{m.name}: {a}" for (m,a) in zip(members,annotations) ]
member_str = ",".join(members)
member_sig = "" if len(member_str) == 0 else f"({member_str})"
query_return_type = graphql_type(GeneratorUtils.query_return_item_type(d), False)
if GeneratorUtils.response_is_list(d) and not is_subscription:
query_return_type = f"[{query_return_type}]"
return f"""
{name}{member_sig}: {query_return_type}"""
with open(filename, "w") as f:
for d in REQUEST_DEFINITIONS:
if d.request_method is None or d.callback_methods is None:
continue
for c in d.callback_methods:
f.write(generate_callback_type(d, c))
f.write(generate_union_type(d))
while len(unprocessed_types) != 0:
s,is_input = unprocessed_types.pop()
ret = generate_global_type(s, is_input)
if len(ret) > 0:
f.write(ret)
f.write(os.linesep)
f.write("""
type Query {""")
for d in REQUEST_DEFINITIONS:
if d.request_method is not None and not d.is_subscription and d.callback_methods is not None:
f.write(generate_query_or_subscription(d, False))
f.write("""
}
type Subscription {""")
for d in REQUEST_DEFINITIONS:
if d.request_method is not None and (d.is_subscription or d.subscription_flag_name is not None) and d.callback_methods is not None:
f.write(generate_query_or_subscription(d, True))
f.write("""
}""")
f.write(os.linesep)
while len(unprocessed_types) != 0:
s,is_input = unprocessed_types.pop()
ret = generate_global_type(s, is_input)
if len(ret) > 0:
f.write(ret)
f.write(os.linesep)
for e in enums:
f.write(generate_enum(e))
f.write(os.linesep)
for s in scalars:
f.write(os.linesep)
f.write(f"scalar {s}")
f.write(os.linesep)
f.write("""
schema {
query: Query
subscription: Subscription
}
""")
| 40.267658
| 157
| 0.551791
|
df9b1f34ec0ce6283391a2be27e63cb1f484f2e5
| 201
|
py
|
Python
|
ccal/get_gff3_attribute.py
|
alex-wenzel/ccal
|
74dfc604d93e6ce9e12f34a828b601618df51faa
|
[
"MIT"
] | null | null | null |
ccal/get_gff3_attribute.py
|
alex-wenzel/ccal
|
74dfc604d93e6ce9e12f34a828b601618df51faa
|
[
"MIT"
] | null | null | null |
ccal/get_gff3_attribute.py
|
alex-wenzel/ccal
|
74dfc604d93e6ce9e12f34a828b601618df51faa
|
[
"MIT"
] | null | null | null |
def get_gff3_attribute(attributes, field):
for field_value in attributes.split(sep=";"):
field_, value = field_value.split(sep="=")
if field == field_:
return value
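# Minimal usage sketch with a made-up attribute string (not from any real annotation file).
if __name__ == "__main__":
    example_attributes = "ID=gene0;Name=TP53;biotype=protein_coding"
    assert get_gff3_attribute(example_attributes, "Name") == "TP53"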
| 20.1
| 50
| 0.616915
|
fa1978e2140f7b136f62b1b79d45b0067d99b176
| 2,893
|
py
|
Python
|
examples/generation/docking_generation/guacamol_tdc/guacamol_baselines/smiles_lstm_hc/train_smiles_lstm_model.py
|
Shicheng-Guo/TDC
|
36e2863aca5abe3147e34f59d20fd77112945242
|
[
"MIT"
] | 577
|
2020-11-17T01:09:15.000Z
|
2022-03-31T22:45:34.000Z
|
examples/generation/docking_generation/guacamol_tdc/guacamol_baselines/smiles_lstm_hc/train_smiles_lstm_model.py
|
Shicheng-Guo/TDC
|
36e2863aca5abe3147e34f59d20fd77112945242
|
[
"MIT"
] | 70
|
2020-11-18T09:35:33.000Z
|
2022-03-25T11:28:38.000Z
|
examples/generation/docking_generation/guacamol_tdc/guacamol_baselines/smiles_lstm_hc/train_smiles_lstm_model.py
|
Shicheng-Guo/TDC
|
36e2863aca5abe3147e34f59d20fd77112945242
|
[
"MIT"
] | 106
|
2020-11-17T01:47:02.000Z
|
2022-03-25T03:34:46.000Z
|
import argparse
import os
from guacamol.utils.helpers import setup_default_logger
from .smiles_rnn_distribution_learner import SmilesRnnDistributionLearner
if __name__ == '__main__':
setup_default_logger()
parser = argparse.ArgumentParser(description='Distribution learning benchmark for SMILES RNN',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--train_data', default='data/guacamol_v1_train.smiles',
help='Full path to SMILES file containing training data')
parser.add_argument('--valid_data', default='data/guacamol_v1_valid.smiles',
help='Full path to SMILES file containing validation data')
parser.add_argument('--batch_size', default=512, type=int, help='Size of a mini-batch for gradient descent')
parser.add_argument('--valid_every', default=1000, type=int, help='Validate every so many batches')
parser.add_argument('--print_every', default=10, type=int, help='Report every so many batches')
parser.add_argument('--n_epochs', default=10, type=int, help='Number of training epochs')
parser.add_argument('--max_len', default=100, type=int, help='Max length of a SMILES string')
parser.add_argument('--hidden_size', default=512, type=int, help='Size of hidden layer')
parser.add_argument('--n_layers', default=3, type=int, help='Number of layers for training')
parser.add_argument('--rnn_dropout', default=0.2, type=float, help='Dropout value for RNN')
parser.add_argument('--lr', default=1e-3, type=float, help='RNN learning rate')
parser.add_argument('--seed', default=42, type=int, help='Random seed')
parser.add_argument('--output_dir', default=None, help='Output directory')
args = parser.parse_args()
if args.output_dir is None:
args.output_dir = os.path.dirname(os.path.realpath(__file__))
trainer = SmilesRnnDistributionLearner(output_dir=args.output_dir,
n_epochs=args.n_epochs,
hidden_size=args.hidden_size,
n_layers=args.n_layers,
max_len=args.max_len,
batch_size=args.batch_size,
rnn_dropout=args.rnn_dropout,
lr=args.lr,
valid_every=args.valid_every)
training_set_file = args.train_data
validation_set_file = args.valid_data
with open(training_set_file) as f:
train_list = f.readlines()
with open(validation_set_file) as f:
valid_list = f.readlines()
trainer.train(training_set=train_list, validation_set=valid_list)
print(f'All done, your trained model is in {args.output_dir}')
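# Example invocation (illustrative paths; assumes this file is run as a module from the
# repository root so that the relative import above resolves):
#   python -m smiles_lstm_hc.train_smiles_lstm_model \
#       --train_data data/guacamol_v1_train.smiles \
#       --valid_data data/guacamol_v1_valid.smiles \
#       --n_epochs 5 --output_dir /tmp/smiles_lstm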
| 51.660714
| 112
| 0.642586
|
558408c7e0b7cd2ac41670f3e21f301b8c1198b7
| 7,918
|
py
|
Python
|
qmpy/materials/formation_energy.py
|
bbadass/qmpy
|
e2fd0015e4f2786e1cca185b616e679c3dcb2114
|
[
"MIT"
] | null | null | null |
qmpy/materials/formation_energy.py
|
bbadass/qmpy
|
e2fd0015e4f2786e1cca185b616e679c3dcb2114
|
[
"MIT"
] | null | null | null |
qmpy/materials/formation_energy.py
|
bbadass/qmpy
|
e2fd0015e4f2786e1cca185b616e679c3dcb2114
|
[
"MIT"
] | null | null | null |
# qmpy/materials/formation_energy.py
from django.db import models
from django.db.models import Min
import json
import pprint
import logging
from qmpy.db.custom import DictField
import qmpy.materials.composition as Comp
import qmpy.analysis as vasp
import qmpy.materials.element as elt
from qmpy.data import *
from qmpy.utils import *
logger = logging.getLogger(__name__)
class ExptFormationEnergy(models.Model):
"""Experimentally measured formation energy.
Any external formation energy should be entered as an ExptFormationEnergy
object, rather than a FormationEnergy. If the external source is also
computational, set the "dft" attribute to be True.
Relationships:
| :mod:`~qmpy.Composition` via composition
| :mod:`~qmpy.Fit` via fit
Attributes:
| id: integer primary key.
| delta_e: measured formation energy.
| delta_g: measured free energy of formation.
| dft: (bool) True if the formation energy is from a non-OQMD DFT
| calculation.
| source: (str) Identifier for the source.
"""
composition = models.ForeignKey(
"Composition", null=True, blank=True, on_delete=models.PROTECT
)
delta_e = models.FloatField(null=True)
delta_g = models.FloatField(null=True)
source = models.CharField(max_length=127, blank=True, null=True)
dft = models.BooleanField(default=False)
class Meta:
app_label = "qmpy"
db_table = "expt_formation_energies"
def __str__(self):
return "%s : %s" % (self.composition, self.delta_e)
@classmethod
def read_file(cls, filename, dft=False):
source = filename.split(".")[0]
expts = []
for line in open(filename, "r"):
comp, energy = line.strip().split()
expt, new = ExptFormationEnergy.objects.get_or_create(
delta_e=energy,
composition=Comp.Composition.get(comp),
source=source,
dft=dft,
)
if new:
expt.save()
expts.append(expt)
return expts
class HubbardCorrection(models.Model):
"""
Energy correction for DFT+U energies.
Relationships:
| :mod:`~qmpy.Fit` via fit
| :mod:`~qmpy.Element` via element
| :mod:`~qmpy.Hubbard` via hubbard
Attributes:
| id
| value: Correction energy (eV/atom)
"""
element = models.ForeignKey("Element", on_delete=models.CASCADE)
hubbard = models.ForeignKey("Hubbard", on_delete=models.CASCADE)
value = models.FloatField()
fit = models.ForeignKey(
"Fit",
blank=True,
null=True,
on_delete=models.CASCADE,
related_name="hubbard_correction_set",
)
class Meta:
app_label = "qmpy"
db_table = "hubbard_corrections"
class ReferenceEnergy(models.Model):
"""
Elemental reference energy for evaluating heats of formation.
Relationships:
| :mod:`~qmpy.Fit` via fit
| :mod:`~qmpy.Element` via element
Attributes:
| id
| value: Reference energy (eV/atom)
"""
element = models.ForeignKey("Element", on_delete=models.CASCADE)
value = models.FloatField()
fit = models.ForeignKey(
"Fit",
blank=True,
null=True,
on_delete=models.CASCADE,
related_name="reference_energy_set",
)
class Meta:
app_label = "qmpy"
db_table = "reference_energies"
class FormationEnergy(models.Model):
"""
Base class for a formation energy.
Relationships:
| :mod:`~qmpy.Calculation` via calculation
| :mod:`~qmpy.Composition` via composition
| :mod:`~qmpy.Entry` via entry
| :mod:`~qmpy.FormationEnergy` via equilibrium
| :mod:`~qmpy.Fit` via fit
Attributes:
| id
| delta_e: Formation energy (eV/atom)
| description: A label of the source of the formation energy.
| stability: Distance from the convex hull (eV/atom)
"""
composition = models.ForeignKey(
"Composition", null=True, blank=True, on_delete=models.PROTECT
)
entry = models.ForeignKey("Entry", null=True, blank=True, on_delete=models.CASCADE)
calculation = models.ForeignKey(
"Calculation", null=True, blank=True, on_delete=models.SET_NULL
)
description = models.CharField(max_length=20, null=True, blank=True)
fit = models.ForeignKey("Fit", null=True, on_delete=models.PROTECT)
stability = models.FloatField(blank=True, null=True)
delta_e = models.FloatField(null=True)
equilibrium = models.ManyToManyField("self", blank=True)
class Meta:
app_label = "qmpy"
db_table = "formation_energies"
@classmethod
def get(cls, calculation, fit="standard"):
fit = Fit.get(fit)
try:
return FormationEnergy.objects.get(calculation=calculation, fit=fit)
except FormationEnergy.DoesNotExist:
return FormationEnergy(calculation=calculation, fit=fit)
@staticmethod
def search(bounds, fit="standard"):
space = set()
if isinstance(bounds, str):
bounds = bounds.split("-")
for b in bounds:
bound = parse_comp(b)
space |= set(bound.keys())
in_elts = elt.Element.objects.filter(symbol__in=space)
out_elts = elt.Element.objects.exclude(symbol__in=space)
forms = FormationEnergy.objects.filter(fit=fit)
forms = forms.exclude(composition__element_set__in=out_elts)
forms = forms.filter(composition__element_set__in=in_elts)
forms = forms.filter(composition__ntypes__lte=len(space))
return forms.distinct()
def __str__(self):
return "%s : %s" % (self.composition, self.delta_e)
def save(self, *args, **kwargs):
self.composition = self.calculation.composition
self.entry = self.calculation.entry
super(FormationEnergy, self).save(*args, **kwargs)
Formation = FormationEnergy
class Fit(models.Model):
"""
The core model for a reference energy fitting scheme.
The Fit model links to the experimental data (ExptFormationEnergy objects)
that informed the fit, as well as the DFT calculations (Calculation objects)
that were matched to each experimental formation energy. Once the fit is
completed, it also stores a list of chemical potentials both as a
relationship to ReferenceEnergy and HubbardCorrection objects.
These correction energies can also be accessed by dictionaries at
Fit.mus and Fit.hubbard_mus.
Relationships:
| :mod:`~qmpy.Calculation` via dft
| :mod:`~qmpy.ExptFormationEnergy` via experiments
| :mod:`~qmpy.FormationEnergy` via formationenergy_set
| :mod:`~qmpy.HubbardCorrection` via hubbard_correction_set
| :mod:`~qmpy.ReferenceEnergy` via reference_energy_set
Attributes:
| name: Name for the fitting
Examples::
>>> f = Fit.get('standard')
>>> f.experiments.count()
>>> f.dft.count()
>>> f.mus
>>> f.hubbard_mus
"""
name = models.CharField(max_length=255, primary_key=True)
elements = models.ManyToManyField("Element")
experiments = models.ManyToManyField("ExptFormationEnergy")
dft = models.ManyToManyField("Calculation")
class Meta:
app_label = "qmpy"
db_table = "fits"
@classmethod
def get(cls, name):
try:
return Fit.objects.get(name=name)
except Fit.DoesNotExist:
return Fit(name=name)
@property
def mus(self):
mus = self.reference_energy_set.values_list("element_id", "value")
return dict(mus)
@property
def hubbard_mus(self):
hm = self.hubbard_correction_set.all()
return dict((h.hubbard.key, h.value) for h in hm)
| 29.879245
| 87
| 0.642208
|
d49b1a556c7aacfdb01bf24f1b4510ddfdf79530
| 2,604
|
py
|
Python
|
setup.py
|
abecode/sasa-tool
|
3007fc2e81a715457be9b55fd368e5d0e38c6aa5
|
[
"Apache-2.0"
] | 1
|
2018-02-19T18:36:52.000Z
|
2018-02-19T18:36:52.000Z
|
setup.py
|
abecode/sasa-tool
|
3007fc2e81a715457be9b55fd368e5d0e38c6aa5
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
abecode/sasa-tool
|
3007fc2e81a715457be9b55fd368e5d0e38c6aa5
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup
setup(name='sasa',
version='0.2.0',
description='SAIL/AIL Sentiment Analyzer',
author='Abe Kazemzadeh',
author_email='kazemzad@usc.edu',
url='https://code.google.com/p/sasa-tool/',
packages=['sasa'],
package_dir={'sasa':'sasa'},
package_data={'sasa': ['models/model.unigram.nb.bool.politics.unbiased',
'models/model.unigram.nb.bool.politics.positivebias',
'models/model.unigram.nb.bool.politics']},
install_requires=['nltk>=3.4'],
#package_data={'sasa': ['models/*']},
license='apache 2.0',
classifiers = [
"Development Status :: 2 - Pre-Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Topic :: Communications",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Human Machine Interfaces",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Sociology",
"Topic :: Text Processing :: Linguistic"],
long_description="""
===============================================
SASA Sentiment Analysis Tool
===============================================
This is a tool for doing sentiment analysis. The short-term goal of this
project is to share the sentiment analysis research. The long-term goal of
this project is to allow researchers to demonstrate and test
sentiment analysis tools, so that performance can be evaluated and compared.
Currently, the models are trained on Twitter data from the 2012 US
Presidential election. In the future we hope to have general models as well as
models for other domains.
This code was first released by the collaboration of two labs at the
University of Southern California (USC), the Signal Analysis and
Interpretation Laboratory (SAIL) and Annenberg Innovation Laboratory
(AIL). The sentiment research at SAIL is supported by grants including NSF,
NIH, and DARPA. The sentiment research at AIL is sponsored by the lab
sponsors, especially IBM, an AIL founding sponsor, and Orange, the flagship
brand of France Telecom.
This work was made possible by using existing open source projects and
code. NLTK (nltk.org) provides some of the basic classes of the SASA tool,
e.g. frequency distributions and classifiers. Christopher Potts'
twitter-specific tokenizer for sentiment analysis is used for tokenization.
""",
)
| 45.684211
| 82
| 0.673579
|
1fbc8cd146c82b07a2a6828a9698c2779bfbb67b
| 1,280
|
py
|
Python
|
dash-network-graph.py
|
oriolmirosa/dash-recipes
|
c01372292d60c3fd8c1f3a47bca6330c2b268ba4
|
[
"MIT"
] | 932
|
2017-07-08T06:37:23.000Z
|
2022-03-31T11:10:07.000Z
|
dash-network-graph.py
|
oriolmirosa/dash-recipes
|
c01372292d60c3fd8c1f3a47bca6330c2b268ba4
|
[
"MIT"
] | 29
|
2018-02-06T20:09:58.000Z
|
2021-08-24T06:01:11.000Z
|
dash-network-graph.py
|
oriolmirosa/dash-recipes
|
c01372292d60c3fd8c1f3a47bca6330c2b268ba4
|
[
"MIT"
] | 301
|
2017-09-25T16:16:41.000Z
|
2022-03-24T23:48:59.000Z
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, Event, State
import visdcc
app = dash.Dash()
app.layout = html.Div([
visdcc.Network(id='net',
data={'nodes':[{'id': 1, 'label': 'Node 1', 'color':'#00ffff'},
{'id': 2, 'label': 'Node 2'},
{'id': 4, 'label': 'Node 4'},
{'id': 5, 'label': 'Node 5'},
{'id': 6, 'label': 'Node 6'}],
'edges':[{'id':'1-3', 'from': 1, 'to': 3},
{'id':'1-2', 'from': 1, 'to': 2}]
},
options=dict(height='600px', width='100%')),
dcc.RadioItems(id='color',
options=[{'label': 'Red' , 'value': '#ff0000'},
{'label': 'Green', 'value': '#00ff00'},
{'label': 'Blue' , 'value': '#0000ff'}],
value='Red')
])
@app.callback(
Output('net', 'options'),
[Input('color', 'value')])
def myfun(x):
return {'nodes':{'color': x}}
if __name__ == '__main__':
app.run_server(debug=True)
| 35.555556
| 84
| 0.409375
|
72293c5db2ae28c08ff0e7d5006e6742e0b1a2d6
| 9,682
|
py
|
Python
|
max_ent/examples/orchestrator_exp.py
|
aloreggia/sofai
|
92694f9372985c0c3a23d695f1de4c4a1fb70728
|
[
"MIT"
] | null | null | null |
max_ent/examples/orchestrator_exp.py
|
aloreggia/sofai
|
92694f9372985c0c3a23d695f1de4c4a1fb70728
|
[
"MIT"
] | 1
|
2021-05-25T14:57:15.000Z
|
2021-05-25T14:57:15.000Z
|
max_ent/examples/orchestrator_exp.py
|
Rahgooy/soft_constraint_irl
|
259d4e7aff5ec8efe78cfbe8b84e9285d4645618
|
[
"MIT"
] | null | null | null |
from max_ent.algorithms.gridworld_icrl import generate_optimal_trajectories
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvas
import random
import pickle
from scipy import stats
import max_ent.examples.grid_9_by_9 as G
from max_ent.utility.support import generate_constraints
from max_ent.gridworld import Directions
colors = ['purple', 'red', 'orange', 'green', 'blue', 'yellow']
N_TRAJ = 100
def dist(demo):
dist = np.ones((81, 8)) * 1e-6
for t in demo:
for s, a, _ in t.transitions():
dist[s, a] += 1
return dist/dist.sum().reshape(-1, 1)
def jsd(x, y):
def kl(p, q):
kl = p * np.log2(p/q)
return kl.sum()
p = dist(x)
q = dist(y)
m = (p + q) / 2
return (kl(p, m) + kl(q, m))/2
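# Added commentary (not in the original script): jsd() computes the
# Jensen-Shannon divergence
#     JSD(P, Q) = 0.5 * KL(P || M) + 0.5 * KL(Q || M),  with M = (P + Q) / 2,
# using base-2 logarithms, so the value is bounded between 0 and 1. The 1e-6
# floor in dist() keeps the KL terms finite for (state, action) pairs that
# never occur in a trajectory set.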
def create_world(blue, green, cs=[], ca=[], cc=[], start=0, goal=8):
n_cfg = G.config_world(blue, green, cs, ca, cc, goal, start=start)
n = n_cfg.mdp
# Generate demonstrations and plot the world
demo = G.generate_trajectories(
n.world, n.reward, n.start, n.terminal, n_trajectories=1)
if not demo:
return None, None
return n, n_cfg
def learn_random_worlds(n_tests):
results = []
while len(results) < n_tests:
blue, green, cs, ca, start, goal = generate_constraints(9)
n, _ = create_world(blue, green, start=start, goal=goal)
cc = [1, 2]
c, _ = create_world(blue, green, cs, ca, cc, start=start, goal=goal)
# CHECK WHETHER STATE AND GOAL ARE REACHABLE - IF NOT SKIP THE GRID AND GENERATE A NEW ONE
        if c is None:
continue
print(f'Learning world #{len(results) + 1}')
demo_n = G.generate_trajectories(
n.world, n.reward, n.start, n.terminal, n_trajectories=N_TRAJ)
demo_c = G.generate_trajectories(
c.world, c.reward, c.start, c.terminal, n_trajectories=N_TRAJ)
learned_params = G.learn_constraints(
n.reward, c.world, c.terminal, demo_c.trajectories)
demo_l = G.generate_trajectories(
c.world, learned_params.reward, c.start, c.terminal, n_trajectories=N_TRAJ)
results.append({
'start': start,
'goal': goal,
'learned_params': learned_params,
'demo_n': demo_n.trajectories,
'demo_c': demo_c.trajectories,
'demo_l': demo_l.trajectories,
'constraints': {'blue': blue, 'green': green, 'cs': cs, 'ca_idx': [a.idx for a in ca], 'ca': ca}
})
return results
def get_worlds(d):
const = d['constraints']
n, _ = create_world(const['blue'], const['green'], start=d['start'],
goal=d['goal'])
l = G.MDP(n.world, d['learned_params'].reward, n.terminal, n.start)
return n, l
def get_traj_stats(traj, reward, constraints):
avg_length = 0
avg_pen = 0
cs, cc, ca = 0, 0, 0
n = len(traj)
for t in traj:
for s, a, s_ in t.transitions():
avg_length += 1
avg_pen += reward[s, a, s_]
if s in constraints['cs']:
cs += 1
if a in constraints['ca_idx']:
ca += 1
if s in (constraints['blue'] + constraints['green']):
cc += 1
avg_length /= n
avg_pen /= n
cs /= n
ca /= n
cc /= n
violations = cs + ca + cc
return avg_length, avg_pen, violations
def get_stats(demo, traj, reward, constraints, len_baseline, pen_baseline,):
avg_length, avg_pen, avg_vio = get_traj_stats(traj, reward, constraints)
return avg_length / len_baseline, avg_pen / pen_baseline, avg_vio, jsd(demo, traj)
def get_length_baselines(demo_n, demo_l):
n_lens = [len(t.transitions()) for t in demo_n]
c_lens = [len(t.transitions()) for t in demo_l]
min_length = min(n_lens)
avg_nominal_length = sum(n_lens) / len(n_lens)
avg_constrained_length = sum(c_lens) / len(c_lens)
return min_length, avg_nominal_length, avg_constrained_length
def get_penalty_baselines(demo_n, demo_l, demo_g, reward):
def r(t):
return sum([reward[x] for x in t.transitions()])
p_n = sum([r(t) for t in demo_n]) / len(demo_n)
p_l = sum([r(t) for t in demo_l]) / len(demo_l)
p_g = sum([r(t) for t in demo_g]) / len(demo_g)
return p_n, p_l, p_g
def get_violation_baselines(demo_n, demo_l, demo_g, constraints):
color_const = constraints['blue'].tolist() + constraints['green'].tolist()
def v(t):
cs, cc, ca = 0, 0, 0
for s, a, _ in t.transitions():
if s in constraints['cs']:
cs += 1
if a in constraints['ca_idx']:
ca += 1
if s in color_const:
cc += 1
return cs + ca + cc
v_n = sum([v(t) for t in demo_n]) / len(demo_n)
v_l = sum([v(t) for t in demo_l]) / len(demo_l)
v_g = sum([v(t) for t in demo_g]) / len(demo_g)
return v_n, v_l, v_g
def get_orchestrator_results(learned):
wa = np.zeros((len(learned), 11, 4))
mdft = np.zeros((len(learned), 11, 4))
avg_min_len, avg_n_len, avg_c_len = 0, 0, 0
avg_n_pen, avg_c_pen, avg_g_pen = 0, 0, 0
avg_n_v, avg_c_v, avg_g_v = 0, 0, 0
n_tests = len(learned)
for i, d in enumerate(learned):
print(f'Processing world #{i+1} ...')
n, l = get_worlds(d)
demo = d['demo_c']
aml, anl, acl = get_length_baselines(d['demo_n'], d['demo_l'])
avg_min_len += aml
avg_n_len += anl / aml
avg_c_len += acl / aml
demo_g = G.generate_greedy_trajectories(n.world, n.reward, d['learned_params'].reward,
n.start, n.terminal,
n_trajectories=N_TRAJ).trajectories
p_n, p_l, p_g = get_penalty_baselines(
d['demo_n'], d['demo_l'], demo_g, l.reward)
avg_n_pen += p_n / p_l
avg_c_pen += p_l
avg_g_pen += p_g / p_l
v_n, v_l, v_g = get_violation_baselines(
d['demo_n'], d['demo_l'], demo_g, d['constraints'])
avg_n_v += v_n
avg_c_v += v_l
avg_g_v += v_g
for j in range(11):
w = [(j)/10, 1 - (j)/10]
wa_traj = G.generate_weighted_average_trajectories(n.world, n.reward, d['learned_params'].reward,
n.start, n.terminal, w,
n_trajectories=N_TRAJ).trajectories
wa[i, j] = get_stats(demo, wa_traj, l.reward, d['constraints'], aml, p_l)
mdft_traj = G.generate_mdft_trajectories(n.world, n.reward, d['learned_params'].reward,
n.start, n.terminal, w,
n_trajectories=N_TRAJ).trajectories
mdft[i, j] = get_stats(demo, mdft_traj, l.reward, d['constraints'], aml, p_l)
avg_min_len /= n_tests
avg_n_len /= n_tests
avg_c_len /= n_tests
avg_n_pen /= n_tests
avg_c_pen /= n_tests
avg_g_pen /= n_tests
avg_n_v /= n_tests
avg_c_v /= n_tests
avg_g_v /= n_tests
return wa, mdft, avg_min_len, avg_n_len, avg_c_len, avg_n_pen, \
avg_c_pen, avg_g_pen, avg_n_v, avg_c_v, avg_g_v
def draw_metric(wa, mdft, lines, y_label, labels, filename):
y = np.stack([wa.mean(0), mdft.mean(0)])
sem = np.stack([stats.sem(wa, 0), stats.sem(mdft, 0)])
x = list(range(1, 12))
plt.figure(figsize=(12, 7))
lwidth = 0.6
for i in range(y.shape[0]):
plt.plot(x, y[i, :], 'k', color=colors[i], marker='o', fillstyle='none',
linewidth=lwidth, markersize=5, markeredgewidth=lwidth, label=labels[i])
plt.fill_between(x, (y[i, :] - sem[i, :]), y[i, :] + sem[i, :], alpha=0.2,
facecolor=colors[i], linewidth=lwidth, antialiased=True)
i = y.shape[0]
for l in lines:
plt.axhline(y=l, color=colors[i], ls='--', label=labels[i], lw=1)
i += 1
xlabels = [f'({w/10:0.1f}, {1 - w/10:0.1f})' for w in range(11)]
plt.xticks(x, labels=xlabels)
plt.legend(fontsize=14)
    plt.xlabel(r'$(\mathbf{w}_n, \mathbf{w}_c)$', fontsize=14)
plt.ylabel(y_label, fontsize=14)
plt.grid(axis='both', which='major', ls='--', lw=0.5)
plt.savefig(f'./reports/orchestrator/orchestrator_{filename}.pdf')
plt.close()
def main():
n_tests = 100
learn = False
random.seed(123)
np.random.seed(123)
if learn:
learned = learn_random_worlds(n_tests)
with open(f'results/orchestrator/learned_mdps_{n_tests}.pkl', 'wb') as f:
pickle.dump(learned, f)
else:
with open(f'results/orchestrator/learned_mdps_{n_tests}.pkl', 'rb') as f:
learned = pickle.load(f)
wa, mdft, aml, anl, acl, anp, acp, agp, anv, acv, agv = get_orchestrator_results(
learned)
draw_metric(wa[:, :, 0], mdft[:, :, 0], [1, anl, acl],
'Avg Norm. Length', ['WA', 'MDFT', 'Shortest Path', 'Nominal', 'Constrained'], 'length')
draw_metric(wa[:, :, 1], mdft[:, :, 1], [anp, 1, agp],
'Avg Norm. Penalty', ['WA', 'MDFT', 'Nominal', 'Constrained', 'Greedy'], 'penalty')
draw_metric(wa[:, :, 2], mdft[:, :, 2], [anv, acv, agv],
'Avg Num Violated Constraints', ['WA', 'MDFT', 'Nominal', 'Constrained', 'Greedy'], 'violations')
draw_metric(wa[:, :, 3], mdft[:, :, 3], [],
'Avg JS-Divergence', ['WA', 'MDFT'], 'jsd')
print('.')
if __name__ == "__main__":
main()
| 33.50173
| 117
| 0.567032
|
5a9bbba2064b46020b52e63c970b4533375bb5d3
| 2,950
|
py
|
Python
|
tests/database_test.py
|
kpence/easy_db
|
fbe4c22a79336ec08980221405aca5c65bf02caf
|
[
"MIT"
] | null | null | null |
tests/database_test.py
|
kpence/easy_db
|
fbe4c22a79336ec08980221405aca5c65bf02caf
|
[
"MIT"
] | null | null | null |
tests/database_test.py
|
kpence/easy_db
|
fbe4c22a79336ec08980221405aca5c65bf02caf
|
[
"MIT"
] | null | null | null |
import unittest
import sys
sys.path.insert(1, '..')
import easy_db
class TestSQLite(unittest.TestCase):
def setUp(self):
self.database = easy_db.DataBase('test_sqlite3_db.db')
def test_dbtype(self):
print(self.database.db_type)
self.assertTrue(self.database.db_type == 'SQLITE3')
def test_tablename_pull(self):
tables = self.database.pull_all_table_names()
print(tables)
self.assertTrue(len(tables) == 3)
self.assertTrue(tables == sorted(tables))
def test_full_table_pull(self):
test_table_data = self.database.pull_table('TEST_TABLE')
print(test_table_data[0])
self.assertTrue(type(test_table_data) == list)
self.assertTrue(type(test_table_data[0]) == dict)
self.assertTrue(len(test_table_data) == 31)
def test_full_table_pull_specific_columns(self):
test_table_data = self.database.pull_table('TEST_TABLE', columns=('row_id', 'value_1'))
print(test_table_data[0])
self.assertTrue(type(test_table_data) == list)
self.assertTrue(type(test_table_data[0]) == dict)
self.assertTrue(len(test_table_data) == 31)
self.assertTrue(len(test_table_data[0].keys()) == 2)
def test_pull_where_id_in_list(self):
test_pulled_data = self.database.pull_table_where_id_in_list('THIRD_TABLE', 'parameter', [0.66, 0.67], use_multip=False)
self.assertTrue(len(test_pulled_data) == 116)
self.assertTrue(all(d['parameter'] in [0.66, 0.67] for d in test_pulled_data))
def test_table_creation_and_deletion(self):
self.database.create_table('TEST_TABLE_CREATION', {'col_1': str, 'col_2': float})
self.database.append_to_table('TEST_TABLE_CREATION', [{'col_1': 'row_A', 'col_2': 1.5}, {'col_1': 'row_B', 'col_2': 3.7}])
self.database.drop_table('TEST_TABLE_CREATION')
self.assertTrue(True)
def test_progress_callback(self):
callback = lambda *args: print('Making progress...')
data = self.database.pull_table('THIRD_TABLE', progress_handler=callback)
class TestUtil(unittest.TestCase):
def setUp(self):
self.database = easy_db.DataBase('test_sqlite3_db.db')
def test_name_clean(self):
self.assertTrue(easy_db.util.name_clean('table'))
self.assertFalse(easy_db.util.name_clean('Malic10s;--'))
self.assertFalse(easy_db.util.name_clean('DROP TABLE;'))
self.assertTrue(easy_db.util.name_clean('TABLE_1'))
self.assertFalse(easy_db.util.name_clean('{email=dude@test.com}'))
self.assertFalse(easy_db.util.name_clean('drop'))
def test_malicious_query(self):
data = self.database.pull_table('DROP TABLE TEST_TABLE')
self.assertTrue(data is None)
data = self.database.pull_table('TEST_TABLE', columns=('row_id;1=1;--', 'value_1'))
self.assertTrue(data is None)
if __name__ == '__main__':
unittest.main(buffer=True)
| 37.820513
| 130
| 0.681017
|
d068f93cd9d42a38f12b53e71fb0be275ba8dcb5
| 16,319
|
py
|
Python
|
src/genie/libs/parser/iosxe/tests/ShowSdwanTunnelStatistics/cli/equal/pktdup_stats_golden_output_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/ShowSdwanTunnelStatistics/cli/equal/pktdup_stats_golden_output_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | null | null | null |
src/genie/libs/parser/iosxe/tests/ShowSdwanTunnelStatistics/cli/equal/pktdup_stats_golden_output_expected.py
|
balmasea/genieparser
|
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
|
[
"Apache-2.0"
] | null | null | null |
expected_output = {
"tunnel": {
"150.0.5.1": {
"remote": {
"150.0.0.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.1.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.2.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.3.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.4.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.6.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.7.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.8.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.10.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.40.4": {
"src_port": 12346,
"dst_port": 12366,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.0.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.1.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.2.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.3.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.4.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.6.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.7.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.40.4": {
"src_port": 12346,
"dst_port": 12366,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
}
}
},
"151.0.5.1": {
"remote": {
"150.0.0.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.1.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.2.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.3.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.4.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.6.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.7.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.8.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.10.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"150.0.40.4": {
"src_port": 12346,
"dst_port": 12366,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.0.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.1.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.2.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.3.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.4.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.6.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.7.1": {
"src_port": 12346,
"dst_port": 12346,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
},
"151.0.40.4": {
"src_port": 12346,
"dst_port": 12366,
"pktdup": {
"pktdup_rx": 0,
"pktdup_rx_other": 0,
"pktdup_rx_this": 0,
"pktdup_tx": 0,
"pktdup_tx_other": 0,
"pktdup_capable": "true"
}
}
}
}
}
}
| 36.67191
| 48
| 0.274465
|
f0e528701f18e938d7bf9eea0c313be63eb90cc6
| 812
|
py
|
Python
|
djangoueditor/urls.py
|
it-ailen/django-ueditor
|
b3e242457169d71c1531259746b8b9f597e03e3a
|
[
"MIT"
] | null | null | null |
djangoueditor/urls.py
|
it-ailen/django-ueditor
|
b3e242457169d71c1531259746b8b9f597e03e3a
|
[
"MIT"
] | null | null | null |
djangoueditor/urls.py
|
it-ailen/django-ueditor
|
b3e242457169d71c1531259746b8b9f597e03e3a
|
[
"MIT"
] | null | null | null |
"""djangoueditor URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from .views import *
urlpatterns = [
url(r'^upload/(?P<path>.*)$', ueditor_upload, name="ueditor_upload"),
]
| 33.833333
| 77
| 0.705665
|
6fa89d223418afe321392afbc356757ee97cefad
| 2,339
|
py
|
Python
|
dhnx/dhn_from_osm.py
|
rbv83/DHNx
|
e236d720c7ec3c0f400648b96141454557d35476
|
[
"MIT"
] | 14
|
2020-06-25T14:03:21.000Z
|
2021-11-25T12:53:08.000Z
|
dhnx/dhn_from_osm.py
|
rbv83/DHNx
|
e236d720c7ec3c0f400648b96141454557d35476
|
[
"MIT"
] | 51
|
2020-02-19T14:42:38.000Z
|
2022-03-23T08:30:31.000Z
|
dhnx/dhn_from_osm.py
|
oemof-heat/district_heating_simulation
|
edb5c9be17f74d7f200c1eb6a17000a26633bdc3
|
[
"MIT"
] | 3
|
2020-10-23T15:54:11.000Z
|
2022-02-28T12:53:09.000Z
|
# -*- coding: utf-8
"""
This module is designed to hold functions for importing networks and building
footprints from openstreetmap.
This file is part of project dhnx (). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location:
SPDX-License-Identifier: MIT
"""
try:
import geopandas as gpd
except ImportError:
print("Need to install geopandas to process osm data.")
import pandas as pd
try:
from shapely.geometry import LineString
from shapely.ops import nearest_points
except ImportError:
print("Need to install shapely to download from osm.")
def connect_points_to_network(points, nodes, edges):
r"""
    Parameters
    ----------
points : geopandas.GeoDataFrame
Points to connect to the network
nodes : geopandas.GeoDataFrame
Nodes of the network
edges : geopandas.GeoDataFrame
Edges of the network
Returns
-------
points : geopandas.GeoDataFrame
Points connected to the network
nodes : geopandas.GeoDataFrame
Original nodes of the network and
nearest connecting points on the
network's edges.
edges : geopandas.GeoDataFrame
Edges of the network.
"""
edges_united = edges.unary_union
len_nodes = len(nodes)
len_points = len(points)
# assign ids to new points
n_points = []
n_nearest_points = []
n_edges = []
for i, point in enumerate(points.geometry):
id_nearest_point = len_nodes + i
id_point = len_nodes + len_points + i
nearest_point = nearest_points(edges_united, point)[0]
n_points.append([id_point, point])
n_nearest_points.append([id_nearest_point, nearest_point])
n_edges.append([id_point, id_nearest_point, LineString([point, nearest_point])])
n_points = gpd.GeoDataFrame(
n_points,
columns=['index', 'geometry']).set_index('index')
n_nearest_points = gpd.GeoDataFrame(
n_nearest_points,
columns=['index', 'geometry']).set_index('index')
n_edges = gpd.GeoDataFrame(n_edges, columns=['u', 'v', 'geometry'])
joined_nodes = pd.concat([nodes, n_nearest_points], sort=True)
joined_edges = pd.concat([edges, n_edges], sort=True)
return n_points, joined_nodes, joined_edges
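# A minimal, self-contained sketch of how connect_points_to_network() can be
# called; the data below is illustrative only and not part of the original
# dhnx examples.
if __name__ == "__main__":
    from shapely.geometry import Point
    network_nodes = gpd.GeoDataFrame({"geometry": [Point(0, 0), Point(2, 0)]})
    network_edges = gpd.GeoDataFrame(
        {"u": [0], "v": [1], "geometry": [LineString([(0, 0), (2, 0)])]})
    buildings = gpd.GeoDataFrame({"geometry": [Point(1, 1)]})
    pts, joined_nodes, joined_edges = connect_points_to_network(
        buildings, network_nodes, network_edges)
    # Each building gets one connection point on the network and one new edge.
    print(len(pts), len(joined_nodes), len(joined_edges))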
| 24.882979
| 88
| 0.68106
|
b8c863d14b010f5b9e22db407663bbb5ef59ed96
| 5,845
|
py
|
Python
|
WRAPPERS/corrmat_from_regionalmeasures.py
|
harshitjindal/BrainNetworksInPython
|
7b35d0693b5ea05f51a9b7b3e711f82e12c70a24
|
[
"MIT"
] | null | null | null |
WRAPPERS/corrmat_from_regionalmeasures.py
|
harshitjindal/BrainNetworksInPython
|
7b35d0693b5ea05f51a9b7b3e711f82e12c70a24
|
[
"MIT"
] | null | null | null |
WRAPPERS/corrmat_from_regionalmeasures.py
|
harshitjindal/BrainNetworksInPython
|
7b35d0693b5ea05f51a9b7b3e711f82e12c70a24
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#=============================================================================
# Created by Kirstie Whitaker
# at Hot Numbers coffee shop on Trumpington Road in Cambridge, September 2016
# Contact: kw401@cam.ac.uk
#=============================================================================
#=============================================================================
# IMPORTS
#=============================================================================
from __future__ import print_function # Making code python 2 and 3 compatible
import os
import sys
import argparse
import textwrap
import numpy as np
import pandas as pd
sys.path.append(os.path.join(os.path.dirname(__file__), '../SCRIPTS/'))
import make_corr_matrices as mcm
from useful_functions import read_in_data
#=============================================================================
# FUNCTIONS
#=============================================================================
def setup_argparser():
'''
Code to read in arguments from the command line
Also allows you to change some settings
'''
# Build a basic parser.
help_text = (('Generate a structural correlation matrix from an input csv file,\n')+
('a list of region names and (optional) covariates.'))
sign_off = 'Author: Kirstie Whitaker <kw401@cam.ac.uk>'
parser = argparse.ArgumentParser(description=help_text,
epilog=sign_off,
formatter_class=argparse.RawTextHelpFormatter)
# Now add the arguments
parser.add_argument(dest='regional_measures_file',
type=str,
metavar='regional_measures_file',
help=textwrap.dedent(('CSV file that contains regional values for each participant.\n')+
('Column labels should be the region names or covariate variable\n')+
('names. All participants in the file will be included in the\n')+
('correlation matrix.')))
parser.add_argument(dest='names_file',
type=str,
metavar='names_file',
help=textwrap.dedent(('Text file that contains the names of each region to be included\n')+
('in the correlation matrix. One region name on each line.')))
parser.add_argument(dest='output_name',
type=str,
metavar='output_name',
help=textwrap.dedent(('File name of the output correlation matrix.\n')+
('If the output directory does not yet exist it will be created.')))
parser.add_argument('--covars_file',
type=str,
metavar='covars_file',
help=textwrap.dedent(('Text file that contains the names of variables that should be\n')+
('covaried for each regional measure before the creation of the\n')+
('correlation matrix. One variable name on each line.\n')+
(' Default: None')),
default=None)
parser.add_argument('--names_308_style',
action='store_true',
help=textwrap.dedent(('Include this flag if your names are in the NSPN 308\n')+
('parcellation style (which means you have 41 subcortical regions)\n')+
('that are still in the names files and that\n')+
('the names are in <hemi>_<DK-region>_<part> format.\n')+
(' Default: False')),
default=False)
arguments = parser.parse_args()
return arguments, parser
def corrmat_from_regionalmeasures(regional_measures_file,
names_file,
output_name,
covars_file=None,
names_308_style=False):
'''
This is the big function!
It reads in the CSV file that contains the regional measures for each
participant, the names file and the list of covariates.
Then it creates the correlation matrix and writes it out to the output_dir
as a txt file.
'''
# Read in the data
df, names, covars_list = read_in_data(regional_measures_file,
names_file,
covars_file=covars_file,
names_308_style=names_308_style)
# Make your correlation matrix correcting for all the covariates
M = mcm.create_corrmat(df, names, covars_list)
# Save the corrmat
mcm.save_mat(M, output_name)
if __name__ == "__main__":
# Read in the command line arguments
arg, parser = setup_argparser()
# Now run the main function :)
corrmat_from_regionalmeasures(arg.regional_measures_file,
arg.names_file,
arg.output_name,
covars_file=arg.covars_file,
names_308_style=arg.names_308_style)
#=============================================================================
# Wooo! All done :)
#=============================================================================
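# Example invocation (file names below are hypothetical and only illustrate
# the argument order defined in setup_argparser above):
#   python corrmat_from_regionalmeasures.py \
#       regional_measures.csv names.txt output/corrmat.txt \
#       --covars_file covars.txt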
| 45.310078
| 120
| 0.466039
|
fbdb16dacb533396fe23c9789778779913cae88e
| 2,634
|
py
|
Python
|
deepspeed/autotuning/tuner/base_tuner.py
|
ganik/DeepSpeed
|
788e1c40e83beacfc4901e7daa1e097d2efb82bb
|
[
"MIT"
] | 1
|
2022-03-15T07:00:38.000Z
|
2022-03-15T07:00:38.000Z
|
deepspeed/autotuning/tuner/base_tuner.py
|
ganik/DeepSpeed
|
788e1c40e83beacfc4901e7daa1e097d2efb82bb
|
[
"MIT"
] | null | null | null |
deepspeed/autotuning/tuner/base_tuner.py
|
ganik/DeepSpeed
|
788e1c40e83beacfc4901e7daa1e097d2efb82bb
|
[
"MIT"
] | null | null | null |
import atexit
import sys
from deepspeed.autotuning.constants import *
from deepspeed.autotuning.utils import write_experiments
from deepspeed.utils import logger
import json
class BaseTuner:
def __init__(self, exps, resource_manager, metric):
self.all_exps = exps
self.rm = resource_manager
self.best_iter = 0
self.best_exp = None
self.best_metric_val = None
self.metric = metric if metric else AUTOTUNING_METRIC_DEFAULT
logger.info(f"total number of exps = {len(self.all_exps)}")
def has_next(self):
"""Whether there exists more configurations for evaluation"""
if len(self.all_exps) > 0:
return True
else:
return False
def next_batch(self, sample_size):
"""Select the next batch of configurations for evaluation"""
raise NotImplementedError
def update(self):
""""Update the tuner with what configurations have been evaluated and their performance results"""
def tune(self, sample_size=1, n_trials=1000, early_stopping=None):
i = 0
try:
while i < n_trials and self.has_next():
                # Select the next batch of configurations for evaluation
sampled_exps = self.next_batch(sample_size)
# Generate experiments for measurement of performance
exp_paths = write_experiments(sampled_exps, self.rm.exps_dir)
self.rm.schedule_experiments(exp_paths)
self.rm.run()
exp, metric_val = self.rm.parse_results(self.metric)
                if self.best_exp is None or self.best_metric_val is None or (
                        metric_val and metric_val > self.best_metric_val):
# logger.info(f"tuner finds better = {exp}")
self.best_exp = exp
self.best_metric_val = metric_val
self.best_iter = i
i += len(sampled_exps)
# Update the tuner with evaluated performance results
self.update()
self.rm.clear()
# Early stop if no more promising configurations are likely to be found
if early_stopping and i >= self.best_iter + early_stopping:
logger.info(
f"Tuner early stopped at iteration {i}. Best iteration is {self.best_iter}. Early stopping threshold is {early_stopping}"
)
break
return i
        except Exception:
            logger.info(f"Tuner error: {sys.exc_info()[0]}")
return i
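# Minimal illustration (not part of DeepSpeed itself) of the contract a
# concrete tuner has to satisfy: next_batch() returns up to `sample_size`
# untried experiments and removes them from the pool; tune() above handles
# scheduling, result parsing and early stopping.
class _ExampleSequentialTuner(BaseTuner):
    def next_batch(self, sample_size):
        sampled = self.all_exps[:sample_size]
        self.all_exps = self.all_exps[sample_size:]
        return sampled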
| 37.628571
| 145
| 0.596431
|
cf99c86fbf5d03234123496bd5b440ab7733b0db
| 1,308
|
py
|
Python
|
backend/benefit/users/utils.py
|
City-of-Helsinki/kesaseteli
|
964f801c2dba72c4105b6e436b12b821b199d6d2
|
[
"MIT"
] | 2
|
2021-05-10T09:28:35.000Z
|
2021-05-17T12:15:34.000Z
|
backend/benefit/users/utils.py
|
City-of-Helsinki/yjdh
|
1c07576b456d2be9c3171363450ed46de2c1bbcb
|
[
"MIT"
] | 931
|
2021-05-21T15:24:35.000Z
|
2022-03-31T20:07:40.000Z
|
backend/benefit/users/utils.py
|
City-of-Helsinki/yjdh
|
1c07576b456d2be9c3171363450ed46de2c1bbcb
|
[
"MIT"
] | 6
|
2021-07-06T11:07:02.000Z
|
2022-02-07T12:42:21.000Z
|
from companies.models import Company
from companies.services import get_or_create_organisation_with_business_id
from django.conf import settings
from shared.oidc.utils import get_organization_roles
def get_request_user_from_context(serializer):
request = serializer.context.get("request")
if request:
return request.user
return None
def get_business_id_from_request(request):
if request.user.is_authenticated:
organization_roles = get_organization_roles(request)
return organization_roles.get("identifier")
return None
def get_company_from_request(request):
if settings.DISABLE_AUTHENTICATION:
return Company.objects.all().order_by("name").first()
if business_id := get_business_id_from_request(request):
try:
return Company.objects.get(
business_id=business_id
) # unique constraint ensures at most one is returned
except Company.DoesNotExist:
            # In case we cannot find the Company in the DB, try to query it from a 3rd party source.
            # This covers the case where the first applicant of a company logs in before
            # their company has been created yet.
return get_or_create_organisation_with_business_id(business_id)
else:
return None
| 34.421053
| 99
| 0.722477
|
e13909f5bc704641a107b85dccbe5489209d8f27
| 165
|
py
|
Python
|
MN_lab_1/MN_lab1_zad_5.py
|
matbocz/kurs-mn-python-pwsz-elblag
|
629d778be7c5d3b6cc217b7ba48e2e0d55ccdf36
|
[
"MIT"
] | null | null | null |
MN_lab_1/MN_lab1_zad_5.py
|
matbocz/kurs-mn-python-pwsz-elblag
|
629d778be7c5d3b6cc217b7ba48e2e0d55ccdf36
|
[
"MIT"
] | null | null | null |
MN_lab_1/MN_lab1_zad_5.py
|
matbocz/kurs-mn-python-pwsz-elblag
|
629d778be7c5d3b6cc217b7ba48e2e0d55ccdf36
|
[
"MIT"
] | null | null | null |
#EXERCISE 5
#Write a program that computes n! (factorial) for a fixed n.
N=5
wynik=1
for i in range(1,N+1):
wynik=wynik*i
print("Wynik to: %d" % wynik)
| 16.5
| 64
| 0.660606
|
23cca8ba297885bc44bb93ec869084464cd008b5
| 6,016
|
py
|
Python
|
nomadgram2/users/views.py
|
vanadis0929/nomadgram2
|
4a6aa88848bec25ee41a5afa9be4d7f6af515dc4
|
[
"MIT"
] | 1
|
2020-07-27T19:56:24.000Z
|
2020-07-27T19:56:24.000Z
|
nomadgram2/users/views.py
|
vanadis0929/nomadgram2
|
4a6aa88848bec25ee41a5afa9be4d7f6af515dc4
|
[
"MIT"
] | 28
|
2019-07-03T20:44:58.000Z
|
2022-03-15T19:35:20.000Z
|
nomadgram2/users/views.py
|
vanadis0929/nomadgram2
|
4a6aa88848bec25ee41a5afa9be4d7f6af515dc4
|
[
"MIT"
] | null | null | null |
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from . import models, serializers
from nomadgram2.notifications import views as notification_views
from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter
from rest_auth.registration.views import SocialLoginView
class ExploreUsers(APIView):
def get(self, request, format=None):
last_five = models.User.objects.all().order_by('-date_joined')[:5]
serializer = serializers.ListUserSerializer(
last_five, many=True, context={"request": request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
class FollowUser(APIView):
def post(self, request, user_id, format=None):
user = request.user
try:
user_to_follow = models.User.objects.get(id=user_id)
except models.User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
user.following.add(user_to_follow)
user.save()
notification_views.create_notification(user, user_to_follow, 'follow')
return Response(status=status.HTTP_200_OK)
class UnFollowUser(APIView):
def post(self, request, user_id, format=None):
user = request.user
try:
user_to_follow = models.User.objects.get(id=user_id)
except models.User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
user.following.remove(user_to_follow)
user.save()
return Response(status=status.HTTP_200_OK)
class UserProfile(APIView):
def get_user(self, username):
try:
found_user = models.User.objects.get(username=username)
return found_user
except models.User.DoesNotExist:
return None
def get(self, request, username, format=None):
found_user = self.get_user(username)
if found_user is None:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = serializers.UserProfileSerializer(
found_user, context={'request': request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
def put(self, request, username, format=None):
user = request.user
found_user = self.get_user(username)
if found_user is None:
return Response(status=status.HTTP_404_NOT_FOUND)
elif found_user.username != user.username:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
serializer = serializers.UserProfileSerializer(
found_user, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(data=serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class UserFollowers(APIView):
def get(self, request, username, format=None):
try:
found_user = models.User.objects.get(username=username)
except models.User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
user_followers = found_user.followers.all()
serializer = serializers.ListUserSerializer(
user_followers, many=True, context={"request": request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
class UserFollowing(APIView):
def get(self, request, username, format=None):
try:
found_user = models.User.objects.get(username=username)
except models.User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
user_following = found_user.following.all()
serializer = serializers.ListUserSerializer(
user_following, many=True, context={"request": request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
class Search(APIView):
def get(self, request, format=None):
username = request.query_params.get('username', None)
if username is not None:
users = models.User.objects.filter(username__istartswith=username)
serializer = serializers.ListUserSerializer(
users, many=True, context={"request": request})
return Response(data=serializer.data, status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
class ChangePassword(APIView):
def put(self, request, username, format=None):
user = request.user
if user.username == username:
current_password = request.data.get('current_password', None)
if current_password is not None:
passwords_match = user.check_password(current_password)
if passwords_match:
new_password = request.data.get('new_password', None)
if new_password is not None:
user.set_password(new_password)
user.save()
return Response(status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
class FacebookLogin(SocialLoginView):
adapter_class = FacebookOAuth2Adapter
class RegisterPush(APIView):
def post(self, request):
user = request.user
token = request.data.get('token', None)
if token is not None:
user.push_token = token
user.save()
return Response(status=status.HTTP_200_OK)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
| 25.819742
| 91
| 0.651596
|
1454c279d4ca83c2171242a7a5f5177dcb97c548
| 22,971
|
py
|
Python
|
Janaagraha Bot/venv/Lib/site-packages/google/api/service_pb2.py
|
CFGIndia20/team-19
|
e2b27ad8009303d262c2dc60551d6fcc4645b3b5
|
[
"MIT"
] | 11
|
2021-09-19T06:32:44.000Z
|
2022-03-14T19:09:46.000Z
|
Janaagraha Bot/venv/Lib/site-packages/google/api/service_pb2.py
|
CFGIndia20/team-19
|
e2b27ad8009303d262c2dc60551d6fcc4645b3b5
|
[
"MIT"
] | null | null | null |
Janaagraha Bot/venv/Lib/site-packages/google/api/service_pb2.py
|
CFGIndia20/team-19
|
e2b27ad8009303d262c2dc60551d6fcc4645b3b5
|
[
"MIT"
] | 2
|
2020-09-22T06:01:26.000Z
|
2020-09-22T18:23:49.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/service.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import auth_pb2 as google_dot_api_dot_auth__pb2
from google.api import backend_pb2 as google_dot_api_dot_backend__pb2
from google.api import billing_pb2 as google_dot_api_dot_billing__pb2
from google.api import context_pb2 as google_dot_api_dot_context__pb2
from google.api import control_pb2 as google_dot_api_dot_control__pb2
from google.api import documentation_pb2 as google_dot_api_dot_documentation__pb2
from google.api import endpoint_pb2 as google_dot_api_dot_endpoint__pb2
from google.api import http_pb2 as google_dot_api_dot_http__pb2
from google.api import log_pb2 as google_dot_api_dot_log__pb2
from google.api import logging_pb2 as google_dot_api_dot_logging__pb2
from google.api import metric_pb2 as google_dot_api_dot_metric__pb2
from google.api import (
monitored_resource_pb2 as google_dot_api_dot_monitored__resource__pb2,
)
from google.api import monitoring_pb2 as google_dot_api_dot_monitoring__pb2
from google.api import quota_pb2 as google_dot_api_dot_quota__pb2
from google.api import source_info_pb2 as google_dot_api_dot_source__info__pb2
from google.api import system_parameter_pb2 as google_dot_api_dot_system__parameter__pb2
from google.api import usage_pb2 as google_dot_api_dot_usage__pb2
from google.protobuf import api_pb2 as google_dot_protobuf_dot_api__pb2
from google.protobuf import type_pb2 as google_dot_protobuf_dot_type__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/api/service.proto",
package="google.api",
syntax="proto3",
serialized_options=b"\n\016com.google.apiB\014ServiceProtoP\001ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\242\002\004GAPI",
serialized_pb=b"\n\x18google/api/service.proto\x12\ngoogle.api\x1a\x1cgoogle/api/annotations.proto\x1a\x15google/api/auth.proto\x1a\x18google/api/backend.proto\x1a\x18google/api/billing.proto\x1a\x18google/api/context.proto\x1a\x18google/api/control.proto\x1a\x1egoogle/api/documentation.proto\x1a\x19google/api/endpoint.proto\x1a\x15google/api/http.proto\x1a\x14google/api/log.proto\x1a\x18google/api/logging.proto\x1a\x17google/api/metric.proto\x1a#google/api/monitored_resource.proto\x1a\x1bgoogle/api/monitoring.proto\x1a\x16google/api/quota.proto\x1a\x1cgoogle/api/source_info.proto\x1a!google/api/system_parameter.proto\x1a\x16google/api/usage.proto\x1a\x19google/protobuf/api.proto\x1a\x1agoogle/protobuf/type.proto\x1a\x1egoogle/protobuf/wrappers.proto\"\xdc\x07\n\x07Service\x12\x34\n\x0e\x63onfig_version\x18\x14 \x01(\x0b\x32\x1c.google.protobuf.UInt32Value\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18! \x01(\t\x12\r\n\x05title\x18\x02 \x01(\t\x12\x1b\n\x13producer_project_id\x18\x16 \x01(\t\x12\"\n\x04\x61pis\x18\x03 \x03(\x0b\x32\x14.google.protobuf.Api\x12$\n\x05types\x18\x04 \x03(\x0b\x32\x15.google.protobuf.Type\x12$\n\x05\x65nums\x18\x05 \x03(\x0b\x32\x15.google.protobuf.Enum\x12\x30\n\rdocumentation\x18\x06 \x01(\x0b\x32\x19.google.api.Documentation\x12$\n\x07\x62\x61\x63kend\x18\x08 \x01(\x0b\x32\x13.google.api.Backend\x12\x1e\n\x04http\x18\t \x01(\x0b\x32\x10.google.api.Http\x12 \n\x05quota\x18\n \x01(\x0b\x32\x11.google.api.Quota\x12\x32\n\x0e\x61uthentication\x18\x0b \x01(\x0b\x32\x1a.google.api.Authentication\x12$\n\x07\x63ontext\x18\x0c \x01(\x0b\x32\x13.google.api.Context\x12 \n\x05usage\x18\x0f \x01(\x0b\x32\x11.google.api.Usage\x12'\n\tendpoints\x18\x12 \x03(\x0b\x32\x14.google.api.Endpoint\x12$\n\x07\x63ontrol\x18\x15 \x01(\x0b\x32\x13.google.api.Control\x12'\n\x04logs\x18\x17 \x03(\x0b\x32\x19.google.api.LogDescriptor\x12-\n\x07metrics\x18\x18 \x03(\x0b\x32\x1c.google.api.MetricDescriptor\x12\x44\n\x13monitored_resources\x18\x19 \x03(\x0b\x32'.google.api.MonitoredResourceDescriptor\x12$\n\x07\x62illing\x18\x1a \x01(\x0b\x32\x13.google.api.Billing\x12$\n\x07logging\x18\x1b \x01(\x0b\x32\x13.google.api.Logging\x12*\n\nmonitoring\x18\x1c \x01(\x0b\x32\x16.google.api.Monitoring\x12\x37\n\x11system_parameters\x18\x1d \x01(\x0b\x32\x1c.google.api.SystemParameters\x12+\n\x0bsource_info\x18% \x01(\x0b\x32\x16.google.api.SourceInfoJ\x04\x08\x65\x10\x66\x42n\n\x0e\x63om.google.apiB\x0cServiceProtoP\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\xa2\x02\x04GAPIb\x06proto3",
dependencies=[
google_dot_api_dot_annotations__pb2.DESCRIPTOR,
google_dot_api_dot_auth__pb2.DESCRIPTOR,
google_dot_api_dot_backend__pb2.DESCRIPTOR,
google_dot_api_dot_billing__pb2.DESCRIPTOR,
google_dot_api_dot_context__pb2.DESCRIPTOR,
google_dot_api_dot_control__pb2.DESCRIPTOR,
google_dot_api_dot_documentation__pb2.DESCRIPTOR,
google_dot_api_dot_endpoint__pb2.DESCRIPTOR,
google_dot_api_dot_http__pb2.DESCRIPTOR,
google_dot_api_dot_log__pb2.DESCRIPTOR,
google_dot_api_dot_logging__pb2.DESCRIPTOR,
google_dot_api_dot_metric__pb2.DESCRIPTOR,
google_dot_api_dot_monitored__resource__pb2.DESCRIPTOR,
google_dot_api_dot_monitoring__pb2.DESCRIPTOR,
google_dot_api_dot_quota__pb2.DESCRIPTOR,
google_dot_api_dot_source__info__pb2.DESCRIPTOR,
google_dot_api_dot_system__parameter__pb2.DESCRIPTOR,
google_dot_api_dot_usage__pb2.DESCRIPTOR,
google_dot_protobuf_dot_api__pb2.DESCRIPTOR,
google_dot_protobuf_dot_type__pb2.DESCRIPTOR,
google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,
],
)
_SERVICE = _descriptor.Descriptor(
name="Service",
full_name="google.api.Service",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="config_version",
full_name="google.api.Service.config_version",
index=0,
number=20,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="name",
full_name="google.api.Service.name",
index=1,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="id",
full_name="google.api.Service.id",
index=2,
number=33,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="title",
full_name="google.api.Service.title",
index=3,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="producer_project_id",
full_name="google.api.Service.producer_project_id",
index=4,
number=22,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="apis",
full_name="google.api.Service.apis",
index=5,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="types",
full_name="google.api.Service.types",
index=6,
number=4,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="enums",
full_name="google.api.Service.enums",
index=7,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="documentation",
full_name="google.api.Service.documentation",
index=8,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="backend",
full_name="google.api.Service.backend",
index=9,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="http",
full_name="google.api.Service.http",
index=10,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="quota",
full_name="google.api.Service.quota",
index=11,
number=10,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="authentication",
full_name="google.api.Service.authentication",
index=12,
number=11,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="context",
full_name="google.api.Service.context",
index=13,
number=12,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="usage",
full_name="google.api.Service.usage",
index=14,
number=15,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="endpoints",
full_name="google.api.Service.endpoints",
index=15,
number=18,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="control",
full_name="google.api.Service.control",
index=16,
number=21,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="logs",
full_name="google.api.Service.logs",
index=17,
number=23,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="metrics",
full_name="google.api.Service.metrics",
index=18,
number=24,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="monitored_resources",
full_name="google.api.Service.monitored_resources",
index=19,
number=25,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="billing",
full_name="google.api.Service.billing",
index=20,
number=26,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="logging",
full_name="google.api.Service.logging",
index=21,
number=27,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="monitoring",
full_name="google.api.Service.monitoring",
index=22,
number=28,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="system_parameters",
full_name="google.api.Service.system_parameters",
index=23,
number=29,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="source_info",
full_name="google.api.Service.source_info",
index=24,
number=37,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=619,
serialized_end=1607,
)
_SERVICE.fields_by_name[
"config_version"
].message_type = google_dot_protobuf_dot_wrappers__pb2._UINT32VALUE
_SERVICE.fields_by_name["apis"].message_type = google_dot_protobuf_dot_api__pb2._API
_SERVICE.fields_by_name["types"].message_type = google_dot_protobuf_dot_type__pb2._TYPE
_SERVICE.fields_by_name["enums"].message_type = google_dot_protobuf_dot_type__pb2._ENUM
_SERVICE.fields_by_name[
"documentation"
].message_type = google_dot_api_dot_documentation__pb2._DOCUMENTATION
_SERVICE.fields_by_name[
"backend"
].message_type = google_dot_api_dot_backend__pb2._BACKEND
_SERVICE.fields_by_name["http"].message_type = google_dot_api_dot_http__pb2._HTTP
_SERVICE.fields_by_name["quota"].message_type = google_dot_api_dot_quota__pb2._QUOTA
_SERVICE.fields_by_name[
"authentication"
].message_type = google_dot_api_dot_auth__pb2._AUTHENTICATION
_SERVICE.fields_by_name[
"context"
].message_type = google_dot_api_dot_context__pb2._CONTEXT
_SERVICE.fields_by_name["usage"].message_type = google_dot_api_dot_usage__pb2._USAGE
_SERVICE.fields_by_name[
"endpoints"
].message_type = google_dot_api_dot_endpoint__pb2._ENDPOINT
_SERVICE.fields_by_name[
"control"
].message_type = google_dot_api_dot_control__pb2._CONTROL
_SERVICE.fields_by_name[
"logs"
].message_type = google_dot_api_dot_log__pb2._LOGDESCRIPTOR
_SERVICE.fields_by_name[
"metrics"
].message_type = google_dot_api_dot_metric__pb2._METRICDESCRIPTOR
_SERVICE.fields_by_name[
"monitored_resources"
].message_type = (
google_dot_api_dot_monitored__resource__pb2._MONITOREDRESOURCEDESCRIPTOR
)
_SERVICE.fields_by_name[
"billing"
].message_type = google_dot_api_dot_billing__pb2._BILLING
_SERVICE.fields_by_name[
"logging"
].message_type = google_dot_api_dot_logging__pb2._LOGGING
_SERVICE.fields_by_name[
"monitoring"
].message_type = google_dot_api_dot_monitoring__pb2._MONITORING
_SERVICE.fields_by_name[
"system_parameters"
].message_type = google_dot_api_dot_system__parameter__pb2._SYSTEMPARAMETERS
_SERVICE.fields_by_name[
"source_info"
].message_type = google_dot_api_dot_source__info__pb2._SOURCEINFO
DESCRIPTOR.message_types_by_name["Service"] = _SERVICE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Service = _reflection.GeneratedProtocolMessageType(
"Service",
(_message.Message,),
{
"DESCRIPTOR": _SERVICE,
"__module__": "google.api.service_pb2"
# @@protoc_insertion_point(class_scope:google.api.Service)
},
)
_sym_db.RegisterMessage(Service)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 36.636364
| 2,562
| 0.626747
|
33aa5f73efa4c55575a39b3acca528e546bdfb36
| 1,253
|
py
|
Python
|
python/Genetic Programming/Examples/pyevolve_ex6_dbadapter.py
|
josephlewis42/personal_codebase
|
aa0fff9a908ab90bc78d24aa69d1b91163c35314
|
[
"Unlicense"
] | 3
|
2015-11-24T17:06:58.000Z
|
2018-05-01T14:03:57.000Z
|
python/Genetic Programming/Examples/pyevolve_ex6_dbadapter.py
|
josephlewis42/personal_codebase
|
aa0fff9a908ab90bc78d24aa69d1b91163c35314
|
[
"Unlicense"
] | null | null | null |
python/Genetic Programming/Examples/pyevolve_ex6_dbadapter.py
|
josephlewis42/personal_codebase
|
aa0fff9a908ab90bc78d24aa69d1b91163c35314
|
[
"Unlicense"
] | null | null | null |
from pyevolve import G1DList
from pyevolve import GSimpleGA
from pyevolve import Selectors
from pyevolve import DBAdapters
from pyevolve import Statistics
# This is the evaluation function; we want to give
# a higher score to chromosomes containing more zeroes
def eval_func(chromosome):
score = 0.0
# iterate over the chromosome
for value in chromosome:
if value==0:
score += 0.5
return score
# Genome instance
genome = G1DList.G1DList(100)
genome.setParams(rangemin=0, rangemax=10)
# The evaluator function (objective function)
genome.evaluator.set(eval_func)
# Genetic Algorithm Instance
ga = GSimpleGA.GSimpleGA(genome, 666)
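# The second argument (666) seeds pyevolve's random number generator so the run is reproducible.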
ga.setGenerations(80)
ga.setMutationRate(0.2)
# Create DB Adapter and set as adapter
#sqlite_adapter = DBAdapters.DBSQLite(identify="ex6", resetDB=True)
#ga.setDBAdapter(sqlite_adapter)
# Using CSV Adapter
#csvfile_adapter = DBAdapters.DBFileCSV()
#ga.setDBAdapter(csvfile_adapter)
# Using the URL Post Adapter
# urlpost_adapter = DBAdapters.DBURLPost(url="http://whatismyip.oceanus.ro/server_variables.php", post=False)
# ga.setDBAdapter(urlpost_adapter)
# Do the evolution, with stats dump
# frequency of 10 generations
ga.evolve(freq_stats=10)
# Best individual
#print ga.bestIndividual()
| 26.104167
| 109
| 0.776536
|
1f072cd325f4e8217e7d49b241de359e79794902
| 7,173
|
py
|
Python
|
avalanche/training/templates/online_supervised.py
|
coreylowman/avalanche
|
9c1e7765f1577c400ec0c57260221bcffd9566a2
|
[
"MIT"
] | 1
|
2021-09-15T13:57:27.000Z
|
2021-09-15T13:57:27.000Z
|
avalanche/training/templates/online_supervised.py
|
coreylowman/avalanche
|
9c1e7765f1577c400ec0c57260221bcffd9566a2
|
[
"MIT"
] | null | null | null |
avalanche/training/templates/online_supervised.py
|
coreylowman/avalanche
|
9c1e7765f1577c400ec0c57260221bcffd9566a2
|
[
"MIT"
] | null | null | null |
import copy
import warnings
from typing import Optional, List, Union, Sequence
import torch
from torch.nn import Module, CrossEntropyLoss
from torch.optim import Optimizer
from avalanche.benchmarks import Experience
from avalanche.benchmarks.utils import AvalancheSubset
from avalanche.models import DynamicModule
from avalanche.training.plugins import SupervisedPlugin, EvaluationPlugin
from avalanche.training.plugins.evaluation import default_evaluator
from avalanche.training.templates.supervised import SupervisedTemplate
class SupervisedOnlineTemplate(SupervisedTemplate):
def __init__(
self,
model: Module,
optimizer: Optimizer,
criterion=CrossEntropyLoss(),
num_passes: int = 1,
train_mb_size: int = 1,
eval_mb_size: int = None,
device=None,
plugins: Optional[List[SupervisedPlugin]] = None,
evaluator: EvaluationPlugin = default_evaluator,
eval_every=-1,
):
super().__init__(
model,
optimizer,
criterion,
train_mb_size=train_mb_size,
train_epochs=1,
eval_mb_size=eval_mb_size,
device=device,
plugins=plugins,
evaluator=evaluator,
eval_every=eval_every,
)
self.num_passes = num_passes
warnings.warn(
"This is an unstable experimental strategy."
"Some plugins may not work properly."
)
def create_sub_experience_list(self, experience):
"""Creates a list of sub-experiences from an experience.
It returns a list of experiences, where each experience is
a subset of the original experience.
:param experience: single Experience.
:return: list of Experience.
"""
# Shuffle the indices
indices = torch.randperm(len(experience.dataset))
num_sub_exps = len(indices) // self.train_mb_size
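        # Integer division: samples left over after filling complete
        # mini-batches are dropped and never appear in any sub-experience.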
sub_experience_list = []
for subexp_id in range(num_sub_exps):
subexp_indices = indices[
subexp_id
* self.train_mb_size : (subexp_id + 1)
* self.train_mb_size
]
sub_experience = copy.copy(experience)
subexp_ds = AvalancheSubset(
sub_experience.dataset, indices=subexp_indices
)
sub_experience.dataset = subexp_ds
sub_experience_list.append(sub_experience)
return sub_experience_list
def train(
self,
experiences: Union[Experience, Sequence[Experience]],
eval_streams: Optional[
Sequence[Union[Experience, Sequence[Experience]]]
] = None,
**kwargs
):
"""Training loop. if experiences is a single element trains on it.
If it is a sequence, trains the model on each experience in order.
This is different from joint training on the entire stream.
It returns a dictionary with last recorded value for each metric.
:param experiences: single Experience or sequence.
:param eval_streams: list of streams for evaluation.
If None: use training experiences for evaluation.
Use [] if you do not want to evaluate during training.
:return: dictionary containing last recorded value for
each metric name.
"""
self.is_training = True
self._stop_training = False
self.model.train()
self.model.to(self.device)
# Normalize training and eval data.
if not isinstance(experiences, Sequence):
experiences = [experiences]
if eval_streams is None:
eval_streams = [experiences]
self._eval_streams = eval_streams
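        # One sub-experience per full mini-batch, computed from the first
        # experience's dataset size.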
self.num_sub_exps = len(experiences[0].dataset) // self.train_mb_size
self._before_training(**kwargs)
# Keep the (full) experience in self.full_experience
# for model adaptation
for self.full_experience in experiences:
sub_experience_list = self.create_sub_experience_list(
self.full_experience
)
# Train for each sub-experience
for i, sub_experience in enumerate(sub_experience_list):
self.experience = sub_experience
is_first_sub_exp = i == 0
is_last_sub_exp = i == len(sub_experience_list) - 1
self._train_exp(
self.experience,
eval_streams,
is_first_sub_exp=is_first_sub_exp,
is_last_sub_exp=is_last_sub_exp,
**kwargs
)
self._after_training(**kwargs)
res = self.evaluator.get_last_metrics()
return res
def _train_exp(
self,
experience: Experience,
eval_streams=None,
is_first_sub_exp=False,
is_last_sub_exp=False,
**kwargs
):
"""Training loop over a single Experience object.
:param experience: CL experience information.
:param eval_streams: list of streams for evaluation.
If None: use the training experience for evaluation.
Use [] if you do not want to evaluate during training.
:param is_first_sub_exp: whether the current sub-experience
is the first sub-experience.
:param is_last_sub_exp: whether the current sub-experience
is the last sub-experience.
:param kwargs: custom arguments.
"""
self.experience = experience
self.model.train()
if eval_streams is None:
eval_streams = [experience]
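        # Wrap any bare Experience in the eval streams into a one-element list.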
for i, exp in enumerate(eval_streams):
if not isinstance(exp, Sequence):
eval_streams[i] = [exp]
# Data Adaptation (e.g. add new samples/data augmentation)
self._before_train_dataset_adaptation(**kwargs)
self.train_dataset_adaptation(**kwargs)
self._after_train_dataset_adaptation(**kwargs)
self.make_train_dataloader(**kwargs)
# Model Adaptation (e.g. freeze/add new units) in the
# first sub-experience
if is_first_sub_exp:
self.model = self.model_adaptation()
self.make_optimizer()
self._before_training_exp(**kwargs)
self._before_training_epoch(**kwargs)
# if self._stop_training: # Early stopping
# self._stop_training = False
# break
for self.n_pass in range(self.num_passes):
self.training_epoch(**kwargs)
# if is_last_sub_exp:
self._after_training_epoch(**kwargs)
self._after_training_exp(**kwargs)
def model_adaptation(self, model=None):
"""Adapts the model to the data from the current
(full) experience.
Calls the :class:`~avalanche.models.DynamicModule`s adaptation.
"""
if model is None:
model = self.model
for module in model.modules():
if isinstance(module, DynamicModule):
module.adaptation(self.full_experience.dataset)
return model.to(self.device)
| 34.157143
| 77
| 0.623728
|
172cf4f4a4ec2d13fb358b6969499b6e5f2ad5ab
| 1,428
|
py
|
Python
|
tests/test_auth_credential/test_cli.py
|
minos-framework/minos-auth-credential
|
66ccb83e330d5376aa263dbc447da2239b3261fd
|
[
"MIT"
] | 5
|
2022-01-25T18:12:33.000Z
|
2022-02-05T11:02:00.000Z
|
tests/test_auth_credential/test_cli.py
|
minos-framework/minos-auth-credential
|
66ccb83e330d5376aa263dbc447da2239b3261fd
|
[
"MIT"
] | null | null | null |
tests/test_auth_credential/test_cli.py
|
minos-framework/minos-auth-credential
|
66ccb83e330d5376aa263dbc447da2239b3261fd
|
[
"MIT"
] | null | null | null |
"""tests.test_api_gateway.test_rest.test_cli module."""
import unittest
from unittest.mock import (
PropertyMock,
patch,
)
from typer.testing import (
CliRunner,
)
from minos.auth_credential import (
CredentialConfig,
)
from minos.auth_credential.cli import (
app,
)
from minos.auth_credential.launchers import (
EntrypointLauncher,
)
from tests.utils import (
BASE_PATH,
FakeEntrypoint,
)
runner = CliRunner()
class Foo:
def __init__(self, **kwargs):
self.kwargs = kwargs
class TestCli(unittest.TestCase):
CONFIG_FILE_PATH = BASE_PATH / "config.yml"
def setUp(self):
self.config = CredentialConfig(self.CONFIG_FILE_PATH)
self.services = ["a", "b", Foo]
self.launcher = EntrypointLauncher(config=self.config, services=self.services)
def test_app_ko(self):
path = f"{BASE_PATH}/non_existing_config.yml"
result = runner.invoke(app, ["start", path])
self.assertEqual(result.exit_code, 1)
self.assertTrue("Error loading config" in result.stdout)
def test_launch(self):
entrypoint = FakeEntrypoint()
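        # Replace the launcher's entrypoint property with the fake so that
        # launch() exercises the fake instead of a real entrypoint.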
with patch("minos.auth_credential.launchers.EntrypointLauncher.entrypoint", new_callable=PropertyMock) as mock:
mock.return_value = entrypoint
self.launcher.launch()
self.assertEqual(1, entrypoint.call_count)
if __name__ == "__main__":
unittest.main()
| 24.20339
| 119
| 0.687675
|