hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e4e2c057d716503e6ba0401dde6b3f5ad2e211c8 | 1,959 | py | Python | shablbot/core/utils.py | Blackgard/vk-bot-python | 5d1eb269d76567a8e31dec47c0ea3c5cc1bcbc3c | [
"MIT"
] | 5 | 2019-11-12T05:15:07.000Z | 2022-01-20T06:26:55.000Z | shablbot/core/utils.py | Blackgard/vk-bot-python | 5d1eb269d76567a8e31dec47c0ea3c5cc1bcbc3c | [
"MIT"
] | 1 | 2021-06-02T00:33:47.000Z | 2021-06-02T00:33:47.000Z | shablbot/core/utils.py | Blackgard/vk-bot-python | 5d1eb269d76567a8e31dec47c0ea3c5cc1bcbc3c | [
"MIT"
] | 2 | 2021-12-18T17:03:10.000Z | 2022-01-29T17:08:35.000Z | from typing import Any, List, Dict
from anytree import Node, RenderTree, ContRoundStyle
class RenderState:
    " Render state class. View shablbot module active in tree style. "
    # NOTE(review): ``self.node`` is populated by code not visible in this
    # chunk (presumably the constructor) — confirm before relying on it.
    def render(self, style = ContRoundStyle) -> None:
        """ Render tree with state bot modules.
        Args:
            style ([type], optional): Style how need rendered items. Defaults to ContRoundStyle.
        """
        # Instantiate the style class and print the tree using anytree.
        print(RenderTree(self.node, style=style()).by_attr())
def render_state(name_module: str, module: Any) -> None:
    """Render the state of a single bot module as a tree.

    Args:
        name_module (str): Name of the module.
        module (Any): Module data (dict or list) shown as child nodes.
    """
    # Local renamed from ``render_state``: the original shadowed this
    # function's own name, which is confusing and blocks recursion.
    state = RenderState({ name_module: module })
    state.render()
def render_state_all_components(list_components: List[Any]) -> None:
    """Render the state of all bot components in tree style.

    Args:
        list_components (List[Any]): Components to render; every component
            must provide a ``get_main_data_object()`` method.
    """
    # Local renamed from ``render_state``, which shadowed the sibling
    # function of that name; docstring typos ("rebder", "componnets") fixed.
    state = RenderState(
        modules={
            comp.__class__.__name__ : comp.get_main_data_object()
            for comp in list_components
        },
        main_root=Node("Shablbot")
    )
    state.render()
| 31.596774 | 129 | 0.634507 | from typing import Any, List, Dict
from anytree import Node, RenderTree, ContRoundStyle
class RenderState:
    """Render the state of shablbot modules as a tree.

    Builds an anytree hierarchy out of a mapping ``name -> module data``
    (dict values and list items become child nodes) and prints it with
    :class:`RenderTree`.
    """
    def __init__(self, modules: Dict[str, Any], main_root: Node = None):
        """
        Args:
            modules (Dict[str, Any]): Module name mapped to its data.
            main_root (Node, optional): Shared root; module nodes become
                its children. Without it the module node itself is the root.
        """
        self.modules = modules
        self.main_root = main_root
        self.node: Node = self.__create_node()
    def __create_node(self) -> Node:
        """Build the node hierarchy and return its root node."""
        root = self.main_root
        for module_name, module_data in self.modules.items():
            # anytree treats parent=None as "this node is a root", so the
            # original's if/else on ``root`` collapses to one call.
            subroot = Node(module_name, parent=root)
            # Renamed from ``object`` (shadowed the builtin). isinstance
            # against typing.Dict/typing.List is deprecated; plain
            # dict/list is the supported equivalent.
            if isinstance(module_data, dict):
                for value in module_data.values():
                    Node(str(value), parent=subroot)
            elif isinstance(module_data, list):
                for item in module_data:
                    Node(str(item), parent=subroot)
        # NOTE: with empty ``modules`` and no ``main_root`` this raises
        # NameError, exactly like the original implementation.
        return root if root else subroot
    def render(self, style = ContRoundStyle) -> None:
        """Print the tree of bot-module state.

        Args:
            style ([type], optional): anytree style class used for the
                branch glyphs. Defaults to ContRoundStyle.
        """
        print(RenderTree(self.node, style=style()).by_attr())
def render_state(name_module: str, module: Any) -> None:
    """Render the state of a single bot module as a tree.

    Args:
        name_module (str): Name of the module.
        module (Any): Module data (dict or list) shown as child nodes.
    """
    # Local renamed from ``render_state``: the original shadowed this
    # function's own name, which is confusing and blocks recursion.
    state = RenderState({ name_module: module })
    state.render()
def render_state_all_components(list_components: List[Any]) -> None:
    """Render the state of all bot components in tree style.

    Args:
        list_components (List[Any]): Components to render; every component
            must provide a ``get_main_data_object()`` method.
    """
    # Local renamed from ``render_state``, which shadowed the sibling
    # function of that name; docstring typos ("rebder", "componnets") fixed.
    state = RenderState(
        modules={
            comp.__class__.__name__ : comp.get_main_data_object()
            for comp in list_components
        },
        main_root=Node("Shablbot")
    )
    state.render()
| 643 | 0 | 53 |
3e4bd952403d2276a16fe6e594f1e94e0b8e90bf | 740 | py | Python | examples/plot_2d_graph.py | gfngoncalves/foam_graph | b1b735aef3eeaa6e9b8430c5520abff871ce3ffb | [
"MIT"
] | null | null | null | examples/plot_2d_graph.py | gfngoncalves/foam_graph | b1b735aef3eeaa6e9b8430c5520abff871ce3ffb | [
"MIT"
] | null | null | null | examples/plot_2d_graph.py | gfngoncalves/foam_graph | b1b735aef3eeaa6e9b8430c5520abff871ce3ffb | [
"MIT"
] | null | null | null | # %%
import matplotlib.pyplot as plt
from torch_geometric.data import download_url, extract_tar
from foam_graph.utils.graph_from_foam import read_foam
from foam_graph.visualization.graph_plotting import plot_graph
# %% Extract tar and read case as a graph
# Fetch the example OpenFOAM "damBreak" case archive and unpack it locally.
download_url("https://github.com/gfngoncalves/openfoam_cases/blob/main/damBreak.tar.xz?raw=true", ".")
extract_tar("damBreak.tar.xz", ".", mode="r:xz")
# Load the case as graphs, keeping only the water-fraction field and
# including boundary patches.
graph = read_foam(
    "damBreak",
    ("alpha.water",),
    read_boundaries=True,
)
#%% Plot alpha field for 2D case
field_name_plot = "alpha.water"
# alpha.water is a scalar field, so plot component 0.
field_component_plot = 0
# -1 selects the last written time step.
time = -1
fig, ax = plt.subplots(figsize=(10, 10))
plot_graph(graph[time], field_name_plot, field_component_plot, ax=ax)
plt.tight_layout()
plt.show()
| 26.428571 | 102 | 0.752703 | # %%
import matplotlib.pyplot as plt
from torch_geometric.data import download_url, extract_tar
from foam_graph.utils.graph_from_foam import read_foam
from foam_graph.visualization.graph_plotting import plot_graph
# %% Extract tar and read case as a graph
# Download the sample OpenFOAM "damBreak" case and extract it in-place.
download_url("https://github.com/gfngoncalves/openfoam_cases/blob/main/damBreak.tar.xz?raw=true", ".")
extract_tar("damBreak.tar.xz", ".", mode="r:xz")
# Read the case as a sequence of graphs; only the alpha.water field is
# kept, and boundary patches are included in the graph.
graph = read_foam(
    "damBreak",
    ("alpha.water",),
    read_boundaries=True,
)
#%% Plot alpha field for 2D case
field_name_plot = "alpha.water"
# Component 0 of the (scalar) field.
field_component_plot = 0
# Index -1 = final time step of the simulation output.
time = -1
fig, ax = plt.subplots(figsize=(10, 10))
plot_graph(graph[time], field_name_plot, field_component_plot, ax=ax)
plt.tight_layout()
plt.show()
| 0 | 0 | 0 |
3093dbc865abb3135a47c825de9eebf66e5d0d5e | 4,954 | py | Python | src/VersionControl/Git/Branches/Feature/Finish.py | flexiooss/flexio-flow | 47491c7e5b49a02dc859028de0d486edc0014b26 | [
"Apache-2.0"
] | null | null | null | src/VersionControl/Git/Branches/Feature/Finish.py | flexiooss/flexio-flow | 47491c7e5b49a02dc859028de0d486edc0014b26 | [
"Apache-2.0"
] | 44 | 2019-04-05T06:08:15.000Z | 2021-09-13T19:37:49.000Z | src/VersionControl/Git/Branches/Feature/Finish.py | flexiooss/flexio-flow | 47491c7e5b49a02dc859028de0d486edc0014b26 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
from typing import Type, Optional, List
from Exceptions.BranchHaveDiverged import BranchHaveDiverged
from Exceptions.BranchNotExist import BranchNotExist
from Exceptions.GitMergeConflictError import GitMergeConflictError
from Exceptions.NotCleanWorkingTree import NotCleanWorkingTree
from FlexioFlow.StateHandler import StateHandler
from Branches.Branches import Branches
from Log.Log import Log
from VersionControl.Git.Branches.GitFlowCmd import GitFlowCmd
from VersionControl.Git.GitCmd import GitCmd
from VersionControlProvider.Github.Message import Message
from VersionControlProvider.Issue import Issue
from VersionControlProvider.Topic import Topic
from ConsoleColors.Fg import Fg
from Core.ConfigHandler import ConfigHandler
| 35.898551 | 116 | 0.646952 | from __future__ import annotations
from typing import Type, Optional, List
from Exceptions.BranchHaveDiverged import BranchHaveDiverged
from Exceptions.BranchNotExist import BranchNotExist
from Exceptions.GitMergeConflictError import GitMergeConflictError
from Exceptions.NotCleanWorkingTree import NotCleanWorkingTree
from FlexioFlow.StateHandler import StateHandler
from Branches.Branches import Branches
from Log.Log import Log
from VersionControl.Git.Branches.GitFlowCmd import GitFlowCmd
from VersionControl.Git.GitCmd import GitCmd
from VersionControlProvider.Github.Message import Message
from VersionControlProvider.Issue import Issue
from VersionControlProvider.Topic import Topic
from ConsoleColors.Fg import Fg
from Core.ConfigHandler import ConfigHandler
class Finish:
    """Finish a git-flow feature branch: merge it into develop and clean up.

    Orchestrates GitCmd/GitFlowCmd operations; the public entry point is
    :meth:`process`.
    """
    def __init__(self,
                 state_handler: StateHandler,
                 config_handler: ConfigHandler,
                 issue: Optional[Type[Issue]],
                 topics: Optional[List[Topic]],
                 keep_branch: bool,
                 close_issue: bool
                 ):
        self.__state_handler: StateHandler = state_handler
        self.__config_handler: ConfigHandler = config_handler
        self.__issue: Optional[Type[Issue]] = issue
        self.__topics: Optional[List[Topic]] = topics
        self.__git: GitCmd = GitCmd(self.__state_handler).with_config_handler(config_handler)
        self.__gitflow: GitFlowCmd = GitFlowCmd(self.__state_handler, self.__config_handler)
        # Captured once up-front: the feature branch being finished.
        self.__current_branch_name: str = self.__git.get_current_branch_name()
        self.__keep_branch: bool = keep_branch
        self.__close_issue: bool = close_issue
    def __init_gitflow(self) -> Finish:
        """Initialise the git-flow configuration (fluent helper)."""
        self.__gitflow.init_config()
        return self
    def __pull_develop(self) -> Finish:
        """Check out the develop branch and pull latest changes (fluent)."""
        self.__git.checkout(self.__config_handler.develop()).try_to_pull()
        return self
    def __checkout_current_feature(self):
        # Return to the feature branch recorded in __init__.
        self.__git.checkout_with_branch_name(self.__current_branch_name)
    def __merge_develop(self) -> Finish:
        """Commit a finish marker on the feature branch, push it, then
        merge the feature into develop; raises on merge conflicts."""
        self.__checkout_current_feature()
        message: Message = Message(
            message=''.join([
                "'Finish feature ` ",
                self.__current_branch_name,
                " ` for dev: ",
                self.__state_handler.version_as_str()
            ]),
            issue=self.__issue
        )
        message_str: str = ''
        # Optionally embed the issue-closing keyword in the commit message.
        if self.__close_issue:
            message_str = message.with_close()
        else:
            message_str = message.message
        # --allow-empty: the marker commit may carry no file changes.
        self.__git.commit(
            message_str,
            ['--allow-empty']
        ).try_to_push()
        self.__git.checkout(self.__config_handler.develop()).merge_with_version_message_from_branch_name(
            branch=self.__current_branch_name,
            message=Message(
                message='',
                issue=self.__issue
            ).with_ref(),
        ).try_to_push()
        if self.__git.has_conflict():
            Log.error("""
            {fg_fail}CONFLICT : resolve conflict, and remove your feature branch manually{reset_fg}
            """.format(
                fg_fail=Fg.FAIL.value,
                reset_fg=Fg.RESET.value,
            ))
            raise GitMergeConflictError(self.__config_handler.develop(), self.__git.get_conflict())
        return self
    def __delete_feature(self) -> Finish:
        """Delete local and remote feature branch unless keep was requested."""
        if not self.__keep_branch:
            self.__git.delete_local_branch_from_name(self.__current_branch_name)
            self.__git.try_delete_remote_branch_from_name(self.__current_branch_name)
        return self
    def __finish_feature(self):
        # Restore the state file from develop before merging so the merge
        # does not carry feature-local state changes.
        self.__git.checkout_file_with_branch_name(self.__config_handler.develop(), self.__state_handler.file_path())
        self.__merge_develop()
        if not self.__keep_branch:
            self.__delete_feature()
    def process(self):
        """Validate preconditions, then finish the current feature branch.

        Raises:
            NotCleanWorkingTree: uncommitted changes present.
            BranchNotExist: current branch is not a feature branch.
            BranchHaveDiverged: develop has commits not merged into the
                feature branch yet.
        """
        if not self.__git.is_clean_working_tree():
            raise NotCleanWorkingTree()
        if not self.__gitflow.is_feature():
            raise BranchNotExist(self.__config_handler.feature())
        self.__pull_develop()
        if self.__git.is_branch_ahead(self.__config_handler.develop(), self.__current_branch_name):
            Log.error("""
            {fg_fail}{list}{reset_fg}
            """.format(
                fg_fail=Fg.FAIL.value,
                list=self.__git.list_commit_diff(self.__config_handler.develop(), self.__current_branch_name),
                reset_fg=Fg.RESET.value,
            ))
            self.__checkout_current_feature()
            raise BranchHaveDiverged(
                """
            {fg_fail}{message}{reset_fg}
            """.format(
                    fg_fail=Fg.FAIL.value,
                    message='Oups !!! Develop have commit ahead ' + self.__current_branch_name + ' merge before',
                    reset_fg=Fg.RESET.value,
                )
            )
        self.__finish_feature()
| 3,954 | -8 | 238 |
a980fa4865ba7ee280855f960b289e3b10b23ac7 | 3,439 | py | Python | omega_format/dynamics/dynamic_object.py | kai-storms/omega_format | 745f67d774d2da04201de9fe24fa24468a8b191b | [
"MIT"
] | 11 | 2021-07-15T13:47:59.000Z | 2022-03-16T14:06:22.000Z | omega_format/dynamics/dynamic_object.py | kai-storms/omega_format | 745f67d774d2da04201de9fe24fa24468a8b191b | [
"MIT"
] | 1 | 2022-01-19T10:15:05.000Z | 2022-01-31T12:28:35.000Z | omega_format/dynamics/dynamic_object.py | kai-storms/omega_format | 745f67d774d2da04201de9fe24fa24468a8b191b | [
"MIT"
] | 2 | 2021-07-17T05:37:09.000Z | 2022-01-20T07:35:50.000Z | from dataclasses import fields
from pydantic import conint
from pydantic.fields import Field
import numpy as np
from .bounding_box import BoundingBox
from .trajectory import Trajectory
from ..settings import DefaultValues
from ..enums import ReferenceTypes
from ..geometry import BBXCornersClass
from ..reference_resolving import *
import xarray as xr
def in_timespan(obj, birth, death):
    """Return True when the object's life span overlaps the query window.

    birth = first timestamp idx of the window
    death = last timestamp idx of the window
    Both the window and the object's [obj.birth, obj.end] are inclusive.
    """
    window_reaches_object = birth <= obj.end
    object_reaches_window = obj.birth <= death
    return window_reaches_object and object_reaches_window
| 33.715686 | 98 | 0.635941 | from dataclasses import fields
from pydantic import conint
from pydantic.fields import Field
import numpy as np
from .bounding_box import BoundingBox
from .trajectory import Trajectory
from ..settings import DefaultValues
from ..enums import ReferenceTypes
from ..geometry import BBXCornersClass
from ..reference_resolving import *
import xarray as xr
def in_timespan(obj, birth, death):
    """
    Overlap test between the object's life span and the window.

    birth = first timestamp idx of the window
    death = last timestamp idx of the window
    """
    # Two inclusive intervals overlap unless one ends before the other starts.
    return not (obj.end < birth or obj.birth > death)
def timespan_to_cutoff_idxs(obj, birth, death):
    """Map the window [birth, death] to slice indices into the object's
    own per-frame arrays, plus the object's new birth index inside the
    window.

    Returns (cutoff_start, cutoff_end, own_birth).
    """
    # Window frames that pass before the object first appears.
    lead_frames = max(0, obj.birth - birth)
    # First object frame that lies inside the window.
    slice_start = int(max(0, birth - obj.birth))
    # One past the last object frame inside the window.
    slice_end = int(min(obj.end - obj.birth + 1,
                        death - birth - lead_frames + slice_start + 1))
    new_birth = lead_frames
    new_end = slice_end - slice_start + lead_frames - 1
    # Invariants identical to the original implementation.
    assert slice_start <= slice_end
    assert new_birth >= 0
    assert new_end <= death - birth
    assert new_birth <= new_end
    return slice_start, slice_end, new_birth
class DynamicObject(InputClassBase, BBXCornersClass):
    """A tracked object: a bounding box plus a per-frame trajectory.

    ``birth`` is the index of the object's first frame on the recording's
    global timestamp axis; the trajectory arrays span birth..end.
    """
    bb: BoundingBox = Field(default_factory=BoundingBox)
    tr: Trajectory = Field(default_factory=Trajectory)
    birth: conint(ge=0)
    """first timestamp idx"""
    @property
    def end(self):
        """Last timestamp idx"""
        return len(self.tr.pos_x) + self.birth - 1
    def in_timespan(self, birth, death):
        # Delegates to the module-level overlap test.
        return in_timespan(self, birth, death)
    def timespan_to_cutoff_idxs(self, birth, death):
        # Delegates to the module-level index computation.
        return timespan_to_cutoff_idxs(self, birth, death)
    def cut_to_timespan(self, birth, death):
        """Trim this object's arrays in place to the window [birth, death]."""
        cutoff_start, cutoff_end, own_birth = self.timespan_to_cutoff_idxs(birth, death)
        self.birth = own_birth
        # Slice every per-frame numpy array of the trajectory on its last axis.
        for k, v in vars(self.tr).items():
            if isinstance(v, np.ndarray):
                try:
                    setattr(self.tr, k, v[..., cutoff_start:cutoff_end])
                except ValueError:
                    # most likely it was tried to set a cached property
                    # TODO: find a way to exclude cached properties from loop
                    pass
        # cut properties from BBXCornersClass
        for k, v in vars(self).items():
            if isinstance(v, np.ndarray):
                setattr(self, k, v[..., cutoff_start:cutoff_end])
    @property
    def length(self):
        # Fall back to type-specific defaults when no box size was measured.
        # NOTE(review): ``self.type`` is not defined in this class — it is
        # presumably provided by a subclass; confirm.
        if self.bb.length == 0:
            if self.type == ReferenceTypes.RoadUserType.BICYCLE:
                return BoundingBox(DefaultValues.bicycle).length
            elif self.type == ReferenceTypes.RoadUserType.PEDESTRIAN:
                return BoundingBox(DefaultValues.pedestrian).length
            else:
                return .2 # TODO handle better
        else:
            return self.bb.length
    @property
    def width(self):
        # Same fallback scheme as ``length``.
        if self.bb.width == 0:
            if self.type == ReferenceTypes.RoadUserType.BICYCLE:
                return BoundingBox(DefaultValues.bicycle).width
            elif self.type == ReferenceTypes.RoadUserType.PEDESTRIAN:
                return BoundingBox(DefaultValues.pedestrian).width
            else:
                return .2 # TODO handle better
        else:
            return self.bb.width
    def to_xarray(self, rr):
        """Pack all trajectory fields into an xarray Dataset on a 'time' axis.

        ``rr`` is expected to expose ``timestamps.val`` indexable by
        [birth:end+1] — TODO confirm the expected type.
        """
        return xr.Dataset({f: ('time', getattr(self.tr,f)) for f in self.tr.__fields__.keys()},
                          coords={'time':rr.timestamps.val[self.birth:self.end+1]})
| 2,362 | 506 | 46 |
5ef70e448176718d624998272370abec0f1efc36 | 8,695 | py | Python | src/django_vcs_watch/models.py | svetlyak40wt/django-vcs-watch | fa341c488b8a863812dd2b25cb0bda6d9beffdc9 | [
"BSD-3-Clause"
] | 1 | 2016-05-08T13:45:53.000Z | 2016-05-08T13:45:53.000Z | src/django_vcs_watch/models.py | svetlyak40wt/django-vcs-watch | fa341c488b8a863812dd2b25cb0bda6d9beffdc9 | [
"BSD-3-Clause"
] | null | null | null | src/django_vcs_watch/models.py | svetlyak40wt/django-vcs-watch | fa341c488b8a863812dd2b25cb0bda6d9beffdc9 | [
"BSD-3-Clause"
] | 1 | 2019-06-10T16:49:02.000Z | 2019-06-10T16:49:02.000Z | import calendar
import datetime
import logging
import operator
import os
import pytz
from pdb import set_trace
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import DjangoUnicodeDecodeError
from django_fields.fields import EncryptedCharField
from django_vcs_watch.settings import \
REVISION_LIMIT, \
CHECK_INTERVAL_MIN, \
CHECK_INTERVAL_MAX, \
POSTPROCESS_COMMIT
from django_vcs_watch.utils import \
timedelta_to_string, \
strip_timezone, \
mongo, \
get_user_feed_slug
from pymongo import DESCENDING
from mongobongo import Document
# Fail fast at import time when the required django_globals app/middleware
# is not configured.
if 'django_globals' not in settings.INSTALLED_APPS:
    raise Exception('Please, install django_globals application.')
if 'django_globals.middleware.User' not in settings.MIDDLEWARE_CLASSES:
    raise Exception('Please, add django_globals.middleware.User to the MIDDLEWARE_CLASSES.')
from django.db.models.signals import class_prepared
# NOTE(review): ``_init_mongo_connection`` / ``create_user_feed`` are not
# defined in this chunk — they must come from elsewhere in the full file.
class_prepared.connect(_init_mongo_connection)
from django.db.models import signals
signals.post_save.connect(create_user_feed, sender = User)
| 28.601974 | 110 | 0.6046 | import calendar
import datetime
import logging
import operator
import os
import pytz
from pdb import set_trace
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import DjangoUnicodeDecodeError
from django_fields.fields import EncryptedCharField
from django_vcs_watch.settings import \
REVISION_LIMIT, \
CHECK_INTERVAL_MIN, \
CHECK_INTERVAL_MAX, \
POSTPROCESS_COMMIT
from django_vcs_watch.utils import \
timedelta_to_string, \
strip_timezone, \
mongo, \
get_user_feed_slug
from pymongo import DESCENDING
from mongobongo import Document
# Fail fast at import time when the required django_globals app/middleware
# is not configured.
if 'django_globals' not in settings.INSTALLED_APPS:
    raise Exception('Please, install django_globals application.')
if 'django_globals.middleware.User' not in settings.MIDDLEWARE_CLASSES:
    raise Exception('Please, add django_globals.middleware.User to the MIDDLEWARE_CLASSES.')
def delta2secs(d):
    """Whole seconds in timedelta *d*; the sub-second part is dropped."""
    seconds_per_day = 24 * 60 * 60
    return d.days * seconds_per_day + d.seconds
class Commit(Document):
    """A single VCS commit stored in the ``commits`` Mongo collection."""
    collection = 'commits'
    class Meta:
        # Newest commits first.
        ordering = [('date', DESCENDING)]
    def __unicode__(self):
        return _('Commit %(revision)s by %(author)s') % self.__dict__
    def __repr__(self):
        return self.__unicode__()
    @models.permalink
    def get_absolute_url(self):
        return ('vcs-watch-commit', (), {'repository_slug': self.slug, 'revision': self.revision})
    def get_timestamp(self):
        """Unix timestamp of the commit date, or 0 when no date is set."""
        if self.date is not None:
            return calendar.timegm(self.utcdate.timetuple())
        return 0
    @property
    def utcdate(self):
        # Stored dates are naive; expose them as UTC-aware datetimes.
        return pytz.utc.localize(self._data.get('date'))
class Repository(Document):
    """A watched VCS repository (``repositories`` Mongo collection).

    ``update()`` polls the SVN backend for new commits and schedules the
    next poll adaptively from recent commit frequency / error back-off.
    """
    collection = 'repositories'
    def __unicode__(self):
        return _('Repository at %s') % self.url
    def __repr__(self):
        return self.__unicode__()
    @models.permalink
    def get_absolute_url(self):
        return ('vcs-watch-repository', (), {'slug': self.slug})
    @models.permalink
    def get_rss_url(self):
        return ('vcs-watch-feed-commits', (), {'param': self.slug })
    def update(self):
        """Fetch commits newer than ``last_rev`` and reschedule the check."""
        log = logging.getLogger('django_vcs_watch.repository.update')
        log.debug('updating %s' % self.slug)
        # Imported lazily so the backend is only required when polling.
        from django_vcs_watch.backends.svn import get_updates
        try:
            commits = get_updates(self.url, self.last_rev,
                                  self.username, self.password)
        except Exception, e:
            log.exception('error during repository update')
            self.last_error = str(e).decode('utf-8')
            # Back off: wait twice the time elapsed since the previous
            # error, clamped to [CHECK_INTERVAL_MIN, CHECK_INTERVAL_MAX].
            if self.last_error_date:
                interval_to_check = min(
                    max(
                        delta2secs(datetime.datetime.utcnow() - self.last_error_date) * 2,
                        CHECK_INTERVAL_MIN
                    ),
                    CHECK_INTERVAL_MAX)
            else:
                interval_to_check = CHECK_INTERVAL_MIN
            interval_to_check = datetime.timedelta(0, interval_to_check)
            log.debug('next check will be after %s' % timedelta_to_string(interval_to_check))
            self.last_error_date = datetime.datetime.utcnow()
            self.next_check_at = self.last_error_date + interval_to_check
            self.save()
            return
        for commit in commits:
            commit = Commit(**commit)
            # Skip revisions that are already stored for this repository.
            if len(Commit.objects.find({'slug': self.slug, 'revision': commit.revision})) > 0:
                continue
            commit.slug = self.slug
            POSTPROCESS_COMMIT(self, commit)
            try:
                commit.save()
            except Exception:
                # just ignore this strange errors,
                # caused by wrong encoding in the comments
                # or binary files without mime type.
                log.exception('error during commit saving %s:%s' % (self.url, commit.revision))
            self.last_rev = commit.revision
            self.updated_at = commit.date
        if len(commits) > 0:
            self.last_error = None
            self.last_error_date = None
        # don't update more often than latest commits
        # TODO remove list when mongobongo will support len on cursor proxy
        latest_commits = list(Commit.objects.find({'slug': self.slug}).sort([('date', -1)])[:3])
        assert(len(latest_commits) <= 3)
        def weight(x):
            # Geometric weights 1/2, 1/4, 1/8 for the most recent deltas.
            return 1.0 / (2 ** (x+1))
        if len(latest_commits) > 0:
            # Weighted sum of the gaps between the latest commits drives
            # the polling interval: busy repositories get polled sooner.
            deltas = [
                (weight(0),
                 (datetime.datetime.utcnow() - latest_commits[0].date))
            ]
            for i in xrange(1, len(latest_commits)):
                deltas.append((weight(i), latest_commits[i-1].date - latest_commits[i].date))
            # NOTE(review): the loop variable below rebinds ``weight`` (the
            # helper above); harmless here since the helper is not called
            # afterwards.
            for weight, delta in deltas:
                log.debug('DELTA: %s, WEIGHT: %s' % (timedelta_to_string(delta), weight))
            interval_to_check = reduce(operator.add, (delta2secs(delta) * weight for weight, delta in deltas))
            interval_to_check = min(
                max(interval_to_check, CHECK_INTERVAL_MIN),
                CHECK_INTERVAL_MAX)
        else:
            interval_to_check = CHECK_INTERVAL_MIN
        interval_to_check = datetime.timedelta(0, interval_to_check)
        log.debug('next check will be after %s' % timedelta_to_string(interval_to_check))
        self.last_check_at = datetime.datetime.utcnow()
        self.next_check_at = self.last_check_at + interval_to_check
        self.save()
    @property
    def commits(self):
        # All stored commits for this repository (unsorted cursor).
        return Commit.objects.find({'slug': self.slug})
    def update_last_access(self):
        """Record that the repository was viewed just now."""
        self.last_access = datetime.datetime.utcnow()
        self.save()
        return ''
class Feed(Document):
    """A filtered stream of commits (``feeds`` Mongo collection).

    ``watch``/``ignore`` are lists of attribute->value rule dicts compiled
    into a Mongo ``$where`` JS predicate; matching commits become
    FeedItem documents.
    """
    collection = 'feeds'
    def __unicode__(self):
        return _('Feed %s') % self._id
    def __repr__(self):
        return self.__unicode__()
    def init(self, _id = None, ignore = [], watch = []):
        # NOTE(review): mutable default args ([]) are shared across calls,
        # and ``slug`` below is undefined in this scope — calling this
        # raises NameError. Probably meant ``_id``; confirm whether this
        # method is used at all.
        self._id = _id
        self.ignore = ignore
        self.watch = watch
        self.num_items = 0
        super(Feed, self).__init__(slug)
    def update(self):
        """Append FeedItems for new commits matching watch minus ignore."""
        log = logging.getLogger('django_vcs_watch.feed.update')
        log.debug('updating %s' % self._id)
        # Documents loaded from Mongo may be missing these fields entirely.
        if self.ignore is None:
            self.ignore = []
        if self.watch is None:
            self.watch = []
        if self.num_items is None:
            self.num_items = 0
        # Each rule dict becomes "this.a == 'x' && this.b == 'y'";
        # separate rules are OR-ed together.
        watch_query = ' || '.join(
            ' && '.join(
                "this.%s == '%s'" % item for item in rule.items())
            for rule in self.watch)
        ignore_query = ' || '.join(
            ' && '.join(
                "this.%s == '%s'" % item for item in rule.items())
            for rule in self.ignore)
        if not watch_query:
            return
        # JS predicate evaluated server-side by MongoDB.
        query = {'$where': '(%s) && !(%s)' % (watch_query, ignore_query or '0')}
        # Only consider commits newer than the feed's latest item.
        last_item = FeedItem.objects.find_one(dict(slug = self._id))
        if last_item is not None:
            query['date'] = {'$gt': last_item.date}
        Commit.objects.ensure_index([('slug', 1)])
        Commit.objects.ensure_index([('author', 1)])
        from pymongo.dbref import DBRef
        for commit in Commit.objects.find(query):
            commit_ref = DBRef(commit.objects.collection_name, commit._id)
            # Duplicate detection only logs; the item is still inserted
            # below. NOTE(review): looks like a missing ``continue`` —
            # confirm intent.
            if FeedItem.objects.find_one(
                dict(slug = self._id, commit = commit_ref)) is not None:
                logging.error('UPS I DID IT AGAIN!')
            FeedItem(slug = self._id, date = commit.date, commit = commit).save()
            self.num_items += 1
        self.save()
    def full_update(self):
        """ Drop all items and fill the feed with filtered items from scratch. """
        FeedItem.objects.remove(dict(slug = self._id))
        self.update()
class FeedItem(Document):
    """One entry of a Feed: a dated reference to a Commit."""
    collection = 'feed_items'
    def __unicode__(self):
        return _('FeedItem %s') % self._id
    def __repr__(self):
        return self.__unicode__()
    class Meta:
        # Newest items first.
        ordering = [('date', DESCENDING)]
def _init_mongo_connection(sender, **kwargs):
    """class_prepared handler: attach the MongoDB connection to documents."""
    # NOTE(review): only Repository is wired here; presumably the other
    # Document classes share the same ``objects.db`` — confirm.
    Repository.objects.db = mongo()
from django.db.models.signals import class_prepared
class_prepared.connect(_init_mongo_connection)
def create_user_feed(instance, **kwargs):
    """post_save handler: ensure every User has a personal Feed document."""
    feed_id = get_user_feed_slug(instance)
    feed = Feed.objects.find_one(dict(_id = feed_id))
    if feed is None:
        feed = Feed(
            _id = feed_id,
            user_id = instance.id
        )
        feed.save()
from django.db.models import signals
signals.post_save.connect(create_user_feed, sender = User)
| 6,308 | 1,017 | 161 |
3038ed4fb65985a3d843541d067e12a19deb1be3 | 1,225 | py | Python | act_to_gpl.py | muys4970/small_scripts | 4bcafd74c47a2b0c24aa67ac9f95611cf45f148b | [
"MIT"
] | 11 | 2021-01-04T18:16:34.000Z | 2022-03-07T20:18:42.000Z | act_to_gpl.py | muys4970/small_scripts | 4bcafd74c47a2b0c24aa67ac9f95611cf45f148b | [
"MIT"
] | null | null | null | act_to_gpl.py | muys4970/small_scripts | 4bcafd74c47a2b0c24aa67ac9f95611cf45f148b | [
"MIT"
] | 8 | 2020-12-11T00:51:18.000Z | 2022-01-13T18:52:41.000Z | #!/usr/bin/env python3
#
# Adobe Photoshop "*.act" palette file conversion to GIMP "*.gpl" palette
# format (which is also recognized by many other tools).
#
# How to use:
# ./act_to_gpl.py some_palette.act > some_palette.gpl
#
# Code based on swatchbook/codecs/adobe_act.py from:
# http://www.selapa.net/swatchbooker/
import os.path
import struct
import sys
if __name__ == '__main__':
    # Usage: ./act_to_gpl.py some_palette.act > some_palette.gpl
    # NOTE(review): the helper functions are defined elsewhere in the
    # full file; only this entry point is visible in this chunk.
    sys.stdout.write(
        return_gimp_palette(parse_adobe_act(sys.argv[1]), sys.argv[1])
    )
| 26.630435 | 82 | 0.610612 | #!/usr/bin/env python3
#
# Adobe Photoshop "*.act" palette file conversion to GIMP "*.gpl" palette
# format (which is also recognized by many other tools).
#
# How to use:
# ./act_to_gpl.py some_palette.act > some_palette.gpl
#
# Code based on swatchbook/codecs/adobe_act.py from:
# http://www.selapa.net/swatchbooker/
import os.path
import struct
import sys
def parse_adobe_act(filename):
    """Read an Adobe ``.act`` palette file.

    Returns a list of ``(R, G, B)`` tuples.
    """
    size = os.path.getsize(filename)
    with open(filename, 'rb') as act_file:
        if size == 772:  # CS2 variant: color count stored big-endian at offset 768.
            act_file.seek(768, 0)
            (color_count,) = struct.unpack('>H', act_file.read(2))
            act_file.seek(0, 0)
        else:
            # Plain variant: three bytes per color, nothing else.
            color_count = size // 3
        colors = []
        for _ in range(color_count):
            colors.append(struct.unpack('3B', act_file.read(3)))
        return colors
def return_gimp_palette(colors, name, columns=0):
    """Serialize *colors* (iterable of RGB triples) into GIMP .gpl text."""
    body = '\n'.join('{0} {1} {2}\tUntitled'.format(*rgb) for rgb in colors)
    parts = [
        'GIMP Palette',
        'Name: {0}'.format(name),
        'Columns: {0}'.format(columns),
        '#',
        body,
    ]
    return '\n'.join(parts) + '\n'
if __name__ == '__main__':
    # Convert the palette given as argv[1]; the .act path doubles as the
    # palette name embedded in the generated .gpl text.
    sys.stdout.write(
        return_gimp_palette(parse_adobe_act(sys.argv[1]), sys.argv[1])
    )
| 687 | 0 | 46 |
02ce5f1acf7dc53c2d03827ffa1683a44ab3b7ae | 2,444 | py | Python | predictor.py | ved789/ImageReader | 9d1f2bec9a7ccc270d9125a4e53bb93642662e34 | [
"Apache-2.0"
] | 1 | 2021-09-01T07:00:45.000Z | 2021-09-01T07:00:45.000Z | predictor.py | ved789/ImageReader | 9d1f2bec9a7ccc270d9125a4e53bb93642662e34 | [
"Apache-2.0"
] | null | null | null | predictor.py | ved789/ImageReader | 9d1f2bec9a7ccc270d9125a4e53bb93642662e34 | [
"Apache-2.0"
] | null | null | null | from flask import Flask, request, redirect, render_template
from werkzeug.utils import secure_filename
import os
import tensorflow as tf
import numpy as np
import cv2
# Class labels in the model's output-index order.
food_classes = ['Broken street sign', 'Damaged bollard', 'Damaged street light', 'Pothole']
app = Flask(__name__, static_url_path='/static')
# Uploads are stored in the static folder so saved images can be served back.
app.config["IMAGE_UPLOADS"] = './static'
app.config["ALLOWED_IMAGE_EXTENSIONS"] = ["JPEG", "JPG", "PNG"]
# Keras model is loaded once, at import time.
food_prediction_model = tf.keras.models.load_model('./deeplearning_model')
# NOTE(review): the two route decorators below have no function bodies in
# this chunk — the decorated view functions were stripped; see full file.
@app.route("/", methods=["GET", "POST"])
@app.route("/showing-image/<image_name>", methods=["GET", "POST"])
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
| 27.155556 | 137 | 0.645254 | from flask import Flask, request, redirect, render_template
from werkzeug.utils import secure_filename
import os
import tensorflow as tf
import numpy as np
import cv2
# Class labels in the model's output-index order.
food_classes = ['Broken street sign', 'Damaged bollard', 'Damaged street light', 'Pothole']
app = Flask(__name__, static_url_path='/static')
# Uploads go into the static folder so saved images can be served back.
app.config["IMAGE_UPLOADS"] = './static'
app.config["ALLOWED_IMAGE_EXTENSIONS"] = ["JPEG", "JPG", "PNG"]
# Keras model is loaded once at import time (blocks startup until done).
food_prediction_model = tf.keras.models.load_model('./deeplearning_model')
def allowed_image(filename):
    """Return True when *filename* carries a whitelisted image extension.

    Args:
        filename: Name of the uploaded file; may contain no dot at all.
    """
    # Files without any extension can never match.
    if "." not in filename:
        # Fixed idiom: was ``if not "." in filename`` (PEP 8 / E713).
        return False
    # Compare the last extension case-insensitively against the whitelist.
    ext = filename.rsplit(".", 1)[1]
    return ext.upper() in app.config["ALLOWED_IMAGE_EXTENSIONS"]
@app.route("/", methods=["GET", "POST"])
def upload_image():
    """Show the upload form; on POST, store a valid image and redirect to it.

    Invalid submissions (no file chosen, disallowed extension) redirect
    back to the upload form.
    """
    if request.method == "POST":
        if request.files:
            image = request.files["image"]
            if image.filename == "":
                # Form submitted without choosing a file.
                return redirect(request.url)
            if allowed_image(image.filename):
                filename = secure_filename(image.filename)
                image.save(os.path.join(app.config["IMAGE_UPLOADS"], filename))
                # BUG FIX: the redirect previously pointed at the literal
                # text "(unknown)" instead of the stored file's name; the
                # showing-image route expects the image name in the URL.
                return redirect(f'/showing-image/{filename}')
            else:
                return redirect(request.url)
    return render_template("upload_images.html")
@app.route("/showing-image/<image_name>", methods=["GET", "POST"])
def showing_image(image_name):
    """GET: display the uploaded image. POST: classify it and show the result."""
    if request.method == 'POST':
        image_path = os.path.join(app.config["IMAGE_UPLOADS"], image_name)
        image = cv2.imread(image_path) #BGR
        img = image.copy()
        # Convert OpenCV's BGR ordering to RGB before feeding the model.
        image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # Resize to the model's input resolution — NOTE(review): 229 is
        # unusual (Inception-style nets typically use 299); confirm it
        # matches the trained model.
        image = cv2.resize(image, (229,229))
        image = image.astype("float32")
        # Scale pixel values to [0, 1].
        image = image / 255.0
        np_image = np.expand_dims(image, axis=0) # (229,229,3) --> (1,229,229,3)
        predictions = food_prediction_model(np_image)
        predicted_class_idx = np.argmax(predictions) # [0.1, 0.5, 0.3] --> 1
        probability = np.max(predictions)
        predicted_class = food_classes[predicted_class_idx]
        return render_template("prediction_result.html", image_name=image_name, predicted_class=predicted_class, probability=probability)
    return render_template("showing_image.html", value=image_name)
if __name__ == '__main__':
    # Bind on all interfaces; the port comes from the environment (default 8080).
    app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
| 1,660 | 0 | 67 |
5d31335ada9205591b56e394c748cdabaffb4985 | 1,284 | py | Python | src/dynamic-programming/mccall-job-search/opt-policy-example-0.py | jackg0/quantecon | 6f8305a41969ad9dbb8d6bf824054f695ec5ef89 | [
"MIT"
] | null | null | null | src/dynamic-programming/mccall-job-search/opt-policy-example-0.py | jackg0/quantecon | 6f8305a41969ad9dbb8d6bf824054f695ec5ef89 | [
"MIT"
] | null | null | null | src/dynamic-programming/mccall-job-search/opt-policy-example-0.py | jackg0/quantecon | 6f8305a41969ad9dbb8d6bf824054f695ec5ef89 | [
"MIT"
] | null | null | null | import numpy as np
from numba import jit
import matplotlib.pyplot as plt
import quantecon as qe
from quantecon.distributions import BetaBinomial
# Wage grid: n+1 equally spaced offers between w_min and w_max.
n, a, b = 50, 200, 100
w_min, w_max = 10, 60
w_vals = np.linspace(w_min, w_max, n+1)
# Beta-binomial offer distribution over the wage grid.
dist = BetaBinomial(n, a, b)
psi_vals = dist.pdf()
# NOTE(review): compute_reservation_wage is defined elsewhere in the full file.
reservation_wage = compute_reservation_wage(w_vals, psi_vals)
print(reservation_wage)
| 25.176471 | 99 | 0.658879 | import numpy as np
from numba import jit
import matplotlib.pyplot as plt
import quantecon as qe
from quantecon.distributions import BetaBinomial
# Wage grid: n+1 equally spaced offers between w_min and w_max.
n, a, b = 50, 200, 100
w_min, w_max = 10, 60
w_vals = np.linspace(w_min, w_max, n+1)
# Beta-binomial offer distribution (parameters a, b) over the wage grid.
dist = BetaBinomial(n, a, b)
psi_vals = dist.pdf()
def plot_w_distribution(w_vals, psi_vals):
    """Plot the wage-offer distribution as a stem chart."""
    figure, axis = plt.subplots(figsize=(9, 6.5))
    axis.stem(w_vals, psi_vals, label='$\phi (w\')$')
    axis.set_xlabel('wages')
    axis.set_ylabel('probabilities')
    plt.show()
def optimal_policy_step(v, w_weights, w, beta=0.99, unemployment=25):
    """One Bellman update of the McCall value function.

    Accepting wage w is worth w/(1-beta); rejecting is worth unemployment
    compensation plus the discounted expected continuation value.
    """
    accept_value = w / (1 - beta)
    reject_value = unemployment + beta * np.dot(v, w_weights)
    return np.maximum(accept_value, reject_value)
def compute_reservation_wage(w, w_weights, max_iter=500, epsilon=1e-6, beta=0.99, unemployment=25):
    """Iterate the Bellman operator to convergence and back out the
    reservation wage of the McCall worker.

    :param w: array of possible wage offers
    :param w_weights: probability attached to each wage offer
    :param max_iter: cap on the number of value-function updates
    :param epsilon: sup-norm tolerance that ends the iteration
    :param beta: discount factor
    :param unemployment: per-period unemployment compensation
    :return: scalar reservation wage
    """
    # start from the value of accepting each wage forever
    v = w / (1 - beta)
    for _ in range(max_iter):
        v_new = optimal_policy_step(
            v, w_weights, w, beta=beta, unemployment=unemployment)
        gap = np.max(np.abs(v_new - v))
        v = v_new
        if gap <= epsilon:
            break
    # wage at which accepting and rejecting are equally attractive
    return (1 - beta) * (unemployment + beta * np.dot(v, w_weights))
reservation_wage = compute_reservation_wage(w_vals, psi_vals)
print(reservation_wage)
| 840 | 0 | 69 |
b003aa22f4a17b7c6740683c9d583a5038ef68eb | 1,587 | py | Python | main.py | AlessandroChen/AutoCnblogs | a537c94560e8dad8fe33a845ab9db2b45f80efe3 | [
"Apache-2.0"
] | null | null | null | main.py | AlessandroChen/AutoCnblogs | a537c94560e8dad8fe33a845ab9db2b45f80efe3 | [
"Apache-2.0"
] | null | null | null | main.py | AlessandroChen/AutoCnblogs | a537c94560e8dad8fe33a845ab9db2b45f80efe3 | [
"Apache-2.0"
] | null | null | null | import xmlrpc.client as xmlrpclib
import json
config_path = "./blog_config.json"
def have_config():
    '''
    Return True when the config file exists and holds a non-empty JSON
    object; otherwise seed it with an empty object and return False.
    '''
    try:
        with open(config_path, "r", encoding="utf-8") as f:
            try:
                cfg = json.load(f)
                # an empty dict means the file was initialised but never filled in
                return cfg != {}
            except json.decoder.JSONDecodeError:
                # file exists but is not valid JSON
                return False
    except OSError:
        # narrowed from a bare `except:` so unrelated errors (e.g.
        # KeyboardInterrupt) are no longer swallowed; a missing or
        # unreadable file is (re)created with an empty JSON object
        with open(config_path, "w", encoding="utf-8") as f:
            json.dump({}, f)
        return False
def create_config():
    '''
    create config file if it doesnt exist
    only save ensured data usr provided, if not print ERROR

    Prompts interactively for the metaWeblog credentials, verifies them
    against the server, and writes the resulting config as JSON.  On a
    failed verification the user gets a second attempt.
    '''
    # Provide EXAMPLE Here
    # at most two attempts at entering working credentials
    for test_times in range(0, 2):
        cfg = {}
        for item in [("url", "metaWeblog url"),
                     ("appkey", "blogaddress"),
                     ("usr", "usrname"),
                     ("passwd", "password")]:
            cfg[item[0]] = input(item[1] + " : ")
        try:
            # verify the credentials by querying the blog server
            server = xmlrpclib.ServerProxy(cfg["url"])
            userInfo = server.blogger.getUsersBlogs(
                cfg["appkey"], cfg["usr"], cfg["passwd"])
            print (userInfo[0])
            # NOTE(review): key is spelled 'bolgid' -- likely meant
            # 'blogid'; confirm that readers of the config use the same
            # spelling before changing it
            cfg["bolgid"] = userInfo[0]["blogid"]
            break
        except:
            # NOTE(review): bare except also hides network/typo bugs;
            # consider narrowing to xmlrpclib.Error / OSError
            print ("ERROR!!!")
            print ("Please Check It Again")
    # persist whatever was collected (even unverified, after 2 failures)
    with open(config_path, "w", encoding = "utf-8") as f:
        json.dump(cfg, f, indent = 4, ensure_ascii = False)
if __name__ == "__main__":
    # first run (or empty/corrupt config): gather credentials interactively
    if have_config() == False:
        create_config()
    print ("End")
| 28.854545 | 61 | 0.507246 | import xmlrpc.client as xmlrpclib
import json
config_path = "./blog_config.json"
def have_config():
'''
return bool value : whether config file exists
'''
try:
with open(config_path, "r", encoding = "utf-8") as f:
try:
cfg = json.load(f)
return cfg != {}
except json.decoder.JSONDecodeError:
return False
except:
with open(config_path, "w", encoding = "utf-8") as f:
json.dump({}, f)
return False
def create_config():
'''
create config file if it doesnt exist
only save ensured data usr provided, if not print ERROR
'''
# Provide EXAMPLE Here
for test_times in range(0, 2):
cfg = {}
for item in [("url", "metaWeblog url"),
("appkey", "blogaddress"),
("usr", "usrname"),
("passwd", "password")]:
cfg[item[0]] = input(item[1] + " : ")
try:
server = xmlrpclib.ServerProxy(cfg["url"])
userInfo = server.blogger.getUsersBlogs(
cfg["appkey"], cfg["usr"], cfg["passwd"])
print (userInfo[0])
cfg["bolgid"] = userInfo[0]["blogid"]
break
except:
print ("ERROR!!!")
print ("Please Check It Again")
with open(config_path, "w", encoding = "utf-8") as f:
json.dump(cfg, f, indent = 4, ensure_ascii = False)
if __name__ == "__main__":
if have_config() == False:
create_config()
print ("End")
| 0 | 0 | 0 |
4f90ba328bb371e24b317ddc9c92bfb92e127e4e | 383 | py | Python | dhukiya/apps/account/migrations/0004_account_about_me.py | fikryans/dhukiya_porto | 9108e51a2feeb275e07c77f81edae07636cf89b6 | [
"MIT"
] | null | null | null | dhukiya/apps/account/migrations/0004_account_about_me.py | fikryans/dhukiya_porto | 9108e51a2feeb275e07c77f81edae07636cf89b6 | [
"MIT"
] | null | null | null | dhukiya/apps/account/migrations/0004_account_about_me.py | fikryans/dhukiya_porto | 9108e51a2feeb275e07c77f81edae07636cf89b6 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.6 on 2021-02-11 12:42
from django.db import migrations, models
| 20.157895 | 47 | 0.5953 | # Generated by Django 3.1.6 on 2021-02-11 12:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable free-text ``about_me`` field to ``Account``."""

    # must run after the previous account migration
    dependencies = [
        ('account', '0003_auto_20210211_1228'),
    ]
    operations = [
        # nullable so existing rows need no default value
        migrations.AddField(
            model_name='account',
            name='about_me',
            field=models.TextField(null=True),
        ),
    ]
| 0 | 269 | 23 |
2a41a21fd019fee9b2c95b90771bd8705ec86740 | 12,272 | py | Python | referencesrv/resolver/common.py | golnazads/reference_service | 3cdd60d1113de099d3b355c1076badeec15f767d | [
"MIT"
] | 1 | 2021-04-28T21:14:30.000Z | 2021-04-28T21:14:30.000Z | referencesrv/resolver/common.py | golnazads/reference_service | 3cdd60d1113de099d3b355c1076badeec15f767d | [
"MIT"
] | 5 | 2020-12-22T19:07:08.000Z | 2022-02-28T15:38:53.000Z | referencesrv/resolver/common.py | golnazads/reference_service | 3cdd60d1113de099d3b355c1076badeec15f767d | [
"MIT"
] | 8 | 2019-08-29T21:22:33.000Z | 2021-11-10T18:06:47.000Z | import traceback
from flask import current_app
from numbers import Real
from decimal import Decimal
from itertools import tee, filterfalse
class DeferredSourceMatcher(object):
    """
    Proxy that defers loading of the actual source matcher to runtime to
    save on startup time when we don't need it: every attribute access is
    forwarded to the matcher registered in the Flask app's extensions.
    """
    def __getattr__(self, att_name):
        """
        Forward attribute lookups to the real matcher; __bases__ and
        __name__ are special-cased so the proxy stays introspectable
        before an application context exists.
        :param att_name: name of the requested attribute
        :return: the corresponding attribute of the underlying matcher
        """
        if att_name=='__bases__':
            return (object,)
        elif att_name=='__name__':
            return 'Unready source matcher'
        return getattr(current_app.extensions['source_matcher'], att_name)
# module-level singleton proxy used throughout the resolver
SOURCE_MATCHER = DeferredSourceMatcher()
class Evidences(object):
    """
    a measure of confidence of a match.

    All scoring functions working on one aspect are supposed to give a float
    between -1 and 1, where -1 is "I'm pretty sure it's wrong" and 1 is
    "I'm almost certain it's right". They put this score into the
    add_evidence function.

    The individual evidences are kept by this class, but there's a get_score
    method that munges them together to yield a number that we hope to be
    a useful measure for the relative credibility of solutions.

    The individual evidences typically come from tailored matching functions
    (e.g., authors.compute_author_evidence). Those are free to abstain
    from voting; if no evidences are collected, get_score will return
    0 (which should allow for easy filtering of those).

    These evidences stand in as scores in that, when compared, they
    are ordered according to what get_score returns.
    """
    def __init__(self):
        """
        Start with an empty evidence collection; the legal score range is
        read from the app's EVIDENCE_SCORE_RANGE configuration.
        """
        self.evidences = []
        self.labels = []
        # _score is cached; None means "not computed yet or invalid"
        self.score = None
        self.min_score = current_app.config['EVIDENCE_SCORE_RANGE'][0]
        self.max_score = current_app.config['EVIDENCE_SCORE_RANGE'][1]
    def __lt__(self, other):
        """
        Order by total score; `other` may be an Evidences or a number.
        A falsy, non-scorable `other` compares as False.
        :param other: Evidences instance or numeric value
        :return: bool
        """
        try:
            return self.get_score() < other.get_score()
        except (AttributeError, TypeError):
            if other:
                return self.get_score() < float(other)
            return False
    def __le__(self, other):
        """
        Less-or-equal on total score; see __lt__ for the fallback rules.
        :param other: Evidences instance or numeric value
        :return: bool
        """
        try:
            return self.get_score() <= other.get_score()
        except (AttributeError, TypeError):
            if other:
                return self.get_score() <= float(other)
            return False
    def __gt__(self, other):
        """
        Greater-than on total score; see __lt__ for the fallback rules.
        :param other: Evidences instance or numeric value
        :return: bool
        """
        try:
            return self.get_score() > other.get_score()
        except (AttributeError, TypeError):
            if other:
                return self.get_score() > float(other)
            return False
    def __ge__(self, other):
        """
        Greater-or-equal on total score; see __lt__ for the fallback rules.
        :param other: Evidences instance or numeric value
        :return: bool
        """
        try:
            return self.get_score() >= other.get_score()
        except (AttributeError, TypeError):
            if other:
                return self.get_score() >= float(other)
            return False
    def __eq__(self, other):
        """
        Equality on total score; a falsy, non-scorable `other` is unequal.
        :param other: Evidences instance or numeric value
        :return: bool
        """
        try:
            return self.get_score() == other.get_score()
        except (AttributeError, TypeError):
            if other:
                return self.get_score() == float(other)
            return False
    def __len__(self):
        """
        :return: number of individual evidences collected
        """
        return len(self.evidences)
    def __str__(self):
        """
        :return: debug string listing every label=value pair collected
        """
        return 'Evidences(%s)'%', '.join('%s=%s'%item for item in zip (self.labels, self.evidences))
    def __add__(self, other):
        """
        Merge another Evidences' votes into this instance (mutates self)
        and invalidate the cached score.
        :param other: Evidences instance whose votes are absorbed
        :return: self
        """
        for ev in other.evidences:
            assert self.min_score <= ev <= self.max_score
        self.score = None
        self.evidences += other.evidences
        self.labels +=other.labels
        return self
    def sum(self):
        """
        :return: sum over all collected evidence values
        """
        return sum(self.evidences)
    def avg(self):
        """
        :return: mean evidence rounded to one decimal place; 0 when empty
        """
        if len(self.evidences) != 0:
            return round(self.sum()/len(self.evidences), 1)
        return 0
    def add_evidence(self, evidence, label):
        """
        adds evidence (a float within the configured score range) to our
        evidence collection under label.
        :param evidence: score between min_score and max_score
        :param label: name of the aspect that produced the score
        :return: None
        """
        assert self.min_score <= evidence <= self.max_score
        # any cached total is stale now
        self.score = None
        self.evidences.append(evidence)
        self.labels.append(label)
    def get_score(self):
        """
        returns a float representative of the collective evidence
        collected; note it returns 0 (not None) when no evidence was
        added. The sum is cached across calls.
        :return: float
        """
        if not self.evidences:
            current_app.logger.error('No evidence, rejecting')
            return 0
        if self.score is None:
            self.score = sum(self.evidences)
        return self.score
    def has_veto(self):
        """
        returns True if any collected evidence is <= 0, i.e. False only
        when all evidence is strictly positive.
        :return: bool
        """
        for e in self.evidences:
            if e<=0:
                return True
        return False
    def single_veto_from(self, field_label):
        """
        returns true if there is exactly one veto and it originates from
        what has field_label.
        :param field_label: label to test as the sole veto source
        :return: bool
        """
        neg_inds = [ind for ind, ev in enumerate(self.evidences) if ev<=0]
        if len(neg_inds)==1:
            return (self.labels[neg_inds[0]]==field_label)
        return False
    def count_votes(self):
        """
        return true if every term of one of the predefined field
        combinations scored the configured maximum
        :return: bool
        """
        d = dict(zip(self.labels, self.evidences))
        combinations = [
            ['authors', 'pubstring', 'volume', 'year'],
            ['authors', 'year', 'page']
        ]
        for fields in combinations:
            vote = 0
            for term in fields:
                if term in d and d[term] == current_app.config['EVIDENCE_SCORE_RANGE'][1]:
                    vote += 1
            # every term in this combination hit the maximum score
            if vote == len(fields):
                return True
        return False
    def __getitem__(self, label):
        """
        returns the score for the field label if it exists, else None
        :param label: evidence label to look up
        :return: float or None
        """
        if label in self.labels:
            d = dict(zip(self.labels, self.evidences))
            return d[label]
        return None
class Solution(object):
    """
    a container for a solution and some ancillary metadata.
    Ancillary metadata includes:
    * citing_bibcode
    * score
    * source_hypothesis (the hypothesis that eventually got it right)
    """
    def __init__(self, cited_bibcode, score, source_hypothesis='not given', citing_bibcode=None):
        """
        :param cited_bibcode: bibcode the reference was resolved to
        :param score: confidence of the match (an Evidences instance for
            a resolved solution -- see __str__)
        :param source_hypothesis: name of the hypothesis that produced it
        :param citing_bibcode: bibcode of the citing paper (stored as str)
        """
        self.cited_bibcode = cited_bibcode
        self.score = score
        self.citing_bibcode = str(citing_bibcode)
        self.source_hypothesis = source_hypothesis
    def __str__(self):
        """
        :return: '<avg score> <cited bibcode>'
        :raises NoSolution: when score is not an Evidences instance
        """
        if isinstance(self.score, Evidences):
            return '%.1f %s'%(self.score.avg(),self.cited_bibcode)
        raise NoSolution("NotResolved")
class Hypothesis(object):
    """A named bundle of expectations about a reference.

    A hypothesis couples a dict of query hints (exposed via the ``hints``
    attribute) with a get_score(response_record, hypothesis) -> Evidences
    callback; see common.Evidences for scoring details.  The callback
    receives a search result record (a dict limited to the configured
    apiQueryFields) together with the hypothesis that generated it.  Hint
    generators can hand extra information to the matcher by constructing
    the Hypothesis with additional keyword arguments ("details") and
    reading them back via get_detail(name), which yields None for
    anything never passed.  Give hypotheses short but expressive names to
    ease debugging.
    """
    def __init__(self, name, hints, get_score_function, **details):
        """
        :param name: short identifier used in debugging output
        :param hints: dict of fields to show to the search engine
        :param get_score_function: callback (response_record, hypothesis)
        :param details: extra data made available to the callback
        """
        self.name = name
        self.hints = hints
        self.get_score_function = get_score_function
        self.details = details
    def get_score(self, response_record, hints):
        """
        Score one search result by delegating to the configured callback.
        :param response_record: search result record (dict)
        :param hints: hypothesis handed through to the callback
        :return: whatever the callback returns (normally an Evidences)
        """
        score_cb = self.get_score_function
        return score_cb(response_record, hints)
    def get_detail(self, detail_name):
        """
        :param detail_name: key of the extra detail to look up
        :return: the detail's value, or None when it was never supplied
        """
        if detail_name not in self.details:
            return None
        return self.details[detail_name]
    def get_hint(self, hint_name):
        """
        :param hint_name: key of the hint to look up
        :return: the hint's value, or None when absent
        """
        if hint_name not in self.hints:
            return None
        return self.hints[hint_name]
class NotResolved(object):
    """
    Sentinel object standing in for a reference that could not be resolved.
    """
    def __init__(self, raw_ref, citing_bibcode):
        """
        :param raw_ref: the raw reference string that failed to resolve
        :param citing_bibcode: bibcode of the citing paper (kept as str)
        """
        self.raw_ref = raw_ref
        self.citing_bibcode = str(citing_bibcode)
    def __str__(self):
        """Short marker quoting the first 40 chars of the reference."""
        snippet = self.raw_ref[:40]
        return 'NOT RESOLVED: %s...' % snippet
class Error(Exception):
    """
    The base class for all exceptions raised by the resolver.
    """
    pass
class NoSolution(Error):
    """
    Raised when a solution could not be found for a reference; carries
    an explanation string (the standard Exception argument).
    """
class Undecidable(NoSolution):
    """
    Raised when the resolver needs to make a decision between candidate
    solutions but cannot (i.e. they are considered tied).
    """
class Overflow(Error):
    """Raised when too many matches come back from solr.
    It should be taken as "please try another, more specific hypothesis".
    """
class OverflowOrNone(Error):
    """
    Raised when either too many matches or no records come back from solr.
    """
class Solr(Error):
    """
    Raised when solr returns an error.
    """
class Incomplete(Error):
    """
    Raised when the parsed reference is incomplete and hence the
    reference cannot be resolved.
    """
def round_two_significant_digits(num):
    """
    Round *num* via the '%.1g' printf format and return it as a float.

    NOTE(review): '%.1g' keeps ONE significant digit, although the
    function's name says two -- confirm which was intended before
    relying on the name.
    :param num: numeric value
    :return: float rounded to one significant figure
    """
    return float('%.1g' % num)
def sorted2(iterable):
    """
    Sort a mixed-type iterable without raising on incomparable types:
    numeric elements come first in numeric order, followed by all other
    elements ordered by their string representation.
    source: https://stackoverflow.com/a/43456510
    :param iterable: An iterable (array or alike) entity which elements should be sorted.
    :return: List with sorted elements.
    """
    def predicate(x):
        # Fix: this nested helper was missing, so the references to
        # `predicate` below raised NameError at runtime.
        return isinstance(x, (Real, Decimal))
    t1, t2 = tee(iterable)
    numbers = filter(predicate, t1)
    non_numbers = filterfalse(predicate, t2)
    sorted_numbers = sorted(numbers)
    sorted_non_numbers = sorted(non_numbers, key=str)
    return sorted_numbers + sorted_non_numbers
import traceback
from flask import current_app
from numbers import Real
from decimal import Decimal
from itertools import tee, filterfalse
class DeferredSourceMatcher(object):
"""
# Defer loading of the actual source matcher to runtime to save on
# startup time when we don't need it.
"""
def __getattr__(self, att_name):
"""
:param att_name:
:return:
"""
if att_name=='__bases__':
return (object,)
elif att_name=='__name__':
return 'Unready source matcher'
return getattr(current_app.extensions['source_matcher'], att_name)
SOURCE_MATCHER = DeferredSourceMatcher()
class Evidences(object):
"""
a measure of confidence of a match.
All scoring functions working on one aspect are supposed to give a float
between -1 and 1, where -1 is "I'm pretty sure it's wrong" and 1 is
"I'm almost certain it's right". They put this score into the the
add_evidence function.
The individual evidences are kept by this class, but there's a get_score
method that munges them together to yield a number between -1 and 1 that
we hope to be a useful measure for the relative credibility of solutions.
The individual evidences typically come from tailored matching functions
(e.g., authors.compute_author_evidence). Those are free to abstain
from voting; if no evidences are collected, get_score will return
None (which should allow for easy filtering of those).
These evidences stand in as scores in that, when compared, they
are ordered according to what get_score returns.
"""
def __init__(self):
"""
"""
self.evidences = []
self.labels = []
# _score is cached; None means "not computed yet or invalid"
self.score = None
self.min_score = current_app.config['EVIDENCE_SCORE_RANGE'][0]
self.max_score = current_app.config['EVIDENCE_SCORE_RANGE'][1]
def __lt__(self, other):
"""
:param other:
:return:
"""
try:
return self.get_score() < other.get_score()
except (AttributeError, TypeError):
if other:
return self.get_score() < float(other)
return False
def __le__(self, other):
"""
:param other:
:return:
"""
try:
return self.get_score() <= other.get_score()
except (AttributeError, TypeError):
if other:
return self.get_score() <= float(other)
return False
def __gt__(self, other):
"""
:param other:
:return:
"""
try:
return self.get_score() > other.get_score()
except (AttributeError, TypeError):
if other:
return self.get_score() > float(other)
return False
def __ge__(self, other):
"""
:param other:
:return:
"""
try:
return self.get_score() >= other.get_score()
except (AttributeError, TypeError):
if other:
return self.get_score() >= float(other)
return False
def __eq__(self, other):
"""
:param other:
:return:
"""
try:
return self.get_score() == other.get_score()
except (AttributeError, TypeError):
if other:
return self.get_score() == float(other)
return False
def __len__(self):
"""
:return:
"""
return len(self.evidences)
def __str__(self):
"""
:return:
"""
return 'Evidences(%s)'%', '.join('%s=%s'%item for item in zip (self.labels, self.evidences))
def __add__(self, other):
"""
:param other:
:return:
"""
for ev in other.evidences:
assert self.min_score <= ev <= self.max_score
self.score = None
self.evidences += other.evidences
self.labels +=other.labels
return self
def sum(self):
"""
:return:
"""
return sum(self.evidences)
def avg(self):
"""
:return:
"""
if len(self.evidences) != 0:
return round(self.sum()/len(self.evidences), 1)
return 0
def add_evidence(self, evidence, label):
"""
adds evidence (a float between -1 and 1) to our evidence collection
under label.
:param evidence:
:param label:
:return:
"""
assert self.min_score <= evidence <= self.max_score
self.score = None
self.evidences.append(evidence)
self.labels.append(label)
def get_score(self):
"""
returns some float between -1 and 1 representative of the collective
evidence collected.
:return:
"""
if not self.evidences:
current_app.logger.error('No evidence, rejecting')
return 0
if self.score is None:
self.score = sum(self.evidences)
return self.score
def has_veto(self):
"""
returns false if all evidence is strictly positive.
:return:
"""
for e in self.evidences:
if e<=0:
return True
return False
def single_veto_from(self, field_label):
"""
returns true if there is exactly one veto and it originates from
what has field_label.
:param field_label:
:return:
"""
neg_inds = [ind for ind, ev in enumerate(self.evidences) if ev<=0]
if len(neg_inds)==1:
return (self.labels[neg_inds[0]]==field_label)
return False
def count_votes(self):
"""
return true if the combination of terms all have high scores
:return:
"""
d = dict(zip(self.labels, self.evidences))
combinations = [
['authors', 'pubstring', 'volume', 'year'],
['authors', 'year', 'page']
]
for fields in combinations:
vote = 0
for term in fields:
if term in d and d[term] == current_app.config['EVIDENCE_SCORE_RANGE'][1]:
vote += 1
if vote == len(fields):
return True
return False
def __getitem__(self, label):
"""
returns the score for the field label if exist
:param label:
:return:
"""
if label in self.labels:
d = dict(zip(self.labels, self.evidences))
return d[label]
return None
class Solution(object):
"""
a container for a solution and some ancillary metadata.
Ancillary metadata includes:
* citing_bibcode
* score
* source_hypothesis (the hypothesis that eventually got it right)
"""
def __init__(self, cited_bibcode, score, source_hypothesis='not given', citing_bibcode=None):
"""
:param cited_bibcode:
:param score:
:param source_hypothesis:
:param citing_bibcode:
"""
self.cited_bibcode = cited_bibcode
self.score = score
self.citing_bibcode = str(citing_bibcode)
self.source_hypothesis = source_hypothesis
def __str__(self):
"""
:return:
"""
if isinstance(self.score, Evidences):
return '%.1f %s'%(self.score.avg(),self.cited_bibcode)
raise NoSolution("NotResolved")
def __repr__(self):
return repr(self.cited_bibcode)
class Hypothesis(object):
"""A container for expectations to a reference.
Constraints have a dict of fields to show to the search
engine (the hints, get them from the attribute), and a
get_score(response_record, hypothesis)->Evidences method.
See common.Evidences for details.
The get_score function receives a result record, i.e.,
a dictionary containing at most the fields given in the
apiQueryFields configuration. How it compares this against
what's in the record is basically up to the class.
Additionally, it gets the hypotheses that generated the response.
This is a simple way to pass information from the hints generator
to the matching function -- usually, you should just construct
the Hypothesis with additional keyword arguments ("details");
you should query for them in get_score using the get_detail(str)
-> anything method (that returns None for keys not passed).
For debugging, you should give hypotheses short, but somewhat
expressive names. See below for examples.
"""
def __init__(self, name, hints, get_score_function, **details):
"""
:param name:
:param hints:
:param get_score_function:
:param details:
"""
self.name = name
self.hints, self.get_score_function = hints, get_score_function
self.details = details
def get_score(self, response_record, hints):
"""
:param response_record:
:param hints:
:return:
"""
return self.get_score_function(response_record, hints)
def get_detail(self, detail_name):
"""
:param detail_name:
:return:
"""
if detail_name in self.details:
return self.details.get(detail_name)
return None
def get_hint(self, hint_name):
"""
:param hint_name:
:return:
"""
if hint_name in self.hints:
return self.hints.get(hint_name)
return None
class NotResolved(object):
"""
a sentinel class holding unresolved references.
"""
def __init__(self, raw_ref, citing_bibcode):
"""
:param raw_ref:
:param citing_bibcode:
"""
self.raw_ref = raw_ref
self.citing_bibcode = str(citing_bibcode)
def __str__(self):
"""
:return:
"""
return 'NOT RESOLVED: %s...'%(self.raw_ref[:40])
class Error(Exception):
"""
the base class for all exceptions.
"""
pass
class NoSolution(Error):
    """
    Raised when a solution could not be found.
    Carries an explanation string and, optionally, the Reference
    instance that failed to resolve.
    """
    def __init__(self, reason, ref=None):
        """
        :param reason: human-readable explanation of the failure
        :param ref: the reference that failed (optional)
        """
        Error.__init__(self, reason)
        self.ref = ref
        self.reason = reason
    def __str__(self):
        """The reason alone, or 'reason: ref' when a reference is set."""
        if self.ref is None:
            return self.reason
        return '%s: %s'%(self.reason, self.ref)
class Undecidable(NoSolution):
"""
is raised when the resolver needs to make a decision but cannot.
In addition to the reference string, this also contains
solutions_considered, pairs of evidence and solutions that were
considered tied.
"""
def __init__(self, reason, ref=None, considered_solutions=[]):
NoSolution.__init__(self, reason, ref)
self.considered_solutions = considered_solutions
class Overflow(Error):
"""is raised when too many matches come back from solr.
It should be taken as "please try another, more specific hypothesis".
"""
class OverflowOrNone(Error):
"""
is rasided either if too many matches or no records come back from solr
"""
class Solr(Error):
"""
is raised when solr returns an error.
"""
class Incomplete(Error):
"""
is raised when parsed reference is incomplete and hence not able to resolve the reference.
"""
def round_two_significant_digits(num):
"""
:param num:
:return:
"""
return float('%s' % float('%.1g' % num))
def sorted2(iterable):
    """
    Sort a heterogeneous iterable without raising on mixed types:
    numeric values (Real or Decimal) come first in ascending order,
    followed by the remaining items ordered by their string form.
    source: https://stackoverflow.com/a/43456510
    :param iterable: iterable of arbitrary elements
    :return: list with the sorted elements
    """
    def is_number(item):
        return isinstance(item, (Real, Decimal))
    items = list(iterable)
    numeric_part = sorted(item for item in items if is_number(item))
    other_part = sorted((item for item in items if not is_number(item)), key=str)
    return numeric_part + other_part
0245172c85b78f4f4335204005de051e5effb0d9 | 9,361 | py | Python | TraceGenerator.py | tgale96/memory-address-trace-tools | 0087dea8764944a9bb20868badbc10cb6066cc27 | [
"MIT"
] | 1 | 2021-02-10T02:02:20.000Z | 2021-02-10T02:02:20.000Z | TraceGenerator.py | tgale96/memory-address-trace-tools | 0087dea8764944a9bb20868badbc10cb6066cc27 | [
"MIT"
] | null | null | null | TraceGenerator.py | tgale96/memory-address-trace-tools | 0087dea8764944a9bb20868badbc10cb6066cc27 | [
"MIT"
] | null | null | null | """ filename: TraceGenerator.py
contents: this script calls the GenerateSyntheticTrace method that
creates and stream of memory references that models the memory
performance of the input application or applications
author: Trevor Gale
date: 3.4.16"""
import h5py as h5
import numpy as np
import ConfigParser
import json
import sys
import traceback
import lib.TraceFormats as TraceFormats
import lib.PreProcessing as PreProc
# dictionary for all available trace formats
traceFormats = {"STL":TraceFormats.STL, \
"OVP":TraceFormats.OVP, \
"Din":TraceFormats.Din}
# usage string
usage_info = "USAGE: python TraceGenerator.py <config_file> \n\
config_file: file specifying the configuration for the trace generator\n\n\
all options for generator must be under header \"[generator]\" \n\
generator options: \n\
\t- traceFile: string indicating the name of the file that contains\n\
\tthe trace to be analyzed (plain-text)\n\n\
\t- traceLength: desired length of the synthetic address\n\
\ttrace (in memory references)\n\n\
\tappProfiles: lists (in brackets, separated by commas, no spaces)\n\
\tof the names of application profiles to model. At least one must \n\
\tbe specified. If len(appProfiles) > 1, the applications profiles\n\
\t are mixed by creating a linear combination with the weights\n\
\tspecified by the \"weights\" parameters. If weights is left as\n\
\tdefault, a uniform distribution is used\n\n\
\t- weights: list (in brackets, separated by commas, no spaces)\n\
\tof the weights of each application. Defaults to evenly weighted\n\
\tapplications\n\n\
\t- formatAccess: name of callback function that is called to print the\n\
\tmemory access. Function must be defined in the lib/TraceFormats and be\n\
\tpresent in the \"traceFormats\" dictionary at the top of this file\n"
# TODO:
# 1. Tool to generate profiles based on a PMF
# 2. Print runtime generation details & progress
def GenerateSyntheticTrace(traceFile, traceLength, appProfiles, weights=[], formatAccess=TraceFormats.STL):
""" GenerateSyntheticTrace: this function takes in application profiles
generated by the \"ApplicationProfiler\" script and generates a synthetic
address trace that models the properties of the input applications
args:
- traceFile: string specifying the name of the file to write the
synthetic address trace to (plain-text)
- traceLength: desired length of the synthetic trace (in memory references)
- appProfiles: python list of the names of the application profiles
to model. At least one must be specified. If len(appProfiles) > 1, the
applications profiles are mixed by creating a linear combination with
the weights specified by the "weights" parameters. If weights is left
as default, a uniform distribution is used
- weights: python list specifying the weights of each application.
Defaults to evenly weighted applications
- formatAccess: callback function that is called to print the memory
references. Function arguments must be (cycle, accessType, memAddress)"""
# validate inputs
if not len(appProfiles):
raise ValueError("(in GenerateSyntheticTrace) must input >= 1 app profile")
if traceLength <= 0:
raise ValueError("(in GenerateSyntheticTrace) traceLength must be > 0)")
numProfiles = len(appProfiles)
if numProfiles > 1 and not(len(weights) == 0 or len(weights) == numProfiles):
raise ValueError("(in GenerateSyntheticTrace) if len(appProfiles) > 1, len(weights) must be 0 or len(appProfiles)")
# create even weights if weights is left as default
if len(weights) == 0:
weights = np.ones(numProfiles)
for i in xrange(numProfiles):
if weights[i] < 0:
raise ValueError("(in GenerateSyntheticTrace) weights must be > 0")
# markov model for cycle activity
blockSize = np.zeros(numProfiles, dtype = np.int)
# open application profiles & find size of largest reusePMF
numReuseDistances = 0
for i in xrange(numProfiles):
appProfiles[i] = h5.File(appProfiles[i])
# get blocksizes
blockSize[i] = appProfiles[i]['blockSize'][()]
# if current size larger than previous largest rPMF
if len(appProfiles[i]['reusePMF']) > numReuseDistances:
numReuseDistances = len(appProfiles[i]['reusePMF'])
# make sure all blocksizes are the same
for i in xrange(1, numProfiles):
if blockSize[i] != blockSize[i-1]:
raise ValueError("(in GenerateSyntheticTrace) all profiles must have the same blockSize")
blockSize = blockSize[0] # set blocksize
# build markov model
activityMarkov = np.zeros((2,2), dtype = np.float)
PreProc.BuildMarkovModel(appProfiles, weights, activityMarkov)
# create weighted PMF for each reuse distance
reusePMF = np.zeros(numReuseDistances, dtype = np.float)
PreProc.BuildReusePMF(appProfiles, weights, reusePMF)
# create load proportions for each reuse distance
loadProp = np.zeros(numReuseDistances, dtype = np.float)
PreProc.BuildLoadProp(appProfiles, weights, loadProp)
# initialize application's working set
workingSet = PreProc.BuildWorkingSet(appProfiles).tolist()
wsSize = len(workingSet)
lruStack = workingSet
# create alphaForest
alphaForest = []
PreProc.BuildAlphaForest(appProfiles, weights, alphaForest, wsSize, blockSize)
# close application profiles
for i in xrange(numProfiles):
appProfiles[i].close()
# open traceFile
traceFile = open(traceFile, 'w')
# get reference to random generator
choice = np.random.choice
# indicates previous cycle's activity
previousCycle = 0
# counts unique accesses
uniqueAddrs = 0
# generation loop
cycle = -1
accesses = 0
while (accesses < traceLength):
if not choice(2, p=activityMarkov[previousCycle,:]): # if inactive cycle
# process inactive cycle
cycle += 1
previousCycle = 0
continue
# else, active cycle
cycle += 1
previousCycle = 1
accesses += 1
# select reuse distance
reuseDist = choice(numReuseDistances, p = reusePMF)
# compulsory cache miss
if not reuseDist:
# if we run out of addresses, print message and exit
if uniqueAddrs >= wsSize:
print "Exiting on cycle %d: cannot exceed size of working set"
exit()
# select new cache block to reference
memAddress = lruStack[uniqueAddrs]
# update lru stack
lruStack.remove(memAddress)
lruStack.insert(0, memAddress)
else:
# keep track of unique accesses
if reuseDist > uniqueAddrs:
uniqueAddrs += 1
# get block at this reuse distance
memAddress = lruStack[reuseDist - 1]
# update lruStack
lruStack.remove(memAddress)
lruStack.insert(0, memAddress)
# select type of access
rand = np.random.rand()
if rand < loadProp[reuseDist]:
accessType = 0 # load
else:
accessType = 1 # store
# select 4-byte word address based on alpha values
blockIndex = workingSet.index(memAddress)
memAddress = memAddress | alphaForest[blockIndex].GenerateAccess(reuseDist - 1)
# print access
formatAccess(traceFile, cycle, accessType, memAddress)
#
## main function
#
if __name__ == "__main__":
    try:
        if len(sys.argv) != 2:
            raise IndexError("Invalid number of arguments. Only config file should be specified")
        # setup config parser with default args
        config = ConfigParser.RawConfigParser({'weights': [], 'formatAccess': traceFormats['STL']})
        config.read(sys.argv[1])
        # pull arguments from the [generator] section
        traceFile = config.get('generator', 'traceFile')
        traceLength = int(config.get('generator', 'traceLength'))
        appProfiles = json.loads(config.get('generator', 'appProfiles'))
        weights = json.loads(config.get('generator', 'weights'))
        formatAccess = config.get('generator', 'formatAccess')
        GenerateSyntheticTrace(traceFile, traceLength, appProfiles, weights, traceFormats[formatAccess])
    except IOError as error:
        print "IOError: " + str(error)
    except ValueError as error:
        # show where the bad value came from before reporting it
        tb = sys.exc_info()[2]
        traceback.print_tb(tb)
        print "ValueError: ", error
    except ConfigParser.NoOptionError as error:
        print "Invalid Args: ", error, "\n"
        print usage_info
    except ConfigParser.NoSectionError as error:
        print "Invalid Config: ", error, "\n"
        print usage_info
    except KeyError as error:
        # unknown formatAccess name (not in traceFormats)
        print "KeyError: ", error
    except IndexError as error:
        print "IndexError: ", error, "\n"
print usage_info | 37.444 | 123 | 0.649824 | """ filename: TraceGenerator.py
contents: this script calls the GenerateSyntheticTrace method that
creates and stream of memory references that models the memory
performance of the input application or applications
author: Trevor Gale
date: 3.4.16"""
import h5py as h5
import numpy as np
import ConfigParser
import json
import sys
import traceback
import lib.TraceFormats as TraceFormats
import lib.PreProcessing as PreProc
# dictionary for all available trace formats
traceFormats = {"STL":TraceFormats.STL, \
"OVP":TraceFormats.OVP, \
"Din":TraceFormats.Din}
# usage string
usage_info = "USAGE: python TraceGenerator.py <config_file> \n\
config_file: file specifying the configuration for the trace generator\n\n\
all options for generator must be under header \"[generator]\" \n\
generator options: \n\
\t- traceFile: string indicating the name of the file that contains\n\
\tthe trace to be analyzed (plain-text)\n\n\
\t- traceLength: desired length of the synthetic address\n\
\ttrace (in memory references)\n\n\
\tappProfiles: lists (in brackets, separated by commas, no spaces)\n\
\tof the names of application profiles to model. At least one must \n\
\tbe specified. If len(appProfiles) > 1, the applications profiles\n\
\t are mixed by creating a linear combination with the weights\n\
\tspecified by the \"weights\" parameters. If weights is left as\n\
\tdefault, a uniform distribution is used\n\n\
\t- weights: list (in brackets, separated by commas, no spaces)\n\
\tof the weights of each application. Defaults to evenly weighted\n\
\tapplications\n\n\
\t- formatAccess: name of callback function that is called to print the\n\
\tmemory access. Function must be defined in the lib/TraceFormats and be\n\
\tpresent in the \"traceFormats\" dictionary at the top of this file\n"
# TODO:
# 1. Tool to generate profiles based on a PMF
# 2. Print runtime generation details & progress
def GenerateSyntheticTrace(traceFile, traceLength, appProfiles, weights=[], formatAccess=TraceFormats.STL):
    """ GenerateSyntheticTrace: this function takes in application profiles
    generated by the \"ApplicationProfiler\" script and generates a synthetic
    address trace that models the properties of the input applications

    args:
        - traceFile: string specifying the name of the file to write the
        synthetic address trace to (plain-text)

        - traceLength: desired length of the synthetic trace (in memory references)

        - appProfiles: python list of the names of the application profiles
        to model. At least one must be specified. If len(appProfiles) > 1, the
        applications profiles are mixed by creating a linear combination with
        the weights specified by the "weights" parameters. If weights is left
        as default, a uniform distribution is used

        - weights: python list specifying the weights of each application.
        Defaults to evenly weighted applications

        - formatAccess: callback function that is called to print the memory
        references. Function arguments must be (cycle, accessType, memAddress)"""
    # NOTE: weights=[] is a mutable default; it is only rebound (never
    # mutated in place) below, so the shared-default pitfall does not bite here.
    # validate inputs
    if not len(appProfiles):
        raise ValueError("(in GenerateSyntheticTrace) must input >= 1 app profile")
    if traceLength <= 0:
        raise ValueError("(in GenerateSyntheticTrace) traceLength must be > 0)")
    numProfiles = len(appProfiles)
    if numProfiles > 1 and not(len(weights) == 0 or len(weights) == numProfiles):
        raise ValueError("(in GenerateSyntheticTrace) if len(appProfiles) > 1, len(weights) must be 0 or len(appProfiles)")
    # create even weights if weights is left as default
    if len(weights) == 0:
        weights = np.ones(numProfiles)
    for i in xrange(numProfiles):
        if weights[i] < 0:
            raise ValueError("(in GenerateSyntheticTrace) weights must be > 0")
    # markov model for cycle activity
    # NOTE(review): np.int / np.float are removed in NumPy >= 1.24 -- this
    # code targets an older NumPy (and Python 2).
    blockSize = np.zeros(numProfiles, dtype = np.int)
    # open application profiles & find size of largest reusePMF
    # NOTE: appProfiles entries are replaced in place by open h5py File
    # handles, so the caller's list is mutated.
    numReuseDistances = 0
    for i in xrange(numProfiles):
        appProfiles[i] = h5.File(appProfiles[i])
        # get blocksizes
        blockSize[i] = appProfiles[i]['blockSize'][()]
        # if current size larger than previous largest rPMF
        if len(appProfiles[i]['reusePMF']) > numReuseDistances:
            numReuseDistances = len(appProfiles[i]['reusePMF'])
    # make sure all blocksizes are the same
    for i in xrange(1, numProfiles):
        if blockSize[i] != blockSize[i-1]:
            raise ValueError("(in GenerateSyntheticTrace) all profiles must have the same blockSize")
    blockSize = blockSize[0] # set blocksize
    # build markov model
    activityMarkov = np.zeros((2,2), dtype = np.float)
    PreProc.BuildMarkovModel(appProfiles, weights, activityMarkov)
    # create weighted PMF for each reuse distance
    reusePMF = np.zeros(numReuseDistances, dtype = np.float)
    PreProc.BuildReusePMF(appProfiles, weights, reusePMF)
    # create load proportions for each reuse distance
    loadProp = np.zeros(numReuseDistances, dtype = np.float)
    PreProc.BuildLoadProp(appProfiles, weights, loadProp)
    # initialize application's working set
    workingSet = PreProc.BuildWorkingSet(appProfiles).tolist()
    wsSize = len(workingSet)
    # NOTE(review): lruStack is an alias of workingSet, not a copy -- the
    # remove/insert reordering below also reorders workingSet, so the
    # workingSet.index() lookup later returns the block's LRU position.
    # Confirm this aliasing is intended.
    lruStack = workingSet
    # create alphaForest
    alphaForest = []
    PreProc.BuildAlphaForest(appProfiles, weights, alphaForest, wsSize, blockSize)
    # close application profiles
    for i in xrange(numProfiles):
        appProfiles[i].close()
    # open traceFile
    # NOTE(review): this handle is never explicitly closed; it relies on
    # interpreter exit / GC to flush.
    traceFile = open(traceFile, 'w')
    # get reference to random generator
    choice = np.random.choice
    # indicates previous cycle's activity
    previousCycle = 0
    # counts unique accesses
    uniqueAddrs = 0
    # generation loop
    cycle = -1
    accesses = 0
    while (accesses < traceLength):
        if not choice(2, p=activityMarkov[previousCycle,:]): # if inactive cycle
            # process inactive cycle
            cycle += 1
            previousCycle = 0
            continue
        # else, active cycle
        cycle += 1
        previousCycle = 1
        accesses += 1
        # select reuse distance
        reuseDist = choice(numReuseDistances, p = reusePMF)
        # compulsory cache miss
        if not reuseDist:
            # if we run out of addresses, print message and exit
            if uniqueAddrs >= wsSize:
                # NOTE(review): format string contains %d but no % argument --
                # the literal "%d" is printed instead of the cycle number.
                print "Exiting on cycle %d: cannot exceed size of working set"
                exit()
            # select new cache block to reference
            memAddress = lruStack[uniqueAddrs]
            # update lru stack
            lruStack.remove(memAddress)
            lruStack.insert(0, memAddress)
        else:
            # keep track of unique accesses
            # NOTE(review): uniqueAddrs is only advanced here (reuseDist > 0);
            # the compulsory-miss branch above reads lruStack[uniqueAddrs]
            # without incrementing it -- verify this is the intended counting.
            if reuseDist > uniqueAddrs:
                uniqueAddrs += 1
            # get block at this reuse distance
            memAddress = lruStack[reuseDist - 1]
            # update lruStack
            lruStack.remove(memAddress)
            lruStack.insert(0, memAddress)
        # select type of access
        rand = np.random.rand()
        if rand < loadProp[reuseDist]:
            accessType = 0 # load
        else:
            accessType = 1 # store
        # select 4-byte word address based on alpha values
        blockIndex = workingSet.index(memAddress)
        memAddress = memAddress | alphaForest[blockIndex].GenerateAccess(reuseDist - 1)
        # print access
        formatAccess(traceFile, cycle, accessType, memAddress)
#
## main function
#
if __name__ == "__main__":
    # Script entry point (Python 2 syntax: bare `print` statements).
    # Expects exactly one argument: the path to an INI-style config file
    # whose [generator] section supplies every generator option.
    try:
        if len(sys.argv) != 2:
            raise IndexError("Invalid number of arguments. Only config file should be specified")
        # setup config parser with default args
        config = ConfigParser.RawConfigParser({'weights': [], 'formatAccess': traceFormats['STL']})
        config.read(sys.argv[1])
        # pull arguments
        traceFile = config.get('generator', 'traceFile')
        traceLength = int(config.get('generator', 'traceLength'))
        appProfiles = json.loads(config.get('generator', 'appProfiles'))
        weights = json.loads(config.get('generator', 'weights'))
        formatAccess = config.get('generator', 'formatAccess')
        GenerateSyntheticTrace(traceFile, traceLength, appProfiles, weights, traceFormats[formatAccess])
    # Each anticipated failure mode is reported on stdout; config/argument
    # problems additionally print the usage text.
    except IOError as error:
        print "IOError: " + str(error)
    except ValueError as error:
        # ValueError carries a traceback because it can originate deep inside
        # GenerateSyntheticTrace's input validation.
        tb = sys.exc_info()[2]
        traceback.print_tb(tb)
        print "ValueError: ", error
    except ConfigParser.NoOptionError as error:
        print "Invalid Args: ", error, "\n"
        print usage_info
    except ConfigParser.NoSectionError as error:
        print "Invalid Config: ", error, "\n"
        print usage_info
    except KeyError as error:
        print "KeyError: ", error
    except IndexError as error:
        print "IndexError: ", error, "\n"
print usage_info | 0 | 0 | 0 |
8d9f7ecbe7901669fd46fcdcbed2e71b21fe5538 | 1,337 | py | Python | src/read_data.py | oneguynick/battery_model | ee9fa1f04a060b3486d5a496760b47db70a91888 | [
"MIT"
] | 37 | 2018-12-05T15:31:13.000Z | 2022-02-14T14:54:19.000Z | src/read_data.py | xxl4tomxu98/Energy_Storage_Pyomo | fa7bec0d9bb7a2d30a96e8fbc2e8957ecddc234c | [
"MIT"
] | 1 | 2020-09-01T02:50:00.000Z | 2020-09-02T12:42:48.000Z | src/read_data.py | xxl4tomxu98/Energy_Storage_Pyomo | fa7bec0d9bb7a2d30a96e8fbc2e8957ecddc234c | [
"MIT"
] | 13 | 2018-11-08T17:18:11.000Z | 2021-06-01T16:21:50.000Z | import pandas as pd
def read_filter_lbmp(path):
    """Load one LBMP csv, keep only N.Y.C. rows, and normalise column names.

    Parameters
    ----------
    path : str or file-like
        Anything accepted by :func:`pandas.read_csv`.

    Returns
    -------
    DataFrame
        Three columns: ``time_stamp`` (parsed to datetime), ``name`` and
        ``lbmp``.
    """
    raw = pd.read_csv(path, parse_dates=['Time Stamp'])
    nyc_only = raw.loc[raw['Name'] == 'N.Y.C.',
                       ['Time Stamp', 'Name', 'LBMP ($/MWHr)']]
    nyc_only.columns = ['time_stamp', 'name', 'lbmp']
    return nyc_only
def read_all_nyc(data_path):
    """Read and combine every LBMP csv found under *data_path*.

    Parameters
    ----------
    data_path : pathlib.Path
        Directory searched recursively (``**/*.csv``) for LBMP data files.

    Returns
    -------
    DataFrame
        Four columns (``time_stamp``, ``name``, ``lbmp``, ``hour``), sorted
        chronologically; ``hour`` is simply the row position after sorting.
    """
    frames = [read_filter_lbmp(csv_file) for csv_file in data_path.glob('**/*.csv')]
    combined = pd.concat(frames)
    combined = combined.sort_values('time_stamp').reset_index(drop=True)
    combined['hour'] = combined.index
    return combined
| 26.74 | 77 | 0.613313 | import pandas as pd
def read_filter_lbmp(path):
    """Load one LBMP csv, keep only N.Y.C. rows, and normalise column names.

    Parameters
    ----------
    path : str or file-like
        Anything accepted by :func:`pandas.read_csv`.

    Returns
    -------
    DataFrame
        Three columns: ``time_stamp`` (parsed to datetime), ``name`` and
        ``lbmp``.
    """
    raw = pd.read_csv(path, parse_dates=['Time Stamp'])
    nyc_only = raw.loc[raw['Name'] == 'N.Y.C.',
                       ['Time Stamp', 'Name', 'LBMP ($/MWHr)']]
    nyc_only.columns = ['time_stamp', 'name', 'lbmp']
    return nyc_only
def read_all_nyc(data_path):
    """Read and combine every LBMP csv found under *data_path*.

    Parameters
    ----------
    data_path : pathlib.Path
        Directory searched recursively (``**/*.csv``) for LBMP data files.

    Returns
    -------
    DataFrame
        Four columns (``time_stamp``, ``name``, ``lbmp``, ``hour``), sorted
        chronologically; ``hour`` is simply the row position after sorting.
    """
    frames = [read_filter_lbmp(csv_file) for csv_file in data_path.glob('**/*.csv')]
    combined = pd.concat(frames)
    combined = combined.sort_values('time_stamp').reset_index(drop=True)
    combined['hour'] = combined.index
    return combined
| 0 | 0 | 0 |
c4d2c99a44409cf5bcbe81080d8b5c90242aef43 | 5,232 | py | Python | fdroid_dl/update/index.py | nicopace/fdroid-dl | 06f8f46b947a01ace5a69ce442b4195e5386953c | [
"MIT"
] | null | null | null | fdroid_dl/update/index.py | nicopace/fdroid-dl | 06f8f46b947a01ace5a69ce442b4195e5386953c | [
"MIT"
] | null | null | null | fdroid_dl/update/index.py | nicopace/fdroid-dl | 06f8f46b947a01ace5a69ce442b4195e5386953c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import os.path
from concurrent.futures import as_completed
import requests
from ..download import FuturesSessionFlex
from ..processor import IndexFileProcessor
logger = logging.getLogger('update.IndexUpdate')
| 48.444444 | 184 | 0.613723 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import os.path
from concurrent.futures import as_completed
import requests
from ..download import FuturesSessionFlex
from ..processor import IndexFileProcessor
logger = logging.getLogger('update.IndexUpdate')
class IndexUpdate:
def __init__(self,config,head_timeout=10,index_timeout=60,max_workers=10):
self.config = config
self.head_timeout = head_timeout
self.index_timeout = index_timeout
self.max_workers = max_workers
def required(self,repos,timeout=60):
# try new index
head_futures = self.__future(repos=repos,timeout=timeout)
# process result
(new_index,notfound)=self.__head_response(head_futures)
# retry old index
head_futures = self.__future(repos=notfound,attr='url_index',timeout=timeout)
# process result
(old_index,notfound)=self.__head_response(head_futures)
# new_index[] needs to be fetched with index-v1.jar
# old_index[] needs to be fetched with index.jar
return (new_index,old_index)
def download(self,new_index,old_index,timeout=60):
if new_index is None: raise AttributeError('new_index missing %s'%repos)
if old_index is None: raise AttributeError('old_index missing %s'%repos)
new_futures = self.__future(repos=new_index,timeout=timeout,http_method=FuturesSessionFlex.get,background_callback=FuturesSessionFlex.extract_jar, stream=True)
old_futures = self.__future(repos=old_index,timeout=timeout,http_method=FuturesSessionFlex.get,background_callback=FuturesSessionFlex.extract_jar,attr='url_index', stream=True)
self.__download_response(new_futures)
self.__download_response(old_futures)
def __future(self, repos=None, attr='url_index_v1', http_method=FuturesSessionFlex.head, background_callback=None, timeout=60,**kwargs):
if repos is None: raise AttributeError('repos missing %s'%repos)
if background_callback is None:
background_callback=FuturesSessionFlex.add_hash
futures=[]
with FuturesSessionFlex(max_workers=self.max_workers) as session:
for repo in repos:
if not repo.auth is None or repo.verify == False:
ts = requests.Session()
ts.auth=repo.auth
ts.verify=repo.verify
session.map(getattr(repo,attr),ts)
request = http_method(session,getattr(repo,attr), background_callback=background_callback, timeout=timeout, **kwargs)
request.repo=repo # pass repo ref to future processing
futures.append(request)
return futures
def __as_completed(self,futures):
for future in as_completed(futures):
repo = future.repo
try:
response = future.result()
response.raise_for_status()
yield (repo,response,True)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404: # try old index file
if logging.getLogger().isEnabledFor(logging.DEBUG): logger.exception()
else: logger.warn(str(e))
yield (repo,response,False)
else:
repo['error']={'code':e.response.status_code if isinstance(e, exceptions.HTTPError) else 600,'msg':str(e)}
logger.exception()
def __head_response(self,futures,timeout=60):
success=[]
notfound=[]
for repo,response,ok in self.__as_completed(futures):
if ok:
# check cache
logger.info("HEAD %s (%s) "%(response.url,response.elapsed))
if not os.path.exists(repo.filename) or repo.hash != response.hash:
if 'error' in repo:
del repo['error']
repo['hash']=response.hash
success.append(repo)
if not os.path.exists(repo.filename):
logger.warn("CACHE - (miss) - %s.cache file not found!"%(repo.id,))
else:
logger.info("CACHE - (miss) - %s - %s)"%(repo.key,response.hash))
else:
# skip do nothing for cache hits
logger.info("CACHE - (hit) - %s - %s)"%(repo.key,response.hash))
else:
notfound.append(repo)
return (success,notfound)
def __download_response(self,futures,timeout=60):
with IndexFileProcessor(max_workers=self.max_workers) as ifp:
for repo,response,ok in self.__as_completed(futures):
if response.ok:
logger.info("DOWNLOADED %s [%s] (%s) "%(response.url,response.elapsed,response.h_size))
idx = response.index
ifp.process(response.index,repo,repo.url,response.h_size)
for future in ifp.completed():
(index,elapsed,url,h_size) = future.result()
repo['name']=index.get('repo',{}).get('name')
logger.info("UPDATED %s - %s [%s] (%s) "%(repo['name'],url,elapsed,h_size))
| 4,751 | -3 | 210 |
a82d0971f1f41de5777dfa876b35ba155a2396c9 | 5,594 | py | Python | models.py | Arturs/udacity-computer_vision1 | 0d71428c6542f02a001565404e5235f51b19bf89 | [
"MIT"
] | null | null | null | models.py | Arturs/udacity-computer_vision1 | 0d71428c6542f02a001565404e5235f51b19bf89 | [
"MIT"
] | null | null | null | models.py | Arturs/udacity-computer_vision1 | 0d71428c6542f02a001565404e5235f51b19bf89 | [
"MIT"
] | null | null | null | ## TODO: define the convolutional neural network architecture
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
| 33.698795 | 117 | 0.555417 | ## TODO: define the convolutional neural network architecture
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
# can use the below import should you choose to initialize the weights of your Net
import torch.nn.init as I
class Net(nn.Module):
    """Three-stage convolutional network for facial keypoint regression.

    Input:  (N, 1, 224, 224) grayscale images.
    Output: (N, 136) tensor -- one (x, y) pair for each of 68 keypoints.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Stage 1: 1 -> 32 channels, 5x5 kernel; 224 -> 220 -> pool -> 110.
        self.conv6_1 = nn.Conv2d(1, 32, 5)
        self.pool6_1 = nn.MaxPool2d(2, 2)
        self.conv6_1_drop = nn.Dropout2d()
        # Stage 2: 32 -> 64 channels; 110 -> 106 -> pool -> 53.
        self.conv6_2 = nn.Conv2d(32, 64, 5)
        self.pool6_2 = nn.MaxPool2d(2, 2)
        self.conv6_2_drop = nn.Dropout2d()
        # Stage 3: 64 -> 128 channels; 53 -> 49 -> pool -> 24.
        self.conv6_3 = nn.Conv2d(64, 128, 5)
        self.pool6_3 = nn.MaxPool2d(2, 2)
        self.conv6_3_drop = nn.Dropout2d()
        # Head: flatten the 24x24x128 feature map, normalise, then two
        # linear layers down to the 136 keypoint coordinates.
        self.batch_norm6_1 = nn.BatchNorm1d(24*24*128)
        self.fc6_1 = nn.Linear(24*24*128, 1024)
        self.fc6_2 = nn.Linear(1024, 136)

    def forward(self, x):
        """Map a batch of 224x224 grayscale images to 136 keypoint values."""
        stages = (
            (self.conv6_1, self.conv6_1_drop, self.pool6_1),
            (self.conv6_2, self.conv6_2_drop, self.pool6_2),
            (self.conv6_3, self.conv6_3_drop, self.pool6_3),
        )
        # Each stage: convolution -> channel dropout -> ReLU -> 2x2 max-pool.
        for conv, drop, pool in stages:
            x = pool(F.relu(drop(conv(x))))
        x = x.view(-1, 24*24*128)
        x = self.batch_norm6_1(x)
        x = self.fc6_1(x)
        x = self.fc6_2(x)
        return x
| 5,209 | 0 | 100 |
04d9fcf1d8d76ecdd25982beaa26335f1e0641bd | 6,160 | py | Python | Robot.py | btrice/Robotics | 89e718a08ab50b6cf957b2766eea6d653b518802 | [
"Apache-2.0"
] | 1 | 2019-10-10T12:54:47.000Z | 2019-10-10T12:54:47.000Z | Robot.py | btrice/Robotics | 89e718a08ab50b6cf957b2766eea6d653b518802 | [
"Apache-2.0"
] | null | null | null | Robot.py | btrice/Robotics | 89e718a08ab50b6cf957b2766eea6d653b518802 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
import pygame
import sys
import random
def main():
    """Entry point: run the robot localisation demo loop forever.

    Each iteration renders the belief grid, performs one random move,
    samples the wall sensors, and shifts the belief grid accordingly.
    """
    robot = Robot()
    belief = robot.init_probability_map()
    while True:
        # robot.display_robot()  # alternative renderer (true pose only)
        robot.display_probability_map(belief)
        move = robot.robot_random_move()
        robot.perception()
        robot.localisation(move, belief)
        # Debug helpers:
        # print("x : " + str(robot.x)); print("y : " + str(robot.y))
        # robot.display_prob(belief)
if __name__ == "__main__":
    main()
| 30.49505 | 137 | 0.525325 | #! /usr/bin/env python
import pygame
import sys
import random
class Robot(object):
    """A robot on a 10x10 grid with pygame rendering and a localisation demo.

    The true pose lives in (self.x, self.y). display_probability_map() draws
    a 10x10 belief grid (64x48-pixel cells in a 640x480 window) plus the
    robot, and the move_*/localisation methods shift that belief grid to
    mirror the robot's motion.
    """
    def __init__(self):
        self.pygame = pygame.init()  # init result kept only as a marker
        self.screen = pygame.display.set_mode((640,480))
        self.x = random.randint(0,9) # robot x position
        self.y = random.randint(0,9) # robot y position
        # Last sensor readings, refreshed by perception():
        self.z = 0.0     # any wall sensed
        self.zred = 0.0  # red wall (right/bottom border) sensed
        self.zblue = 0.0 # blue wall (left/top border) sensed
    def robot_up(self):
        """Move the robot one cell up, clamped to the top edge."""
        self.y = self.y - 1
        if self.y < 0:
            self.y = 0
    def robot_down(self):
        """Move the robot one cell down, clamped to the bottom edge."""
        self.y = self.y + 1
        if self.y > 9:
            self.y = 9
    def robot_left(self):
        """Move the robot one cell left, clamped to the left edge."""
        self.x = self.x - 1
        if self.x < 0:
            self.x = 0
    def robot_right(self):
        """Move the robot one cell right, clamped to the right edge."""
        self.x = self.x + 1
        if self.x > 9:
            self.x = 9
    def robot_random_move(self):
        """Attempt one random move; return 1/2/3/4 for right/left/up/down,
        or 5 when the drawn move would cross a border (no move made)."""
        r = random.randint(1,4)
        if r == 1 and self.x < 9:
            self.robot_right()
            return 1
        elif r == 2 and self.x > 0:
            self.robot_left()
            return 2
        elif r == 3 and self.y > 0:
            self.robot_up()
            return 3
        elif r == 4 and self.y < 9:
            self.robot_down()
            return 4
        else:
            return 5
    def display_robot(self):
        """Draw the world (blue top/left and red right/bottom walls) and the
        robot's true position as a white circle."""
        self.screen.fill((0,0,0))
        blue = (0,0,255)
        red = (255,0,0)
        pygame.draw.rect(self.screen,blue,(0,0,640,48))
        pygame.draw.rect(self.screen,blue,(0,0,64,480))
        pygame.draw.rect(self.screen,red,(640-64,0,64,480))
        pygame.draw.rect(self.screen,red,(0,480-48,640,48))
        pygame.draw.circle(self.screen,(255,255,255),(self.x*64+32,self.y*48+24),24)
        pygame.display.update()
        pygame.time.wait(50)
    def perception(self):
        """Noisy wall sensing: when the robot is on a border cell, each
        matching sensor fires with probability 0.3."""
        self.z = 0.0
        self.zred = 0.0
        self.zblue = 0.0
        # Blue walls are the x == 0 / y == 0 borders, red walls the
        # x == 9 / y == 9 borders (matching display_robot's colours).
        if (self.x == 0 or self.y == 0) and random.random() < 0.3:
            self.zblue = 1.0
        if (self.x == 9 or self.y == 9) and random.random() < 0.3:
            self.zred = 1.0
        if (self.x == 0 or self.y == 0 or self.x == 9 or self.y == 9) and random.random() < 0.3:
            self.z = 1.0
    def display_probability_map(self,p):
        """Render the belief grid p (brighter cell = higher probability)
        plus the robot's true position as a white circle."""
        self.screen.fill((0,0,0))
        for j in range(0,10):
            for i in range(0,10):
                pygame.draw.rect(self.screen,(50+205*p[i][j],0,50+205*p[i][j]),(i*64,j*48,64,48))
        # NOTE(review): the circle uses (self.y, self.x) while display_robot
        # uses (self.x, self.y) -- the two renderers disagree on the axis
        # convention; confirm which is intended.
        pygame.draw.circle(self.screen,(255,255,255),(self.y*64+32,self.x*48+24),24)
        pygame.display.update()
        pygame.time.wait(50)
    def init_probability_map(self):
        """ Initialisation of probability map by 0.10 for each cell of the table"""
        #0.10 should be 0.1 . Each line probalility should be equal to 1 (0.1 * 10 = 1)
        #with 0.10 we can see the color difference between the screen and probabily map in display_probability_ma
        # NOTE(review): i is assigned but unused; both comprehension ranges
        # use j (harmless here because i == j == 10).
        i = 10
        j = 10
        return [[0.10 for x in range(j)] for y in range(j)]
    ################## LOCALISATION ####################
    def move_right(self, p):
        """Shift the belief grid one column right (cells inherit their left
        neighbour's probability); the leftmost column becomes zero."""
        i = 9
        j = 9
        while i > 0:
            while j > -1:
                p[j][i] = p[j][i-1]
                j = j - 1
            j = 9
            i = i - 1
        for i in range(10):
            p[i][0] = 0 # Put zero on the left column of probability table
    def move_left(self, p):
        """Shift the belief grid one column left; the rightmost column
        becomes zero."""
        i = 0
        j = 0
        while i < 9:
            while j < 10:
                p[j][i] = p[j][i+1]
                j = j + 1
            j = 0
            i = i + 1
        for i in range(10):
            p[i][9] = 0 # Put zero on the right column of probability table
    def move_up(self, p):
        """Shift the belief grid one row up; the bottom row becomes zero."""
        i = 0
        j = 0
        while i < 9:
            while j < 10:
                p[i][j] = p[i+1][j]
                j = j + 1
            j = 0
            i = i + 1
        for i in range(10):
            p[9][i] = 0 # Put zero on the down column of probability table
    def move_down(self, p):
        """Shift the belief grid one row down; the top row becomes zero."""
        i = 9
        j = 9
        while i > 0:
            while j > -1:
                p[i][j] = p[i-1][j]
                j = j - 1
            j = 9
            i = i - 1
        for i in range(10):
            p[0][i] = 0 # Put zero on the up column of probability table
    def localisation(self,m, p):
        """Apply the motion update for move code m (1/2/3/4) to belief p.

        NOTE(review): only the motion model is applied here -- the sensor
        readings set by perception() (z/zred/zblue) are never folded into p,
        and p is not renormalised; confirm whether a measurement update was
        intended.
        """
        if m == 1 :
            self.move_right(p)
        elif m == 2 :
            self.move_left(p)
        elif m == 3 :
            self.move_up(p)
        elif m == 4:
            self.move_down(p)
    def display_prob(self, p):
        """Print the belief grid to stdout, one row per line (the trailing
        print('\\n') leaves a blank line between rows)."""
        for i in range(10):
            for j in range(10):
                print(p[i][j], end=' ')
            print('\n')
def main():
    """Entry point: run the robot localisation demo loop forever.

    Each iteration renders the belief grid, performs one random move,
    samples the wall sensors, and shifts the belief grid accordingly.
    """
    robot = Robot()
    belief = robot.init_probability_map()
    while True:
        # robot.display_robot()  # alternative renderer (true pose only)
        robot.display_probability_map(belief)
        move = robot.robot_random_move()
        robot.perception()
        robot.localisation(move, belief)
        # Debug helpers:
        # print("x : " + str(robot.x)); print("y : " + str(robot.y))
        # robot.display_prob(belief)
if __name__ == "__main__":
    main()
| 265 | 5,316 | 26 |
c9f18bb599d78b1a54d6f891a4ae00339b091379 | 6,010 | py | Python | geojson2java.py | Quar/AnyLogicGIS_from_geojson | 6037ae9b0480ff3e1ae16085baabec40d3425396 | [
"MIT"
] | 2 | 2018-06-18T15:27:45.000Z | 2019-11-23T00:16:32.000Z | geojson2java.py | Quar/AnyLogicGIS_from_geojson | 6037ae9b0480ff3e1ae16085baabec40d3425396 | [
"MIT"
] | 1 | 2019-09-16T05:35:51.000Z | 2019-09-16T05:35:51.000Z | geojson2java.py | Quar/AnyLogicGIS_from_geojson | 6037ae9b0480ff3e1ae16085baabec40d3425396 | [
"MIT"
] | null | null | null | import json
from io import StringIO
import argparse
if __name__ == '__main__':
    # Command-line entry point: take a GeoJSON path (and an optional output
    # path via -o) and emit the generated Java class through genFile().
    parser = argparse.ArgumentParser(description="""
Generate Java Utility class from GeoJson Input for AnyLogic GISShape
""")
    parser.add_argument('geojsonfile',
                        help='path to geojson file of regions',
                        metavar='regions.geojson',
                        type=str)
    parser.add_argument("-o",
                        help="path of output Java file",
                        metavar='output.java',
                        type=str,
                        default='InitGISRegions.java')
    args = parser.parse_args()
    genFile(args.geojsonfile, args.o)
| 31.465969 | 142 | 0.59817 | import json
from io import StringIO
import argparse
def read_geo_obj(filepath):
    """Load a GeoJSON document from *filepath* and return it as a dict.

    The encoding is pinned to UTF-8 because GeoJSON (RFC 7946) is defined
    to be UTF-8; relying on the platform default could mis-decode the file
    on some systems.

    Raises whatever ``open``/``json.load`` raise (IOError, JSONDecodeError).
    """
    with open(filepath, 'r', encoding='utf-8') as f:
        return json.load(f)
def print_key_trees(dict_obj):
    """Pretty-print the key hierarchy of a nested dict/list structure.

    Dict keys are printed one per line, indented by one tab per nesting
    level. For a list, brackets are printed and only the first element is
    inspected (elements are assumed homogeneous); an empty list prints just
    the brackets instead of raising IndexError (fixed here).
    """
    def print_indent(obj, indent=0):
        # Recursive worker: prints obj's structure at the given depth.
        if isinstance(obj, dict):
            for k, v in obj.items():
                print('\t'*indent + k)
                print_indent(v, indent+1)
        elif isinstance(obj, list):
            print('\t'*indent + '[')
            if obj:  # guard: obj[0] on an empty list raised IndexError
                print_indent(obj[0], indent+1)
            print('\t'*indent + ']')
    print_indent(dict_obj, 0)
def zipcode_latlon(features):
    """Emit one Java method per feature that returns its polygon ring as a
    flat ``double[]`` of alternating latitude/longitude values.

    Each feature must carry ``properties.ZIPCODE`` and a polygon ring at
    ``geometry.coordinates[0][0]`` (a list of [lon, lat] pairs).
    """
    chunks = []
    for feature in features:
        zipcode = feature['properties']['ZIPCODE']
        ring = feature['geometry']['coordinates'][0][0]
        # GeoJSON stores [lon, lat]; the Java array wants lat first.
        pairs = ", ".join("{0}, {1}".format(lat, lon) for lon, lat in ring)
        chunks.append(
            "double[] region_{0}() ".format(zipcode)
            + "{\n"
            + "return new double[] "
            + "{ "
            + pairs
            + "};\n}\n\n"
        )
    return "".join(chunks)
def gisRegionMap(features):
    """Emit a Java factory method mapping each ZIP code (sorted) to a
    ``GISRegion`` built from its ``region_<zip>()`` coordinate array."""
    lines = [
        'public LinkedHashMap<String, GISRegion> __addGISRegionInLA(ShapeGISMap map) {',
        'LinkedHashMap<String, GISRegion> regionsMap = new LinkedHashMap<String, GISRegion>();\n',
    ]
    for feature in sorted(features, key=lambda item: item['properties']['ZIPCODE']):
        zipcode = feature['properties']['ZIPCODE']
        lines.append('regionsMap.put("%s", new GISRegion(map, region_%s()));\n' % (zipcode, zipcode))
    lines.append('return regionsMap;\n')
    lines.append('}\n\n')
    return "".join(lines)
def gisRegionMapWithFilter(features):
    """Emit a Java factory like :func:`gisRegionMap`, but each ``put`` is
    guarded (in the generated Java) by ``onlyInclude.contains(zip)`` so the
    caller can restrict the map at runtime."""
    lines = [
        'public LinkedHashMap<String, GISRegion> __addGISRegionInLA(ShapeGISMap map, List<String> onlyInclude) {',
        'LinkedHashMap<String, GISRegion> regionsMap = new LinkedHashMap<String, GISRegion>();\n',
    ]
    for feature in sorted(features, key=lambda item: item['properties']['ZIPCODE']):
        zipcode = feature['properties']['ZIPCODE']
        lines.append('if (onlyInclude.contains("%s")) regionsMap.put("%s", new GISRegion(map, region_%s()));\n'
                     % (zipcode, zipcode, zipcode))
    lines.append('return regionsMap;\n')
    lines.append('}\n\n')
    return "".join(lines)
def regionsCentMap(features):
    """Emit a Java factory mapping each ZIP code (sorted) to a centroid
    ``Point``, computed as the plain average of the ring's vertices."""
    parts = [
        'public LinkedHashMap<String, Point> __addGISRegionCentersInLA(ShapeGISMap map) {',
        'LinkedHashMap<String, Point> regionsCentMap = new LinkedHashMap<String, Point>();\n',
    ]
    for feature in sorted(features, key=lambda item: item['properties']['ZIPCODE']):
        zipcode = feature['properties']['ZIPCODE']
        ring = feature['geometry']['coordinates'][0][0]
        # Vertex average (same summation order as before, so floats match).
        cent_lat = sum(lat for _, lat in ring) / len(ring)
        cent_lon = sum(lon for lon, _ in ring) / len(ring)
        parts.append('regionsCentMap.put("%s", (new Point()).setLatLon(%s, %s));\n'
                     % (zipcode, cent_lat, cent_lon))
    parts.append('return regionsCentMap;\n')
    parts.append('}\n\n')
    return "".join(parts)
def regionsCentMapWithFilter(features):
    """Emit a Java factory like :func:`regionsCentMap`, but each ``put`` is
    guarded (in the generated Java) by ``onlyInclude.contains(zip)``."""
    parts = [
        'public LinkedHashMap<String, Point> __addGISRegionCentersInLA(ShapeGISMap map, List<String> onlyInclude) {',
        'LinkedHashMap<String, Point> regionsCentMap = new LinkedHashMap<String, Point>();\n',
    ]
    for feature in sorted(features, key=lambda item: item['properties']['ZIPCODE']):
        zipcode = feature['properties']['ZIPCODE']
        ring = feature['geometry']['coordinates'][0][0]
        # Vertex average (same summation order as before, so floats match).
        cent_lat = sum(lat for _, lat in ring) / len(ring)
        cent_lon = sum(lon for lon, _ in ring) / len(ring)
        parts.append('if (onlyInclude.contains("%s")) regionsCentMap.put("%s", (new Point()).setLatLon(%s, %s));\n'
                     % (zipcode, zipcode, cent_lat, cent_lon))
    parts.append('return regionsCentMap;\n')
    parts.append('}\n\n')
    return "".join(parts)
def getCentralOfAllRegions(features):
    """Return ``(lat, lon)`` of the average vertex over all features' rings.

    All ring vertices of all features are pooled into one average, so a
    feature with more vertices weighs more heavily. Raises
    ZeroDivisionError for an empty feature list (unchanged behaviour).

    Fix: the original also read ``properties.ZIPCODE`` into an unused local;
    that dead lookup is removed, so features no longer need a properties key.
    """
    sum_lat, sum_lon = 0, 0
    n_points = 0
    for feature in features:
        ring = feature['geometry']['coordinates'][0][0]
        n_points += len(ring)
        for lon, lat in ring:
            sum_lat += lat
            sum_lon += lon
    return sum_lat / n_points, sum_lon / n_points
def addClassWrapper(*args):
    """Wrap the given Java source fragments in the ``InitGISRegions`` class.

    Each fragment is emitted verbatim followed by a newline, between a fixed
    auto-generated-file header and the closing brace.
    """
    header = """
/***************************
This file is auto generated by a bot.
***************************/
import java.util.Map;
import java.util.stream.Collectors;
public class InitGISRegions {
"""
    pieces = [header]
    for fragment in args:
        pieces.append(fragment)
        pieces.append('\n')
    pieces.append('}\n')
    return "".join(pieces)
def genFile(geojsonfile, javafile='InitGISRegions.java'):
    """Read *geojsonfile* and write the generated Java utility class to *javafile*."""
    features = read_geo_obj(geojsonfile)['features']
    # Generator order fixes the order of methods in the emitted class.
    generators = (zipcode_latlon, gisRegionMap, gisRegionMapWithFilter,
                  regionsCentMap, regionsCentMapWithFilter)
    source = addClassWrapper(*(gen(features) for gen in generators))
    with open(javafile, 'w') as out:
        out.write(source)
if __name__ == '__main__':
    # Command-line entry point: regions GeoJSON in, Java utility class out.
    parser = argparse.ArgumentParser(description="""
    Generate Java Utility class from GeoJson Input for AnyLogic GISShape
    """)
    # Required positional argument: the input GeoJSON of regions.
    parser.add_argument('geojsonfile',
                        help='path to geojson file of regions',
                        metavar='regions.geojson',
                        type=str)
    # Optional output path; defaults to InitGISRegions.java in the CWD.
    parser.add_argument("-o",
                        help="path of output Java file",
                        metavar='output.java',
                        type=str,
                        default='InitGISRegions.java')
    args = parser.parse_args()
    genFile(args.geojsonfile, args.o)
| 5,073 | 0 | 230 |
c36194f00baaa0b59faef27b4bff8f45286f8d6b | 469 | py | Python | conftest.py | DKE-Data/agrirouter-sdk-python | 6d6b26606f7d424c62289af56da55acf412772fc | [
"Apache-2.0"
] | null | null | null | conftest.py | DKE-Data/agrirouter-sdk-python | 6d6b26606f7d424c62289af56da55acf412772fc | [
"Apache-2.0"
] | null | null | null | conftest.py | DKE-Data/agrirouter-sdk-python | 6d6b26606f7d424c62289af56da55acf412772fc | [
"Apache-2.0"
] | null | null | null | import pytest
from tests.constants import public_key, private_key, auth_result_url
@pytest.fixture(scope="session")
| 33.5 | 85 | 0.80597 | import pytest
from tests.constants import public_key, private_key, auth_result_url
@pytest.fixture(scope="session")
def authorization():
    """Run the agrirouter QA authorization flow once per test session.

    Extracts the auth response from the captured ``auth_result_url``,
    verifies it against the configured key pair, and returns the
    resulting auth data for dependent tests.
    """
    # Local import — presumably so test collection does not require the
    # agrirouter package to be importable; confirm before moving it up.
    from agrirouter.auth.auth import Authorization
    auth_client = Authorization("QA", public_key=public_key, private_key=private_key)
    auth_response = auth_client.extract_auth_response(auth_result_url)
    # NOTE(review): presumably raises/fails on an invalid signature — see
    # the agrirouter SDK docs for verify_auth_response's contract.
    auth_client.verify_auth_response(auth_response)
    auth_data = auth_response.get_auth_result()
    return auth_data
| 329 | 0 | 22 |
71460bc2648ba173cb08db66055f0c4bfbe42333 | 471 | py | Python | tests/unittests/http_functions/sync_logging/main.py | gohar94/azure-functions-python-worker | 4322e53ddbcc1eea40c1b061b42653336d9003f6 | [
"MIT"
] | 277 | 2018-01-25T23:13:03.000Z | 2022-02-22T06:12:04.000Z | tests/unittests/http_functions/sync_logging/main.py | gohar94/azure-functions-python-worker | 4322e53ddbcc1eea40c1b061b42653336d9003f6 | [
"MIT"
] | 731 | 2018-01-18T18:54:38.000Z | 2022-03-29T00:01:46.000Z | tests/unittests/http_functions/sync_logging/main.py | YunchuWang/azure-functions-python-worker | 1f23e038a506c6412e4efbf07eb471a6afab0c2a | [
"MIT"
] | 109 | 2018-01-18T02:22:57.000Z | 2022-02-15T18:59:54.000Z | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
import time
import azure.functions
logger = logging.getLogger('my function')
| 23.55 | 74 | 0.711253 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
import time
import azure.functions
logger = logging.getLogger('my function')
def main(req: azure.functions.HttpRequest) -> str:
    """HTTP-triggered test function exercising synchronous logging.

    Deliberately raises and handles ZeroDivisionError so both
    ``logger.error`` calls emit a record with exception info attached.
    """
    try:
        1 / 0
    except ZeroDivisionError:
        # exc_info=True attaches the handled ZeroDivisionError traceback.
        logger.error('a gracefully handled error', exc_info=True)
        logger.error('a gracefully handled critical error', exc_info=True)
    # Short pause — presumably to give the invocation measurable duration
    # in the worker tests; confirm against the test harness expectations.
    time.sleep(0.05)
    return 'OK-sync'
| 258 | 0 | 23 |
4a90ef174f0f3934bc571093ee16d87145d07aa4 | 1,050 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/third_party_auth/urls.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/third_party_auth/urls.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/common/djangoapps/third_party_auth/urls.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """Url configuration for the auth module."""
from django.conf.urls import include, url
from .views import (
IdPRedirectView,
inactive_user_view,
lti_login_and_complete_view,
post_to_custom_auth_form,
saml_metadata_view
)
# Third-party-auth routes. Specific endpoints are listed before the broad
# ``^auth/`` include of social_django's URLs.
urlpatterns = [
    url(r'^auth/inactive', inactive_user_view, name="third_party_inactive_redirect"),
    url(r'^auth/custom_auth_entry', post_to_custom_auth_form, name='tpa_post_to_custom_auth_form'),
    # SAML service-provider metadata document.
    url(r'^auth/saml/metadata.xml', saml_metadata_view),
    url(r'^auth/login/(?P<backend>lti)/$', lti_login_and_complete_view),
    url(r'^auth/idp_redirect/(?P<provider_slug>[\w-]+)', IdPRedirectView.as_view(), name="idp_redirect"),
    url(r'^auth/', include('social_django.urls', namespace='social')),
    # Versioned SAML configuration/data APIs, all mounted under auth/saml/v0/.
    url(r'^auth/saml/v0/', include('common.djangoapps.third_party_auth.samlproviderconfig.urls')),
    url(r'^auth/saml/v0/', include('common.djangoapps.third_party_auth.samlproviderdata.urls')),
    url(r'^auth/saml/v0/', include('common.djangoapps.third_party_auth.saml_configuration.urls')),
]
| 43.75 | 105 | 0.74 | """Url configuration for the auth module."""
from django.conf.urls import include, url
from .views import (
IdPRedirectView,
inactive_user_view,
lti_login_and_complete_view,
post_to_custom_auth_form,
saml_metadata_view
)
# Third-party-auth routes. Specific endpoints are listed before the broad
# ``^auth/`` include of social_django's URLs.
urlpatterns = [
    url(r'^auth/inactive', inactive_user_view, name="third_party_inactive_redirect"),
    url(r'^auth/custom_auth_entry', post_to_custom_auth_form, name='tpa_post_to_custom_auth_form'),
    # SAML service-provider metadata document.
    url(r'^auth/saml/metadata.xml', saml_metadata_view),
    url(r'^auth/login/(?P<backend>lti)/$', lti_login_and_complete_view),
    url(r'^auth/idp_redirect/(?P<provider_slug>[\w-]+)', IdPRedirectView.as_view(), name="idp_redirect"),
    url(r'^auth/', include('social_django.urls', namespace='social')),
    # Versioned SAML configuration/data APIs, all mounted under auth/saml/v0/.
    url(r'^auth/saml/v0/', include('common.djangoapps.third_party_auth.samlproviderconfig.urls')),
    url(r'^auth/saml/v0/', include('common.djangoapps.third_party_auth.samlproviderdata.urls')),
    url(r'^auth/saml/v0/', include('common.djangoapps.third_party_auth.saml_configuration.urls')),
]
| 0 | 0 | 0 |
4eeaa2e94ebf0366cc4aae0128164fc2f00bbf83 | 9,991 | py | Python | maltpynt/rebin.py | matteobachetti/MaLTPyNT | 6c93d2e23041b6c932810b5a8d727ee1b6dabfed | [
"BSD-3-Clause"
] | 8 | 2015-02-23T13:43:21.000Z | 2021-07-17T11:35:24.000Z | maltpynt/rebin.py | matteobachetti/MaLTPyNT | 6c93d2e23041b6c932810b5a8d727ee1b6dabfed | [
"BSD-3-Clause"
] | 1 | 2017-09-14T07:55:07.000Z | 2017-09-14T07:55:07.000Z | maltpynt/rebin.py | matteobachetti/MaLTPyNT | 6c93d2e23041b6c932810b5a8d727ee1b6dabfed | [
"BSD-3-Clause"
] | 4 | 2016-03-02T20:36:07.000Z | 2018-02-26T13:23:53.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions to rebin light curves and frequency spectra."""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import numpy as np
from .io import get_file_type
from .io import save_data
from .io import MP_FILE_EXTENSION, get_file_extension
from .base import _empty, _assign_value_if_none
import logging
def const_rebin(x, y, factor, yerr=None, normalize=True):
    """Rebin any pair of variables.

    Might be time and counts, or freq and pds.
    Also possible to rebin the error on y.

    Parameters
    ----------
    x : array-like
    y : array-like
    factor : int
        Rebin factor
    yerr : array-like, optional
        Uncertainties of y values (it is assumed that the y are normally
        distributed)

    Returns
    -------
    new_x : array-like
        The rebinned x array
    new_y : array-like
        The rebinned y array
    new_err : array-like
        The rebinned yerr array

    Other Parameters
    ----------------
    normalize : bool
    """
    arr_dtype = y.dtype
    if factor <= 1:
        # Nothing to do; return inputs plus a zero error array if needed.
        res = [x, y]
        if yerr is not None:
            res.append(yerr)
        else:
            res.append(np.zeros(len(y), dtype=arr_dtype))
        return res
    # Fix: np.long and np.int were removed in NumPy 1.24; the builtins
    # behave identically for this truncating conversion.
    factor = int(factor)
    nbin = len(y)

    new_nbins = nbin // factor
    # Trailing bins that do not fill a complete group are dropped.
    y_resh = np.reshape(y[:new_nbins * factor], (new_nbins, factor))
    x_resh = np.reshape(x[:new_nbins * factor], (new_nbins, factor))
    new_y = np.sum(y_resh, axis=1)
    new_x = np.sum(x_resh, axis=1) / factor
    if yerr is not None:
        # Errors add in quadrature.
        yerr_resh = np.reshape(yerr[:new_nbins * factor], (new_nbins, factor))
        new_yerr = np.sum(yerr_resh ** 2, axis=1)
    else:
        new_yerr = np.zeros(len(new_x), dtype=arr_dtype)
    if normalize:
        return new_x, new_y / factor, np.sqrt(new_yerr) / factor
    else:
        return new_x, new_y, np.sqrt(new_yerr)
def geom_bin(freq, pds, bin_factor=None, pds_err=None, npds=None):
    """Given a PDS, bin it geometrically.

    Parameters
    ----------
    freq : array-like
    pds : array-like
    bin_factor : float > 1
    pds_err : array-like

    Returns
    -------
    retval : object
        An object containing all the following attributes
    flo : array-like
        Lower boundaries of the new frequency bins
    fhi : array-like
        Upper boundaries of the new frequency bins
    pds : array-like
        The rebinned PDS
    epds : array-like
        The uncertainties on the rebinned PDS points (be careful. Check with
        simulations if it works in your case)
    nbins : array-like, optional
        The new number of bins averaged in each PDS point.

    Other Parameters
    ----------------
    npds : int
        The number of PDSs averaged to obtain the input PDS

    Notes
    -----
    Some parts of the code are copied from an algorithm in isisscripts.sl
    """
    from numpy import log10

    df = np.diff(freq)
    assert np.max(df) - np.min(df) < 1e-5 * np.max(df), \
        'This only works for not previously rebinned spectra'

    df = freq[1] - freq[0]
    npds = _assign_value_if_none(npds, 1.)
    pds_err = _assign_value_if_none(pds_err, np.zeros(len(pds)))

    # Drop a leading zero frequency: log-spacing cannot include f = 0.
    if freq[0] < 1e-10:
        freq = freq[1:]
        pds = pds[1:]
        pds_err = pds_err[1:]

    if bin_factor <= 1:
        logging.warning("Bin factor must be > 1!!")
        f0 = freq - df / 2.
        f1 = freq + df / 2.
        retval = _empty()
        retval.flo = f0
        retval.fhi = f1
        retval.pds = pds
        retval.epds = pds_err
        retval.nbins = np.ones(len(pds)) * npds
        return retval

    # Input frequencies are referred to the center of the bin. But from now on
    # I'll be interested in the start and stop of each frequency bin.
    freq = freq - df / 2
    fmin = min(freq)
    fmax = max(freq) + df

    logstep = log10(bin_factor)

    # maximum number of bins
    # Fix: np.int was removed in NumPy 1.24; int() truncates identically.
    nmax = int((log10(fmax) - log10(fmin)) / logstep + 0.5)

    # Low frequency grid
    flo = fmin * 10. ** (np.arange(nmax) * logstep)
    flo = np.append(flo, [fmax])

    # Now the clever part: building a histogram of frequencies
    pds_dtype = pds.dtype
    pdse_dtype = pds_err.dtype

    bins = np.digitize(freq.astype(np.double), flo.astype(np.double))
    newpds = np.zeros(nmax, dtype=pds_dtype) - 1
    newpds_err = np.zeros(nmax, dtype=pdse_dtype)
    newfreqlo = np.zeros(nmax)
    # Fix: np.long was removed in NumPy 1.24; np.int64 matches its old meaning.
    new_nbins = np.zeros(nmax, dtype=np.int64)

    for i in range(nmax):
        good = bins == i
        ngood = np.count_nonzero(good)
        new_nbins[i] = ngood
        if ngood == 0:
            continue
        newpds[i] = np.sum(pds[good]) / ngood
        newfreqlo[i] = np.min(freq[good])
        newpds_err[i] = np.sqrt(np.sum(pds_err[good] ** 2)) / ngood

    # Discard log-grid bins that received no input frequencies.
    good = new_nbins > 0
    new_nbins = new_nbins[good] * npds
    newfreqlo = newfreqlo[good]
    newpds = newpds[good]
    newpds_err = newpds_err[good]

    newfreqhi = newfreqlo[1:]
    newfreqhi = np.append(newfreqhi, [fmax])

    # (dead intermediate list assignment removed: it was immediately
    # overwritten by the _empty() container below)
    retval = _empty()
    retval.flo = newfreqlo
    retval.fhi = newfreqhi
    retval.pds = newpds
    retval.epds = newpds_err
    retval.nbins = new_nbins
    return retval
def rebin_file(filename, rebin):
    """Rebin the contents of a file, be it a light curve or a spectrum.

    Integer *rebin* values trigger constant rebinning; non-integer values
    trigger geometrical rebinning (PDS/CPDS only). The result is saved to
    a new file whose extension is replaced by ``_rebin<factor>`` plus the
    standard MP extension; the input file is left untouched.
    """
    ftype, contents = get_file_type(filename)

    do_dyn = False
    # Dynamical (time-resolved) spectra live under 'dyn<ftype>' and are
    # rebinned alongside the averaged data when present.
    if 'dyn{0}'.format(ftype) in contents.keys():
        do_dyn = True

    if ftype == 'lc':
        x = contents['time']
        y = contents['lc']
        # Uncertainties taken as sqrt of the counts.
        ye = np.sqrt(y)
        logging.info('Applying a constant rebinning')
        # normalize=False: keep summed counts rather than averaged rates.
        x, y, ye = \
            const_rebin(x, y, rebin, ye, normalize=False)
        contents['time'] = x
        contents['lc'] = y
        # Accumulate the rebin factor across repeated rebinnings.
        if 'rebin' in list(contents.keys()):
            contents['rebin'] *= rebin
        else:
            contents['rebin'] = rebin
    elif ftype in ['pds', 'cpds']:
        x = contents['freq']
        y = contents[ftype]
        ye = contents['e' + ftype]
        # if rebin is integer, use constant rebinning. Otherwise, geometrical
        if rebin == float(int(rebin)):
            logging.info('Applying a constant rebinning')
            if do_dyn:
                old_dynspec = contents['dyn{0}'.format(ftype)]
                old_edynspec = contents['edyn{0}'.format(ftype)]

                dynspec = []
                edynspec = []
                # Rebin each time slice of the dynamical spectrum.
                for i_s, spec in enumerate(old_dynspec):
                    _, sp, spe = \
                        const_rebin(x, spec, rebin,
                                    old_edynspec[i_s],
                                    normalize=True)
                    dynspec.append(sp)
                    edynspec.append(spe)

                contents['dyn{0}'.format(ftype)] = np.array(dynspec)
                contents['edyn{0}'.format(ftype)] = np.array(edynspec)
            x, y, ye = \
                const_rebin(x, y, rebin, ye, normalize=True)
            contents['freq'] = x
            contents[ftype] = y
            contents['e' + ftype] = ye
            contents['rebin'] *= rebin
        else:
            logging.info('Applying a geometrical rebinning')
            if do_dyn:
                old_dynspec = contents['dyn{0}'.format(ftype)]
                old_edynspec = contents['edyn{0}'.format(ftype)]

                dynspec = []
                edynspec = []
                for i_s, spec in enumerate(old_dynspec):
                    retval = geom_bin(x, spec, rebin, old_edynspec[i_s])
                    dynspec.append(retval.pds)
                    edynspec.append(retval.epds)

                contents['dyn{0}'.format(ftype)] = np.array(dynspec)
                contents['edyn{0}'.format(ftype)] = np.array(edynspec)

            retval = geom_bin(x, y, rebin, ye)
            # Geometric binning replaces the single 'freq' axis with
            # explicit lower/upper bin boundaries.
            del contents['freq']
            contents['flo'] = retval.flo
            contents['fhi'] = retval.fhi
            contents[ftype] = retval.pds
            contents['e' + ftype] = retval.epds
            contents['nbins'] = retval.nbins
            contents['rebin'] *= retval.nbins
    else:
        raise Exception('Format was not recognized:', ftype)

    outfile = filename.replace(get_file_extension(filename),
                               '_rebin%g' % rebin + MP_FILE_EXTENSION)
    logging.info('Saving %s to %s' % (ftype, outfile))
    save_data(contents, outfile, ftype)
def main(args=None):
    """Main function called by the `MPrebin` command line script."""
    import argparse

    parser = argparse.ArgumentParser(
        description='Rebin light curves and frequency spectra. ')
    parser.add_argument("files", help="List of light curve files", nargs='+')
    parser.add_argument("-r", "--rebin", type=float, default=1,
                        help="Rebinning to apply. Only if the quantity to" +
                        " rebin is a (C)PDS, it is possible to specify a" +
                        " non-integer rebin factor, in which case it is" +
                        " interpreted as a geometrical binning factor")
    parser.add_argument("--loglevel",
                        help=("use given logging level (one between INFO, "
                              "WARNING, ERROR, CRITICAL, DEBUG; "
                              "default:WARNING)"),
                        default='WARNING',
                        type=str)
    parser.add_argument("--debug", help="use DEBUG logging level",
                        default=False, action='store_true')

    args = parser.parse_args(args)

    # --debug is a shortcut that overrides --loglevel.
    if args.debug:
        args.loglevel = 'DEBUG'
    logging.basicConfig(filename='MPrebin.log',
                        level=getattr(logging, args.loglevel.upper(), None),
                        filemode='w')

    for fname in args.files:
        rebin_file(fname, args.rebin)
| 32.333333 | 78 | 0.572315 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Functions to rebin light curves and frequency spectra."""
from __future__ import (absolute_import, unicode_literals, division,
print_function)
import numpy as np
from .io import get_file_type
from .io import save_data
from .io import MP_FILE_EXTENSION, get_file_extension
from .base import _empty, _assign_value_if_none
import logging
def const_rebin(x, y, factor, yerr=None, normalize=True):
    """Rebin any pair of variables.

    Might be time and counts, or freq and pds.
    Also possible to rebin the error on y.

    Parameters
    ----------
    x : array-like
    y : array-like
    factor : int
        Rebin factor
    yerr : array-like, optional
        Uncertainties of y values (it is assumed that the y are normally
        distributed)

    Returns
    -------
    new_x : array-like
        The rebinned x array
    new_y : array-like
        The rebinned y array
    new_err : array-like
        The rebinned yerr array

    Other Parameters
    ----------------
    normalize : bool
    """
    arr_dtype = y.dtype
    if factor <= 1:
        # Nothing to do; return inputs plus a zero error array if needed.
        res = [x, y]
        if yerr is not None:
            res.append(yerr)
        else:
            res.append(np.zeros(len(y), dtype=arr_dtype))
        return res
    # Fix: np.long and np.int were removed in NumPy 1.24; the builtins
    # behave identically for this truncating conversion.
    factor = int(factor)
    nbin = len(y)

    new_nbins = nbin // factor
    # Trailing bins that do not fill a complete group are dropped.
    y_resh = np.reshape(y[:new_nbins * factor], (new_nbins, factor))
    x_resh = np.reshape(x[:new_nbins * factor], (new_nbins, factor))
    new_y = np.sum(y_resh, axis=1)
    new_x = np.sum(x_resh, axis=1) / factor
    if yerr is not None:
        # Errors add in quadrature.
        yerr_resh = np.reshape(yerr[:new_nbins * factor], (new_nbins, factor))
        new_yerr = np.sum(yerr_resh ** 2, axis=1)
    else:
        new_yerr = np.zeros(len(new_x), dtype=arr_dtype)
    if normalize:
        return new_x, new_y / factor, np.sqrt(new_yerr) / factor
    else:
        return new_x, new_y, np.sqrt(new_yerr)
def geom_bin(freq, pds, bin_factor=None, pds_err=None, npds=None):
    """Given a PDS, bin it geometrically.

    Parameters
    ----------
    freq : array-like
    pds : array-like
    bin_factor : float > 1
    pds_err : array-like

    Returns
    -------
    retval : object
        An object containing all the following attributes
    flo : array-like
        Lower boundaries of the new frequency bins
    fhi : array-like
        Upper boundaries of the new frequency bins
    pds : array-like
        The rebinned PDS
    epds : array-like
        The uncertainties on the rebinned PDS points (be careful. Check with
        simulations if it works in your case)
    nbins : array-like, optional
        The new number of bins averaged in each PDS point.

    Other Parameters
    ----------------
    npds : int
        The number of PDSs averaged to obtain the input PDS

    Notes
    -----
    Some parts of the code are copied from an algorithm in isisscripts.sl
    """
    from numpy import log10

    df = np.diff(freq)
    assert np.max(df) - np.min(df) < 1e-5 * np.max(df), \
        'This only works for not previously rebinned spectra'

    df = freq[1] - freq[0]
    npds = _assign_value_if_none(npds, 1.)
    pds_err = _assign_value_if_none(pds_err, np.zeros(len(pds)))

    # Drop a leading zero frequency: log-spacing cannot include f = 0.
    if freq[0] < 1e-10:
        freq = freq[1:]
        pds = pds[1:]
        pds_err = pds_err[1:]

    if bin_factor <= 1:
        logging.warning("Bin factor must be > 1!!")
        f0 = freq - df / 2.
        f1 = freq + df / 2.
        retval = _empty()
        retval.flo = f0
        retval.fhi = f1
        retval.pds = pds
        retval.epds = pds_err
        retval.nbins = np.ones(len(pds)) * npds
        return retval

    # Input frequencies are referred to the center of the bin. But from now on
    # I'll be interested in the start and stop of each frequency bin.
    freq = freq - df / 2
    fmin = min(freq)
    fmax = max(freq) + df

    logstep = log10(bin_factor)

    # maximum number of bins
    # Fix: np.int was removed in NumPy 1.24; int() truncates identically.
    nmax = int((log10(fmax) - log10(fmin)) / logstep + 0.5)

    # Low frequency grid
    flo = fmin * 10. ** (np.arange(nmax) * logstep)
    flo = np.append(flo, [fmax])

    # Now the clever part: building a histogram of frequencies
    pds_dtype = pds.dtype
    pdse_dtype = pds_err.dtype

    bins = np.digitize(freq.astype(np.double), flo.astype(np.double))
    newpds = np.zeros(nmax, dtype=pds_dtype) - 1
    newpds_err = np.zeros(nmax, dtype=pdse_dtype)
    newfreqlo = np.zeros(nmax)
    # Fix: np.long was removed in NumPy 1.24; np.int64 matches its old meaning.
    new_nbins = np.zeros(nmax, dtype=np.int64)

    for i in range(nmax):
        good = bins == i
        ngood = np.count_nonzero(good)
        new_nbins[i] = ngood
        if ngood == 0:
            continue
        newpds[i] = np.sum(pds[good]) / ngood
        newfreqlo[i] = np.min(freq[good])
        newpds_err[i] = np.sqrt(np.sum(pds_err[good] ** 2)) / ngood

    # Discard log-grid bins that received no input frequencies.
    good = new_nbins > 0
    new_nbins = new_nbins[good] * npds
    newfreqlo = newfreqlo[good]
    newpds = newpds[good]
    newpds_err = newpds_err[good]

    newfreqhi = newfreqlo[1:]
    newfreqhi = np.append(newfreqhi, [fmax])

    # (dead intermediate list assignment removed: it was immediately
    # overwritten by the _empty() container below)
    retval = _empty()
    retval.flo = newfreqlo
    retval.fhi = newfreqhi
    retval.pds = newpds
    retval.epds = newpds_err
    retval.nbins = new_nbins
    return retval
def rebin_file(filename, rebin):
    """Rebin the contents of a file, be it a light curve or a spectrum.

    Integer *rebin* values trigger constant rebinning; non-integer values
    trigger geometrical rebinning (PDS/CPDS only). The result is saved to
    a new file whose extension is replaced by ``_rebin<factor>`` plus the
    standard MP extension; the input file is left untouched.
    """
    ftype, contents = get_file_type(filename)

    do_dyn = False
    # Dynamical (time-resolved) spectra live under 'dyn<ftype>' and are
    # rebinned alongside the averaged data when present.
    if 'dyn{0}'.format(ftype) in contents.keys():
        do_dyn = True

    if ftype == 'lc':
        x = contents['time']
        y = contents['lc']
        # Uncertainties taken as sqrt of the counts.
        ye = np.sqrt(y)
        logging.info('Applying a constant rebinning')
        # normalize=False: keep summed counts rather than averaged rates.
        x, y, ye = \
            const_rebin(x, y, rebin, ye, normalize=False)
        contents['time'] = x
        contents['lc'] = y
        # Accumulate the rebin factor across repeated rebinnings.
        if 'rebin' in list(contents.keys()):
            contents['rebin'] *= rebin
        else:
            contents['rebin'] = rebin
    elif ftype in ['pds', 'cpds']:
        x = contents['freq']
        y = contents[ftype]
        ye = contents['e' + ftype]
        # if rebin is integer, use constant rebinning. Otherwise, geometrical
        if rebin == float(int(rebin)):
            logging.info('Applying a constant rebinning')
            if do_dyn:
                old_dynspec = contents['dyn{0}'.format(ftype)]
                old_edynspec = contents['edyn{0}'.format(ftype)]

                dynspec = []
                edynspec = []
                # Rebin each time slice of the dynamical spectrum.
                for i_s, spec in enumerate(old_dynspec):
                    _, sp, spe = \
                        const_rebin(x, spec, rebin,
                                    old_edynspec[i_s],
                                    normalize=True)
                    dynspec.append(sp)
                    edynspec.append(spe)

                contents['dyn{0}'.format(ftype)] = np.array(dynspec)
                contents['edyn{0}'.format(ftype)] = np.array(edynspec)
            x, y, ye = \
                const_rebin(x, y, rebin, ye, normalize=True)
            contents['freq'] = x
            contents[ftype] = y
            contents['e' + ftype] = ye
            contents['rebin'] *= rebin
        else:
            logging.info('Applying a geometrical rebinning')
            if do_dyn:
                old_dynspec = contents['dyn{0}'.format(ftype)]
                old_edynspec = contents['edyn{0}'.format(ftype)]

                dynspec = []
                edynspec = []
                for i_s, spec in enumerate(old_dynspec):
                    retval = geom_bin(x, spec, rebin, old_edynspec[i_s])
                    dynspec.append(retval.pds)
                    edynspec.append(retval.epds)

                contents['dyn{0}'.format(ftype)] = np.array(dynspec)
                contents['edyn{0}'.format(ftype)] = np.array(edynspec)

            retval = geom_bin(x, y, rebin, ye)
            # Geometric binning replaces the single 'freq' axis with
            # explicit lower/upper bin boundaries.
            del contents['freq']
            contents['flo'] = retval.flo
            contents['fhi'] = retval.fhi
            contents[ftype] = retval.pds
            contents['e' + ftype] = retval.epds
            contents['nbins'] = retval.nbins
            contents['rebin'] *= retval.nbins
    else:
        raise Exception('Format was not recognized:', ftype)

    outfile = filename.replace(get_file_extension(filename),
                               '_rebin%g' % rebin + MP_FILE_EXTENSION)
    logging.info('Saving %s to %s' % (ftype, outfile))
    save_data(contents, outfile, ftype)
def main(args=None):
    """Main function called by the `MPrebin` command line script."""
    import argparse

    parser = argparse.ArgumentParser(
        description='Rebin light curves and frequency spectra. ')
    parser.add_argument("files", help="List of light curve files", nargs='+')
    parser.add_argument("-r", "--rebin", type=float, default=1,
                        help="Rebinning to apply. Only if the quantity to" +
                        " rebin is a (C)PDS, it is possible to specify a" +
                        " non-integer rebin factor, in which case it is" +
                        " interpreted as a geometrical binning factor")
    parser.add_argument("--loglevel",
                        help=("use given logging level (one between INFO, "
                              "WARNING, ERROR, CRITICAL, DEBUG; "
                              "default:WARNING)"),
                        default='WARNING',
                        type=str)
    parser.add_argument("--debug", help="use DEBUG logging level",
                        default=False, action='store_true')

    args = parser.parse_args(args)

    # --debug is a shortcut that overrides --loglevel.
    if args.debug:
        args.loglevel = 'DEBUG'
    logging.basicConfig(filename='MPrebin.log',
                        level=getattr(logging, args.loglevel.upper(), None),
                        filemode='w')

    for fname in args.files:
        rebin_file(fname, args.rebin)
| 0 | 0 | 0 |
17f3c7f78e01745f6b3482cefe947cc17063b57b | 2,435 | py | Python | python/companies_plugin-1.21/companies_plugin/utils/sys_utils.py | dataesr/scanr-backend | 39681be69b9a96b4a07b9410754c897cd5b65c24 | [
"MIT"
] | null | null | null | python/companies_plugin-1.21/companies_plugin/utils/sys_utils.py | dataesr/scanr-backend | 39681be69b9a96b4a07b9410754c897cd5b65c24 | [
"MIT"
] | 3 | 2020-06-18T15:06:10.000Z | 2021-05-07T16:29:50.000Z | plugins/companies_plugin/companies_plugin/utils/sys_utils.py | reseachalps/Search-Engine | 1cd1e83902119938ffd412394b09dce92d082500 | [
"MIT"
] | null | null | null | import faulthandler
import atexit
import signal
import traceback
import os
import multiprocessing
# To test with doctest...
# (when run standalone, the relative package import is unavailable)
if not __package__:  # pragma: no cover
    LIB_PATH = "."
else:
    from .. import LIB_PATH

# Global configuration
# Dump file layout: <LIB_PATH>/dump/<main process id>/<process id>.dump
DUMP_DIRECTORY = LIB_PATH + "/dump/"
DUMP_CURRENT_PROCESS = DUMP_DIRECTORY + "%(ppid)s/"
DUMP_FILENAME = DUMP_CURRENT_PROCESS + "%(pid)s.dump"
def get_stack(): # pragma: no cover # nothing to test here
    """Return the formatted traceback of the currently handled exception.

    NOTE(review): despite the name, ``traceback.format_exc()`` formats the
    active *exception*, not the call stack; outside an ``except`` block it
    returns the string "NoneType: None" plus a newline. If a true stack
    trace is intended, ``traceback.format_stack()`` would be needed —
    confirm intended usage with callers.
    """
    return traceback.format_exc()
class StackDumper:
    """
    Dumps the stack of the current process and all of its threads when it receives a SIGUSR1 signal.
    The stack will go in dump/<main process id>/<real process id>.dump file.
    You do not need to create this object yourself; instead, use `init_stack_dumper()`,
    which constructs one instance whose side effects do the registration.
    """
def init_stack_dumper():
    """
    Initialize the stack dumper for current process.
    This method should be called for all subprocesses (but not for threads)
    >>> import os
    >>> from signal import SIGUSR1
    >>> init_stack_dumper()
    >>> # From your terminal send the SIGUSR1 signal
    >>> # kill -SIGUSR1 <pid>
    >>> os.kill(os.getpid(), SIGUSR1) # send the SIGUSR1 signal
    >>> print(open(DUMP_FILENAME % dict(ppid=os.getpid(), pid=os.getpid()), "r").read()) # doctest: +ELLIPSIS
    Current thread ... (most recent call first):...
    """
    # The instance registers its own handlers as a side effect, so the
    # caller does not need to keep a reference to it.
    StackDumper()
| 31.623377 | 110 | 0.643121 | import faulthandler
import atexit
import signal
import traceback
import os
import multiprocessing
# To test with doctest...
# (when run standalone, the relative package import is unavailable)
if not __package__:  # pragma: no cover
    LIB_PATH = "."
else:
    from .. import LIB_PATH

# Global configuration
# Dump file layout: <LIB_PATH>/dump/<main process id>/<process id>.dump
DUMP_DIRECTORY = LIB_PATH + "/dump/"
DUMP_CURRENT_PROCESS = DUMP_DIRECTORY + "%(ppid)s/"
DUMP_FILENAME = DUMP_CURRENT_PROCESS + "%(pid)s.dump"
def get_stack(): # pragma: no cover # nothing to test here
    """Return the formatted traceback of the currently handled exception.

    NOTE(review): despite the name, ``traceback.format_exc()`` formats the
    active *exception*, not the call stack; outside an ``except`` block it
    returns the string "NoneType: None" plus a newline. If a true stack
    trace is intended, ``traceback.format_stack()`` would be needed —
    confirm intended usage with callers.
    """
    return traceback.format_exc()
class StackDumper:
    """
    Dumps the stack of the current process and all of its threads when it receives a SIGUSR1 signal.
    The stack will go in dump/<main process id>/<real process id>.dump file.
    You do not need to create this object yourself, instead, use `init_stack_dumper()`
    """
    def __init__(self):
        """Open the per-process dump file and register the SIGUSR1 handler."""
        # makedirs(exist_ok=True) replaces the previous bare ``except: pass``,
        # which also swallowed unrelated failures (e.g. permission errors)
        # that would only resurface later at open().
        os.makedirs(DUMP_DIRECTORY, exist_ok=True)
        # Group dump files under the main process id; children use getppid().
        ppid = os.getpid() if multiprocessing.current_process().name == "MainProcess" else os.getppid()
        dump_dir = DUMP_CURRENT_PROCESS % {"ppid": ppid}
        os.makedirs(dump_dir, exist_ok=True)
        self.fname = DUMP_FILENAME % {"ppid": ppid, "pid": os.getpid()}
        self.f = open(self.fname, "w")
        self.fname = self.f.name # The actual filename, can change when we patch open for testing
        faulthandler.register(signal.SIGUSR1, self.f, chain=False)
        atexit.register(self.clean)
    def clean(self): # pragma: no cover # not detected since it's called at exit
        """Unregister the handler, close the file, and remove it if empty."""
        faulthandler.unregister(signal.SIGUSR1)
        if not self.f.closed:
            self.f.close()
        if os.stat(self.fname).st_size == 0:
            os.unlink(self.fname)
def init_stack_dumper():
    """
    Initialize the stack dumper for current process.
    This method should be called for all subprocesses (but not for threads)
    >>> import os
    >>> from signal import SIGUSR1
    >>> init_stack_dumper()
    >>> # From your terminal send the SIGUSR1 signal
    >>> # kill -SIGUSR1 <pid>
    >>> os.kill(os.getpid(), SIGUSR1) # send the SIGUSR1 signal
    >>> print(open(DUMP_FILENAME % dict(ppid=os.getpid(), pid=os.getpid()), "r").read()) # doctest: +ELLIPSIS
    Current thread ... (most recent call first):...
    """
    # The instance registers its own handlers as a side effect, so the
    # caller does not need to keep a reference to it.
    StackDumper()
| 960 | 0 | 53 |
4c9227643a202094fb84db7b27faaa285c006aab | 4,370 | py | Python | IcebergCalculations.py | helenamorgan/WhiteStarLine | fea59659de10d1d192beb1543b82de43098b2c7d | [
"MIT"
] | null | null | null | IcebergCalculations.py | helenamorgan/WhiteStarLine | fea59659de10d1d192beb1543b82de43098b2c7d | [
"MIT"
] | null | null | null | IcebergCalculations.py | helenamorgan/WhiteStarLine | fea59659de10d1d192beb1543b82de43098b2c7d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri May 7 11:32:58 2021
@author: helena
"""
class IcebergCalc():
    """
    Calculations on a scanned iceberg: ice detection, area, volume and mass.

    NOTE: results are shared between methods through module-level globals
    (``total_volume``, ``total_mass``, ``within_range``, ``outside_range``),
    so ``find_mass`` must be called before ``total_volume``, ``total_mass``
    or ``determine_drag``.
    """
    def __init__(self, lidardata, radardata, seaice_m):
        """
        Store the lidar and radar data on the instance.

        Parameters
        ----------
        lidardata : list of lists of numbers
            Lidar height readings (cm) for the scanned area.
        radardata : list of numbers
            Radar return values; values of at least 100 indicate ice.
        seaice_m : list
            Kept for interface compatibility; the attribute is always
            re-initialised to a fresh empty list.
        """
        self.lidardata = lidardata
        self.radardata = radardata
        self.seaice_m = []
    def find_ice(self, radardata, seaice_m):
        """
        Append every radar value of at least 100 (ice) to ``seaice_m`` and
        print the detected values.

        Bug fix: the original compared the loop *index* to 100
        (``for i in range(len(radardata)): if i >= 100``) instead of the
        radar value, contradicting its own documentation ("Ice has radar
        data values of over 100").

        Parameters
        ----------
        radardata : list of numbers
            Radar return values for this section of sea.
        seaice_m : list
            Output list; detected ice values are appended in input order.
        """
        for value in radardata:
            if value >= 100.0:
                seaice_m.append(value)
        print("Areas of ice are " + str(seaice_m))
    def find_mass(self, lidardata):
        """
        Compute the area, volume and mass of the iceberg from lidar data.

        Lidar heights are in cm; cells of at least 100 cm count toward the
        iceberg. Heights are converted to metres (0.1 factor per 10 cm),
        multiplied by the ice density (917 kg/m^3), and scaled by 10
        because only ~10% of the iceberg sits above the water line.
        Results are printed and stored in module-level globals.

        Parameters
        ----------
        lidardata : list of lists of numbers
            Lidar height readings (cm).

        Returns
        -------
        float
            The total mass of the iceberg.
        """
        global total_volume
        global total_mass
        total_pixels = 0
        total_volume = 0
        for row in lidardata:
            for i in row:
                if i >= 100.0:
                    total_pixels += 1 # Area of the iceberg
                    total_volume += i*0.1 # Convert cm to m as 0.1 = 10cm
        # Density of ice = 917 kg/m^3
        iceberg_mass = total_volume * 917
        # Only ~10% of the iceberg is above the water line.
        total_mass = iceberg_mass * 10
        print("The area of iceberg is" + " " + str(total_pixels) + " m^2")
        print("The volume of iceberg is" + " " + str(total_volume) + " m^3")
        print("The mass of the iceberg is" + " " + str(iceberg_mass) + " kg/m^3")
        print("The total mass of the iceberg is" + " " + str(total_mass) + " kg/m^3")
        return total_mass
    def total_volume(self):
        """
        Return the iceberg's total volume (set by ``find_mass``) as an int.

        Raises NameError if called before ``find_mass``.
        """
        return int(total_volume)
    def total_mass(self):
        """
        Return the iceberg's total mass (set by ``find_mass``) as an int.

        Raises NameError if called before ``find_mass``.
        """
        return int(total_mass)
    def determine_drag(self):
        """
        Return whether the company tug boat can tow the iceberg.

        Icebergs heavier than 36 million kg cannot be towed. Requires
        ``find_mass`` to have been called first.
        """
        global within_range
        global outside_range
        within_range = "Can tow iceberg"
        outside_range = "Cannot tow iceberg"
        if total_mass > 36000000:
            return outside_range
        return within_range
| 30.774648 | 189 | 0.559497 | # -*- coding: utf-8 -*-
"""
Created on Fri May 7 11:32:58 2021
@author: helena
"""
class IcebergCalc():
"""
Class declaration for the IcebergCalc class.
Provides methods for calculating an iceberg's area, volume and mass.
"""
    def __init__(self, lidardata, radardata, seaice_m):
        """
        Constructor: store the lidar and radar data on the instance.

        Parameters
        ----------
        lidardata : list of lists of numbers
            Lidar height readings (cm) for the scanned area.
        radardata : list of numbers
            Radar return values for the scanned area.
        seaice_m : list
            NOTE(review): this argument is ignored — the attribute is
            always re-initialised to a fresh empty list below.

        Returns
        -------
        None.
        """
        self.lidardata = lidardata
        self.radardata = radardata
        # A new list is created instead of storing the passed-in seaice_m.
        self.seaice_m = []
def find_ice(self, radardata, seaice_m):
"""
This function finds if there is ice within this section of the sea.
Ice has radar data values of over 100, so this function finds those values over 100 and prints these values.
Parameters
----------
radardata : list of integers
List of the radar data
seaice_m : list
Empty list for the ice variables to be appended. The default is None.
Returns
-------
None.
"""
for i in range(len(radardata)):
if i >= 100.0:
seaice_m.append(i)
print("Areas of ice are " + str(seaice_m))
#print(seaice_m)
def find_mass(self, lidardata):
"""
This function calculates the area, volume and mass of the ice.
The lidar data is in cm.
The area of the icberg is calculated then multiplied by the by the height to calculate the volume.
The volume is multiplied by the density of ice to calculate the mass of the iceberg.
The density of ice is a set value.
The total volume of the mass of the iceberg is calculated by multiplying by 10 (as only 10% of the iceberg is currently above the water line) to calculate 100% of the iceberg mass.
These variables are printed.
Parameters
----------
lidardata : list of integers
List of the lidar data
Returns
-------
Integer.
The total mass of the iceberg.
"""
global total_volume
global total_mass
total_pixels = 0
total_volume = 0
for row in lidardata:
for i in row:
if i >= 100.0:
total_pixels += 1 # Area of the iceberg
total_volume += i*0.1 # Convert cm to m as 0.1 = 10cm
# Density of ice = 917km/m^3
iceberg_mass = total_volume * 917
total_mass = iceberg_mass * 10
print("The area of iceberg is" + " " + str(total_pixels) + " m^2")
print("The volume of iceberg is" + " " + str(total_volume) + " m^3")
print("The mass of the iceberg is" + " " + str(iceberg_mass) + " kg/m^3")
print("The total mass of the iceberg is" + " " + str(total_mass) + " kg/m^3")
return(total_mass)
def total_volume(self):
"""
This function defines the variable total volume of the iceberg.
Returns
-------
Integer
The total volume of the iceberg.
"""
return int(total_volume)
def total_mass(self):
"""
This function defines the variable total mass of the iceberg.
Returns
-------
Integer
The total mass of the iceberg.
"""
return int(total_mass)
def determine_drag(self):
"""
This function determines whether the iceberg is within size range to be towed by the company tug boat.
If the iceberg is larger than 36 million kg in mass, it cannot be towed by the tug boat.
Returns
-------
String .
If the iceberg is within size range to be towed by the company.
"""
global within_range
global outside_range
within_range = "Can tow iceberg"
outside_range = "Cannot tow iceberg"
if total_mass > 36000000:
return(outside_range)
return(within_range)
| 0 | 0 | 0 |
7eb8517b615d71c48f5f7c34f233991231b9648f | 6,502 | py | Python | keras_mobile/blocks/conv.py | i404788/keras-mobile | db48cb9a3b63cfda5d8edd2749c3eb17661ef8d4 | [
"MIT"
] | 1 | 2019-09-30T10:37:22.000Z | 2019-09-30T10:37:22.000Z | keras_mobile/blocks/conv.py | i404788/keras-mobile | db48cb9a3b63cfda5d8edd2749c3eb17661ef8d4 | [
"MIT"
] | null | null | null | keras_mobile/blocks/conv.py | i404788/keras-mobile | db48cb9a3b63cfda5d8edd2749c3eb17661ef8d4 | [
"MIT"
] | null | null | null | from keras.layers import Conv2D, BatchNormalization, Input, DepthwiseConv2D, Lambda, Concatenate
from keras.layers import GlobalAveragePooling2D, Reshape, ReLU, Add, Dropout
import keras.backend as K
from ..functions.mutations import channel_split, channel_shuffle
# Emulate class behaviour for parameterization
def SeperableConvBlock(output_filters=None, ReLU_Max=None, strides=(1, 1)):
    r"""
    x->DWConv(3x3)->BN->ReLU(max)->Conv2D(1x1)->BN
    ```
    output_filters: int, size of last axis output; None keeps the input width
    ReLU_Max: float, max value as output of a ReLU in this block, if < 0, it will be Linear (no ReLU)
    strides: int/tuple-int, same as in keras.layers.Conv2D
    ```
    From MnasNet https://arxiv.org/pdf/1807.11626.pdf
    Also used in MobileConvBlock as subblock
    """
    # Restored implementation: this copy returned an undefined name `stub`,
    # so calling the factory raised NameError.
    def stub(x):
        x = DepthwiseConv2D((3, 3), strides=strides, padding='same')(x)
        x = BatchNormalization()(x)
        x = ReLU(max_value=ReLU_Max)(x)
        if output_filters is None:
            # K.int_shape yields the static Python int Conv2D needs for its
            # filter count (K.shape would return a symbolic tensor).
            x = Conv2D(K.int_shape(x)[-1], (1, 1), padding='same')(x)
        else:
            x = Conv2D(output_filters, (1, 1), padding='same')(x)
        x = BatchNormalization()(x)
        return x
    return stub
def MobileConvBlock(output_filters, latent_filters, ReLU_Max=None, skipFunction=None, strides=(1, 1)):
    r"""
    ```
     /-------------------------------------------\
    x->Conv(1x1,lat)->ReLU->{SeperableConvBlock}-=?>skip
    ```
    ```
    output_filters: int, size of last axis output
    latent_filters: int, size of filters at first Conv 1x1 (see MnasNet)
     - *_filters is generally 'k * shape[-1]' as expansion factor
    ReLU_Max: float, max value as output of a ReLU in this block
    skipFunction: def, a function combining 2 equi-shaped tensors (e.g. keras.layers.add);
                  if None, no skip connection is added
    strides: int/tuple-int, same as in keras.layers.Conv2D
    ```
    MobileNetV2 stride blocks:
    ```
    Strides=1: ReLU_Max=6, skipFunction=keras.layers.add
    Strides=2: ReLU_Max=6, strides=(2,2)
    ```
    MBConv6 from MnasNet:
    ```
    latent_filters=6*output_filters, skipFunction=keras.layers.add
    ```
    From MobileNetV2 https://arxiv.org/pdf/1801.04381.pdf (When RELU6)
    From MnasNet https://arxiv.org/pdf/1807.11626.pdf (When RELU)
    """
    # Restored implementation: this copy returned an undefined name `stub`.
    def stub(x):
        y = Conv2D(latent_filters, (1, 1), padding='same')(x)
        y = ReLU(max_value=ReLU_Max)(y)
        y = SeperableConvBlock(output_filters=output_filters,
                               ReLU_Max=ReLU_Max, strides=strides)(y)
        if skipFunction is not None:
            # Equality (not identity) test: warn only for real strides > 1,
            # where the skip tensors can no longer match shapes.
            if strides != (1, 1):
                print("Strides can't be used with attention")
            x = skipFunction([x, y])
            return x
        else:
            return y
    return stub
def ShuffleBasic(out_channels, bottleneck_factor):
    r"""
    ```
     /->Conv(1x1,BN,RelU)->{SeperableConvBlock}-\
    x->ChSplit------------------------------------Concat(axis=-1)->ChShuffle
    ```
    NOTE(review): both parameters are currently unused -- the bottleneck width
    is taken from the split input's own channel count; confirm intent.
    """
    # Restored implementation: this copy returned an undefined name `stub`.
    def stub(x):
        # Split channels in half: c_hat bypasses the block, c is transformed.
        c_hat, c = channel_split(x)
        x = c
        bottleneck_channels = K.int_shape(x)[-1]
        x = Conv2D(bottleneck_channels, (1, 1), padding='same')(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = SeperableConvBlock(bottleneck_channels)(x)
        x = ReLU()(x)
        # Re-join with the untouched half and shuffle so information mixes
        # across the split on the next block.
        x = Concatenate(axis=-1)([x, c_hat])
        x = Lambda(channel_shuffle)(x)
        return x
    return stub
def ShuffleStride(out_channels, bottleneck_factor, strides=(2, 2)):
    r"""
    ```
     /-Conv(1x1,bn,relu)->{SeperableConvBlock}->ReLU-\
    x-{SeperableConvBlock}->ReLU---------------->Concat(axis=-1)->ChShuffle
    ```
    out_channels * bottleneck_factor is the width of each branch; the
    concatenated output therefore has twice that width.
    """
    # Restored implementation: this copy returned an undefined name `stub`.
    def stub(x):
        bottleneck_channels = int(out_channels * bottleneck_factor)
        # Main branch: 1x1 projection, then strided separable conv.
        y = Conv2D(bottleneck_channels, kernel_size=(1, 1), padding='same')(x)
        y = ReLU()(y)
        y = SeperableConvBlock(bottleneck_channels,
                               ReLU_Max=None, strides=strides)(y)
        y = ReLU()(y)
        # Shortcut branch: strided separable conv straight from the input.
        z = SeperableConvBlock(bottleneck_channels,
                               ReLU_Max=None, strides=strides)(x)
        z = ReLU()(z)
        ret = Concatenate(axis=-1)([y, z])
        ret = Lambda(channel_shuffle)(ret)
        return ret
    return stub
def ResnetBlock():
    r"""
    ```
     /->BN->ReLU->Conv(k=3)->BN->ReLU->Conv(k=1)-\
    x--------------------------------------------->Add
    ```
    Pre-activation residual block; output width equals the input width.
    """
    # Restored implementation: this copy returned an undefined name `stub`.
    def stub(x):
        dim = K.int_shape(x)[-1]
        y = BatchNormalization()(x)
        y = ReLU()(y)
        y = Conv2D(dim, 3, padding='same')(y)
        y = BatchNormalization()(y)
        y = ReLU()(y)
        y = Conv2D(dim, 1, padding='same')(y)
        # Identity shortcut.
        return Add()([x, y])
    return stub
def ApesBlock(k, r):
    r"""
    ```
     /->BN->ReLU->Conv(k=1)->BN->ReLU->Conv(k=k)->BN-\
    x->BN---------------------------------------------->Add->ReLU->Dropout(r)
    ```
    k: int, kernel size of the second convolution
    r: float, dropout rate applied to the block output
    """
    # Restored implementation: this copy returned an undefined name `stub`.
    def stub(x):
        # Channel count is preserved: both convs output M filters.
        M = K.int_shape(x)[-1]
        y = BatchNormalization()(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Conv2D(M, (1, 1), padding='same')(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Conv2D(M, (k, k), padding='same')(x)
        x = BatchNormalization()(x)
        # Residual join with the (separately batch-normalised) input.
        x = Add()([x, y])
        x = ReLU()(x)
        return Dropout(r)(x)
    return stub
| 32.348259 | 106 | 0.563211 | from keras.layers import Conv2D, BatchNormalization, Input, DepthwiseConv2D, Lambda, Concatenate
from keras.layers import GlobalAveragePooling2D, Reshape, ReLU, Add, Dropout
import keras.backend as K
from ..functions.mutations import channel_split, channel_shuffle
# Emulate class behaviour for parameterization
def SeperableConvBlock(output_filters=None, ReLU_Max=None, strides=(1, 1)):
    r"""
    x->DWConv(3x3)->BN->ReLU(max)->Conv2D(1x1)->BN
    ```
    output_filters: int, size of last axis output; None keeps the input width
    ReLU_Max: float, max value as output of a ReLU in this block, if < 0, it will be Linear (no ReLU)
    strides: int/tuple-int, same as in keras.layers.Conv2D
    ```
    From MnasNet https://arxiv.org/pdf/1807.11626.pdf
    Also used in MobileConvBlock as subblock
    """
    def stub(x):
        x = DepthwiseConv2D((3, 3), strides=strides, padding='same')(x)
        x = BatchNormalization()(x)
        x = ReLU(max_value=ReLU_Max)(x)
        if output_filters is None:
            # Bug fix: K.shape(x) returns a symbolic tensor, which Conv2D
            # rejects as a filter count.  K.int_shape yields the static int,
            # matching the rest of this file (GroupConv, ShuffleBasic, ...).
            x = Conv2D(K.int_shape(x)[-1], (1, 1), padding='same')(x)
        else:
            x = Conv2D(output_filters, (1, 1), padding='same')(x)
        x = BatchNormalization()(x)
        return x
    return stub
def MobileConvBlock(output_filters, latent_filters, ReLU_Max=None, skipFunction=None, strides=(1, 1)):
    r"""
    ```
     /-------------------------------------------\
    x->Conv(1x1,lat)->ReLU->{SeperableConvBlock}-=?>skip
    ```
    ```
    output_filters: int, size of last axis output
    latent_filters: int, size of filters at first Conv 1x1 (see MnasNet)
     - *_filters is generally 'k * shape[-1]' as expansion factor
    ReLU_Max: float, max value as output of a ReLU in this block
    skipFunction: def, a function combining 2 equi-shaped tensors (e.g. keras.layers.add);
                  if None there will be no skip connection added
    strides: int/tuple-int, same as in keras.layers.Conv2D
    ```
    MobileNetV2 stride blocks:
    ```
    Strides=1: ReLU_Max=6, skipFunction=keras.layers.add
    Strides=2: ReLU_Max=6, strides=(2,2)
    ```
    MBConv6 from MnasNet:
    ```
    latent_filters=6*output_filters, skipFunction=keras.layers.add
    ```
    From MobileNetV2 https://arxiv.org/pdf/1801.04381.pdf (When RELU6)
    From MnasNet https://arxiv.org/pdf/1807.11626.pdf (When RELU)
    """
    def stub(x):
        y = Conv2D(latent_filters, (1, 1), padding='same')(x)
        y = ReLU(max_value=ReLU_Max)(y)
        y = SeperableConvBlock(output_filters=output_filters,
                               ReLU_Max=ReLU_Max, strides=strides)(y)
        if skipFunction is not None:
            # Bug fix: the original used `strides is not (1, 1)`, an identity
            # comparison against a fresh tuple literal that is effectively
            # always True, so the warning fired even for stride-1 blocks.
            # Compare by value instead.
            if strides != (1, 1):
                print("Strides can't be used with attention")
            x = skipFunction([x, y])
            return x
        else:
            return y
    return stub
def GroupConv(in_channels, out_channels, groups, kernel=1, stride=1, name=''):
    """
    Grouped convolution: slices the input channels into `groups` groups,
    applies an independent Conv2D to each slice, and concatenates the results.
    ```
    in_channels: int, static channel count of the incoming tensor
    out_channels: int, total output channels (must be divisible by groups)
    groups: int, number of channel groups; 1 degenerates to a plain Conv2D
    kernel, stride: forwarded to keras.layers.Conv2D
    name: prefix used for the generated layer names
    ```
    """
    def stub(x):
        if groups == 1:
            return Conv2D(filters=out_channels, kernel_size=kernel, padding='same',
                          use_bias=False, strides=stride, name=name)(x)
        # number of intput channels per group
        ig = in_channels // groups
        group_list = []
        assert out_channels % groups == 0
        for i in range(groups):
            offset = i * ig
            # Slice this group's channels out of the (channels-last) input.
            group = Lambda(
                lambda z: z[:, :, :, offset:offset + ig], name='%s/g%d_slice' % (name, i))(x)
            # Convolve the slice; int(0.5 + ...) rounds out_channels/groups.
            group_list.append(Conv2D(int(0.5 + out_channels / groups), kernel_size=kernel, strides=stride,
                              use_bias=False, padding='same', name='%s_/g%d' % (name, i))(group))
        return Concatenate(name='%s/concat' % name)(group_list)
    return stub
def ShuffleBasic(out_channels, bottleneck_factor):
    r"""
    ```
     /->Conv(1x1,BN,RelU)->{SeperableConvBlock}-\
    x->ChSplit------------------------------------Concat(axis=-1)->ChShuffle
    ```
    NOTE(review): both parameters are currently unused -- the bottleneck width
    is taken from the split input's own channel count; confirm intent.
    """
    def stub(x):
        # Split channels in half: c_hat bypasses the block, c is transformed.
        c_hat, c = channel_split(x)
        x = c
        bottleneck_channels = K.int_shape(x)[-1]
        x = Conv2D(bottleneck_channels, (1, 1), padding='same')(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = SeperableConvBlock(bottleneck_channels)(x)
        x = ReLU()(x)
        # Re-join with the untouched half and shuffle so information mixes
        # across the split on the next block.
        x = Concatenate(axis=-1)([x, c_hat])
        x = Lambda(channel_shuffle)(x)
        return x
    return stub
def ShuffleStride(out_channels, bottleneck_factor, strides=(2, 2)):
    r"""
    ```
     /-Conv(1x1,bn,relu)->{SeperableConvBlock}->ReLU-\
    x-{SeperableConvBlock}->ReLU---------------->Concat(axis=-1)->ChShuffle
    ```
    out_channels * bottleneck_factor is the width of each branch; the
    concatenated output therefore has twice that width.
    NOTE(review): despite the diagram, no BatchNormalization is applied to the
    1x1 convolution here -- confirm whether that is intentional.
    """
    def stub(x):
        bottleneck_channels = int(out_channels * bottleneck_factor)
        # Main branch: 1x1 projection, then strided separable conv.
        y = Conv2D(bottleneck_channels, kernel_size=(1, 1), padding='same')(x)
        y = ReLU()(y)
        y = SeperableConvBlock(bottleneck_channels,
                               ReLU_Max=None, strides=strides)(y)
        y = ReLU()(y)
        # Shortcut branch: strided separable conv straight from the input.
        z = SeperableConvBlock(bottleneck_channels,
                               ReLU_Max=None, strides=strides)(x)
        z = ReLU()(z)
        ret = Concatenate(axis=-1)([y, z])
        ret = Lambda(channel_shuffle)(ret)
        return ret
    return stub
def ResnetBlock():
    r"""
    ```
     /->BN->ReLU->Conv(k=3)->BN->ReLU->Conv(k=1)-\
    x--------------------------------------------->Add
    ```
    Pre-activation residual block; output width equals the input width.
    """
    def stub(x):
        # Keep the residual branch at the input's channel count so Add works.
        dim = K.int_shape(x)[-1]
        y = BatchNormalization()(x)
        y = ReLU()(y)
        y = Conv2D(dim, 3, padding='same')(y)
        y = BatchNormalization()(y)
        y = ReLU()(y)
        y = Conv2D(dim, 1, padding='same')(y)
        # Identity shortcut.
        return Add()([x, y])
    return stub
def ApesBlock(k, r):
    r"""
    ```
     /->BN->ReLU->Conv(k=1)->BN->ReLU->Conv(k=k)->BN-\
    x->BN---------------------------------------------->Add->ReLU->Dropout(r)
    ```
    k: int, kernel size of the second convolution
    r: float, dropout rate applied to the block output
    """
    def stub(x):
        # Channel count is preserved: both convs output M filters.
        M = K.int_shape(x)[-1]
        y = BatchNormalization()(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Conv2D(M, (1, 1), padding='same')(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Conv2D(M, (k, k), padding='same')(x)
        x = BatchNormalization()(x)
        # Residual join with the (separately batch-normalised) input.
        x = Add()([x, y])
        x = ReLU()(x)
        return Dropout(r)(x)
    return stub
| 3,385 | 0 | 179 |
7cf87342ec75cba52f90005f86e21e0e03f234af | 41 | py | Python | tests/_support/configs/python/invoke.py | daobook/invoke | 577faf1c016a69392583046613bfb42356855e8f | [
"BSD-2-Clause"
] | null | null | null | tests/_support/configs/python/invoke.py | daobook/invoke | 577faf1c016a69392583046613bfb42356855e8f | [
"BSD-2-Clause"
] | null | null | null | tests/_support/configs/python/invoke.py | daobook/invoke | 577faf1c016a69392583046613bfb42356855e8f | [
"BSD-2-Clause"
] | null | null | null | outer = {"inner": {"hooray": "python"}}
| 20.5 | 40 | 0.536585 | outer = {"inner": {"hooray": "python"}}
| 0 | 0 | 0 |
82cb4d5dd9061a4bb4f537e15e18859ca7f1c372 | 601 | py | Python | multi_user.py | rithikrice/Whatsapp_auto_2 | 3e00988b5d8725e6bb3b3039a7bb56308ccfe714 | [
"MIT"
] | 1 | 2019-08-04T20:12:00.000Z | 2019-08-04T20:12:00.000Z | multi_user.py | rithikrice/Whatsapp_auto_2 | 3e00988b5d8725e6bb3b3039a7bb56308ccfe714 | [
"MIT"
] | null | null | null | multi_user.py | rithikrice/Whatsapp_auto_2 | 3e00988b5d8725e6bb3b3039a7bb56308ccfe714 | [
"MIT"
] | 1 | 2020-03-02T22:24:18.000Z | 2020-03-02T22:24:18.000Z | from selenium import webdriver
# NOTE(review): this file's indentation was lost when it was flattened into a
# dataset row; the nesting below is reconstructed -- confirm against upstream.
driver = webdriver.Chrome()  # requires a matching chromedriver on PATH
driver.get('https://web.whatsapp.com/')
# Chat titles to message; each must exactly match the contact/group name.
all_names = ['Sachin Yadav']
msg = input('Enter the message')
count = int(input('no.of times to send message'))
# Pause so the user can scan the WhatsApp Web QR code before automation starts.
input('click any random key after qr scanning')
for name in all_names:
    user = driver.find_element_by_xpath("//span[@title = '{}']".format(name))
    user.click()
    # NOTE(review): '_3u328' / '_3M-N-' are obfuscated WhatsApp CSS class
    # names and break whenever WhatsApp ships a new build.
    msgbox = driver.find_element_by_class_name('_3u328')
    for i in range (count):
        msgbox.send_keys(msg)
        button = driver.find_element_by_class_name('_3M-N-')
button.click() | 28.619048 | 78 | 0.663894 | from selenium import webdriver
# NOTE(review): this file's indentation was lost when it was flattened into a
# dataset row; the nesting below is reconstructed -- confirm against upstream.
driver = webdriver.Chrome()  # requires a matching chromedriver on PATH
driver.get('https://web.whatsapp.com/')
# Chat titles to message; each must exactly match the contact/group name.
all_names = ['Sachin Yadav']
msg = input('Enter the message')
count = int(input('no.of times to send message'))
# Pause so the user can scan the WhatsApp Web QR code before automation starts.
input('click any random key after qr scanning')
for name in all_names:
    user = driver.find_element_by_xpath("//span[@title = '{}']".format(name))
    user.click()
    # NOTE(review): '_3u328' / '_3M-N-' are obfuscated WhatsApp CSS class
    # names and break whenever WhatsApp ships a new build.
    msgbox = driver.find_element_by_class_name('_3u328')
    for i in range (count):
        msgbox.send_keys(msg)
        button = driver.find_element_by_class_name('_3M-N-')
button.click() | 0 | 0 | 0 |
7e6cbc5d15b6db6d05cc1a9bb65b6ab33e786060 | 1,626 | py | Python | iogt_users/wagtail_hooks.py | saqlainrasheed/iogt | 4bebfe4cd2f1d4ff9cab50d1126756c162a30b89 | [
"BSD-2-Clause"
] | 1 | 2021-11-18T05:10:14.000Z | 2021-11-18T05:10:14.000Z | iogt_users/wagtail_hooks.py | mbilalhameed/iogt | 9c12f0be29f1eea0097ec43f692c77ba65673610 | [
"BSD-2-Clause"
] | null | null | null | iogt_users/wagtail_hooks.py | mbilalhameed/iogt | 9c12f0be29f1eea0097ec43f692c77ba65673610 | [
"BSD-2-Clause"
] | null | null | null | from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils import timezone
from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register
from home.models import SiteSettings
from iogt_users.filters import GroupsFilter
# Restored: UsersExportAdmin was referenced below but its definition was
# missing from this copy of the file, making the register call a NameError.
class UsersExportAdmin(ModelAdmin):
    """Wagtail ModelAdmin that lists users and exports them with survey data."""
    model = get_user_model()
    menu_label = 'Export/Import Users'
    menu_icon = 'user'
    list_display = ('username', 'date_joined', 'is_staff', 'is_active')
    list_filter = (GroupsFilter, 'date_joined', 'is_staff', 'is_active')
    # Never expose credential / permission fields in the edit form.
    form_fields_exclude = ('password', 'last_login', 'is_superuser', 'groups', 'user_permissions')
    search_fields = ('username',)
    list_export = ('username', 'first_name', 'last_name', 'email', 'is_staff', 'is_active', 'date_joined',
                   'terms_accepted', 'has_filled_registration_survey', 'registration_survey_response',)
    add_to_settings_menu = True
    list_per_page = 20
    menu_order = 601
    def registration_survey_response(self, obj):
        # form_data of the user's earliest submission to any translation of
        # the configured registration survey, or '' if there is none.
        registration_survey = SiteSettings.get_for_default_site().registration_survey
        user_submission = None
        if registration_survey:
            ids = registration_survey.get_translations(inclusive=True).values_list('id', flat=True)
            user_submission = obj.usersubmission_set.filter(page__pk__in=ids).order_by('submit_time').first()
        return user_submission.form_data if user_submission else ''
    @property
    def export_filename(self):
        # Timestamped export file name, e.g. users_<timestamp>.
        return f'users_{timezone.now().strftime(settings.EXPORT_FILENAME_TIMESTAMP_FORMAT)}'
modeladmin_register(UsersExportAdmin)
| 41.692308 | 109 | 0.740467 | from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils import timezone
from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register
from home.models import SiteSettings
from iogt_users.filters import GroupsFilter
class UsersExportAdmin(ModelAdmin):
    """Wagtail ModelAdmin that lists users and exports them with survey data."""
    model = get_user_model()
    menu_label = 'Export/Import Users'
    menu_icon = 'user'
    list_display = ('username', 'date_joined', 'is_staff', 'is_active')
    list_filter = (GroupsFilter, 'date_joined', 'is_staff', 'is_active')
    # Never expose credential / permission fields in the edit form.
    form_fields_exclude = ('password', 'last_login', 'is_superuser', 'groups', 'user_permissions')
    search_fields = ('username',)
    list_export = ('username', 'first_name', 'last_name', 'email', 'is_staff', 'is_active', 'date_joined',
                   'terms_accepted', 'has_filled_registration_survey', 'registration_survey_response',)
    add_to_settings_menu = True
    list_per_page = 20
    menu_order = 601
    def registration_survey_response(self, obj):
        # form_data of the user's earliest submission to any translation of
        # the configured registration survey, or '' if there is none.
        registration_survey = SiteSettings.get_for_default_site().registration_survey
        user_submission = None
        if registration_survey:
            # Match submissions against every translation of the survey page.
            ids = registration_survey.get_translations(inclusive=True).values_list('id', flat=True)
            user_submission = obj.usersubmission_set.filter(page__pk__in=ids).order_by('submit_time').first()
        return user_submission.form_data if user_submission else ''
    @property
    def export_filename(self):
        # Timestamped export file name, e.g. users_<timestamp>.
        return f'users_{timezone.now().strftime(settings.EXPORT_FILENAME_TIMESTAMP_FORMAT)}'
modeladmin_register(UsersExportAdmin)
| 549 | 738 | 23 |
8222c146252aa8643f4d57a50a367425f145b298 | 985 | py | Python | aioethereum/utils.py | h8is2w8/aioethereum | eb23e28068c34cda28bbef45c3f288d16936d88e | [
"MIT"
] | 16 | 2017-10-04T17:44:51.000Z | 2021-03-07T12:55:04.000Z | aioethereum/utils.py | h8is2w8/aioethereum | eb23e28068c34cda28bbef45c3f288d16936d88e | [
"MIT"
] | 8 | 2017-10-04T22:53:08.000Z | 2021-01-15T18:04:41.000Z | aioethereum/utils.py | h8is2w8/aioethereum | eb23e28068c34cda28bbef45c3f288d16936d88e | [
"MIT"
] | 5 | 2018-02-22T15:56:34.000Z | 2021-01-03T21:25:22.000Z | from .constants import BLOCK_TAGS
def add_0x(string):
    """Return *string* with a '0x' prefix (bytes are decoded as UTF-8 first)."""
    text = string.decode('utf-8') if isinstance(string, bytes) else string
    return '0x' + str(text)
def hex_to_dec(x):
    """Convert hex to decimal
    """
    # int() with base 16 accepts both '0x'-prefixed and bare hex strings.
    return int(x, 16)
def wei_to_ether(wei):
    """Convert wei to ether
    """
    # 1 ether == 10**18 wei; the 1.0 factor forces float division
    # (presumably also for Python 2 compatibility -- confirm).
    return 1.0 * wei / 10**18
def gwei_to_ether(wei):
    """Convert gwei to ether
    """
    # 1 ether == 10**9 gwei.
    return 1.0 * wei / 10**9
def ether_to_wei(ether):
    """Convert ether to wei
    """
    # int() truncates toward zero, so sub-wei remainders are dropped.
    return int(ether * 10**18)
def ether_to_gwei(ether):
    """Convert ether to Gwei
    """
    # Truncates toward zero, same as ether_to_wei.
    return int(ether * 10**9)
def validate_block(block):
    """Validate a block identifier: pass known tags through, hex-encode ints."""
    if isinstance(block, int):
        return hex(block)
    if isinstance(block, str):
        if block not in BLOCK_TAGS:
            raise ValueError('Invalid block tag.')
        return block
    raise ValueError('Invalid block type.')
| 18.584906 | 50 | 0.592893 | from .constants import BLOCK_TAGS
def add_0x(string):
    """Return *string* with a '0x' prefix (bytes are decoded as UTF-8 first)."""
    text = string.decode('utf-8') if isinstance(string, bytes) else string
    return '0x' + str(text)
def hex_to_dec(x):
    """Convert hex to decimal
    """
    # int() with base 16 accepts both '0x'-prefixed and bare hex strings.
    return int(x, 16)
def wei_to_ether(wei):
    """Convert wei to ether
    """
    # 1 ether == 10**18 wei; the 1.0 factor forces float division
    # (presumably also for Python 2 compatibility -- confirm).
    return 1.0 * wei / 10**18
def gwei_to_ether(wei):
    """Convert gwei to ether
    """
    # 1 ether == 10**9 gwei.
    return 1.0 * wei / 10**9
def ether_to_wei(ether):
    """Convert ether to wei
    """
    # int() truncates toward zero, so sub-wei remainders are dropped.
    return int(ether * 10**18)
def ether_to_gwei(ether):
    """Convert ether to Gwei
    """
    # Truncates toward zero, same as ether_to_wei.
    return int(ether * 10**9)
def validate_block(block):
    """Validate a block identifier: pass known tags through, hex-encode ints."""
    if isinstance(block, int):
        return hex(block)
    if isinstance(block, str):
        if block not in BLOCK_TAGS:
            raise ValueError('Invalid block tag.')
        return block
    raise ValueError('Invalid block type.')
| 0 | 0 | 0 |
d93321b1077efe4ccc8a70aaaa794d1456e0dc6a | 192 | py | Python | fle_site/apps/redirects/context_processors.py | khangmach/fle-home | 42c376bc9aca5a440a09e78973ea1b48a0827e3c | [
"MIT"
] | null | null | null | fle_site/apps/redirects/context_processors.py | khangmach/fle-home | 42c376bc9aca5a440a09e78973ea1b48a0827e3c | [
"MIT"
] | null | null | null | fle_site/apps/redirects/context_processors.py | khangmach/fle-home | 42c376bc9aca5a440a09e78973ea1b48a0827e3c | [
"MIT"
] | null | null | null | from .models import RedirectVariable
| 27.428571 | 106 | 0.666667 | from .models import RedirectVariable
def redirect_vars(request):
    """Context processor exposing every RedirectVariable as a name->value dict."""
    rows = RedirectVariable.objects.all().values("name", "value")
    # values() dicts yield (name, value) pairs in the order the fields were listed.
    return {'vars': dict(tuple(row.values()) for row in rows)}
| 132 | 0 | 23 |
c92a4fae037b23367735659e7e8606b589d6c871 | 1,911 | py | Python | pybw/tokens.py | louisdevie/birdway | fc71ab4deaa154033cd3228746919e6e82c4723f | [
"CC0-1.0"
] | null | null | null | pybw/tokens.py | louisdevie/birdway | fc71ab4deaa154033cd3228746919e6e82c4723f | [
"CC0-1.0"
] | null | null | null | pybw/tokens.py | louisdevie/birdway | fc71ab4deaa154033cd3228746919e6e82c4723f | [
"CC0-1.0"
] | null | null | null | from enum import Enum, auto
from autorepr import AutoRepr
from birdway import Unary, Type, Binary
# Restored: the Token base class was missing from this copy, so every
# generated class below (and the exec itself) failed with NameError.
class Token:
    """Base class for lexer tokens; equality compares public attributes only."""
    def __init__(self, line=None, **attributes):
        self._line = line
        for attr in dir(self):
            if not attr.startswith("_"):
                if attr in attributes:
                    setattr(self, attr, attributes[attr])
                else:
                    raise TypeError(f"no value specified for {attr}")
    def __eq__(self, other):
        if type(self) is type(other):
            for attr in dir(self):
                if not attr.startswith("_"):
                    if getattr(self, attr) != getattr(other, attr):
                        return False
            return True
        else:
            return False
# Generate one attribute-less token class per name; their meaning is carried
# entirely by their type, and AutoRepr supplies the __repr__.
for name in [
    "KeywordMeta",
    "KeywordArgs",
    "KeywordParam",
    "KeywordRun",
    "KeywordIf",
    "KeywordThen",
    "KeywordElse",
    "KeywordPrintln",
    "KeywordOption",
    "BlockBegin",
    "BlockEnd",
    "TableBegin",
    "TableEnd",
    "OpeningParens",
    "ClosingParens",
    "FormattedStringDelimiter",
    "StringDelimiter",
    "LineEnd",
    "Association",
    "Separator",
    "Assignment",
    "Return",
    "FormattingExpressionBegin",
    "KeywordStruct",
    "KeywordEnum",
    "KeywordFunc",
    "KeywordFor",
    "KeywordFrom",
    "KeywordTo",
    "KeywordDo",
    "KeywordTry",
    "KeywordOn",
    "Range",
    "KeywordUse",
    "KeywordIn",
]:
    exec(f"class {name} (Token, AutoRepr): pass")
# Drop the loop variable so it doesn't leak into the module namespace.
del name
| 20.329787 | 69 | 0.579278 | from enum import Enum, auto
from autorepr import AutoRepr
from birdway import Unary, Type, Binary
class Token:
    """Base class for lexer tokens.

    Subclasses declare their payload as public class attributes; the
    constructor then requires a keyword argument for every such attribute.
    Equality compares all public attributes (the source line is ignored).
    """
    def __init__(self, line=None, **attributes):
        self._line = line
        for field in dir(self):
            if field.startswith("_"):
                continue
            if field not in attributes:
                raise TypeError(f"no value specified for {field}")
            setattr(self, field, attributes[field])
    def __eq__(self, other):
        if type(self) is not type(other):
            return False
        return all(
            getattr(self, field) == getattr(other, field)
            for field in dir(self)
            if not field.startswith("_")
        )
# Generate one attribute-less token class per name below.  Each generated
# class is `class <Name>(Token, AutoRepr): pass`: its meaning is carried
# entirely by its type, and AutoRepr supplies the __repr__.
for name in [
    "KeywordMeta",
    "KeywordArgs",
    "KeywordParam",
    "KeywordRun",
    "KeywordIf",
    "KeywordThen",
    "KeywordElse",
    "KeywordPrintln",
    "KeywordOption",
    "BlockBegin",
    "BlockEnd",
    "TableBegin",
    "TableEnd",
    "OpeningParens",
    "ClosingParens",
    "FormattedStringDelimiter",
    "StringDelimiter",
    "LineEnd",
    "Association",
    "Separator",
    "Assignment",
    "Return",
    "FormattingExpressionBegin",
    "KeywordStruct",
    "KeywordEnum",
    "KeywordFunc",
    "KeywordFor",
    "KeywordFrom",
    "KeywordTo",
    "KeywordDo",
    "KeywordTry",
    "KeywordOn",
    "Range",
    "KeywordUse",
    "KeywordIn",
]:
    exec(f"class {name} (Token, AutoRepr): pass")
# Drop the loop variable so it doesn't leak into the module namespace.
del name
# Tokens that carry a payload declare it as a public class attribute; the
# placeholder value (str(), int(), ...) only documents the expected type --
# Token.__init__ requires the real value as a keyword argument of that name.
class StringContent(Token, AutoRepr):
    value = str()
class Identifier(Token, AutoRepr):
    name = str()
class Integer(Token, AutoRepr):
    value = int()
class UnaryOperator(Token, AutoRepr):
    operator = Unary(1)
class BinaryOperator(Token, AutoRepr):
    operator = Binary(1)
class Variable(Token, AutoRepr):
    name = str()
class TypeName(Token, AutoRepr):
    type = Type(1)
| 599 | 223 | 237 |
ca7b5cebd80d5aca1a160db747c842eb5e4be84f | 1,643 | py | Python | adventofcode/code_7.py | fearless-spider/python_playground | 5150b2de09736d68558f4c159e110a7ebbe29bfc | [
"BSD-3-Clause"
] | null | null | null | adventofcode/code_7.py | fearless-spider/python_playground | 5150b2de09736d68558f4c159e110a7ebbe29bfc | [
"BSD-3-Clause"
] | null | null | null | adventofcode/code_7.py | fearless-spider/python_playground | 5150b2de09736d68558f4c159e110a7ebbe29bfc | [
"BSD-3-Clause"
] | null | null | null | file1 = open('input_7.txt', 'r')
# NOTE(review): this file's indentation was lost when it was flattened into a
# dataset row; the nesting below is reconstructed -- confirm against upstream.
Lines = file1.readlines()
# Line 0 is the comma-separated draw order; boards start at line 2.
bingo_numbers = Lines[0].split(",")
bingo_tables = []
bingo_win = 0
bingo_row_number = 0
# Parse board rows (skipping blank separator lines); all boards are stored
# flat in bingo_tables, five consecutive rows per board.
for line in Lines[2:]:
    new_row = []
    if line == '\n':
        continue
    row = line.split(" ")
    for element in row:
        if element == '\n' or element == '':
            continue
        new_row.append(element.strip())
    bingo_tables.append(new_row)
# Per-row hit counters, indexed like bingo_tables.
bingo_row_result = [0] * len(bingo_tables)
bingo = False
for number in bingo_numbers:
    counter = 0
    for row in bingo_tables:
        try:
            index = row.index(number)
            bingo_row_result[counter] += 1
            # Zero out the drawn number so the final sum skips it.
            row[row.index(number)] = 0
            if bingo_row_result[counter] == 5:
                bingo = True
                bingo_row_number = counter
                bingo_win = int(number)
                print('Bingo %s' % number)
                break
        except:
            # Number not present in this row.
            pass
        # Column check: a column sums to 0 once all five entries were drawn
        # (r % 5 == 0 resets the accumulator at each board boundary).
        for c in range(0, 5):
            bingo_test = 0
            for r in range(0, len(bingo_tables)):
                if r % 5 == 0:
                    bingo_test = 0
                bingo_test += int(bingo_tables[r][c])
                if bingo_test == 0:
                    bingo = True
                    bingo_row_number = counter
                    bingo_win = int(number)
                    print('Bingo %s' % number)
                    break
        counter += 1
    if bingo:
        break
# Snap to the first row of the winning board (boards are 5 rows tall).
bingo_row_number = bingo_row_number - (bingo_row_number % 5)
total = 0
for row in bingo_tables[bingo_row_number:bingo_row_number+5]:
    for x in row:
        total += int(x)
print(bingo_tables)
print(total*bingo_win)
| 27.383333 | 61 | 0.529519 | file1 = open('input_7.txt', 'r')
# NOTE(review): this file's indentation was lost when it was flattened into a
# dataset row; the nesting below is reconstructed -- confirm against upstream.
Lines = file1.readlines()
# Line 0 is the comma-separated draw order; boards start at line 2.
bingo_numbers = Lines[0].split(",")
bingo_tables = []
bingo_win = 0
bingo_row_number = 0
# Parse board rows (skipping blank separator lines); all boards are stored
# flat in bingo_tables, five consecutive rows per board.
for line in Lines[2:]:
    new_row = []
    if line == '\n':
        continue
    row = line.split(" ")
    for element in row:
        if element == '\n' or element == '':
            continue
        new_row.append(element.strip())
    bingo_tables.append(new_row)
# Per-row hit counters, indexed like bingo_tables.
bingo_row_result = [0] * len(bingo_tables)
bingo = False
for number in bingo_numbers:
    counter = 0
    for row in bingo_tables:
        try:
            index = row.index(number)
            bingo_row_result[counter] += 1
            # Zero out the drawn number so the final sum skips it.
            row[row.index(number)] = 0
            if bingo_row_result[counter] == 5:
                bingo = True
                bingo_row_number = counter
                bingo_win = int(number)
                print('Bingo %s' % number)
                break
        except:
            # Number not present in this row.
            pass
        # Column check: a column sums to 0 once all five entries were drawn
        # (r % 5 == 0 resets the accumulator at each board boundary).
        for c in range(0, 5):
            bingo_test = 0
            for r in range(0, len(bingo_tables)):
                if r % 5 == 0:
                    bingo_test = 0
                bingo_test += int(bingo_tables[r][c])
                if bingo_test == 0:
                    bingo = True
                    bingo_row_number = counter
                    bingo_win = int(number)
                    print('Bingo %s' % number)
                    break
        counter += 1
    if bingo:
        break
# Snap to the first row of the winning board (boards are 5 rows tall).
bingo_row_number = bingo_row_number - (bingo_row_number % 5)
total = 0
for row in bingo_tables[bingo_row_number:bingo_row_number+5]:
    for x in row:
        total += int(x)
print(bingo_tables)
print(total*bingo_win)
| 0 | 0 | 0 |
57baad10af5b26c0c3e29aeccdf74f93c7ca4346 | 103 | py | Python | user_input.py | NathanKr/python-playground | 03ea7f6489ab4db84c8180332a2ebf07caa9136e | [
"MIT"
] | null | null | null | user_input.py | NathanKr/python-playground | 03ea7f6489ab4db84c8180332a2ebf07caa9136e | [
"MIT"
] | null | null | null | user_input.py | NathanKr/python-playground | 03ea7f6489ab4db84c8180332a2ebf07caa9136e | [
"MIT"
] | null | null | null | username = input("Please enter your name : ")
print(type(username))
print(f'Your name is : {username}') | 34.333333 | 45 | 0.708738 | username = input("Please enter your name : ")
print(type(username))
print(f'Your name is : {username}') | 0 | 0 | 0 |
65abe0016207dd33ebabe1cf568609bc06ca94ca | 9,175 | py | Python | FileCrypt/__init__.py | TA40/FileCryptcc | f6f7144a0204c5141fa7f12f91b112d6db8f2b96 | [
"MIT"
] | 4 | 2019-07-03T00:32:57.000Z | 2021-03-08T17:14:19.000Z | FileCrypt/__init__.py | TA40/FileCryptcc | f6f7144a0204c5141fa7f12f91b112d6db8f2b96 | [
"MIT"
] | null | null | null | FileCrypt/__init__.py | TA40/FileCryptcc | f6f7144a0204c5141fa7f12f91b112d6db8f2b96 | [
"MIT"
] | null | null | null | import requests, json
def userApiKey(apikey):
    """Look up information about an API key.

    apikey: Your ApiKey from FileCrypt
    Note: this endpoint is rate-limited to 10 requests per hour.
    """
    payload = {"api_key": apikey, "sub": "apikey", "fn": "user"}
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
def userEarnings(apikey, year=None, month=None, day=None):
    """Fetch income statistics and the account's total available balance.

    apikey: Your ApiKey from FileCrypt
    year/month/day (optional): restrict the statistics to YYYY / MM / DD
    """
    payload = {"api_key": apikey, "sub": "earnings", "fn": "user"}
    for field, value in (("year", year), ("month", month), ("day", day)):
        if value is not None:
            payload[field] = str(value)
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
def containerCreate(apikey, name, mirrors, password=None, captcha=None, allow_cnl=None, allow_dlc=None, allow_links=None, groupid=None):
    """Create a filecrypt.cc protected folder.

    apikey: Your ApiKey from FileCrypt
    name: name of your folder
    mirrors: triple-nested list -- mirrors[m][g][l] is link l of link-group g
        of mirror m (per the API docs, g=0 are the links and g=1 the backup
        links).  Plain strings at the mirror or group level are skipped.
    password (optional): folder password
    captcha/allow_cnl/allow_dlc/allow_links (optional): folder toggles (0 or 1)
    groupid (optional): group ID of your target group
    """
    payload = {"api_key": apikey, "sub": "createV2", "fn": "containerV2", "name": name}
    # Flatten the nested mirror structure into mirror_<m+1>[g][l] form fields.
    for m, mirror in enumerate(mirrors):
        if isinstance(mirror, str):
            continue
        for g, links in enumerate(mirror):
            if isinstance(links, str):
                continue
            for l, link in enumerate(links):
                payload["mirror_%d[%d][%d]" % (m + 1, g, l)] = link
    if password is not None:
        payload["folderpass"] = password
    for field, value in (("captcha", captcha), ("allow_cnl", allow_cnl),
                         ("allow_dlc", allow_dlc), ("allow_links", allow_links),
                         ("group", groupid)):
        if value is not None:
            payload[field] = str(value)
    return json.loads(requests.post("https://filecrypt.cc/api.php", data=payload).text)
def containerEdit(apikey, mirrors, container_id, name=None, password=None, captcha=None, allow_cnl=None, allow_dlc=None, allow_links=None, groupid=None):
    """Edit an existing filecrypt.cc protected folder.

    Submitting mirror data permanently replaces ALL links in the folder; use
    containerInfo() first if you only want to swap a single mirror.

    apikey: Your ApiKey from FileCrypt
    mirrors: same triple-nested structure as containerCreate()
    container_id: the container_id as string
    name (optional): new folder name
    password (optional): folder password
    captcha/allow_cnl/allow_dlc/allow_links (optional): folder toggles (0 or 1)
    groupid (optional): group ID of your target group
    """
    # Bug fix: the original seeded the payload with "name": name even when
    # name was None (and then re-assigned it inside `if name != None`),
    # sending a spurious field for an optional parameter.  name is now added
    # only when the caller supplies one, like every other optional field.
    payload = {"api_key": apikey, "sub": "editV2", "fn": "containerV2",
               "container_id": container_id}
    # Flatten the nested mirror structure into mirror_<m+1>[g][l] form fields.
    for m, mirror in enumerate(mirrors):
        if isinstance(mirror, str):
            continue
        for g, links in enumerate(mirror):
            if isinstance(links, str):
                continue
            for l, link in enumerate(links):
                payload["mirror_%d[%d][%d]" % (m + 1, g, l)] = link
    if name is not None:
        payload["name"] = name
    if password is not None:
        payload["folderpass"] = password
    for field, value in (("captcha", captcha), ("allow_cnl", allow_cnl),
                         ("allow_dlc", allow_dlc), ("allow_links", allow_links),
                         ("group", groupid)):
        if value is not None:
            payload[field] = str(value)
    return json.loads(requests.post("https://filecrypt.cc/api.php", data=payload).text)
def containerInfo(apikey, container_id):
    """Fetch a sorted object describing every link inside a folder.

    apikey: Your ApiKey from FileCrypt
    container_id: the container_id as string
    """
    payload = {"api_key": apikey, "sub": "info", "fn": "containerV2",
               "container_id": container_id}
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
def containerList(apikey, state=None, fav=None):
    """List all containers of the account.

    apikey: Your ApiKey from FileCrypt
    state (optional): "unchecked", "ok", "uncheckable", "error", "offline" or "partial"
    fav (optional): 1 = favorites only, 0 = regular folders only
    """
    payload = {"api_key": apikey, "sub": "listV2", "fn": "containerV2"}
    if state is not None:
        payload["state"] = state
    if fav is not None:
        payload["fav"] = str(fav)
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
def containerMyFolder(apikey, state=None, groupid=None):
    """Return a short listing of your own folders.

    Per the API docs, every child of the container node is prefixed with _.

    apikey: Your ApiKey from FileCrypt
    state (optional): "unchecked", "ok", "uncheckable", "error", "offline" or "partial"
    groupid (optional): restrict the listing to one group
    """
    payload = {"api_key": apikey, "fn": "containerV2", "sub": "myfolder"}
    if state is not None:
        payload["state"] = state
    if groupid is not None:
        payload["group"] = str(groupid)
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
def containerRemove(apikey, container_id):
    """Move a folder to the trash bin so it is no longer publicly available.

    apikey: Your ApiKey from FileCrypt
    container_id: the container_id as string
    """
    payload = {"api_key": apikey, "fn": "containerV2", "sub": "remove",
               "container_id": container_id}
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
def containerStatus(apikey, container_id):
    """Get the status of a folder (see the API's status-images documentation).

    apikey: Your ApiKey from FileCrypt
    container_id: the container_id as string
    """
    payload = {"api_key": apikey, "fn": "containerV2", "sub": "statusV2",
               "container_id": container_id}
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
def groupAdd(apikey, name):
    """Create a new group with the chosen name (under the root group "0").

    apikey: your FileCrypt API key
    name: the name of the new group
    """
    payload = {
        "api_key": apikey,
        "fn": "group",
        "sub": "add",
        "name": name,
        "parent": "0",
    }
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
def groupList(apikey):
    """Return all groups existing in your FileCrypt account.

    apikey: your FileCrypt API key
    """
    payload = {"api_key": apikey, "fn": "group", "sub": "list"}
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
def groupMove(apikey, groupid, container_id):
    """Move filecrypt.cc containers into the specified group.

    apikey: your FileCrypt API key
    groupid: the group ID(!) the containers should end up in
    container_id: a single container id (str) or a list of container ids
    """
    data = {"api_key": apikey, "fn": "group", "sub": "move", "group": str(groupid)}
    if isinstance(container_id, str):
        data["container_id[0]"] = container_id
    elif isinstance(container_id, list):
        # PHP-style indexed form fields: container_id[0], container_id[1], ...
        for i, cid in enumerate(container_id):
            data["container_id[" + str(i) + "]"] = cid
    return json.loads(requests.post("https://filecrypt.cc/api.php", data=data).text)
def groupRemove(apikey,groupid):
"""
Removes the group and moves all containers to group 0 (ungrouped)
apikey: Your ApiKey from FileCrypt
groupid: the group ID(!) you want to delete
"""
data={"api_key":apikey,"fn":"group","sub":"remove","id":str(groupid)}
return json.loads(requests.post("https://filecrypt.cc/api.php",data=data).text) | 38.55042 | 173 | 0.685014 | import requests, json
def userApiKey(apikey):
    """Return information about an API key.

    apikey: your FileCrypt API key
    Attention: this API is limited to 10 requests per 1-hour timeframe.
    """
    payload = {"api_key": apikey, "sub": "apikey", "fn": "user"}
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
def userEarnings(apikey, year=None, month=None, day=None):
    """Return income statistics and the total available account balance.

    apikey: your FileCrypt API key
    year(optional): year as YYYY (e.g. 2019)
    month(optional): month as MM (e.g. 06)
    day(optional): day as DD (e.g. 13)
    """
    data = {"api_key": apikey, "sub": "earnings", "fn": "user"}
    # `is not None` instead of `!= None`: identity test is the idiomatic
    # (and safe) way to detect omitted optional arguments.
    if year is not None:
        data["year"] = str(year)
    if month is not None:
        data["month"] = str(month)
    if day is not None:
        data["day"] = str(day)
    return json.loads(requests.post("https://filecrypt.cc/api.php", data=data).text)
def containerCreate(apikey, name, mirrors, password=None, captcha=None,
                    allow_cnl=None, allow_dlc=None, allow_links=None,
                    groupid=None):
    """Create a FileCrypt protected folder.

    apikey: your FileCrypt API key
    name: name of the folder
    mirrors: triple-nested list of links, e.g.
        [
            [["mirror_0_link1", "mirror_0_link2"],
             ["mirror_0_backup_link1", "mirror_0_backup_link2"]],
            [["mirror_1_link1", "mirror_1_link2"],
             ["mirror_1_backup_link1", "mirror_1_backup_link2"]],
        ]
        Plain strings found at the first or second nesting level (where a
        list is expected) are skipped.
    password(optional): password of the folder
    captcha(optional): enable captcha? Allowed values: 0, 1
    allow_cnl(optional): enable CNL? Allowed values: 0, 1
    allow_dlc(optional): enable DLC? Allowed values: 0, 1
    allow_links(optional): enable plain links? Allowed values: 0, 1
    groupid(optional): group ID of the target group
    """
    data = {"api_key": apikey, "sub": "createV2", "fn": "containerV2", "name": name}
    # Flatten the nested structure into PHP-style form fields:
    # mirror_<n>[<backup>][<idx>]; mirror numbering starts at 1.
    for i, mirror in enumerate(mirrors):
        if isinstance(mirror, str):
            continue  # malformed entry at this level: skip plain strings
        for j, links in enumerate(mirror):
            if isinstance(links, str):
                continue
            for k, link in enumerate(links):
                data["mirror_" + str(i + 1) + "[" + str(j) + "][" + str(k) + "]"] = link
    if password is not None:
        data["folderpass"] = password
    if captcha is not None:
        data["captcha"] = str(captcha)
    if allow_cnl is not None:
        data["allow_cnl"] = str(allow_cnl)
    if allow_dlc is not None:
        data["allow_dlc"] = str(allow_dlc)
    if allow_links is not None:
        data["allow_links"] = str(allow_links)
    if groupid is not None:
        data["group"] = str(groupid)
    return json.loads(requests.post("https://filecrypt.cc/api.php", data=data).text)
def containerEdit(apikey, mirrors, container_id, name=None, password=None,
                  captcha=None, allow_cnl=None, allow_dlc=None,
                  allow_links=None, groupid=None):
    """Edit a FileCrypt protected folder.

    Once mirror_1 is submitted, all existing links are permanently removed
    from the folder and replaced with the submitted ones. To replace only a
    single mirror, use containerInfo() first to rebuild the full request.
    Status images: https://filecrypt.cc/docs/index.htm#api-General-Statusimages

    apikey: your FileCrypt API key
    mirrors: same nested structure as containerCreate()
    container_id: the container id as string
    name(optional): new name of the folder
    password(optional): password of the folder
    captcha(optional): enable captcha? Allowed values: 0, 1
    allow_cnl(optional): enable CNL? Allowed values: 0, 1
    allow_dlc(optional): enable DLC? Allowed values: 0, 1
    allow_links(optional): enable plain links? Allowed values: 0, 1
    groupid(optional): group ID of the target group
    """
    # BUGFIX: the payload no longer unconditionally contains "name": name —
    # the optional name is added below only when it was actually supplied.
    data = {"api_key": apikey, "sub": "editV2", "fn": "containerV2",
            "container_id": container_id}
    # Same PHP-style flattening as containerCreate(); mirrors start at 1.
    for i, mirror in enumerate(mirrors):
        if isinstance(mirror, str):
            continue  # malformed entry at this level: skip plain strings
        for j, links in enumerate(mirror):
            if isinstance(links, str):
                continue
            for k, link in enumerate(links):
                data["mirror_" + str(i + 1) + "[" + str(j) + "][" + str(k) + "]"] = link
    if name is not None:
        data["name"] = name
    if password is not None:
        data["folderpass"] = password
    if captcha is not None:
        data["captcha"] = str(captcha)
    if allow_cnl is not None:
        data["allow_cnl"] = str(allow_cnl)
    if allow_dlc is not None:
        data["allow_dlc"] = str(allow_dlc)
    if allow_links is not None:
        data["allow_links"] = str(allow_links)
    if groupid is not None:
        data["group"] = str(groupid)
    return json.loads(requests.post("https://filecrypt.cc/api.php", data=data).text)
def containerInfo(apikey, container_id):
    """Return a sorted object containing every link in the folder.

    apikey: your FileCrypt API key
    container_id: the container id as string
    """
    payload = {
        "api_key": apikey,
        "sub": "info",
        "fn": "containerV2",
        "container_id": container_id,
    }
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
def containerList(apikey, state=None, fav=None):
    """Return all containers of your FileCrypt account.

    apikey: your FileCrypt API key
    state(optional): filter by folder state. Allowed values: "unchecked",
        "ok", "uncheckable", "error", "offline", "partial"
    fav(optional): filter on favorite folders; 1 = favorite, 0 = regular folder
    """
    data = {"api_key": apikey, "sub": "listV2", "fn": "containerV2"}
    # Only send the optional filters when the caller supplied them
    # (`is not None`, so falsy-but-valid values like 0 still get through).
    if state is not None:
        data["state"] = state
    if fav is not None:
        data["fav"] = str(fav)
    return json.loads(requests.post("https://filecrypt.cc/api.php", data=data).text)
def containerMyFolder(apikey, state=None, groupid=None):
    """Return a short list of your own folders, optionally filtered by state.

    Note: every object child of the container node starts with a trailing _.
    apikey: your FileCrypt API key
    state(optional): "unchecked", "ok", "uncheckable", "error", "offline", "partial"
    groupid(optional): restrict the listing to the given group
    """
    data = {"api_key": apikey, "fn": "containerV2", "sub": "myfolder"}
    # Optional filters: use `is not None` so legitimate falsy ids (e.g. 0) pass.
    if state is not None:
        data["state"] = state
    if groupid is not None:
        data["group"] = str(groupid)
    return json.loads(requests.post("https://filecrypt.cc/api.php", data=data).text)
def containerRemove(apikey, container_id):
    """*Move* a folder from public to the trash bin; the folder will no
    longer be publicly available.

    apikey: your FileCrypt API key
    container_id: the container id as string
    """
    payload = {
        "api_key": apikey,
        "fn": "containerV2",
        "sub": "remove",
        "container_id": container_id,
    }
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
def containerStatus(apikey, container_id):
    """Get the status of a filecrypt.cc folder.

    See https://filecrypt.cc/docs/index.htm#api-General-Statusimages
    apikey: your FileCrypt API key
    container_id: the container id as string
    """
    payload = {
        "api_key": apikey,
        "fn": "containerV2",
        "sub": "statusV2",
        "container_id": container_id,
    }
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
def groupAdd(apikey, name):
    """Create a new group with the chosen name (under the root group "0").

    apikey: your FileCrypt API key
    name: the name of the new group
    """
    payload = {
        "api_key": apikey,
        "fn": "group",
        "sub": "add",
        "name": name,
        "parent": "0",
    }
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
def groupList(apikey):
    """Return all groups existing in your FileCrypt account.

    apikey: your FileCrypt API key
    """
    payload = {"api_key": apikey, "fn": "group", "sub": "list"}
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
def groupMove(apikey, groupid, container_id):
    """Move filecrypt.cc containers into the specified group.

    apikey: your FileCrypt API key
    groupid: the group ID(!) the containers should end up in
    container_id: a single container id (str) or a list of container ids
    """
    data = {"api_key": apikey, "fn": "group", "sub": "move", "group": str(groupid)}
    if isinstance(container_id, str):
        data["container_id[0]"] = container_id
    elif isinstance(container_id, list):
        # PHP-style indexed form fields: container_id[0], container_id[1], ...
        for i, cid in enumerate(container_id):
            data["container_id[" + str(i) + "]"] = cid
    return json.loads(requests.post("https://filecrypt.cc/api.php", data=data).text)
def groupRemove(apikey, groupid):
    """Delete the group; all its containers are moved to group 0 (ungrouped).

    apikey: your FileCrypt API key
    groupid: the group ID(!) to delete
    """
    payload = {"api_key": apikey, "fn": "group", "sub": "remove", "id": str(groupid)}
    response = requests.post("https://filecrypt.cc/api.php", data=payload)
    return json.loads(response.text)
222bffa1cd27cddad2aa20e2e4bec46c065c5610 | 363 | py | Python | pintest.py | PHSCRC/NotHotBot | 651671d5df263fe82debc264c4e74b598ed15252 | [
"MIT"
] | 1 | 2018-03-26T18:24:54.000Z | 2018-03-26T18:24:54.000Z | pintest.py | PHSCRC/NotHotBot | 651671d5df263fe82debc264c4e74b598ed15252 | [
"MIT"
] | null | null | null | pintest.py | PHSCRC/NotHotBot | 651671d5df263fe82debc264c4e74b598ed15252 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import RPi.GPIO as GPIO
import sys
import time
if __name__ == '__main__':
main(int(sys.argv[1]))
| 18.15 | 60 | 0.658402 | #!/usr/bin/env python3
import RPi.GPIO as GPIO
import sys
import time
def callback(pin):
    """Edge-detect callback: print the pin number and its current level."""
    level = GPIO.input(pin)
    print(pin, level)
def main(pin):
    """Watch one GPIO pin and report every level change until interrupted."""
    # BCM numbering: `pin` is the Broadcom channel number, not the board pin.
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(pin, GPIO.IN)
    # Fire `callback` on both rising and falling edges.
    GPIO.add_event_detect(pin, GPIO.BOTH, callback=callback)
    while True:
        # Edge detection runs on a background thread; just keep the
        # process alive.
        time.sleep(1)
if __name__ == '__main__':
main(int(sys.argv[1]))
| 191 | 0 | 46 |
217a23271cf0e977d97d4f30363fd22f97b7adfe | 1,864 | py | Python | catalog/pub/redisco/__init__.py | onap/archive-vfc-nfvo-catalog | 24b92a2210c2063935d313f08e1da1e9cee45f3f | [
"Apache-2.0"
] | 4 | 2018-08-29T02:51:38.000Z | 2021-11-16T11:36:11.000Z | catalog/pub/redisco/__init__.py | onap/archive-vfc-nfvo-catalog | 24b92a2210c2063935d313f08e1da1e9cee45f3f | [
"Apache-2.0"
] | null | null | null | catalog/pub/redisco/__init__.py | onap/archive-vfc-nfvo-catalog | 24b92a2210c2063935d313f08e1da1e9cee45f3f | [
"Apache-2.0"
] | 1 | 2019-05-12T08:21:19.000Z | 2019-05-12T08:21:19.000Z | # Copyright (c) 2010 Tim Medina
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# The original code link is https://github.com/iamteem/redisco/tree/master/redisco/__init__.py
import redis
client = Client()
connection = client.redis()
__all__ = ['connection_setup', 'get_client']
| 31.59322 | 95 | 0.724785 | # Copyright (c) 2010 Tim Medina
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# The original code link is https://github.com/iamteem/redisco/tree/master/redisco/__init__.py
import redis
class Client(object):
    """Thin holder for redis connection settings.

    Defaults to localhost:6379 / db 0 when no settings are supplied.
    """

    def __init__(self, **kwargs):
        # Explicit settings replace the defaults wholesale; the defaults
        # apply only when no keyword arguments were given at all.
        self.connection_settings = kwargs if kwargs else {'host': 'localhost', 'port': 6379, 'db': 0}

    def redis(self):
        """Build a redis.Redis client from the stored settings."""
        return redis.Redis(**self.connection_settings)

    def update(self, d):
        """Merge the dict *d* into the stored connection settings."""
        self.connection_settings.update(d)
def connection_setup(**kwargs):
    """Configure the module-wide redis connection.

    Creates the shared Client on first use; subsequent calls merge the new
    settings into the existing client. Rebuilds `connection` either way.
    """
    global connection, client
    if not client:
        client = Client(**kwargs)
    else:
        client.update(kwargs)
    connection = client.redis()
def get_client():
    """Return the module-wide redis connection object."""
    # No `global` statement needed: the function only reads the module-level
    # name, it never rebinds it.
    return connection
client = Client()
connection = client.redis()
__all__ = ['connection_setup', 'get_client']
| 391 | 0 | 149 |
5ed524bfc8f2e68a9f52c920340176e1a1192143 | 309 | py | Python | app/models.py | sundaming/flask_pure | 940dcb8eaeff43bb93445bec436542c8fdd28d27 | [
"MIT"
] | 143 | 2015-05-12T05:09:22.000Z | 2022-01-24T15:46:36.000Z | app/models.py | sundaming/flask_pure | 940dcb8eaeff43bb93445bec436542c8fdd28d27 | [
"MIT"
] | 1 | 2016-06-03T13:59:50.000Z | 2016-12-02T03:15:57.000Z | app/models.py | sundaming/flask_pure | 940dcb8eaeff43bb93445bec436542c8fdd28d27 | [
"MIT"
] | 73 | 2015-07-02T05:07:43.000Z | 2020-10-08T07:18:48.000Z | from app import db
import datetime
| 30.9 | 60 | 0.734628 | from app import db
import datetime
class Post(db.Document):
author = db.StringField(max_length=50)
title = db.StringField(max_length=120, required=True)
tags = db.ListField(db.StringField(max_length=30))
time = db.DateTimeField(default=datetime.datetime.now())
content = db.StringField()
| 0 | 251 | 23 |
ec66bc66f993ab1a980149e8ee9e58421317a060 | 420 | py | Python | backend/src/resource/randoms.py | myang81/Degree_Project | 4535f8a45148e3d6d061d0ddbca9cd94d995d42f | [
"MIT"
] | 1 | 2021-03-28T14:16:03.000Z | 2021-03-28T14:16:03.000Z | backend/src/resource/randoms.py | myang81/Degree_Project | 4535f8a45148e3d6d061d0ddbca9cd94d995d42f | [
"MIT"
] | null | null | null | backend/src/resource/randoms.py | myang81/Degree_Project | 4535f8a45148e3d6d061d0ddbca9cd94d995d42f | [
"MIT"
] | null | null | null |
import numpy as np
import random
import xlwt
#
# 市界的地理坐标为:北纬39”26’至41”03’,东经115”25’至 117”30’
workbook = xlwt.Workbook(encoding='utf-8')
worksheet = workbook.add_sheet("My Workbook")
print(worksheet)
a=[]
for i in range(0,5107):
x=random.uniform(115,117)
y=random.uniform(39,41)
a.append(str(
( str(y))))
print(a[0])
j=0
for i in a:
worksheet.write(j,0,i)
j=j+1
workbook.save("test.xls") | 16.153846 | 45 | 0.654762 |
import numpy as np
import random
import xlwt
#
# 市界的地理坐标为:北纬39”26’至41”03’,东经115”25’至 117”30’
workbook = xlwt.Workbook(encoding='utf-8')
worksheet = workbook.add_sheet("My Workbook")
print(worksheet)
a=[]
for i in range(0,5107):
x=random.uniform(115,117)
y=random.uniform(39,41)
a.append(str(
( str(y))))
print(a[0])
j=0
for i in a:
worksheet.write(j,0,i)
j=j+1
workbook.save("test.xls") | 0 | 0 | 0 |
3863c5d84e88b51b65a954ee08415339f58f5ec8 | 2,640 | py | Python | statsmodels/compatnp/iter_compat.py | toobaz/statsmodels | 5286dd713a809b0630232508bf9ad5104aae1980 | [
"BSD-3-Clause"
] | 2 | 2017-01-05T22:44:37.000Z | 2018-04-26T08:34:00.000Z | statsmodels/compatnp/iter_compat.py | AnaMP/statsmodels | 2d4aad9a14619ce0c84d4c7bca9dacd66b2be566 | [
"BSD-3-Clause"
] | null | null | null | statsmodels/compatnp/iter_compat.py | AnaMP/statsmodels | 2d4aad9a14619ce0c84d4c7bca9dacd66b2be566 | [
"BSD-3-Clause"
] | 1 | 2017-05-12T09:51:44.000Z | 2017-05-12T09:51:44.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 29 10:12:38 2012
Author: Josef Perktold
License: BSD-3
"""
import itertools
try:
#python 2.6, 2.7
zip_longest = itertools.izip_longest
pass
except AttributeError:
#python 3.2
try:
zip_longest = itertools.zip_longest
pass
except AttributeError:
#python 2.5
def zip_longest(*args, **kwds):
'''python 2.5 version for transposing a list of lists
adds None for lists of shorter length, may not have the same
behavior as python 2.6 izip_longest or python 3.2 zip_longest for
other cases
Parameters
----------
args : sequence of iterables
iterables that will be combined in transposed way
Returns
-------
it : iterator
iterator that generates tuples
Examples
--------
>>> lili = [['a0', 'b0', 'c0', 'd0'], ['a1', 'b1', 'c1'],
['a2', 'b2', 'c2', 'd2'], ['a3', 'b3', 'c3', 'd3'],
['a4', 'b4']]
>>> list(izip_longest(*lili))
[('a0', 'a1', 'a2', 'a3', 'a4'), ('b0', 'b1', 'b2', 'b3', 'b4'),
('c0', 'c1', 'c2', 'c3', None), ('d0', None, 'd2', 'd3', None)]
'''
# izip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-
fillvalue = kwds.get('fillvalue')
fillers = itertools.repeat(fillvalue)
iters = [itertools.chain(it, sentinel(), fillers) for it in args]
try:
for tup in itertools.izip(*iters):
yield tup
except IndexError:
pass
try:
from itertools import combinations
except ImportError:
#from python 2.6 documentation
| 30 | 84 | 0.481818 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 29 10:12:38 2012
Author: Josef Perktold
License: BSD-3
"""
import itertools
try:
#python 2.6, 2.7
zip_longest = itertools.izip_longest
pass
except AttributeError:
#python 3.2
try:
zip_longest = itertools.zip_longest
pass
except AttributeError:
#python 2.5
def zip_longest(*args, **kwds):
'''python 2.5 version for transposing a list of lists
adds None for lists of shorter length, may not have the same
behavior as python 2.6 izip_longest or python 3.2 zip_longest for
other cases
Parameters
----------
args : sequence of iterables
iterables that will be combined in transposed way
Returns
-------
it : iterator
iterator that generates tuples
Examples
--------
>>> lili = [['a0', 'b0', 'c0', 'd0'], ['a1', 'b1', 'c1'],
['a2', 'b2', 'c2', 'd2'], ['a3', 'b3', 'c3', 'd3'],
['a4', 'b4']]
>>> list(izip_longest(*lili))
[('a0', 'a1', 'a2', 'a3', 'a4'), ('b0', 'b1', 'b2', 'b3', 'b4'),
('c0', 'c1', 'c2', 'c3', None), ('d0', None, 'd2', 'd3', None)]
'''
# izip_longest('ABCD', 'xy', fillvalue='-') --> Ax By C- D-
fillvalue = kwds.get('fillvalue')
def sentinel(counter = ([fillvalue]*(len(args)-1)).pop):
yield counter() # yields the fillvalue, or raises IndexError
fillers = itertools.repeat(fillvalue)
iters = [itertools.chain(it, sentinel(), fillers) for it in args]
try:
for tup in itertools.izip(*iters):
yield tup
except IndexError:
pass
try:
from itertools import combinations
except ImportError:
#from python 2.6 documentation
    def combinations(iterable, r):
        """Pure-Python fallback for itertools.combinations (pre-2.6).

        Yields r-length tuples of elements from *iterable* in sorted
        index order. NOTE: relies on range() returning a mutable list,
        so this shim works on Python 2 only.
        """
        # combinations('ABCD', 2) --> AB AC AD BC BD CD
        # combinations(range(4), 3) --> 012 013 023 123
        pool = tuple(iterable)
        n = len(pool)
        if r > n:
            # Sample larger than the pool: nothing to yield.
            return
        indices = range(r)
        yield tuple(pool[i] for i in indices)
        while True:
            # Find the rightmost index that can still be incremented.
            for i in reversed(range(r)):
                if indices[i] != i + n - r:
                    break
            else:
                return
            indices[i] += 1
            # Reset everything right of i to consecutive values.
            for j in range(i+1, r):
                indices[j] = indices[j-1] + 1
            yield tuple(pool[i] for i in indices)
| 736 | 0 | 60 |
b0da74a3faec35ede730e6192678e7ff370057e5 | 4,119 | py | Python | tutorials/mobilenetv3_prod/Step1-5/05_test_backward.py | leiqing1/models | 33aa4e7ec09a0c210b182dec7aec8ff3bbda45e3 | [
"Apache-2.0"
] | 7,090 | 2017-04-21T00:22:09.000Z | 2022-03-31T11:52:40.000Z | tutorials/mobilenetv3_prod/Step1-5/05_test_backward.py | leiqing1/models | 33aa4e7ec09a0c210b182dec7aec8ff3bbda45e3 | [
"Apache-2.0"
] | 2,601 | 2017-04-21T02:52:04.000Z | 2022-03-09T08:43:09.000Z | tutorials/mobilenetv3_prod/Step1-5/05_test_backward.py | leiqing1/models | 33aa4e7ec09a0c210b182dec7aec8ff3bbda45e3 | [
"Apache-2.0"
] | 3,391 | 2017-04-21T00:22:19.000Z | 2022-03-30T17:53:25.000Z | import paddle
import numpy as np
import torch
import torch.optim.lr_scheduler as lr_scheduler
from reprod_log import ReprodLogger
from reprod_log import ReprodDiffHelper
from mobilenetv3_paddle.paddlevision.models import mobilenet_v3_small as mv3_small_paddle
from mobilenetv3_ref.torchvision.models import mobilenet_v3_small as mv3_small_torch
if __name__ == "__main__":
test_backward()
# load data
diff_helper = ReprodDiffHelper()
torch_info = diff_helper.load_info("./result/losses_ref.npy")
paddle_info = diff_helper.load_info("./result/losses_paddle.npy")
# compare result and produce log
diff_helper.compare_info(torch_info, paddle_info)
diff_helper.report(path="./result/log/backward_diff.log")
| 33.762295 | 89 | 0.667881 | import paddle
import numpy as np
import torch
import torch.optim.lr_scheduler as lr_scheduler
from reprod_log import ReprodLogger
from reprod_log import ReprodDiffHelper
from mobilenetv3_paddle.paddlevision.models import mobilenet_v3_small as mv3_small_paddle
from mobilenetv3_ref.torchvision.models import mobilenet_v3_small as mv3_small_torch
def train_one_epoch_paddle(inputs, labels, model, criterion, optimizer,
                           lr_scheduler, max_iter, reprod_logger):
    """Run `max_iter` Paddle training steps, logging loss and lr per step.

    The same fixed (inputs, labels) batch is reused on every iteration so
    the per-step losses can be diffed against the PyTorch reference run.
    Results are saved to ./result/losses_paddle.npy.
    """
    for idx in range(max_iter):
        image = paddle.to_tensor(inputs, dtype="float32")
        target = paddle.to_tensor(labels, dtype="int64")
        # import pdb; pdb.set_trace()

        output = model(image)
        loss = criterion(output, target)

        reprod_logger.add("loss_{}".format(idx), loss.cpu().detach().numpy())
        reprod_logger.add("lr_{}".format(idx), np.array(lr_scheduler.get_lr()))

        optimizer.clear_grad()
        loss.backward()
        optimizer.step()
        # lr_scheduler.step()  # deliberately disabled to mirror the torch run

    reprod_logger.save("./result/losses_paddle.npy")
def train_one_epoch_torch(inputs, labels, model, criterion, optimizer,
                          lr_scheduler, max_iter, reprod_logger):
    """Run `max_iter` PyTorch training steps, logging loss and lr per step.

    Mirror of train_one_epoch_paddle for the alignment diff; runs on CUDA.
    Results are saved to ./result/losses_ref.npy.
    """
    for idx in range(max_iter):
        image = torch.tensor(inputs, dtype=torch.float32).cuda()
        target = torch.tensor(labels, dtype=torch.int64).cuda()
        model = model.cuda()
        output = model(image)
        loss = criterion(output, target)

        reprod_logger.add("loss_{}".format(idx), loss.cpu().detach().numpy())
        reprod_logger.add("lr_{}".format(idx),
                          np.array(lr_scheduler.get_last_lr()))

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # lr_scheduler.step()  # deliberately disabled to mirror the paddle run

    reprod_logger.save("./result/losses_ref.npy")
def test_backward():
    """Align Paddle vs PyTorch backward passes on identical weights/data.

    Loads the same MobileNetV3-small weights into both frameworks, runs a
    few identical SGD+momentum steps over a fixed fake batch, and logs the
    per-step losses for the diff in the __main__ block.
    """
    max_iter = 3
    lr = 1e-3
    momentum = 0.9
    lr_gamma = 0.1

    # set deterministic cuDNN behaviour so both runs are reproducible
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # NOTE(review): this is a plain local variable, not a Paddle flag — it
    # presumably has no effect here; confirm whether it should be set via
    # the environment instead.
    FLAGS_cudnn_deterministic = True

    # load paddle model
    paddle.set_device("gpu")
    paddle_model = mv3_small_paddle(dropout=0.0)
    paddle_model.eval()
    paddle_state_dict = paddle.load("./data/mv3_small_paddle.pdparams")
    paddle_model.set_dict(paddle_state_dict)

    # load torch model (strict=False: converted checkpoints may lack keys)
    torch_model = mv3_small_torch(dropout=0.0)
    torch_model.eval()
    torch_state_dict = torch.load("./data/mobilenet_v3_small-047dcff4.pth")
    torch_model.load_state_dict(torch_state_dict, strict=False)

    # init loss
    criterion_paddle = paddle.nn.CrossEntropyLoss()
    criterion_torch = torch.nn.CrossEntropyLoss()

    # init optimizers and (unused) step-decay schedulers, same hyperparams
    lr_scheduler_paddle = paddle.optimizer.lr.StepDecay(
        lr, step_size=max_iter // 3, gamma=lr_gamma)
    opt_paddle = paddle.optimizer.Momentum(
        learning_rate=lr,
        momentum=momentum,
        parameters=paddle_model.parameters())

    opt_torch = torch.optim.SGD(torch_model.parameters(),
                                lr=lr,
                                momentum=momentum)
    lr_scheduler_torch = lr_scheduler.StepLR(
        opt_torch, step_size=max_iter // 3, gamma=lr_gamma)

    # prepare logger & load the fixed fake batch shared by both runs
    reprod_logger = ReprodLogger()
    inputs = np.load("./data/fake_data.npy")
    labels = np.load("./data/fake_label.npy")

    train_one_epoch_paddle(inputs, labels, paddle_model, criterion_paddle,
                           opt_paddle, lr_scheduler_paddle, max_iter,
                           reprod_logger)

    train_one_epoch_torch(inputs, labels, torch_model, criterion_torch,
                          opt_torch, lr_scheduler_torch, max_iter,
                          reprod_logger)
test_backward()
# load data
diff_helper = ReprodDiffHelper()
torch_info = diff_helper.load_info("./result/losses_ref.npy")
paddle_info = diff_helper.load_info("./result/losses_paddle.npy")
# compare result and produce log
diff_helper.compare_info(torch_info, paddle_info)
diff_helper.report(path="./result/log/backward_diff.log")
| 3,308 | 0 | 69 |
66090fc039d6b9801a2c41c0d0b537b7e123ca6c | 4,302 | py | Python | Resaltador Paralelo/Carpeta1/Carpeta1-2/mejorracional.py | erteck/textHighlighter | 9f079d81b16acbd98a4f5f8520e43bef483d0071 | [
"MIT"
] | null | null | null | Resaltador Paralelo/Carpeta1/Carpeta1-2/mejorracional.py | erteck/textHighlighter | 9f079d81b16acbd98a4f5f8520e43bef483d0071 | [
"MIT"
] | null | null | null | Resaltador Paralelo/Carpeta1/Carpeta1-2/mejorracional.py | erteck/textHighlighter | 9f079d81b16acbd98a4f5f8520e43bef483d0071 | [
"MIT"
] | null | null | null | # ----------------------------------------------------------
#Erick Alberto Bustos Cruz A01378966
#
#
# ----------------------------------------------------------
from functools import total_ordering
import math as m
@total_ordering
class Racional:
"""Instancias de esta clase representan números
racionales con un numerador y un denominador.
"""
@property
@property
@numerador.setter
@denominador.setter | 36.769231 | 121 | 0.653649 | # ----------------------------------------------------------
#Erick Alberto Bustos Cruz A01378966
#
#
# ----------------------------------------------------------
from functools import total_ordering
import math as m
@total_ordering
class Racional:
    """A rational number with integer numerator and denominator.

    Instances are always kept in lowest terms with the sign carried by the
    numerator (the denominator is strictly positive). ``__eq__`` and
    ``__lt__`` plus ``functools.total_ordering`` supply the full set of
    comparison operators.
    """

    @staticmethod
    def _normalizar(numerador, denominador):
        # Reduce to lowest terms and keep the denominator positive so the
        # sign always lives on the numerator. This also fixes the 0/-n edge
        # case and the inconsistent sign handling of the old setters.
        numerador = int(numerador)
        denominador = int(denominador)
        if denominador < 0:
            numerador = -numerador
            denominador = -denominador
        divisor = m.gcd(numerador, denominador)
        if divisor:
            numerador //= divisor
            denominador //= divisor
        return numerador, denominador

    def __init__(self, numerador, denominador):
        """Build the rational numerador/denominador in canonical form."""
        self.__numerador, self.__denominador = Racional._normalizar(
            numerador, denominador)

    @property
    def numerador(self):
        """Numerator of the reduced fraction (carries the sign)."""
        return self.__numerador

    @property
    def denominador(self):
        """Denominator of the reduced fraction (always positive)."""
        return self.__denominador

    @numerador.setter
    def numerador(self, numerador):
        # BUGFIX: re-canonicalize the pair instead of the old ad-hoc sign
        # flipping, which could negate the numerator without ever touching
        # the denominator.
        self.__numerador, self.__denominador = Racional._normalizar(
            numerador, self.__denominador)

    @denominador.setter
    def denominador(self, denominador):
        self.__numerador, self.__denominador = Racional._normalizar(
            self.__numerador, denominador)

    def __str__(self):
        return f'{self.__numerador}/{self.__denominador}'

    def __repr__(self):
        return f'Racional({self.__numerador}, {self.__denominador})'

    def __eq__(self, otro):
        # Cross-multiplication: exact integer comparison with no float
        # rounding error (the old float-division test could misjudge
        # large fractions).
        return (self.__numerador * otro.denominador ==
                otro.numerador * self.__denominador)

    def __lt__(self, otro):
        # Denominators are positive, so cross-multiplying preserves order.
        # BUGFIX: the original returned None (not False) for equal values.
        return (self.__numerador * otro.denominador <
                otro.numerador * self.__denominador)

    def __neg__(self):
        """Return the additive inverse."""
        return Racional(-self.__numerador, self.__denominador)

    def inverso(self):
        """Return the multiplicative inverse (reciprocal)."""
        return Racional(self.__denominador, self.__numerador)

    def __add__(self, otro):
        # a/b + c/d == (a*d + c*b) / (b*d); the constructor reduces.
        return Racional(
            self.__numerador * otro.denominador +
            otro.numerador * self.__denominador,
            self.__denominador * otro.denominador)

    def __sub__(self, otro):
        return Racional(
            self.__numerador * otro.denominador -
            otro.numerador * self.__denominador,
            self.__denominador * otro.denominador)

    def __mul__(self, otro):
        return Racional(self.__numerador * otro.numerador,
                        self.__denominador * otro.denominador)

    def __truediv__(self, otro):
        return Racional(self.__numerador * otro.denominador,
                        self.__denominador * otro.numerador)
85f74b093fce2b393c1d5630945f304dc90ef82f | 13,422 | py | Python | src/neural_networks.py | HinanawiTS/ECE-143-Project | b23c85d4815943b284a4275f10f0a73f77d1c038 | [
"Apache-2.0"
] | null | null | null | src/neural_networks.py | HinanawiTS/ECE-143-Project | b23c85d4815943b284a4275f10f0a73f77d1c038 | [
"Apache-2.0"
] | null | null | null | src/neural_networks.py | HinanawiTS/ECE-143-Project | b23c85d4815943b284a4275f10f0a73f77d1c038 | [
"Apache-2.0"
] | 1 | 2021-11-05T23:10:30.000Z | 2021-11-05T23:10:30.000Z | import numpy as np
import pandas as pd
import json
from mplsoccer.pitch import Pitch, VerticalPitch
path = "C:/Users/brand/desktop/events/events_England.json"
with open(path) as f:
data = json.load(f)
train = pd.DataFrame(data)
path2 = "C:/Users/brand/desktop/players.json"
with open(path2) as f:
play = json.load(f)
players = pd.DataFrame(play)
lst = ['events_France.json','events_Germany.json','events_Italy.json','events_Spain.json']
pathway = "C:/Users/brand/desktop/events/"
for country in lst:
with open(pathway + country) as f:
datal = json.load(f)
tl = pd.DataFrame(datal)
train = pd.concat([train,tl],ignore_index=True)
#pd.unique(train['subEventName'])
shots = train[train['subEventName'] == 'Shot']
print(len(shots))
shots_model = pd.DataFrame(columns=["Goal","X","Y"], dtype=object)
for i,shot in shots.iterrows():
shots_model.at[i,'Header'] = 0
for tag in shot['tags']:
if tag['id'] == 403:
shots_model.at[i,'Header'] = 1
#take distance from center of goal at y = 50, x position of goal is always 100
shots_model.at[i,'X'] = 100-shot['positions'][0]['x']
shots_model.at[i,'Y'] = shot['positions'][0]['y']
shots_model.at[i,'C'] = abs(shot['positions'][0]['y'] - 50)
#distance in meters
x = shots_model.at[i,'X']* 105/100
y = shots_model.at[i,'C']* 65/100
shots_model.at[i,'Distance'] = np.sqrt(x**2 + y**2)
angle = np.arctan(7.32 * x / (x**2 + y**2 - (7.32/2)**2))
if angle < 0:
angle = np.pi + angle
shots_model.at[i,'Angle'] = angle
#goal check
shots_model.at[i,'Goal'] = 0
shots_model.at[i,'Counter Attack'] = 0
shots_model.at[i, 'Blocked'] = 0
shots_model.at[i, 'Right Foot'] = 0
shots_model.at[i,'wyId'] = shot['playerId']
if shot['matchPeriod'] == '1H':
shots_model.at[i, 'First Half'] = 1
else:
shots_model.at[i,'First Half'] = 0
for tags in shot['tags']:
if tags['id'] == 101:
shots_model.at[i,'Goal'] = 1
if tags['id'] == 1901:
shots_model.at[i, 'Counter Attack'] = 1
if tags['id'] == 2101:
shots_model.at[i, 'Blocked'] = 1
if tags['id'] == 402:
shots_model.at[i, 'Right Foot'] = 1
shots_model['angle_degrees'] = shots_model['Angle'] * 180 / np.pi
shots_model = shots_model.merge(players, left_on = 'wyId' , right_on = 'wyId')
for i,shot in shots_model.iterrows():
shots_model.at[i, 'strong foot'] = 0
if shot['Right Foot'] == 1:
if shot['foot'] == 'right':
shots_model.at[i, 'strong foot'] = 1
elif shot['Right Foot'] == 0:
if shot['foot'] == 'left':
shots_model.at[i, 'strong foot'] = 1
pitch = Pitch(pitch_color ='black', line_color = 'white', stripe=False,pitch_type='wyscout')
fig,ax = pitch.draw(figsize=(10,8))
df = shots_model.loc[shots_model['Goal'] == 1]
xpos = df["X"]
ypos = df["Y"]
df_nongoals = shots_model.loc[shots_model['Goal'] == 0]
xpos2 = df_nongoals["X"]
ypos2 = df_nongoals["Y"]
head = shots_model[shots_model['Header'] == 1]
counter = shots_model[shots_model['Counter Attack'] == 1]
strong = shots_model[shots_model['strong foot'] == 1]
first = shots_model[shots_model['First Half'] == 1]
head_df = head.loc[head['Goal'] == 1]
strong_goal = strong.loc[strong['Goal'] == 1]
headed_goals = len(head_df)
from sklearn.model_selection import train_test_split
X_full = shots_model[["Header","Distance", "Angle","Counter Attack","strong foot", "First Half"]]
y_full = shots_model[["Goal"]]
y_full['Goal'] = y_full['Goal'].astype(int)
print(X_full.head())
X_train,X_test,y_train,y_test = train_test_split(X_full,y_full,test_size = 0.15,random_state=2)
X_train,X_val,y_train,y_val = train_test_split(X_train,y_train,test_size = 0.176, random_state=2)
y_val = np.array(y_val.pop('Goal'))
y_test = np.array(y_test.pop('Goal'))
y_train = np.array(y_train.pop('Goal'))
X_train = X_train.to_numpy()
X_test = X_test.to_numpy()
X_val = X_val.to_numpy()
print(X_train.shape)
print(y_train.shape)
negative,positive = np.bincount(y_full['Goal'])
print(positive/(negative+positive))
import tensorflow as tf
from tensorflow import keras
import sklearn
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
train_feats = scaler.fit_transform(X_train)
val_feats = scaler.transform(X_val)
test_feats = scaler.transform(X_test)
train_feats = np.clip(train_feats,-5,5)
val_feats = np.clip(val_feats,-5,5)
test_feats = np.clip(test_feats,-5,5)
boolean_train_labels = y_train != 0
METRICS = [
keras.metrics.TruePositives(name='tp'),
keras.metrics.FalsePositives(name='fp'),
keras.metrics.TrueNegatives(name='tn'),
keras.metrics.FalseNegatives(name='fn'),
keras.metrics.BinaryAccuracy(name='accuracy'),
keras.metrics.Precision(name='precision'),
keras.metrics.Recall(name='recall'),
keras.metrics.AUC(name='auc'),
keras.metrics.AUC(name='prc', curve='PR'), # precision-recall curve
]
EPOCHS = 100
BATCH_SIZE = 256
early_stopping = tf.keras.callbacks.EarlyStopping(monitor = 'val_prc',verbose=1,patience=10,mode='max',restore_best_weights=True)
model = make_model()
model.summary()
orig_bias = np.log([positive/negative])
model = make_model(output_bias=orig_bias)
import os
import tempfile
initial_weights = os.path.join(tempfile.mkdtemp(), 'initial_weights')
model.save_weights(initial_weights)
model = make_model()
model.load_weights(initial_weights)
careful_bias_history = model.fit(
train_feats,
y_train,
batch_size=BATCH_SIZE,
epochs=100,
validation_data=(val_feats, y_val),
verbose=0)
import matplotlib.pyplot as plt
plot_loss(careful_bias_history,"Bias")
model = make_model()
model.load_weights(initial_weights)
baseline_history = model.fit(
train_feats,
y_train,
batch_size=256,
epochs=100,
callbacks=[early_stopping],
validation_data=(val_feats, y_val))
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
plot_metrics(baseline_history)
train_predictions_baseline = model.predict(train_feats)
test_predictions_baseline = model.predict(test_feats)
test_preds = []
test_probs = []
for i in test_predictions_baseline:
if i > 0.5:
test_preds.append(1)
test_probs.append(i)
else:
test_preds.append(0)
test_probs.append(i)
TP = sum([(p and l) for (p,l) in zip(test_preds, y_test)])
FP = sum([(p and not l) for (p,l) in zip(test_preds, y_test)])
TN = sum([(not p and not l) for (p,l) in zip(test_preds, y_test)])
FN = sum([(not p and l) for (p,l) in zip(test_preds, y_test)])
TPR = TP/(TP+FN)
TNR = TN/(TN+FP)
BER = 1 - .5 * (TPR + TNR)
precision = TP/(TP+FP)
recall = TP/(TP + FN)
F1 = 2 * (precision*recall)/(precision + recall)
accuracy = (TP + TN)/(TP + FP + TN + FN)
print(BER)
print(precision)
print(recall)
print(F1)
print(accuracy)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from yellowbrick.regressor import ResidualsPlot
Y = []
X = []
count1 = 0
count2 = 0
for i in range(len(test_probs)):
count1 += test_probs[i]
count2 += y_test[i]
X.append(count1)
Y.append(count2)
count1 = 0
count2 = 0
print(Y[0:10])
X = np.array(X)
X = X.reshape(-1,1)
lin = LinearRegression().fit(X,Y)
print(lin.score(X,Y))
lpreds = lin.predict(X)
print('Coeffs')
print(lin.coef_)
print(mean_squared_error(Y,lpreds))
print(r2_score(Y,lpreds))
visualizer = ResidualsPlot(lin)
visualizer.fit(X,Y)
visualizer.score(X,lpreds)
visualizer.show()
from sklearn.metrics import roc_curve, roc_auc_score
r_probs = [0 for _ in range(len(y_test))]
r_auc = roc_auc_score(y_test,r_probs)
nn_auc = roc_auc_score(y_test,test_probs)
r_fpr,r_tpr,_ = roc_curve(y_test,r_probs)
nn_fpr,nn_tpr,_ = roc_curve(y_test,test_probs)
plt.plot(r_fpr,r_tpr, label = 'Guess 0 Always',color='black')
plt.plot(nn_fpr,nn_tpr, label = 'Baseline NN',color ='b')
plt.title('ROC Plot Baseline Neural Network')
plt.xlabel('FPR %')
plt.ylabel('TPR %')
plt.legend()
plt.show()
print(nn_auc)
weight_0 = (1/negative) * (positive+negative/2.0)
weight_1 = (1/positive) * (positive+negative/2.0)
class_weight = {0: weight_0, 1: weight_1}
print(weight_0)
print(weight_1)
weighted_model = make_model()
weighted_model.load_weights(initial_weights)
weighted_history = weighted_model.fit(
train_feats,
y_train,
batch_size=256,
epochs=100,
callbacks=[early_stopping],
validation_data=(val_feats, y_val),
class_weight=class_weight)
train_preds_weighted = weighted_model.predict(train_feats)
test_preds_weighted = weighted_model.predict(test_feats)
test_predictions_weighted = []
test_probs_weighted = []
for i in test_preds_weighted:
if i > 0.5:
test_predictions_weighted.append(1)
test_probs_weighted.append(i)
else:
test_predictions_weighted.append(0)
test_probs_weighted.append(i)
TP = sum([(p and l) for (p,l) in zip(test_predictions_weighted, y_test)])
FP = sum([(p and not l) for (p,l) in zip(test_predictions_weighted, y_test)])
TN = sum([(not p and not l) for (p,l) in zip(test_predictions_weighted, y_test)])
FN = sum([(not p and l) for (p,l) in zip(test_predictions_weighted, y_test)])
TPR = TP/(TP+FN)
TNR = TN/(TN+FP)
BER = 1 - .5 * (TPR + TNR)
precision = TP/(TP+FP)
recall = TP/(TP + FN)
F1 = 2 * (precision*recall)/(precision + recall)
accuracy = (TP + TN)/(TP + FP + TN + FN)
print("Balanced Error rate: " + str(BER))
print("Precision: " + str(precision))
print('Recall: ' + str(recall))
print('F1 score: ' + str(F1))
print('Total Classification Accuracy: ' + str(accuracy))
r_probs = [0 for _ in range(len(y_test))]
r_auc = roc_auc_score(y_test,r_probs)
nn_auc = roc_auc_score(y_test,test_probs_weighted)
r_fpr,r_tpr,_ = roc_curve(y_test,r_probs)
nn_fpr,nn_tpr,_ = roc_curve(y_test,test_probs_weighted)
plt.plot(r_fpr,r_tpr, label = 'BER = 0',color='black')
plt.plot(nn_fpr,nn_tpr, label = 'Weighted Class NN',color='b')
plt.title('ROC Plot Weighte Classes Neural Network')
plt.xlabel('FPR %')
plt.ylabel('TPR %')
plt.legend()
plt.show()
print(nn_auc)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from yellowbrick.regressor import ResidualsPlot
Y = []
X = []
count1 = 0
count2 = 0
for i in range(len(test_probs_weighted)):
count1 += test_probs_weighted[i]
count2 += y_test[i]
X.append(count1)
Y.append(count2)
count1 = 0
count2 = 0
print(Y[0:10])
X = np.array(X)
X = X.reshape(-1,1)
lin = LinearRegression().fit(X,Y)
print(lin.score(X,Y))
lpreds = lin.predict(X)
print('Coeffs')
print(lin.coef_)
print(mean_squared_error(Y,lpreds))
print(r2_score(Y,lpreds))
visualizer = ResidualsPlot(lin)
visualizer.fit(X,Y)
visualizer.score(X,lpreds)
visualizer.show()
| 24.855556 | 130 | 0.629116 | import numpy as np
import pandas as pd
import json
from mplsoccer.pitch import Pitch, VerticalPitch
path = "C:/Users/brand/desktop/events/events_England.json"
with open(path) as f:
data = json.load(f)
train = pd.DataFrame(data)
path2 = "C:/Users/brand/desktop/players.json"
with open(path2) as f:
play = json.load(f)
players = pd.DataFrame(play)
lst = ['events_France.json','events_Germany.json','events_Italy.json','events_Spain.json']
pathway = "C:/Users/brand/desktop/events/"
for country in lst:
with open(pathway + country) as f:
datal = json.load(f)
tl = pd.DataFrame(datal)
train = pd.concat([train,tl],ignore_index=True)
#pd.unique(train['subEventName'])
shots = train[train['subEventName'] == 'Shot']
print(len(shots))
shots_model = pd.DataFrame(columns=["Goal","X","Y"], dtype=object)
for i,shot in shots.iterrows():
shots_model.at[i,'Header'] = 0
for tag in shot['tags']:
if tag['id'] == 403:
shots_model.at[i,'Header'] = 1
#take distance from center of goal at y = 50, x position of goal is always 100
shots_model.at[i,'X'] = 100-shot['positions'][0]['x']
shots_model.at[i,'Y'] = shot['positions'][0]['y']
shots_model.at[i,'C'] = abs(shot['positions'][0]['y'] - 50)
#distance in meters
x = shots_model.at[i,'X']* 105/100
y = shots_model.at[i,'C']* 65/100
shots_model.at[i,'Distance'] = np.sqrt(x**2 + y**2)
angle = np.arctan(7.32 * x / (x**2 + y**2 - (7.32/2)**2))
if angle < 0:
angle = np.pi + angle
shots_model.at[i,'Angle'] = angle
#goal check
shots_model.at[i,'Goal'] = 0
shots_model.at[i,'Counter Attack'] = 0
shots_model.at[i, 'Blocked'] = 0
shots_model.at[i, 'Right Foot'] = 0
shots_model.at[i,'wyId'] = shot['playerId']
if shot['matchPeriod'] == '1H':
shots_model.at[i, 'First Half'] = 1
else:
shots_model.at[i,'First Half'] = 0
for tags in shot['tags']:
if tags['id'] == 101:
shots_model.at[i,'Goal'] = 1
if tags['id'] == 1901:
shots_model.at[i, 'Counter Attack'] = 1
if tags['id'] == 2101:
shots_model.at[i, 'Blocked'] = 1
if tags['id'] == 402:
shots_model.at[i, 'Right Foot'] = 1
shots_model['angle_degrees'] = shots_model['Angle'] * 180 / np.pi
shots_model = shots_model.merge(players, left_on = 'wyId' , right_on = 'wyId')
for i,shot in shots_model.iterrows():
shots_model.at[i, 'strong foot'] = 0
if shot['Right Foot'] == 1:
if shot['foot'] == 'right':
shots_model.at[i, 'strong foot'] = 1
elif shot['Right Foot'] == 0:
if shot['foot'] == 'left':
shots_model.at[i, 'strong foot'] = 1
pitch = Pitch(pitch_color ='black', line_color = 'white', stripe=False,pitch_type='wyscout')
fig,ax = pitch.draw(figsize=(10,8))
df = shots_model.loc[shots_model['Goal'] == 1]
xpos = df["X"]
ypos = df["Y"]
df_nongoals = shots_model.loc[shots_model['Goal'] == 0]
xpos2 = df_nongoals["X"]
ypos2 = df_nongoals["Y"]
head = shots_model[shots_model['Header'] == 1]
counter = shots_model[shots_model['Counter Attack'] == 1]
strong = shots_model[shots_model['strong foot'] == 1]
first = shots_model[shots_model['First Half'] == 1]
head_df = head.loc[head['Goal'] == 1]
strong_goal = strong.loc[strong['Goal'] == 1]
headed_goals = len(head_df)
from sklearn.model_selection import train_test_split
X_full = shots_model[["Header","Distance", "Angle","Counter Attack","strong foot", "First Half"]]
y_full = shots_model[["Goal"]]
y_full['Goal'] = y_full['Goal'].astype(int)
print(X_full.head())
X_train,X_test,y_train,y_test = train_test_split(X_full,y_full,test_size = 0.15,random_state=2)
X_train,X_val,y_train,y_val = train_test_split(X_train,y_train,test_size = 0.176, random_state=2)
y_val = np.array(y_val.pop('Goal'))
y_test = np.array(y_test.pop('Goal'))
y_train = np.array(y_train.pop('Goal'))
X_train = X_train.to_numpy()
X_test = X_test.to_numpy()
X_val = X_val.to_numpy()
print(X_train.shape)
print(y_train.shape)
negative,positive = np.bincount(y_full['Goal'])
print(positive/(negative+positive))
import tensorflow as tf
from tensorflow import keras
import sklearn
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
train_feats = scaler.fit_transform(X_train)
val_feats = scaler.transform(X_val)
test_feats = scaler.transform(X_test)
train_feats = np.clip(train_feats,-5,5)
val_feats = np.clip(val_feats,-5,5)
test_feats = np.clip(test_feats,-5,5)
boolean_train_labels = y_train != 0
METRICS = [
keras.metrics.TruePositives(name='tp'),
keras.metrics.FalsePositives(name='fp'),
keras.metrics.TrueNegatives(name='tn'),
keras.metrics.FalseNegatives(name='fn'),
keras.metrics.BinaryAccuracy(name='accuracy'),
keras.metrics.Precision(name='precision'),
keras.metrics.Recall(name='recall'),
keras.metrics.AUC(name='auc'),
keras.metrics.AUC(name='prc', curve='PR'), # precision-recall curve
]
def make_model(metrics=METRICS, output_bias=None):
    """Build and compile a small binary-classification MLP for xG prediction.

    :param metrics: Keras metrics tracked during training/evaluation.
    :param output_bias: Optional initial bias for the output unit (e.g. the
        log-odds of the positive class) to speed convergence on imbalanced
        data; ``None`` keeps the default initializer.
    :return: A compiled ``keras.Sequential`` model.
    """
    if output_bias is not None:
        output_bias = tf.keras.initializers.Constant(output_bias)
    model = keras.Sequential([
        # Only the first layer needs input_shape; the original redundantly
        # repeated it on the second Dense layer (Keras ignores it there).
        keras.layers.Dense(10, activation='relu',
                           input_shape=(train_feats.shape[-1],)),
        keras.layers.Dense(10, activation='relu'),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(1, activation='sigmoid',
                           bias_initializer=output_bias),
    ])
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate=0.001),
        loss=keras.losses.BinaryCrossentropy(),
        metrics=metrics)
    return model
EPOCHS = 100
BATCH_SIZE = 256
early_stopping = tf.keras.callbacks.EarlyStopping(monitor = 'val_prc',verbose=1,patience=10,mode='max',restore_best_weights=True)
model = make_model()
model.summary()
orig_bias = np.log([positive/negative])
model = make_model(output_bias=orig_bias)
import os
import tempfile
initial_weights = os.path.join(tempfile.mkdtemp(), 'initial_weights')
model.save_weights(initial_weights)
model = make_model()
model.load_weights(initial_weights)
careful_bias_history = model.fit(
train_feats,
y_train,
batch_size=BATCH_SIZE,
epochs=100,
validation_data=(val_feats, y_val),
verbose=0)
import matplotlib.pyplot as plt
def plot_loss(history, label):
    """Plot train/validation loss curves on a log-scaled y-axis.

    :param history: ``keras.callbacks.History`` returned by ``model.fit``.
    :param label: Suffix appended to the legend labels (e.g. "Bias").
    """
    epochs = history.epoch
    curves = history.history
    # Log scale keeps the wide range of loss values readable.
    plt.semilogy(epochs, curves['loss'], 'b', label=f'Train {label}')
    plt.semilogy(epochs, curves['val_loss'], color='r',
                 label=f'Val {label}', linestyle="--")
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
plot_loss(careful_bias_history,"Bias")
model = make_model()
model.load_weights(initial_weights)
baseline_history = model.fit(
train_feats,
y_train,
batch_size=256,
epochs=100,
callbacks=[early_stopping],
validation_data=(val_feats, y_val))
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
def plot_metrics(history):
    """Plot train-vs-val curves for loss/prc/precision/recall in a 2x2 grid.

    :param history: ``keras.callbacks.History`` returned by ``model.fit``.
    """
    for n, metric in enumerate(['loss', 'prc', 'precision', 'recall']):
        plt.subplot(2, 2, n + 1)
        plt.plot(history.epoch, history.history[metric],
                 color=colors[0], label='Train')
        plt.plot(history.epoch, history.history['val_' + metric],
                 color=colors[1], label='Val')
        plt.xlabel('Epoch')
        plt.ylabel(metric.replace("_", " ").capitalize())
        # Per-metric axis limits: loss is unbounded above, the rest live in [0, 1].
        if metric == 'loss':
            plt.ylim([0, plt.ylim()[1]])
        elif metric == 'auc':
            plt.ylim([0.8, 1])
        else:
            plt.ylim([0, 1])
        plt.legend()
plot_metrics(baseline_history)
train_predictions_baseline = model.predict(train_feats)
test_predictions_baseline = model.predict(test_feats)
test_preds = []
test_probs = []
for i in test_predictions_baseline:
if i > 0.5:
test_preds.append(1)
test_probs.append(i)
else:
test_preds.append(0)
test_probs.append(i)
TP = sum([(p and l) for (p,l) in zip(test_preds, y_test)])
FP = sum([(p and not l) for (p,l) in zip(test_preds, y_test)])
TN = sum([(not p and not l) for (p,l) in zip(test_preds, y_test)])
FN = sum([(not p and l) for (p,l) in zip(test_preds, y_test)])
TPR = TP/(TP+FN)
TNR = TN/(TN+FP)
BER = 1 - .5 * (TPR + TNR)
precision = TP/(TP+FP)
recall = TP/(TP + FN)
F1 = 2 * (precision*recall)/(precision + recall)
accuracy = (TP + TN)/(TP + FP + TN + FN)
print(BER)
print(precision)
print(recall)
print(F1)
print(accuracy)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from yellowbrick.regressor import ResidualsPlot
Y = []
X = []
count1 = 0
count2 = 0
for i in range(len(test_probs)):
count1 += test_probs[i]
count2 += y_test[i]
X.append(count1)
Y.append(count2)
count1 = 0
count2 = 0
print(Y[0:10])
X = np.array(X)
X = X.reshape(-1,1)
lin = LinearRegression().fit(X,Y)
print(lin.score(X,Y))
lpreds = lin.predict(X)
print('Coeffs')
print(lin.coef_)
print(mean_squared_error(Y,lpreds))
print(r2_score(Y,lpreds))
visualizer = ResidualsPlot(lin)
visualizer.fit(X,Y)
visualizer.score(X,lpreds)
visualizer.show()
from sklearn.metrics import roc_curve, roc_auc_score
r_probs = [0 for _ in range(len(y_test))]
r_auc = roc_auc_score(y_test,r_probs)
nn_auc = roc_auc_score(y_test,test_probs)
r_fpr,r_tpr,_ = roc_curve(y_test,r_probs)
nn_fpr,nn_tpr,_ = roc_curve(y_test,test_probs)
plt.plot(r_fpr,r_tpr, label = 'Guess 0 Always',color='black')
plt.plot(nn_fpr,nn_tpr, label = 'Baseline NN',color ='b')
plt.title('ROC Plot Baseline Neural Network')
plt.xlabel('FPR %')
plt.ylabel('TPR %')
plt.legend()
plt.show()
print(nn_auc)
weight_0 = (1/negative) * (positive+negative/2.0)
weight_1 = (1/positive) * (positive+negative/2.0)
class_weight = {0: weight_0, 1: weight_1}
print(weight_0)
print(weight_1)
weighted_model = make_model()
weighted_model.load_weights(initial_weights)
weighted_history = weighted_model.fit(
train_feats,
y_train,
batch_size=256,
epochs=100,
callbacks=[early_stopping],
validation_data=(val_feats, y_val),
class_weight=class_weight)
train_preds_weighted = weighted_model.predict(train_feats)
test_preds_weighted = weighted_model.predict(test_feats)
test_predictions_weighted = []
test_probs_weighted = []
for i in test_preds_weighted:
if i > 0.5:
test_predictions_weighted.append(1)
test_probs_weighted.append(i)
else:
test_predictions_weighted.append(0)
test_probs_weighted.append(i)
TP = sum([(p and l) for (p,l) in zip(test_predictions_weighted, y_test)])
FP = sum([(p and not l) for (p,l) in zip(test_predictions_weighted, y_test)])
TN = sum([(not p and not l) for (p,l) in zip(test_predictions_weighted, y_test)])
FN = sum([(not p and l) for (p,l) in zip(test_predictions_weighted, y_test)])
TPR = TP/(TP+FN)
TNR = TN/(TN+FP)
BER = 1 - .5 * (TPR + TNR)
precision = TP/(TP+FP)
recall = TP/(TP + FN)
F1 = 2 * (precision*recall)/(precision + recall)
accuracy = (TP + TN)/(TP + FP + TN + FN)
print("Balanced Error rate: " + str(BER))
print("Precision: " + str(precision))
print('Recall: ' + str(recall))
print('F1 score: ' + str(F1))
print('Total Classification Accuracy: ' + str(accuracy))
r_probs = [0 for _ in range(len(y_test))]
r_auc = roc_auc_score(y_test,r_probs)
nn_auc = roc_auc_score(y_test,test_probs_weighted)
r_fpr,r_tpr,_ = roc_curve(y_test,r_probs)
nn_fpr,nn_tpr,_ = roc_curve(y_test,test_probs_weighted)
plt.plot(r_fpr,r_tpr, label = 'BER = 0',color='black')
plt.plot(nn_fpr,nn_tpr, label = 'Weighted Class NN',color='b')
plt.title('ROC Plot Weighte Classes Neural Network')
plt.xlabel('FPR %')
plt.ylabel('TPR %')
plt.legend()
plt.show()
print(nn_auc)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from yellowbrick.regressor import ResidualsPlot
Y = []
X = []
count1 = 0
count2 = 0
for i in range(len(test_probs_weighted)):
count1 += test_probs_weighted[i]
count2 += y_test[i]
X.append(count1)
Y.append(count2)
count1 = 0
count2 = 0
print(Y[0:10])
X = np.array(X)
X = X.reshape(-1,1)
lin = LinearRegression().fit(X,Y)
print(lin.score(X,Y))
lpreds = lin.predict(X)
print('Coeffs')
print(lin.coef_)
print(mean_squared_error(Y,lpreds))
print(r2_score(Y,lpreds))
visualizer = ResidualsPlot(lin)
visualizer.fit(X,Y)
visualizer.score(X,lpreds)
visualizer.show()
| 1,709 | 0 | 75 |
e78e172f5ecee98c80457e5fc00170c7e563ca2f | 4,202 | py | Python | bce/parser/interface/molecule_parser.py | bce-toolkit/BCE | 9e4d168cab18132bbe2867d13c629510b86d350a | [
"BSD-3-Clause"
] | 12 | 2017-12-18T10:37:52.000Z | 2021-11-28T07:08:46.000Z | bce/parser/interface/molecule_parser.py | bce-toolkit/BCE | 9e4d168cab18132bbe2867d13c629510b86d350a | [
"BSD-3-Clause"
] | null | null | null | bce/parser/interface/molecule_parser.py | bce-toolkit/BCE | 9e4d168cab18132bbe2867d13c629510b86d350a | [
"BSD-3-Clause"
] | 4 | 2018-04-29T10:32:26.000Z | 2021-07-13T08:17:36.000Z | #!/usr/bin/env python
#
# Copyright 2014 - 2018 The BCE Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the license.txt file.
#
import bce.parser.interface.printer as _interface_printer
class SubstituteError(Exception):
    """Molecule substitution error."""
    # The docstring alone forms a valid class body; the redundant `pass`
    # was removed.
class MoleculeParserInterface:
    """Interface for molecule parsers.

    Concrete parsers must override every method below; the base
    implementations raise :class:`NotImplementedError` (a subclass of
    :class:`RuntimeError`, so callers catching RuntimeError keep working).
    """

    def __init__(self):
        """Initialize."""
        pass

    # noinspection PyMethodMayBeStatic
    def parse_expression(
            self,
            expression,
            options,
            mexp_protected_header_enabled=False,
            mexp_protected_header_prefix="X"
    ):
        """Parse an expression.

        :type expression: str
        :type options: bce.option.Option
        :type mexp_protected_header_enabled: bool
        :type mexp_protected_header_prefix: str
        :param expression: The expression.
        :param options: The options.
        :param mexp_protected_header_enabled: Whether the MEXP protected headers are enabled.
        :param mexp_protected_header_prefix: The prefix of the MEXP protected headers.
        :rtype : bce.parser.ast.molecule.ASTNodeMolecule | bce.parser.ast.molecule.ASTNodeHydrateGroup
        :return: The root node of the AST.
        """
        raise NotImplementedError("parse_expression() method should be overridden.")

    # noinspection PyMethodMayBeStatic
    def parse_ast(
            self,
            expression,
            ast_root,
            option,
            mexp_protected_header_enabled=False,
            mexp_protected_header_prefix="X"
    ):
        """Parse an AST.

        :type expression: str
        :type ast_root: bce.parser.ast.molecule.ASTNodeMolecule | bce.parser.ast.molecule.ASTNodeHydrateGroup
        :type option: bce.option.Option
        :type mexp_protected_header_enabled: bool
        :type mexp_protected_header_prefix: str
        :param expression: The expression.
        :param ast_root: The root node of the AST.
        :param option: The options.
        :param mexp_protected_header_enabled: Whether the MEXP protected headers are enabled.
        :param mexp_protected_header_prefix: The prefix of the MEXP protected headers.
        :rtype : dict
        :return: The parsed element dictionary.
        """
        raise NotImplementedError("parse_ast() method should be overridden.")

    # noinspection PyMethodMayBeStatic
    def substitute(self, ast_root, substitute_map=None):
        """Substitute an AST.

        :type ast_root: bce.parser.ast.molecule.ASTNodeMolecule | bce.parser.ast.molecule.ASTNodeHydrateGroup
        :type substitute_map: dict | None
        :param ast_root: The AST root node.
        :param substitute_map: The substitution map.
        :rtype : bce.parser.ast.molecule.ASTNodeMolecule | bce.parser.ast.molecule.ASTNodeHydrateGroup | None
        :return: The substituted AST root node.
        """
        raise NotImplementedError("substitute() method should be overridden.")

    # noinspection PyMethodMayBeStatic
    def print_out(
            self,
            ast_root,
            mexp_parser,
            mexp_protected_header_enabled=False,
            mexp_protected_header_prefix="X",
            printer_type=_interface_printer.PRINTER_TYPE_TEXT
    ):
        """Print a molecule.

        :type ast_root: bce.parser.ast.molecule.ASTNodeMolecule | bce.parser.ast.molecule.ASTNodeHydrateGroup
        :type mexp_parser: bce.parser.interface.mexp_parser.MathExpressionParserInterface
        :type mexp_protected_header_enabled: bool
        :type mexp_protected_header_prefix: str
        :type printer_type: int
        :param ast_root: The AST root.
        :param printer_type: The printer type.
        :param mexp_parser: The math expression parser.
        :param mexp_protected_header_enabled: Whether the MEXP protected headers are enabled.
        :param mexp_protected_header_prefix: The prefix of the MEXP protected headers.
        :rtype : str | bce.dom.mathml.all.Base
        :return: The printed string or MathML object.
        """
        raise NotImplementedError("print_out() method should be overridden.")
| 36.224138 | 109 | 0.673489 | #!/usr/bin/env python
#
# Copyright 2014 - 2018 The BCE Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the license.txt file.
#
import bce.parser.interface.printer as _interface_printer
class SubstituteError(Exception):
    """Molecule substitution error."""
    # The docstring alone forms a valid class body; the redundant `pass`
    # was removed.
class MoleculeParserInterface:
    """Interface for molecule parsers.

    Concrete parsers must override every method below; the base
    implementations raise :class:`NotImplementedError` (a subclass of
    :class:`RuntimeError`, so callers catching RuntimeError keep working).
    """

    def __init__(self):
        """Initialize."""
        pass

    # noinspection PyMethodMayBeStatic
    def parse_expression(
            self,
            expression,
            options,
            mexp_protected_header_enabled=False,
            mexp_protected_header_prefix="X"
    ):
        """Parse an expression.

        :type expression: str
        :type options: bce.option.Option
        :type mexp_protected_header_enabled: bool
        :type mexp_protected_header_prefix: str
        :param expression: The expression.
        :param options: The options.
        :param mexp_protected_header_enabled: Whether the MEXP protected headers are enabled.
        :param mexp_protected_header_prefix: The prefix of the MEXP protected headers.
        :rtype : bce.parser.ast.molecule.ASTNodeMolecule | bce.parser.ast.molecule.ASTNodeHydrateGroup
        :return: The root node of the AST.
        """
        raise NotImplementedError("parse_expression() method should be overridden.")

    # noinspection PyMethodMayBeStatic
    def parse_ast(
            self,
            expression,
            ast_root,
            option,
            mexp_protected_header_enabled=False,
            mexp_protected_header_prefix="X"
    ):
        """Parse an AST.

        :type expression: str
        :type ast_root: bce.parser.ast.molecule.ASTNodeMolecule | bce.parser.ast.molecule.ASTNodeHydrateGroup
        :type option: bce.option.Option
        :type mexp_protected_header_enabled: bool
        :type mexp_protected_header_prefix: str
        :param expression: The expression.
        :param ast_root: The root node of the AST.
        :param option: The options.
        :param mexp_protected_header_enabled: Whether the MEXP protected headers are enabled.
        :param mexp_protected_header_prefix: The prefix of the MEXP protected headers.
        :rtype : dict
        :return: The parsed element dictionary.
        """
        raise NotImplementedError("parse_ast() method should be overridden.")

    # noinspection PyMethodMayBeStatic
    def substitute(self, ast_root, substitute_map=None):
        """Substitute an AST.

        :type ast_root: bce.parser.ast.molecule.ASTNodeMolecule | bce.parser.ast.molecule.ASTNodeHydrateGroup
        :type substitute_map: dict | None
        :param ast_root: The AST root node.
        :param substitute_map: The substitution map.
        :rtype : bce.parser.ast.molecule.ASTNodeMolecule | bce.parser.ast.molecule.ASTNodeHydrateGroup | None
        :return: The substituted AST root node.
        """
        raise NotImplementedError("substitute() method should be overridden.")

    # noinspection PyMethodMayBeStatic
    def print_out(
            self,
            ast_root,
            mexp_parser,
            mexp_protected_header_enabled=False,
            mexp_protected_header_prefix="X",
            printer_type=_interface_printer.PRINTER_TYPE_TEXT
    ):
        """Print a molecule.

        :type ast_root: bce.parser.ast.molecule.ASTNodeMolecule | bce.parser.ast.molecule.ASTNodeHydrateGroup
        :type mexp_parser: bce.parser.interface.mexp_parser.MathExpressionParserInterface
        :type mexp_protected_header_enabled: bool
        :type mexp_protected_header_prefix: str
        :type printer_type: int
        :param ast_root: The AST root.
        :param printer_type: The printer type.
        :param mexp_parser: The math expression parser.
        :param mexp_protected_header_enabled: Whether the MEXP protected headers are enabled.
        :param mexp_protected_header_prefix: The prefix of the MEXP protected headers.
        :rtype : str | bce.dom.mathml.all.Base
        :return: The printed string or MathML object.
        """
        raise NotImplementedError("print_out() method should be overridden.")
| 0 | 0 | 0 |
18c1f8b7670e262a0bc985665281839f28f37ed9 | 12,407 | py | Python | moto/config/exceptions.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 5,460 | 2015-01-01T01:11:17.000Z | 2022-03-31T23:45:38.000Z | moto/config/exceptions.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 4,475 | 2015-01-05T19:37:30.000Z | 2022-03-31T13:55:12.000Z | moto/config/exceptions.py | gtourkas/moto | 307104417b579d23d02f670ff55217a2d4a16bee | [
"Apache-2.0"
] | 1,831 | 2015-01-14T00:00:44.000Z | 2022-03-31T20:30:04.000Z | from moto.core.exceptions import JsonRESTError
class NoSuchBucketException(JsonRESTError):
    """We are *only* validating that there is value that is not '' here."""
    # presumably the HTTP status code used for the error response —
    # NOTE(review): confirm against JsonRESTError.
    code = 400
class InvalidSNSTopicARNException(JsonRESTError):
    """We are *only* validating that there is value that is not '' here."""
    # presumably the HTTP status code used for the error response —
    # NOTE(review): confirm against JsonRESTError.
    code = 400
| 30.334963 | 123 | 0.650923 | from moto.core.exceptions import JsonRESTError
class NameTooLongException(JsonRESTError):
    """Raised when a value exceeds the maximum length for a field."""

    code = 400

    def __init__(self, name, location, max_limit=256):
        super().__init__(
            "ValidationException",
            f"1 validation error detected: Value '{name}' at '{location}' "
            f"failed to satisfy constraint: Member must have length less "
            f"than or equal to {max_limit}",
        )
class InvalidConfigurationRecorderNameException(JsonRESTError):
    """Raised for a blank or otherwise invalid configuration recorder name."""

    code = 400

    def __init__(self, name):
        super().__init__(
            "InvalidConfigurationRecorderNameException",
            f"The configuration recorder name '{name}' is not valid, blank string.",
        )
class MaxNumberOfConfigurationRecordersExceededException(JsonRESTError):
    """Raised when the single-recorder limit would be exceeded."""

    code = 400

    def __init__(self, name):
        super().__init__(
            "MaxNumberOfConfigurationRecordersExceededException",
            f"Failed to put configuration recorder '{name}' because the maximum number of "
            f"configuration recorders: 1 is reached.",
        )
class InvalidRecordingGroupException(JsonRESTError):
    """Raised when the supplied recording group is invalid."""

    code = 400

    def __init__(self):
        super().__init__(
            "InvalidRecordingGroupException",
            "The recording group provided is not valid",
        )
class InvalidResourceTypeException(JsonRESTError):
    """Raised when one or more resource types are outside the allowed enum set.

    :param bad_list: The invalid resource types (one validation error is
        counted per entry).
    :param good_list: The allowed resource-type values.
    """

    code = 400

    def __init__(self, bad_list, good_list):
        message = (
            f"{len(bad_list)} validation error detected: Value '{bad_list}' at "
            "'configurationRecorder.recordingGroup.resourceTypes' failed to satisfy constraint: "
            f"Member must satisfy constraint: [Member must satisfy enum value set: {good_list}]"
        )
        # The obsolete Python 2 `message = str(message)` no-op was removed.
        super().__init__("ValidationException", message)
class NoSuchConfigurationAggregatorException(JsonRESTError):
    """Raised when one or more configuration aggregators cannot be found."""

    code = 400

    def __init__(self, number=1):
        # Singular vs. plural wording depending on how many aggregators
        # were requested.
        if number == 1:
            message = (
                "The configuration aggregator does not exist. "
                "Check the configuration aggregator name and try again."
            )
        else:
            message = (
                "At least one of the configuration aggregators does not exist. "
                "Check the configuration aggregator names and try again."
            )
        super().__init__("NoSuchConfigurationAggregatorException", message)
class NoSuchConfigurationRecorderException(JsonRESTError):
    """Raised when the named configuration recorder does not exist."""

    code = 400

    def __init__(self, name):
        super().__init__(
            "NoSuchConfigurationRecorderException",
            f"Cannot find configuration recorder with the specified name '{name}'.",
        )
class InvalidDeliveryChannelNameException(JsonRESTError):
    """Raised for a blank or otherwise invalid delivery channel name."""

    code = 400

    def __init__(self, name):
        super().__init__(
            "InvalidDeliveryChannelNameException",
            f"The delivery channel name '{name}' is not valid, blank string.",
        )
class NoSuchBucketException(JsonRESTError):
    """Raised when the delivery channel's S3 bucket name is empty.

    We are *only* validating that there is a value that is not '' here.
    """

    code = 400

    def __init__(self):
        super().__init__(
            "NoSuchBucketException",
            "Cannot find a S3 bucket with an empty bucket name.",
        )
class InvalidNextTokenException(JsonRESTError):
    """Raised when a pagination nextToken is invalid."""

    code = 400

    def __init__(self):
        super().__init__(
            "InvalidNextTokenException",
            "The nextToken provided is invalid",
        )
class InvalidS3KeyPrefixException(JsonRESTError):
    """Raised when the S3 key prefix is empty."""

    code = 400

    def __init__(self):
        super().__init__(
            "InvalidS3KeyPrefixException",
            "The s3 key prefix '' is not valid, empty s3 key prefix.",
        )
class InvalidSNSTopicARNException(JsonRESTError):
    """We are *only* validating that there is value that is not '' here."""

    code = 400

    def __init__(self):
        message = "The sns topic arn '' is not valid."
        super().__init__("InvalidSNSTopicARNException", message)
class InvalidDeliveryFrequency(JsonRESTError):
    """400 error: deliveryFrequency is not one of the accepted enum values."""

    code = 400

    def __init__(self, value, good_list):
        # f-string for consistency with the newer exceptions in this module;
        # the emitted text is byte-identical to the old .format() version.
        message = (
            f"1 validation error detected: Value '{value}' at "
            "'deliveryChannel.configSnapshotDeliveryProperties.deliveryFrequency' failed to satisfy "
            f"constraint: Member must satisfy enum value set: {good_list}"
        )
        super().__init__("InvalidDeliveryFrequency", message)
class MaxNumberOfDeliveryChannelsExceededException(JsonRESTError):
    """400 error: only one delivery channel may exist at a time."""

    code = 400

    def __init__(self, name):
        message = (
            f"Failed to put delivery channel '{name}' because the maximum number of "
            "delivery channels: 1 is reached."
        )
        super().__init__("MaxNumberOfDeliveryChannelsExceededException", message)
class NoSuchDeliveryChannelException(JsonRESTError):
    """400 error: no delivery channel with the given name exists."""

    code = 400

    def __init__(self, name):
        message = f"Cannot find delivery channel with specified name '{name}'."
        super().__init__("NoSuchDeliveryChannelException", message)
class NoAvailableConfigurationRecorderException(JsonRESTError):
    """400 error: a delivery channel needs an existing configuration recorder."""

    code = 400

    def __init__(self):
        message = "Configuration recorder is not available to put delivery channel."
        super().__init__("NoAvailableConfigurationRecorderException", message)
class NoAvailableDeliveryChannelException(JsonRESTError):
    """400 error: starting a recorder requires an existing delivery channel."""

    code = 400

    def __init__(self):
        message = "Delivery channel is not available to start configuration recorder."
        super().__init__("NoAvailableDeliveryChannelException", message)
class LastDeliveryChannelDeleteFailedException(JsonRESTError):
    """400 error: the last delivery channel cannot be deleted while a recorder runs."""

    code = 400

    def __init__(self, name):
        # NOTE(review): the duplicated "because there, because there" phrase is kept
        # verbatim -- it presumably mirrors the upstream service's wording; confirm
        # against the real AWS Config response before "fixing" it.
        message = (
            f"Failed to delete last specified delivery channel with name '{name}', because there, "
            "because there is a running configuration recorder."
        )
        super().__init__("LastDeliveryChannelDeleteFailedException", message)
class TooManyAccountSources(JsonRESTError):
    """400 error: more than one account aggregation source was supplied."""

    code = 400

    def __init__(self, length):
        # Echo back one placeholder location per supplied source.
        locations = ["com.amazonaws.xyz"] * length
        joined = ", ".join(locations)
        message = (
            f"Value '[{joined}]' at 'accountAggregationSources' failed to satisfy constraint: "
            "Member must have length less than or equal to 1"
        )
        super().__init__("ValidationException", message)
class DuplicateTags(JsonRESTError):
    """400 error: the same tag key (case-insensitively) appears more than once."""

    code = 400

    def __init__(self):
        super().__init__(
            "InvalidInput",
            "Duplicate tag keys found. Please note that Tag keys are case insensitive.",
        )
class TagKeyTooBig(JsonRESTError):
    """400 error: a tag key exceeds the 128-character limit."""

    code = 400

    def __init__(self, tag, param="tags.X.member.key"):
        super().__init__(
            "ValidationException",
            f"1 validation error detected: Value '{tag}' at '{param}' failed to satisfy "
            "constraint: Member must have length less than or equal to 128",
        )
class TagValueTooBig(JsonRESTError):
    """400 error: a tag value exceeds the 256-character limit."""

    code = 400

    def __init__(self, tag, param="tags.X.member.value"):
        super().__init__(
            "ValidationException",
            f"1 validation error detected: Value '{tag}' at '{param}' failed to satisfy "
            "constraint: Member must have length less than or equal to 256",
        )
class InvalidParameterValueException(JsonRESTError):
    """400 error: generic invalid-parameter error with a caller-supplied message."""

    code = 400

    def __init__(self, message):
        super().__init__("InvalidParameterValueException", message)
class InvalidTagCharacters(JsonRESTError):
    """400 error: a tag key/value contains characters outside the allowed pattern."""

    code = 400

    def __init__(self, tag, param="tags.X.member.key"):
        message = f"1 validation error detected: Value '{tag}' at '{param}' failed to satisfy "
        # Pattern text kept byte-identical; it contains literal braces, so it must
        # NOT become part of an f-string.
        message += "constraint: Member must satisfy regular expression pattern: [\\\\p{L}\\\\p{Z}\\\\p{N}_.:/=+\\\\-@]+"
        super().__init__("ValidationException", message)
class TooManyTags(JsonRESTError):
    """400 error: more than 50 tags were supplied."""

    code = 400

    def __init__(self, tags, param="tags"):
        super().__init__(
            "ValidationException",
            f"1 validation error detected: Value '{tags}' at '{param}' failed to satisfy "
            "constraint: Member must have length less than or equal to 50.",
        )
class InvalidResourceParameters(JsonRESTError):
    """400 error: resource ID and resource name are mutually exclusive."""

    code = 400

    def __init__(self):
        super().__init__(
            "ValidationException",
            "Both Resource ID and Resource Name " "cannot be specified in the request",
        )
class InvalidLimitException(JsonRESTError):
    """400 error: the 'limit' parameter exceeds the maximum of 100."""

    code = 400

    def __init__(self, value):
        super().__init__(
            "InvalidLimitException",
            f"Value '{value}' at 'limit' failed to satisfy constraint: Member"
            " must have value less than or equal to 100",
        )
class TooManyResourceIds(JsonRESTError):
    """400 error: more than 20 resource IDs were supplied."""

    code = 400

    def __init__(self):
        super().__init__(
            "ValidationException",
            "The specified list had more than 20 resource ID's. "
            "It must have '20' or less items",
        )
class ResourceNotDiscoveredException(JsonRESTError):
    """400 error: the resource is unknown or has not been discovered yet."""

    code = 400

    def __init__(self, resource_type, resource):
        super().__init__(
            "ResourceNotDiscoveredException",
            f"Resource {resource} of resourceType:{resource_type} is unknown or has not been "
            "discovered",
        )
class ResourceNotFoundException(JsonRESTError):
    """400 error: no resource exists with the given ARN."""

    code = 400

    def __init__(self, resource_arn):
        super().__init__(
            "ResourceNotFoundException",
            f"ResourceArn '{resource_arn}' does not exist",
        )
class TooManyResourceKeys(JsonRESTError):
    """400 error: more than 100 resource keys were supplied."""

    code = 400

    def __init__(self, bad_list):
        message = (
            f"1 validation error detected: Value '{bad_list}' at "
            "'resourceKeys' failed to satisfy constraint: "
            "Member must have length less than or equal to 100"
        )
        super().__init__("ValidationException", message)
class InvalidResultTokenException(JsonRESTError):
    """400 error: the evaluation resultToken is invalid."""

    code = 400

    def __init__(self):
        message = "The resultToken provided is invalid"
        super().__init__("InvalidResultTokenException", message)
class ValidationException(JsonRESTError):
    """400 error: generic validation failure with a caller-supplied message."""

    code = 400

    def __init__(self, message):
        super().__init__("ValidationException", message)
class NoSuchOrganizationConformancePackException(JsonRESTError):
    """400 error: the named organization conformance pack does not exist."""

    code = 400

    def __init__(self, message):
        super().__init__("NoSuchOrganizationConformancePackException", message)
class MaxNumberOfConfigRulesExceededException(JsonRESTError):
    """400 error: the account's config-rule quota has been reached."""

    code = 400

    def __init__(self, name, max_limit):
        message = (
            f"Failed to put config rule '{name}' because the maximum number "
            f"of config rules: {max_limit} is reached."
        )
        super().__init__("MaxNumberOfConfigRulesExceededException", message)
class ResourceInUseException(JsonRESTError):
    """400 error: the resource is currently in use; message supplied by caller."""

    code = 400

    def __init__(self, message):
        super().__init__("ResourceInUseException", message)
class InsufficientPermissionsException(JsonRESTError):
    """400 error: the caller lacks the permissions required for the operation."""

    code = 400

    def __init__(self, message):
        super().__init__("InsufficientPermissionsException", message)
class NoSuchConfigRuleException(JsonRESTError):
    """400 error: the named config rule does not exist."""

    code = 400

    def __init__(self, rule_name):
        message = (
            f"The ConfigRule '{rule_name}' provided in the request is "
            f"invalid. Please check the configRule name"
        )
        super().__init__("NoSuchConfigRuleException", message)
class MissingRequiredConfigRuleParameterException(JsonRESTError):
    """400 error: a required config-rule parameter is missing.

    Note the error code emitted is "ParamValidationError", not the class name.
    """

    code = 400

    def __init__(self, message):
        super().__init__("ParamValidationError", message)
| 8,493 | 2,643 | 905 |
343d6f9567caad37d311aa09d840dc694a588fef | 1,836 | py | Python | generators.py | sichkar-valentyn/Generators_in_Python | c8cb66960fc8f6e253cb4076dfaa440b7eb06503 | [
"MIT"
] | null | null | null | generators.py | sichkar-valentyn/Generators_in_Python | c8cb66960fc8f6e253cb4076dfaa440b7eb06503 | [
"MIT"
] | null | null | null | generators.py | sichkar-valentyn/Generators_in_Python | c8cb66960fc8f6e253cb4076dfaa440b7eb06503 | [
"MIT"
] | null | null | null | # File: generators.py
# Description: Examples on how to create and use generators in Python
# Environment: PyCharm and Anaconda environment
#
# MIT License
# Copyright (c) 2018 Valentyn N Sichkar
# github.com/sichkar-valentyn
#
# Reference to:
# [1] Valentyn N Sichkar. Examples on how to create and use generators in Python // GitHub platform [Electronic resource]. URL: https://github.com/sichkar-valentyn/Generators_in_Python (date of access: XX.XX.XXXX)
from random import random
# Creating the class for iterations
# We add the method __iter__ if we want to iterate this class
# Creating function as generator
# Instead of return we use yield
# In this way we remember the order till yield each time we call the function
# Creating instance of class
gen = random_generator(3)
print(type(gen))
for i in gen:
print(i)
# More clear example about remembering the yield the order
g = simple_gen()
x = next(g)
print(x)
y = next(g)
print(y)
z = next(g)
# Implementing the task - creating the methods for prime numbers
| 21.103448 | 213 | 0.638889 | # File: generators.py
# Description: Examples on how to create and use generators in Python
# Environment: PyCharm and Anaconda environment
#
# MIT License
# Copyright (c) 2018 Valentyn N Sichkar
# github.com/sichkar-valentyn
#
# Reference to:
# [1] Valentyn N Sichkar. Examples on how to create and use generators in Python // GitHub platform [Electronic resource]. URL: https://github.com/sichkar-valentyn/Generators_in_Python (date of access: XX.XX.XXXX)
from random import random
# Creating the class for iterations
class RandomIterator:
    """Iterator that produces `k` pseudo-random floats, then stops.

    Implements the iterator protocol by hand: the instance is its own
    iterator and raises StopIteration once `k` values have been produced.
    """

    def __init__(self, k):
        self.k = k  # total number of values to produce
        self.i = 0  # number of values produced so far

    def __iter__(self):
        # The object is its own iterator.
        return self

    def __next__(self):
        if self.i >= self.k:
            raise StopIteration
        self.i += 1
        return random()
# A generator function: `yield` suspends execution and the function body
# resumes from the same point on the next request for a value.
def random_generator(k):
    """Yield `k` pseudo-random floats, one per iteration."""
    produced = 0
    while produced < k:
        yield random()
        produced += 1
# Creating instance of class
# NOTE: calling a generator function does not run its body; it returns a
# generator object whose values are produced lazily by the loop below.
gen = random_generator(3)
print(type(gen))
for i in gen:
    print(i)
# A clearer demonstration that a generator suspends at each yield and resumes from that point on the next next() call
def simple_gen():
    """Yield 1 then 2, printing a checkpoint before each suspension.

    Demonstrates that a generator pauses at every ``yield`` and resumes
    from that exact spot on the following ``next()`` call.
    """
    print('Checkpoint 1')
    yield 1
    print('Checkpoint 2')
    yield 2
    print('Checkpoint 3')
# Step the generator manually to watch the checkpoints print.
g = simple_gen()
x = next(g)
print(x)
y = next(g)
print(y)
# BUG FIX: the generator only yields twice; a third next() runs the rest of
# the body ('Checkpoint 3') and then raises StopIteration, which previously
# crashed this demo script. Catch it so the script runs to completion.
try:
    z = next(g)
except StopIteration:
    print('Generator exhausted')
def is_prime(n):
    """Return True iff `n` is a prime number.

    BUG FIX: the original returned True for n < 2 (e.g. 1, 0, and negative
    odd numbers) because its trial-division loop never executed for them.
    Trial division now runs over odd candidates up to floor(sqrt(n)) --
    sufficient, since any composite n has a divisor no larger than its root.
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    for i in range(3, int(n ** 0.5) + 1, 2):
        if n % i == 0:
            return False
    return True
def primes():
    """Yield every prime number in ascending order (infinite generator)."""
    candidate = 2
    while True:
        if is_prime(candidate):
            yield candidate
        candidate += 1
| 604 | 0 | 191 |
705d98e1ed46727a93b0c67d7b1e53f1efab3221 | 25,439 | py | Python | src/UniData.py | bazilinskyy/agent-based-uni | a8a5086a9d012e6cd972cf58c7865463b5e6f9b3 | [
"MIT"
] | null | null | null | src/UniData.py | bazilinskyy/agent-based-uni | a8a5086a9d012e6cd972cf58c7865463b5e6f9b3 | [
"MIT"
] | null | null | null | src/UniData.py | bazilinskyy/agent-based-uni | a8a5086a9d012e6cd972cf58c7865463b5e6f9b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Loading data from Excel files stored at /src/data.
"""
# Copyright (c) 2014, Pavlo Bazilinskyy <pavlo.bazilinskyy@gmail.com>
# Department of Computer Science, National University of Ireland, Maynooth
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = "Pavlo Bazilinskyy"
__copyright__ = "Copyright 2008, National University of Ireland, Maynooth"
__credits__ = "Ronan Reilly"
__version__ = "1.0"
__maintainer__ = "Pavlo Bazilinskyy"
__email__ = "pavlo.bazilinskyy@gmail.com"
__status__ = "Production"
import traceback
from xlrd import open_workbook, cellname
import csv
import model
import conf
import random | 41.096931 | 147 | 0.671056 | #!/usr/bin/env python
"""
Loading data from Excel files stored at /src/data.
"""
# Copyright (c) 2014, Pavlo Bazilinskyy <pavlo.bazilinskyy@gmail.com>
# Department of Computer Science, National University of Ireland, Maynooth
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__author__ = "Pavlo Bazilinskyy"
__copyright__ = "Copyright 2008, National University of Ireland, Maynooth"
__credits__ = "Ronan Reilly"
__version__ = "1.0"
__maintainer__ = "Pavlo Bazilinskyy"
__email__ = "pavlo.bazilinskyy@gmail.com"
__status__ = "Production"
import traceback
from xlrd import open_workbook, cellname
import csv
import model
import conf
import random
class UniData():
intakeSummer = {}
intakeAutumn = {}
courses = []
courseTypes = []
modules = {}
moduleEnrollments = []
courseEnrollments = []
departments = []
faculties = []
# @property
# def intakeAutumn(self):
# return self._intakeAutumn
# @intakeAutumn.setter
# def intakeAutumn(self, value):
# self._intake = value
# @property
# def intakeSummer(self):
# print "BOO"
# return self._intakeSummer
# @intakeSummer.setter
# def intakeSummer(self, value):
# self._intakeSummer = value
# @property
# def courses(self):
# return self._courses
# @courses.setter
# def courses(self, value):
# self._courses = value
# @property
# def modules(self):
# return self._modules
# @modules.setter
# def modules(self, value):
# self._modules = modules
def importData(self):
# Populate
if (len(self.courses) < 1 and len(self.modules) < 1 and len(self.intakeSummer) < 1 and len(self.intakeAutumn) < 1): # import from files only once
try:
## Opening the excel or csv file
if (not conf.USE_SMALL_DATA): # Use full data
fileCourses = self.openDataFile(conf.FILE_WITH_COURSES)
fileModules = self.openDataFile(conf.FILE_WITH_MODULES)
fileIntakeSummer = self.openDataFile(conf.FILE_WITH_INTAKE_SUMMER)
fileIntakeAutumn = self.openDataFile(conf.FILE_WITH_INTAKE_AUTUMN)
else: # Use a smalls sample of data
fileCourses = self.openDataFile(conf.SMALL_FILE_WITH_COURSES)
fileModules = self.openDataFile(conf.SMALL_FILE_WITH_MODULES)
fileIntakeSummer = self.openDataFile(conf.SMALL_FILE_WITH_INTAKE_SUMMER)
fileIntakeAutumn = self.openDataFile(conf.SMALL_FILE_WITH_INTAKE_AUTUMN)
sheetModules = fileModules.sheet_by_index(0)
#sheetIntakeSummer = fileIntakeSummer.sheet_by_index(0)
sheetIntakeAutumn = fileIntakeAutumn.sheet_by_index(0)
## Calculating numbers of rows in received sheets
#numRowsIntakeSummer = sheetIntakeSummer.nrows - 1
numRowsIntakeAutumn = sheetIntakeAutumn.nrows - 1
## Courses and course types
linesIgnoreCourses = [1, 25, 36, 40, 44, 45, 56, 60, 63, 81, 82] # Lines to ignore in the file
sheetCourses = fileCourses.sheet_by_index(0) # Only 1 sheet present
numRowsCourses = sheetCourses.nrows - 1
currentCourseType = ""
for i in range(numRowsCourses):
row = sheetCourses.row_slice(i+1)
if (i + 1 not in linesIgnoreCourses):
jointHons = -1
singleHons = -1
accepts = -1
if (row[3].value != 0):
accepts = row[3].value
if (row[4].value != 0):
singleHons = row[4].value
if (row[5].value != 0):
jointHons = row[5].value
# Check if it is a type of course
if (row[0].value in model.CourseType.possibleCourseTypes):
self.courseTypes.append(model.CourseType(row[0].value, row[3].value, singleHons, jointHons))
currentCourseType = row[0].value
# Otherwise, it must be a course
else:
courseCredit = -1 #TODO: need this data
self.courses.append(model.Course(row[0].value, courseCredit,currentCourseType, accepts, singleHons, jointHons))
## Modules, departments, faculties
linesIgnoreModules = [0, 1, 2, 1209, 1210, 1211] # Lines to ignore in the file
sheetModules = fileModules.sheet_by_index(0) # Reading the sheet with UG
numRowsModules = sheetModules.nrows - 1
## Courses and course types
currentFaculty = model.Faculty("NA")
currentDepartment = model.Department("NA", currentFaculty)
for i in range(numRowsModules):
row = sheetModules.row_slice(i+1)
if (i + 1 not in linesIgnoreModules):
# Check if we found a new faculty
if (row[0].value != currentFaculty.name):
newFaculty = model.Faculty(row[0].value)
self.faculties.append(newFaculty)
currentFaculty = newFaculty
# Check if we found a new department
if (row[1].value != currentDepartment.name):
newDepartment = model.Department(row[1].value, currentFaculty)
self.departments.append(newDepartment)
currentDepartment = newDepartment
# Add new module
semesterGiven = -1 #TODO mossing data
self.modules[row[2].value] = model.Module(
row[2].value, # Module ID
row[3].value, # Module name
row[5].value, # Module credit
semesterGiven, # Semester given
currentDepartment, # Department
row[4].value # Enrolled students
)
## Summer and autumn intakes
# Summer intake
tempSet = set()
linesIgnoreIntakeSummer = [0, 1, 2, 3, 4, 5, 6] # Lines to ignore in the file
sheetIntakeSummer = fileIntakeSummer.sheet_by_index(0) # Reading the sheet with UG
numRowsIntakeSummer = sheetIntakeSummer.nrows - 1
for i in range(numRowsIntakeSummer):
row = sheetIntakeSummer.row_slice(i+1)
if (i + 1 not in linesIgnoreIntakeSummer and row[2].value == 1): # Limit to result from only the first year
if (str(row[3]) not in tempSet):
self.intakeSummer[str(int(row[3].value))] = model.Student(str(int(row[3].value)))
# Leaving certificate
if (int(row[11].value) <= 625):
self.intakeSummer[str(int(row[3].value))].leavingCertificate = int(row[11].value) # Leaving certificate from Wrs column
else:
if (int(row[12].value) <= 625): # Check if the value is still correct
self.intakeSummer[str(int(row[3].value))].leavingCertificate = int(row[12].value) # Leaving certificate from Random column
else: # The value is not real leaving certificate -> assign to a random value
self.intakeSummer[str(int(row[3].value))].leavingCertificate = random.randint(250, 625) # Leaving certificate from Random column
# Faculty
self.decideFaculty(str(row[1].value), self.intakeSummer[str(int(row[3].value))]) # Call a separate function, as there are a lot of programs
tempSet.add(str(row[3]))
## Enrol student into a module
# Default to the first semester of a given year
# Try to find a previously loaded module
try:
moduleEnrolled = self.modules[row[4].value]
except KeyError, e: # If not found, add a new entry
moduleEnrolled = model.Module(
row[2].value, # Module ID
"", # Module name
0, # Module credit
row[2].value * 2, # Semester given
None, # Department
None # Enrolled Students
) #TODO add more accurate information
# And add to the list of modules
self.modules[row[2].value] = moduleEnrolled
# And module enrollment
moduleEnroll = model.ModuleEnrollment(self.intakeSummer[str(int(row[3].value))], moduleEnrolled, 1)
# PASS / FAIL / ABSENT / PASS BY COMPENSATION etc.
if (row[6].value == "PASS"):
moduleEnroll.status = "PASS"
elif (row[6].value == "FAIL"):
moduleEnroll.status = "FAIL"
elif (row[6].value == "ABSENT"):
moduleEnroll.status = "ABSENT"
elif (row[6].value == "PASS BY COMPENSATION"):
moduleEnroll.status = "PASS BY COMPENSATION"
elif (row[6].value == "DID NOT COMPLETE"):
moduleEnroll.status = "DID NOT COMPLETE"
elif (row[6].value == "EXEMPTION"):
moduleEnroll.status = "EXEMPTION"
elif (row[6].value == "SATISFACTORY"):
moduleEnroll.status = "SATISFACTORY"
# Marks received
if (row[5].value != ""):
moduleEnroll.marksReceived = row[5].value
else:
moduleEnroll.marksReceived = 0
self.moduleEnrollments.append(moduleEnroll)
# And add to the list of modules of a particular student
self.intakeSummer[str(int(row[3].value))].modules.append(moduleEnrolled)
# Also, add module enrollment to the student
self.intakeSummer[str(int(row[3].value))].moduleEnrollments[row[4].value] = moduleEnroll
# Autumn intake
tempSet.clear()
linesIgnoreIntakeAutumn = [0, 1, 2, 3, 4, 5, 6] # Lines to ignore in the file
sheetIntakeAutumn = fileIntakeAutumn.sheet_by_index(0) # Reading the sheet with UG
numRowsIntakeAutumn = sheetIntakeAutumn.nrows - 1
for i in range(numRowsIntakeAutumn):
row = sheetIntakeAutumn.row_slice(i+1)
if (i + 1 not in linesIgnoreIntakeAutumn and row[2].value == 1): # Limit to result from only the first year
if (str(row[3]) not in tempSet):
self.intakeAutumn[str(int(row[3].value))] = model.Student(str(int(row[3].value)))
# Leaving certificate
if (int(row[11].value) <= 625):
self.intakeAutumn[str(int(row[3].value))].leavingCertificate = int(row[11].value) # Leaving certificate from Wrs column
else:
if (int(row[12].value) <= 625): # Check if the value is still correct
self.intakeAutumn[str(int(row[3].value))].leavingCertificate = int(row[12].value) # Leaving certificate from Random column
else: # The value is not real leaving certificate -> assign to a random value
self.intakeAutumn[str(int(row[3].value))].leavingCertificate = random.randint(250, 625) # Leaving certificate from Random column
# Faculty
self.decideFaculty(str(row[1].value), self.intakeAutumn[str(int(row[3].value))]) # Call a separate function, as there are a lot of programs
tempSet.add(str(row[3]))
## Enrol student into a module
# Default to the second semester of a given year
# Try to find a previously loaded module
try:
moduleEnrolled = self.modules[row[4].value]
except KeyError, e: # If not found, add a new entry
moduleEnrolled = model.Module(
row[2].value, # Module ID
"", # Module name
0, # Module credit
row[2].value * 2, # Semester given
None, # Department
None # Enrolled Students
) #TODO add more accurate information
# And add to the list of modules
self.modules[row[2].value] = moduleEnrolled
# And module enrollment
moduleEnroll = model.ModuleEnrollment(self.intakeAutumn[str(int(row[3].value))], moduleEnrolled, 2)
# PASS / FAIL / ABSENT / PASS BY COMPENSATION etc.
if (row[6].value == "PASS"):
moduleEnroll.status = "PASS"
elif (row[6].value == "FAIL"):
moduleEnroll.status = "FAIL"
elif (row[6].value == "ABSENT"):
moduleEnroll.status = "ABSENT"
elif (row[6].value == "PASS BY COMPENSATION"):
moduleEnroll.status = "PASS BY COMPENSATION"
elif (row[6].value == "DID NOT COMPLETE"):
moduleEnroll.status = "DID NOT COMPLETE"
elif (row[6].value == "EXEMPTION"):
moduleEnroll.status = "EXEMPTION"
elif (row[6].value == "SATISFACTORY"):
moduleEnroll.status = "SATISFACTORY"
# Marks received
if (row[5].value != ""):
moduleEnroll.marksReceived = row[5].value
else:
moduleEnroll.marksReceived = 0
self.moduleEnrollments.append(moduleEnroll)
# And add to the list of modules of a particular student
self.intakeAutumn[str(int(row[3].value))].modules.append(moduleEnrolled)
# Also, add module enrollment to the student
self.intakeAutumn[str(int(row[3].value))].moduleEnrollments[row[4].value] = moduleEnroll
update = 'Data imported'
update += "\n" + 'SUMMER INTAKE length:' + str(len(self.intakeSummer))
# # Print student IDs
# keys = self.intakeSummer.keys()
# keys.sort()
# for key in keys:
# update += "\n" + self.intakeSummer[key].studentID
update += "\n" + 'AUTUMN INTAKE length:' + str(len(self.intakeAutumn))
update += "\n" + 'COURSES length:' + str(len(self.courses))
update += "\n" + 'COURSE TYPES length:' + str(len(self.courseTypes))
update += "\n" + 'MODULES length:' + str(len(self.modules))
update += "\n" + 'FACULTIES length:' + str(len(self.faculties))
update += "\n" + 'DEPARTMENTS length:' + str(len(self.departments))
# Calculate an average number of enrolled modules per student
keys = self.intakeSummer.keys()
numModulesEnrolled = 0
for key in keys:
numModulesEnrolled += len(self.intakeSummer[key].getModules())
keys = self.intakeAutumn.keys()
for key in keys:
numModulesEnrolled += len(self.intakeAutumn[key].getModules())
numModulesEnrolled /= (len(self.intakeSummer.keys()) + len(self.intakeAutumn.keys()))
update += "\n" + 'STUDENTS average enrolled modules:' + str(numModulesEnrolled)
update += "\n" + 'SUMMER'
passedList = []
failedList = []
compensationList = []
absentList = []
didnotcompleteList = []
excemptionList = []
satisfactoryList = []
for i in self.moduleEnrollments:
if (i.semesterTaken == 1):
if (i.status == "PASS"):
passedList.append(i)
elif (i.status == "FAIL"):
failedList.append(i)
elif (i.status == "PASS BY COMPENSATION"):
compensationList.append(i)
elif (i.status == "ABSENT"):
absentList.append(i)
elif (i.status == "DID NOT COMPLETE"):
didnotcompleteList.append(i)
elif (i.status == "EXEMPTION"):
excemptionList.append(i)
elif (i.status == "SATISFACTORY"):
satisfactoryList.append(i)
update += "\n" + 'Passed:' + str(len(passedList))
update += "\n" + 'Failed:' + str(len(failedList))
update += "\n" + 'Passed by compensation:' + str(len(compensationList))
update += "\n" + 'Absent:' + str(len(absentList))
update += "\n" + 'Did not complete:' + str(len(didnotcompleteList))
update += "\n" + 'Excempt:' + str(len(excemptionList))
update += "\n" + 'Satisfactory:' + str(len(satisfactoryList))
update += "\n" + 'AUTUMN'
passedList = []
failedList = []
compensationList = []
absentList = []
didnotcompleteList = []
excemptionList = []
satisfactoryList = []
for i in self.moduleEnrollments:
if (i.semesterTaken == 2):
if (i.status == "PASS"):
passedList.append(i)
elif (i.status == "FAIL"):
failedList.append(i)
elif (i.status == "PASS BY COMPENSATION"):
compensationList.append(i)
elif (i.status == "ABSENT"):
absentList.append(i)
elif (i.status == "DID NOT COMPLETE"):
didnotcompleteList.append(i)
elif (i.status == "EXEMPTION"):
excemptionList.append(i)
elif (i.status == "SATISFACTORY"):
satisfactoryList.append(i)
update += "\n" + 'Passed:' + str(len(passedList))
update += "\n" + 'Failed:' + str(len(failedList))
update += "\n" + 'Passed by compensation:' + str(len(compensationList))
update += "\n" + 'Absent:' + str(len(absentList))
update += "\n" + 'Did not complete:' + str(len(didnotcompleteList))
update += "\n" + 'Excempt:' + str(len(excemptionList))
update += "\n" + 'Satisfactory:' + str(len(satisfactoryList))
if conf.DEBUG:
print update
return update
except:
print traceback.format_exc()
## Add a name of the faculty that a student is enrolled in, based on the name of the programme
def decideFaculty(self, programme, student):
if str(programme) == "ARTS (ANTHROPOLOGY) SINGLE HONOURS":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "MATHEMATICS":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "ARTS - SINGLE HONOURS":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "ANTHROPOLOLGY - INTERNATIONAL":
student.faculty = "SOCIAL SCIENCES"
elif str(programme) == "INTERNATIONAL FINANCE & ECONOMICS":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY" # ?
elif str(programme) == "MEDIA STUDIES - INTERNATIONAL":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "MULTIMEDIA - INTERNATIONAL":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "MUSIC TECHNOLOGY - INTERNATIONAL":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "BA INTERNATIONAL DEGREE":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "POLITICS INTERNATIONAL":
student.faculty = "SOCIAL SCIENCES" # ?
elif str(programme) == "BUSINESS & MANAGEMENT INTERNATIONAL":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "BIOLOGICAL AND BIOMEDICAL SCIENCES":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "SCIENCE (BIOTECHNOLOGY)":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "B.B.S. BUSINESS & MANAGEMENT":
student.faculty = "SOCIAL SCIENCES"
elif str(programme) == "B.B.S. BUSINESS & ACCOUNTING":
student.faculty = "SOCIAL SCIENCES"
elif str(programme) == "B.B.S. BUSINESS & ACCOUNTING INTERNATION":
student.faculty = "SOCIAL SCIENCES"
elif str(programme) == "BGENETICS & BIOINFORMATICS":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "COMPUTATIONAL THINKING":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "COMPUTER SCI & SOFTWARE ENG (ARTS)":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "COMPUTER SCI.& SOFTWARE ENGINEERING":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "BA COMMUNITY & YOUTH WORK":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "BA COMMUNITY & YOUTH WORK P/T":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "PRODUCT DESIGN (MARKETING & INNOVATION)":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "DIGITAL MEDIA":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "BA IN EARLY CHILDHOOD - TEACHING & LEARN":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "BACHELOR OF EDUCATION":
student.faculty = "SOCIAL SCIENCES"
elif str(programme) == "BACHELOR OF EDUCATION":
student.faculty = "SOCIAL SCIENCES"
elif str(programme) == "B.B.A. BUSINESS & ACCOUNTING":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "ENGINEERING":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "ELECTRONIC ENGINEER. WITH COMMUNICATIONS":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "ELECTRONIC ENGINEERING WITH COMPUTERS":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "ELECTRONIC ENGINEERING":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "ARTS (ENGLISH)":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "B.B.S. EQUINE BUSINESS":
student.faculty = "SOCIAL SCIENCES"
elif str(programme) == "B.B.A. EQUINE BUSINESS":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "B.B.S. EQUINE BUSINESS INTERNATIONAL":
student.faculty = "SOCIAL SCIENCES"
elif str(programme) == "EUROPEAN STUDIES":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY" # ?
elif str(programme) == "ARTS (FINANCE)":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "ARTS(FINANCE) MAJOR/MINOR":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "ACCOUNTING & FINANCE":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY" # ?
elif str(programme) == "ARTS (GEOGRAPHY)":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "ARTS (HISTORY)":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "LL.B. LAW":
student.faculty = "SOCIAL SCIENCES" # ?
elif str(programme) == "LL.B. LAW WITH PLACEMENT":
student.faculty = "SOCIAL SCIENCES" # ?
elif str(programme) == "LAW AND ARTS":
student.faculty = "SOCIAL SCIENCES" # ?
elif str(programme) == "LAW AND ARTS INTERNATIONAL":
student.faculty = "SOCIAL SCIENCES" # ?
elif str(programme) == "LAW & ARTS INTERNATIONAL WITH PLACEMENT":
student.faculty = "SOCIAL SCIENCES" # ?
elif str(programme) == "LAW AND MINOR ARTS":
student.faculty = "SOCIAL SCIENCES" # ?
elif str(programme) == "LAW AND ARTS WITH PLACEMENT":
student.faculty = "SOCIAL SCIENCES" # ?
elif str(programme) == "LAW AND BUSINESS":
student.faculty = "SOCIAL SCIENCES" # ?
elif str(programme) == "LAW AND BUSINESS WITH PLACEMENT":
student.faculty = "SOCIAL SCIENCES" # ?
elif str(programme) == "B.B.S. MARKETING":
student.faculty = "SOCIAL SCIENCES"
elif str(programme) == "MATHEMATICS EDUCATION":
student.faculty = "SOCIAL SCIENCES"
elif str(programme) == "MEDIA STUDIES":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "ARTS (MULTIMEDIA)":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "MUSIC HONOURS":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "MUSIC TECHNOLOGY":
student.faculty = "SCIENCE AND ENGINEERING" # ?
elif str(programme) == "PHARMACEUTICAL AND BIOMEDICAL CHEMISTRY":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "PHYSICS WITH ASTROPHYSICS":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "PHYSICS WITH ASTROPHYSICS INTERNATIONAL":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "ARTS (POLITICS)":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "PHILOSOPHY,POLITICS & ECONOMICS":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "ARTS (PSYCHOLOGY)":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "BA (PUBLIC POLICY)":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "SCIENCE EDUCATION":
student.faculty = "SOCIAL SCIENCES" # ?
elif str(programme) == "SCIENCE HONOURS":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "SCIENCE HONOURS ACCELERATED":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "THEORETICAL PHYSICS & MATHEMATICS":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "SCIENCE SINGLE HONOURS":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "SCIENCE MULTIMEDIA":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "MULTIMEDIA, MOBILE & WEB DEVELOPMENT":
student.faculty = "SCIENCE AND ENGINEERING"
elif str(programme) == "SOCIAL SCIENCE":
student.faculty = "SOCIAL SCIENCES"
elif str(programme) == "THEOLOGY":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "FINANCE & VENTURE MANAGEMENT":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "ENTREPRENEURSHIP":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
elif str(programme) == "ENTREPRENEURSHIP WITH PLACEMENT":
student.faculty = "ARTS,CELT.STUD. AND PHILOSOPHY"
else:
student.faculty = "N/A"
# Based on http://stackoverflow.com/questions/5731670/simple-random-name-generator-in-python/5732034#5732034
    def createIntake(self):
        """Populate the module-level `intake` list with random students.

        Name parts are read from conf.FILE_WITH_NAMES, an INI-like file:
        a "[section]" line starts a new list of parts and every following
        line belongs to that section.  A full name is built by joining one
        random part from each section, sections taken in sorted order.

        NOTE(review): uses `xrange`, so this is Python 2 code; `conf`,
        `random`, `model` and `intake` are names defined elsewhere in the
        file (outside this excerpt) -- confirm before porting.
        """
        # Fetch lists of names
        parts = {}
        with open(conf.FILE_WITH_NAMES, 'r') as f:
            nameList = []
            for line in f.readlines():
                line = line.strip()
                if line.startswith('[') and line.endswith(']'):
                    # Section header: register a fresh list under the
                    # section name (surrounding brackets stripped).
                    nameList = []
                    parts[line[1:-1]] = nameList
                else:
                    nameList.append(line.strip())
        i = 0
        # Create student instances
        for count in xrange(conf.INTAKE_SIZE):
            # One random part per section, in alphabetical section order.
            name = ' '.join(random.choice(parts[partName]) for partName in sorted(parts))
            gender = random.choice(["m", "f"])
            studentID = i
            i = i + 1
            s = model.Student(studentID, name, gender)
            intake.append(s)
        # for x in intake:
        #     print x.name, x.gender, x.studentID
# Open either Excel of CSV file with data
def openDataFile(self, path):
try:
return open_workbook(path)
except:
return open(path, 'r')
return None | 22,651 | 1,112 | 23 |
8193d1da13031328c8e781646be0da0dbfa4d864 | 346 | py | Python | projeto_labhacker/core/urls.py | thiagonf/prova-labhacker | c6cf3bbc89f5af16e9d784b53e0787a71e2ab292 | [
"MIT"
] | null | null | null | projeto_labhacker/core/urls.py | thiagonf/prova-labhacker | c6cf3bbc89f5af16e9d784b53e0787a71e2ab292 | [
"MIT"
] | 7 | 2020-06-05T22:40:01.000Z | 2022-02-10T09:51:52.000Z | projeto_labhacker/core/urls.py | thiagonf/prova-labhacker | c6cf3bbc89f5af16e9d784b53e0787a71e2ab292 | [
"MIT"
] | null | null | null | from django.urls import path, include
from django.contrib import admin
from django.contrib.auth import views
from authentication.views import HomeView
urlpatterns = [
path('admin/', admin.site.urls),
path('', HomeView.as_view(), name='home'),
path('auth/', include('authentication.urls')),
path('', include('dashboard.urls')),
]
| 28.833333 | 50 | 0.708092 | from django.urls import path, include
from django.contrib import admin
from django.contrib.auth import views
from authentication.views import HomeView
urlpatterns = [
path('admin/', admin.site.urls),
path('', HomeView.as_view(), name='home'),
path('auth/', include('authentication.urls')),
path('', include('dashboard.urls')),
]
| 0 | 0 | 0 |
c22e4ba0798f0669fe19d69f6312a8620bb1d30a | 47 | py | Python | arbitrary_dateparser/__init__.py | nottheswimmer/abstract-dateparser | e8c6085f2db610bb36e8807e0248c4b310e9599e | [
"MIT"
] | 1 | 2019-08-02T21:36:14.000Z | 2019-08-02T21:36:14.000Z | arbitrary_dateparser/__init__.py | nottheswimmer/abstract-dateparser | e8c6085f2db610bb36e8807e0248c4b310e9599e | [
"MIT"
] | 1 | 2019-08-13T15:49:39.000Z | 2019-08-14T11:29:20.000Z | arbitrary_dateparser/__init__.py | nottheswimmer/abstract-dateparser | e8c6085f2db610bb36e8807e0248c4b310e9599e | [
"MIT"
] | null | null | null | from arbitrary_dateparser.dateparser import *
| 23.5 | 46 | 0.851064 | from arbitrary_dateparser.dateparser import *
| 0 | 0 | 0 |
ca737514e4cb0068974995165c48326b7ef7ffb8 | 3,895 | py | Python | tests/src/pat_LO_Table/check_catogory_levels.py | JalajaTR/cQube | 6bf58ab25f0c36709630987ab730bbd5d9192c03 | [
"MIT"
] | null | null | null | tests/src/pat_LO_Table/check_catogory_levels.py | JalajaTR/cQube | 6bf58ab25f0c36709630987ab730bbd5d9192c03 | [
"MIT"
] | null | null | null | tests/src/pat_LO_Table/check_catogory_levels.py | JalajaTR/cQube | 6bf58ab25f0c36709630987ab730bbd5d9192c03 | [
"MIT"
] | null | null | null | import csv
import os
import time
from selenium.webdriver.support.select import Select
from Data.parameters import Data
from filenames import file_extention
from get_dir import pwd
from reuse_func import GetData
| 39.744898 | 86 | 0.622593 | import csv
import os
import time
from selenium.webdriver.support.select import Select
from Data.parameters import Data
from filenames import file_extention
from get_dir import pwd
from reuse_func import GetData
class Catagory_series():
    """Selenium smoke-checks for the 'view by' dropdown on the PAT LO table.

    Each public method navigates to the report page, selects one or more
    'view by' options, verifies the selected option's records are rendered,
    and confirms the corresponding CSV export downloads.  All methods
    return a failure count (0 means every check passed).

    NOTE(review): the class name 'Catagory_series' (sic) is kept as-is for
    backward compatibility with existing callers.
    """

    def __init__(self, driver):
        # Shared Selenium WebDriver instance used by every check.
        self.driver = driver

    def _check_csv_download(self, label, filename):
        """Trigger the CSV export and verify *filename* lands on disk.

        Returns 1 if the file is missing (failure), 0 otherwise.  The
        downloaded file is removed on success so reruns start clean.
        """
        self.driver.find_element_by_id(Data.Download).click()
        time.sleep(3)  # give the browser time to finish writing the file
        if not os.path.isfile(filename):
            print(label, 'csv file is not downloaded')
            return 1
        print(label, 'csv file is downloaded')
        os.remove(filename)
        return 0

    def viewbys_options(self):
        """Exercise every 'view by' option and its CSV export."""
        self.p = pwd()
        self.load = GetData()
        count = 0
        self.fname = file_extention()
        self.driver.find_element_by_xpath(Data.hyper_link).click()
        self.load.page_loading(self.driver)
        view_by = Select(self.driver.find_element_by_id(Data.view_by))
        # Option 0 is the placeholder; real options start at index 1.
        for i in range(1, len(view_by.options)):
            view_by.select_by_index(i)
            self.load.page_loading(self.driver)
            label = view_by.options[i].text
            if label in self.driver.page_source:
                print(label, 'is displayed records in LO TABLE')
            else:
                print(label, 'Records are not displayed')
                count = count + 1
            self.load.page_loading(self.driver)
            self.filename = self.p.get_download_dir() + "/" + self.fname.pchart_views()
            count = count + self._check_csv_download(label, self.filename)
        return count

    def test_questions_records(self):
        """Check the first 'view by' option (questions) and its export."""
        self.p = pwd()
        self.load = GetData()
        count = 0
        self.fname = file_extention()
        self.driver.find_element_by_xpath(Data.hyper_link).click()
        self.load.page_loading(self.driver)
        view_by = Select(self.driver.find_element_by_id(Data.view_by))
        view_by.select_by_index(1)
        self.load.page_loading(self.driver)
        label = view_by.options[1].text
        if label in self.driver.page_source:
            print(label, 'is displayed records in heat chart')
        else:
            print(label, 'Records are not displayed')
            count = count + 1
        self.load.page_loading(self.driver)
        self.filename = self.p.get_download_dir() + "/" + self.fname.pchart_views()
        count = count + self._check_csv_download(label, self.filename)
        return count

    def test_indicator_records(self):
        """Check the second 'view by' option (indicators) and its export."""
        self.p = pwd()
        self.load = GetData()
        count = 0
        self.fname = file_extention()
        self.driver.find_element_by_xpath(Data.hyper_link).click()
        time.sleep(5)  # original used a fixed wait here instead of page_loading
        view_by = Select(self.driver.find_element_by_id(Data.view_by))
        view_by.select_by_index(2)
        self.load.page_loading(self.driver)
        label = view_by.options[2].text
        if label in self.driver.page_source:
            print(label, 'is displayed records in heat chart')
        else:
            print(label, 'Records are not displayed')
            count = count + 1
        self.load.page_loading(self.driver)
        self.filename = self.p.get_download_dir() + "/" + self.fname.pchart_subjects()
        count = count + self._check_csv_download(label, self.filename)
        return count
| 3,548 | 3 | 130 |
fc8976d261902bb332c90666a49e82c71afea265 | 4,257 | py | Python | conf/example_apps/ObjectTracker/objectcontrole.py | chipsi007/appdaemon | 4eaf4cc9a9c9bd3f703014bd60730c65ed7bfb74 | [
"Apache-2.0"
] | 2 | 2018-10-01T10:09:16.000Z | 2018-12-25T20:03:48.000Z | conf/example_apps/ObjectTracker/objectcontrole.py | chipsi007/appdaemon | 4eaf4cc9a9c9bd3f703014bd60730c65ed7bfb74 | [
"Apache-2.0"
] | null | null | null | conf/example_apps/ObjectTracker/objectcontrole.py | chipsi007/appdaemon | 4eaf4cc9a9c9bd3f703014bd60730c65ed7bfb74 | [
"Apache-2.0"
] | 1 | 2021-05-09T00:28:23.000Z | 2021-05-09T00:28:23.000Z | ###########################################################################################
# #
# ObjectTracker 2.0 #
# #
###########################################################################################
# #
# with ObjectTracker you can track the last updated time from any object in HA #
# options are to give the last time an object was updated or the time that has gone by #
# you have to set the following options in the appdaemon.cfg: #
# #
# object_type = the type you like to track (switch, input_boolean, sensor, etc) #
# time_gone_by = True or False (false for showing last updated time) #
# dir_name = the name of the directory you want the files with times saved #
# time_format = any timeformat you like (python strftime type) without % #
# H:M gives 01:27, Y-m-d H:M:S gives 2016-09-04 01:27:25, etc. #
# total_objects = the amount off object you want to track #
# object1 = HA entity_ID without the platform part. (for switch.light1 use light1) #
# object2 = ... #
# object3 = untill you reached youre total_object amount #
# #
# note that you need to set a new sections in the cfg for each type of object you like #
# to track. if you want to track 1 switch and 1 sensor you need to make to sections. #
# #
# ObjectTracker depends on general_app_functions.py set as app and set in the cfg as #
# [generalvars] #
# #
# Rene Tode ( hass@reot.org ) #
# version 2.0 #
# 2016/09/04 Germany #
# #
###########################################################################################
import appdaemon.plugins.hass.hassapi as hass
import datetime
| 73.396552 | 209 | 0.439276 | ###########################################################################################
# #
# ObjectTracker 2.0 #
# #
###########################################################################################
# #
# with ObjectTracker you can track the last updated time from any object in HA #
# options are to give the last time an object was updated or the time that has gone by #
# you have to set the following options in the appdaemon.cfg: #
# #
# object_type = the type you like to track (switch, input_boolean, sensor, etc) #
# time_gone_by = True or False (false for showing last updated time) #
# dir_name = the name of the directory you want the files with times saved #
# time_format = any timeformat you like (python strftime type) without % #
# H:M gives 01:27, Y-m-d H:M:S gives 2016-09-04 01:27:25, etc. #
# total_objects = the amount off object you want to track #
# object1 = HA entity_ID without the platform part. (for switch.light1 use light1) #
# object2 = ... #
# object3 = untill you reached youre total_object amount #
# #
# note that you need to set a new sections in the cfg for each type of object you like #
# to track. if you want to track 1 switch and 1 sensor you need to make to sections. #
# #
# ObjectTracker depends on general_app_functions.py set as app and set in the cfg as #
# [generalvars] #
# #
# Rene Tode ( hass@reot.org ) #
# version 2.0 #
# 2016/09/04 Germany #
# #
###########################################################################################
import appdaemon.plugins.hass.hassapi as hass
import datetime
class objectcontrole(hass.Hass):
  """AppDaemon app that records the last-updated time of tracked HA objects.

  Configuration (appdaemon.cfg): object_type, time_gone_by, dir_name,
  time_format, total_objects and object1..objectN -- see the header
  comment at the top of this file.  Depends on the 'generalvars' app for
  update_object_time() / save_last_update_time().
  """

  def initialize(self):
    """Register the state listener and, if requested, a minutely timer."""
    self.listen_state(self.object_controle, self.args["object_type"])
    # The minutely refresh is only needed when the user wants "time gone
    # by" output, which must be re-rendered every minute.
    if self.args["time_gone_by"] in ("true", "True"):
      time = datetime.time(0, 0, 0)
      self.run_minutely(self.object_controle_minutely, time)

  def object_controle(self, entity, attribute, old, new, kwargs):
    """State-change callback: refresh the tracked file for *entity*."""
    fnc = self.get_app("generalvars")
    # entity is "<type>.<name>"; the name part is matched against the
    # configured objectN entries.  Split once -- it is loop-invariant
    # (the original recomputed it on every iteration).
    device, entity_name = self.split_entity(entity)
    for counter in range(1, int(self.args["total_objects"]) + 1):
      object_name = self.args["object" + str(counter)]
      if entity_name == object_name:
        fnc.update_object_time(object_name, self.friendly_name(entity), self.args["dir_name"], self.args["time_gone_by"], self.args["time_format"], self.args["object_type"])
        fnc.save_last_update_time(self.args["dir_name"], object_name)

  def object_controle_minutely(self, kwargs):
    """Minutely timer callback: refresh every tracked object's file."""
    fnc = self.get_app("generalvars")
    for counter in range(1, int(self.args["total_objects"]) + 1):
      object_name = self.args["object" + str(counter)]
      fnc.update_object_time(object_name, self.friendly_name(self.args["object_type"] + "." + object_name), self.args["dir_name"], self.args["time_gone_by"], self.args["time_format"], self.args["object_type"])
0055b398181444b5c296d52fa505b1604e04fe4d | 1,663 | py | Python | getmedia/face_mesh.py | yuanzhoulvpi2017/tiny_python | 0247ef6ce76eeb8219a6fd2a4b2c092dc62f89f7 | [
"MIT"
] | 34 | 2021-09-17T09:32:34.000Z | 2022-03-28T07:26:02.000Z | getmedia/face_mesh.py | luji1515/tiny_python | 5ff7921bd241bdd8dca695712f6e0666cc35527f | [
"MIT"
] | null | null | null | getmedia/face_mesh.py | luji1515/tiny_python | 5ff7921bd241bdd8dca695712f6e0666cc35527f | [
"MIT"
] | 27 | 2021-09-17T14:07:28.000Z | 2022-03-15T02:03:37.000Z | import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
# mp_drawing_styles = mp.solutions.drawing_styles
mp_face_mesh = mp.solutions.face_mesh
# for webcam input:
if __name__ == '__main__':
main()
| 32.607843 | 113 | 0.58629 | import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
# mp_drawing_styles = mp.solutions.drawing_styles
mp_face_mesh = mp.solutions.face_mesh
# for webcam input:
def main():
    """Stream webcam frames through MediaPipe FaceMesh and draw contours.

    Opens the default camera (index 0), mirrors each frame, runs face-mesh
    landmark detection and overlays the facial contours.  Press ESC in the
    preview window to quit.
    """
    cap = cv2.VideoCapture(0)
    with mp_face_mesh.FaceMesh(min_tracking_confidence=0.5, min_detection_confidence=0.5) as face_mesh:
        while cap.isOpened():
            success, image = cap.read()
            if not success:
                print("Ignoring empty frames.")
                continue
            # Mirror horizontally (selfie view) and convert BGR -> RGB,
            # which is the color order MediaPipe expects.
            image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
            # Mark the frame read-only so MediaPipe can avoid a copy.
            image.flags.writeable = False
            results = face_mesh.process(image)
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            if results.multi_face_landmarks:
                for face_landmarks in results.multi_face_landmarks:
                    if face_landmarks is not None:
                        mp_drawing.draw_landmarks(
                            image=image,
                            landmark_list=face_landmarks,
                            connections=mp_face_mesh.FACEMESH_CONTOURS,
                        )
            cv2.imshow(winname="face Mesh", mat=image)
            if cv2.waitKey(5) & 0xFF == 27:  # ESC exits the loop
                break
    cap.release()
    # Close the preview window on exit (was missing); also removed the
    # unused `drawing_spec` local the original created and never used.
    cv2.destroyAllWindows()
if __name__ == '__main__':
main()
| 1,415 | 0 | 23 |
1df567ae3270fbc9ec71f1694fa6fa8978628cd5 | 2,510 | py | Python | src/CacheSim/CacheRow.py | andrewoconnell89/CS472_Project2 | 8846b1ad6ecdbe88406d994d9e396b4c6f2bf552 | [
"MIT"
] | null | null | null | src/CacheSim/CacheRow.py | andrewoconnell89/CS472_Project2 | 8846b1ad6ecdbe88406d994d9e396b4c6f2bf552 | [
"MIT"
] | null | null | null | src/CacheSim/CacheRow.py | andrewoconnell89/CS472_Project2 | 8846b1ad6ecdbe88406d994d9e396b4c6f2bf552 | [
"MIT"
] | null | null | null | # Designed by Andy OConnell <aoconnel@bu.edu> <andrewoconnell89@gmail.com>
class CacheRow(object):
"""This represents a row of cache.
Instance Variables
Slot : middle 4 bits
Valid : If the row is real data
Tag : left 4 Bits
Data : 16 Bytes of information from memory
Dirty : is true if any data has been changed
"""
| 34.861111 | 74 | 0.323904 | # Designed by Andy OConnell <aoconnel@bu.edu> <andrewoconnell89@gmail.com>
class CacheRow(object):
"""This represents a row of cache.
Instance Variables
Slot : middle 4 bits
Valid : If the row is real data
Tag : left 4 Bits
Data : 16 Bytes of information from memory
Dirty : is true if any data has been changed
"""
def __init__(self, **kwargs):
self.slot = 99
self.valid = 0
self.tag = 0
self.data = [0]*16
self.dirty = False
#Fill in from kwargs
if kwargs.get('slot') is not None:
self.slot = kwargs.get('slot')
if kwargs.get('valid') is not None:
self.valid = kwargs.get('valid')
if kwargs.get('tag') is not None:
self.tag = kwargs.get('tag')
if kwargs.get('dirty') is not None:
self.dirty = kwargs.get('dirty')
def __str__(self):
result = '{0:>6x}'+\
'{1:>6}'+\
'{2:>6x}'+\
'{3:>6}'+\
'{4:>10x}'+\
'{5:>3x}'+\
'{6:>3x}'+\
'{7:>3x}'+\
'{8:>3x}'+\
'{9:>3x}'+\
'{10:>3x}'+\
'{11:>3x}'+\
'{12:>3x}'+\
'{13:>3x}'+\
'{14:>3x}'+\
'{15:>3x}'+\
'{16:>3x}'+\
'{17:>3x}'+\
'{18:>3x}'+\
'{19:>3x}'
result = result.format( self.slot,
self.valid,
self.tag,
self.dirty,
self.data[0],
self.data[1],
self.data[2],
self.data[3],
self.data[4],
self.data[5],
self.data[6],
self.data[7],
self.data[8],
self.data[9],
self.data[10],
self.data[11],
self.data[12],
self.data[13],
self.data[14],
self.data[15],)
return result
| 2,088 | 0 | 53 |
c7cc5935f3c8fcff3c7f90b35a1a3b5f5c1c9ae1 | 3,857 | py | Python | helpdesk/forms.py | bobbybabu007/django-simple-helpdesk | d507682e82808ba182736d33e26824dcad8d801f | [
"BSD-2-Clause"
] | 14 | 2016-10-11T21:29:32.000Z | 2021-09-21T13:51:16.000Z | helpdesk/forms.py | bobbybabu007/django-simple-helpdesk | d507682e82808ba182736d33e26824dcad8d801f | [
"BSD-2-Clause"
] | 7 | 2015-11-14T19:15:47.000Z | 2021-06-24T18:35:42.000Z | helpdesk/forms.py | bobbybabu007/django-simple-helpdesk | d507682e82808ba182736d33e26824dcad8d801f | [
"BSD-2-Clause"
] | 17 | 2015-06-01T17:35:58.000Z | 2021-09-11T22:01:19.000Z | from ckeditor.fields import RichTextFormField
from django import forms
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models import Count
from django.forms import ModelChoiceField
from django.utils.module_loading import import_string
from helpdesk.models import State, Comment, Ticket, Project
from helpdesk.utils import DefaultProfile
| 36.046729 | 110 | 0.667358 | from ckeditor.fields import RichTextFormField
from django import forms
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models import Count
from django.forms import ModelChoiceField
from django.utils.module_loading import import_string
from helpdesk.models import State, Comment, Ticket, Project
from helpdesk.utils import DefaultProfile
def get_default_profile(user):
    """Return the helpdesk profile object for *user*.

    Uses the class named by settings.HELPDESK_DEFAULT_PROFILE when that
    setting is present; otherwise falls back to the built-in
    DefaultProfile.
    """
    try:
        # Django settings raise AttributeError when
        # HELPDESK_DEFAULT_PROFILE is not configured.
        profile_class = import_string(settings.HELPDESK_DEFAULT_PROFILE)
    except AttributeError:
        return DefaultProfile(user)
    # Instantiate outside the try block so an AttributeError raised by a
    # buggy custom profile __init__ is not silently masked by the fallback
    # (the original's broad try body swallowed those too).
    return profile_class(user)
class ProfileChoiceField(ModelChoiceField):
    """Dropdown of active members of the 'Helpdesk support' group.

    Options are labelled "Full Name (profile label)" when a label is
    available, otherwise just the full name.
    """
    def label_from_instance(self, obj):
        # Prefer the label stored on the user's helpdesk profile; fall back
        # to the default profile's label when none is set.
        label = obj.helpdeskprofile.label if hasattr(obj, 'helpdeskprofile') else None
        if not label:
            label = get_default_profile(obj).label
        return '%s (%s)' % (obj.get_full_name(), label) if label else obj.get_full_name()
    def __init__(self, *args, **kwargs):
        # Queryset is fixed: active support-group users, ordered by name.
        queryset = User.objects.filter(is_active=True, groups__name='Helpdesk support').order_by('first_name')
        super(ProfileChoiceField, self).__init__(queryset, *args, **kwargs)
class CommentForm(forms.ModelForm):
    """Answer form for a ticket: rich-text body, resulting ticket state,
    and an 'internal' flag for comments hidden from the customer."""
    state = forms.ModelChoiceField(State.objects.all(), widget=forms.RadioSelect, initial='resolved')
    body = RichTextFormField()
    def __init__(self, *args, **kwargs):
        super(CommentForm, self).__init__(*args, **kwargs)
        # Presentation tweaks on the rich-text body widget.
        self.fields['body'].widget.attrs['placeholder'] = 'Enter your answer here'
        self.fields['body'].label = 'Answer body'
    class Meta:
        model = Comment
        fields = ('body', 'state', 'internal')
class TicketForm(forms.ModelForm):
    """Edit form for an existing ticket (assignee/priority/project/state)."""
    assignee = ProfileChoiceField()
    def __init__(self, *args, **kwargs):
        super(TicketForm, self).__init__(*args, **kwargs)
        self.fields['project'].empty_label = '- None -'
        # Render a hidden input with each field's initial value so the
        # submitting view can detect which fields actually changed.
        for fieldname in self.Meta.fields:
            self.fields[fieldname].show_hidden_initial = True
    class Meta:
        model = Ticket
        fields = ['assignee', 'priority', 'project', 'state']
class FilterForm(forms.Form):
    """Ticket-list filter: display mode, assignee, state and project.

    Extra fields are added dynamically in __init__: an email filter when
    ``email_filter=True`` and per-user assignee choices (with open-ticket
    counts) when ``view_assignees=True``.
    """
    ASSIGNEES = (
        ('me', 'Me'),
        ('all', 'All')
    )
    MODES = (
        ('normal', 'Normal'),
        ('compact', 'Compact')
    )
    mode = forms.ChoiceField(choices=MODES)
    assignee = forms.ChoiceField(choices=ASSIGNEES)
    state = forms.ModelChoiceField(State.objects.all(), required=False, empty_label='All')
    project = forms.ModelChoiceField(Project.objects.all(), required=False, empty_label='All')
    def _get_user_label(self, user):
        # "First (label)" when the user has a helpdesk profile label,
        # otherwise just the first name.
        label = user.helpdeskprofile.label if hasattr(user, 'helpdeskprofile') else None
        return '{} ({})'.format(user.first_name, label) if label else user.first_name
    def __init__(self, *args, **kwargs):
        # Pop both custom flags before super() so the base Form never
        # sees them as unexpected kwargs.
        email_filter = kwargs.pop('email_filter', False)
        view_assignees = kwargs.pop('view_assignees', False)
        super(FilterForm, self).__init__(*args, **kwargs)
        if email_filter:
            self.fields['email'] = forms.EmailField(required=False)
        if view_assignees:
            # Users holding at least one open ticket, busiest first.
            choices = User.objects.filter(ticket__isnull=False, ticket__state='open').annotate(
                tickets=Count('ticket')).order_by('-tickets')
            # Append "<name> - <open count>" entries after the static
            # 'me'/'all' choices.
            assignees = self.ASSIGNEES + tuple(
                (u.pk, '{} - {}'.format(self._get_user_label(u), u.tickets)) for u in choices
            )
            self.fields['assignee'].choices = assignees
class TicketCreateForm(forms.ModelForm):
    """Creation form for a ticket, including its first (rich-text) comment."""
    comment = RichTextFormField()
    assignee = ProfileChoiceField()
    class Meta:
        model = Ticket
        fields = ['title', 'assignee', 'priority', 'project', 'state', 'customer', 'comment']
class SearchForm(forms.Form):
    """Free-text ticket search box (matches by email, title or body)."""
    search = forms.CharField(required=False, widget=forms.TextInput(attrs={
        'placeholder': 'Search by email, title or body',
    }))
| 1,926 | 1,328 | 214 |
50f97070acc1569e51c3cee3d2a264838a2d00e1 | 61,329 | py | Python | sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/operations/workspaces_operations.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/operations/workspaces_operations.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | sdk/loganalytics/azure-mgmt-loganalytics/azure/mgmt/loganalytics/operations/workspaces_operations.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class WorkspacesOperations(object):
"""WorkspacesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def list_link_targets(
            self, custom_headers=None, raw=False, **operation_config):
        """Get a list of workspaces which the current user has administrator
        privileges and are not associated with an Azure Subscription. The
        subscriptionId parameter in the Url is ignored.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: list or ClientRawResponse if raw=true
        :rtype: list[~azure.mgmt.loganalytics.models.LinkTarget] or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # NOTE(review): self._client, self._serialize, self._deserialize and
        # self.config are set in __init__, which is outside this excerpt.
        api_version = "2015-03-20"
        # Construct URL
        url = self.list_link_targets.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # Any non-200 status becomes a CloudError carrying the request id.
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('[LinkTarget]', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    list_link_targets.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.OperationalInsights/linkTargets'}
    def get_schema(
            self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
        """Gets the schema for a given workspace.
        :param resource_group_name: The name of the resource group to get. The
         name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Log Analytics workspace name
        :type workspace_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: SearchGetSchemaResponse or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.loganalytics.models.SearchGetSchemaResponse or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        api_version = "2015-03-20"
        # Construct URL
        url = self.get_schema.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        # Note: the schema endpoint is a POST even though it only reads data.
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('SearchGetSchemaResponse', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get_schema.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/schema'}
    def get_search_results(
            self, resource_group_name, workspace_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Submit a search for a given workspace. The response will contain an id
        to track the search. User can use the id to poll the search status and
        get the full search result later if the search takes long time to
        finish. .
        :param resource_group_name: The name of the resource group to get. The
         name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Log Analytics workspace name
        :type workspace_name: str
        :param parameters: The parameters required to execute a search query.
        :type parameters: ~azure.mgmt.loganalytics.models.SearchParameters
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns SearchResultsResponse
         or ClientRawResponse<SearchResultsResponse> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.loganalytics.models.SearchResultsResponse]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.loganalytics.models.SearchResultsResponse]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the long-running operation with the initial request.
        # NOTE(review): '_get_search_results_initial' and
        # 'get_long_running_output' are not defined in this excerpt; in the
        # generated SDK the former issues the first POST and the latter
        # deserializes the final LRO response -- confirm they exist in the
        # full file.
        raw_result = self._get_search_results_initial(
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        # Select the polling strategy: default ARM polling, no polling, or a
        # caller-supplied polling object.
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    get_search_results.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/search'}
    def update_search_results(
            self, resource_group_name, workspace_name, id, custom_headers=None, raw=False, **operation_config):
        """Gets updated search results for a given search query.
        :param resource_group_name: The name of the resource group to get. The
         name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: Log Analytics workspace name
        :type workspace_name: str
        :param id: The id of the search that will have results updated. You
         can get the id from the response of the GetResults call.
        :type id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: SearchResultsResponse or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.loganalytics.models.SearchResultsResponse or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        api_version = "2015-03-20"
        # Construct URL
        url = self.update_search_results.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
            'id': self._serialize.url("id", id, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.post(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('SearchResultsResponse', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    update_search_results.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/search/{id}'}
def purge(
        self, resource_group_name, workspace_name, table, filters, custom_headers=None, raw=False, polling=True, **operation_config):
    """Purges data in an Log Analytics workspace by a set of user-defined
    filters.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: Log Analytics workspace name
    :type workspace_name: str
    :param table: Table from which to purge data.
    :type table: str
    :param filters: The set of columns and filters (queries) to run over
     them to purge the resulting data.
    :type filters:
     list[~azure.mgmt.loganalytics.models.WorkspacePurgeBodyFilters]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns object or
     ClientRawResponse<object> if raw==True
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[object] or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[object]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Kick off the long-running purge; raw=True so the poller gets the
    # undeserialized initial response to track.
    raw_result = self._purge_initial(
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        table=table,
        filters=filters,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Final-state callback invoked by the poller. The purge operation
        # is documented to return an untyped body ('object' per :rtype:).
        deserialized = self._deserialize('object', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy: default ARM polling, no polling, or a
    # caller-supplied polling object.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
purge.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/purge'}
def disable_intelligence_pack(
        self, resource_group_name, workspace_name, intelligence_pack_name, custom_headers=None, raw=False, **operation_config):
    """Disable an intelligence pack on the given workspace.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: Name of the Log Analytics Workspace.
    :type workspace_name: str
    :param intelligence_pack_name: The name of the intelligence pack to be
     disabled.
    :type intelligence_pack_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    # Expand the operation's URL template with the serialized path values.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'intelligencePackName': self._serialize.url("intelligence_pack_name", intelligence_pack_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(
        self.disable_intelligence_pack.metadata['url'], **path_format_arguments)

    # The query string carries only the API version.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str')}

    # Assemble request headers; optional entries depend on client config.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the POST and map any non-200 response to a CloudError.
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # Nothing to deserialize; only the raw response is ever returned.
    if raw:
        return ClientRawResponse(None, response)
disable_intelligence_pack.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/intelligencePacks/{intelligencePackName}/Disable'}
def enable_intelligence_pack(
        self, resource_group_name, workspace_name, intelligence_pack_name, custom_headers=None, raw=False, **operation_config):
    """Enable an intelligence pack on the given workspace.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: Name of the Log Analytics Workspace.
    :type workspace_name: str
    :param intelligence_pack_name: The name of the intelligence pack to be
     enabled.
    :type intelligence_pack_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    # Expand the operation's URL template with the serialized path values.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'intelligencePackName': self._serialize.url("intelligence_pack_name", intelligence_pack_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(
        self.enable_intelligence_pack.metadata['url'], **path_format_arguments)

    # The query string carries only the API version.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str')}

    # Assemble request headers; optional entries depend on client config.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the POST and map any non-200 response to a CloudError.
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # Nothing to deserialize; only the raw response is ever returned.
    if raw:
        return ClientRawResponse(None, response)
enable_intelligence_pack.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/intelligencePacks/{intelligencePackName}/Enable'}
def list_intelligence_packs(
        self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
    """List every intelligence pack available for a workspace, together
    with its enabled/disabled state.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: Name of the Log Analytics Workspace.
    :type workspace_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: list or ClientRawResponse if raw=true
    :rtype: list[~azure.mgmt.loganalytics.models.IntelligencePack] or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    # Expand the operation's URL template with the serialized path values.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(
        self.list_intelligence_packs.metadata['url'], **path_format_arguments)

    # The query string carries only the API version.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str')}

    # Assemble request headers; optional entries depend on client config.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and map any non-200 response to a CloudError.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # A 200 body deserializes to a list of IntelligencePack models.
    result = None
    if response.status_code == 200:
        result = self._deserialize('[IntelligencePack]', response)

    if raw:
        return ClientRawResponse(result, response)
    return result
list_intelligence_packs.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/intelligencePacks'}
def get_shared_keys(
        self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
    """Retrieve the shared keys of a workspace.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: Name of the Log Analytics Workspace.
    :type workspace_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: SharedKeys or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.loganalytics.models.SharedKeys or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    # Expand the operation's URL template with the serialized path values.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(
        self.get_shared_keys.metadata['url'], **path_format_arguments)

    # The query string carries only the API version.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str')}

    # Assemble request headers; optional entries depend on client config.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Keys are fetched with a POST (they are secrets, so not a GET).
    request = self._client.post(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # A 200 body deserializes to a SharedKeys model.
    result = None
    if response.status_code == 200:
        result = self._deserialize('SharedKeys', response)

    if raw:
        return ClientRawResponse(result, response)
    return result
get_shared_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/sharedKeys'}
def list_usages(
        self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
    """Gets a list of usage metrics for a workspace.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of UsageMetric
    :rtype:
     ~azure.mgmt.loganalytics.models.UsageMetricPaged[~azure.mgmt.loganalytics.models.UsageMetric]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    def internal_paging(next_link=None, raw=False):
        # Page fetcher handed to the Paged iterator: first call builds the
        # URL from the template; later calls follow the service-supplied
        # next_link verbatim.
        if not next_link:
            # Construct URL
            url = self.list_usages.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response
    deserialized = models.UsageMetricPaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        client_raw_response = models.UsageMetricPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
list_usages.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/usages'}
def list_management_groups(
        self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
    """Gets a list of management groups connected to a workspace.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of ManagementGroup
    :rtype:
     ~azure.mgmt.loganalytics.models.ManagementGroupPaged[~azure.mgmt.loganalytics.models.ManagementGroup]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    def internal_paging(next_link=None, raw=False):
        # Page fetcher handed to the Paged iterator: first call builds the
        # URL from the template; later calls follow the service-supplied
        # next_link verbatim.
        if not next_link:
            # Construct URL
            url = self.list_management_groups.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response
    deserialized = models.ManagementGroupPaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        client_raw_response = models.ManagementGroupPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
list_management_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/managementGroups'}
def list_by_resource_group(
        self, resource_group_name, custom_headers=None, raw=False, **operation_config):
    """Gets workspaces in a resource group.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of Workspace
    :rtype:
     ~azure.mgmt.loganalytics.models.WorkspacePaged[~azure.mgmt.loganalytics.models.Workspace]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    def internal_paging(next_link=None, raw=False):
        # Page fetcher handed to the Paged iterator: first call builds the
        # URL from the template; later calls follow the service-supplied
        # next_link verbatim.
        if not next_link:
            # Construct URL
            url = self.list_by_resource_group.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response
    deserialized = models.WorkspacePaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        client_raw_response = models.WorkspacePaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces'}
def list(
        self, custom_headers=None, raw=False, **operation_config):
    """Gets the workspaces in a subscription.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of Workspace
    :rtype:
     ~azure.mgmt.loganalytics.models.WorkspacePaged[~azure.mgmt.loganalytics.models.Workspace]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    def internal_paging(next_link=None, raw=False):
        # Page fetcher handed to the Paged iterator: first call builds the
        # URL from the template; later calls follow the service-supplied
        # next_link verbatim.
        if not next_link:
            # Construct URL
            url = self.list.metadata['url']
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        else:
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response
    deserialized = models.WorkspacePaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        client_raw_response = models.WorkspacePaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.OperationalInsights/workspaces'}
def create_or_update(
        self, resource_group_name, workspace_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
    """Create or update a workspace.

    :param resource_group_name: The resource group name of the workspace.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param parameters: The parameters required to create or update a
     workspace.
    :type parameters: ~azure.mgmt.loganalytics.models.Workspace
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns Workspace or
     ClientRawResponse<Workspace> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.loganalytics.models.Workspace]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.loganalytics.models.Workspace]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Kick off the long-running create/update; raw=True so the poller gets
    # the undeserialized initial response to track.
    raw_result = self._create_or_update_initial(
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        parameters=parameters,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Final-state callback invoked by the poller: the terminal
        # response body is a Workspace resource (per :rtype: above).
        deserialized = self._deserialize('Workspace', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # Select the polling strategy: default ARM polling, no polling, or a
    # caller-supplied polling object.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}'}
def delete(
        self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
    """Delete a workspace instance.

    :param resource_group_name: The resource group name of the workspace.
    :type resource_group_name: str
    :param workspace_name: Name of the Log Analytics Workspace.
    :type workspace_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    # Expand the operation's URL template with the serialized path values.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(
        self.delete.metadata['url'], **path_format_arguments)

    # The query string carries only the API version.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str')}

    # Assemble request headers; optional entries depend on client config.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the DELETE; 200 and 204 are both success, anything else is
    # surfaced as a CloudError.
    request = self._client.delete(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200, 204]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # Nothing to deserialize; only the raw response is ever returned.
    if raw:
        return ClientRawResponse(None, response)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}'}
def get(
        self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
    """Fetch a single workspace instance.

    :param resource_group_name: The resource group name of the workspace.
    :type resource_group_name: str
    :param workspace_name: Name of the Log Analytics Workspace.
    :type workspace_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: Workspace or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.loganalytics.models.Workspace or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    # Expand the operation's URL template with the serialized path values.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(
        self.get.metadata['url'], **path_format_arguments)

    # The query string carries only the API version.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str')}

    # Assemble request headers; optional entries depend on client config.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and map any non-200 response to a CloudError.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # A 200 body deserializes to a Workspace model.
    result = None
    if response.status_code == 200:
        result = self._deserialize('Workspace', response)

    if raw:
        return ClientRawResponse(result, response)
    return result
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}'}
def update(
        self, resource_group_name, workspace_name, parameters, custom_headers=None, raw=False, **operation_config):
    """Patch an existing workspace.

    :param resource_group_name: The resource group name of the workspace.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param parameters: The parameters required to patch a workspace.
    :type parameters: ~azure.mgmt.loganalytics.models.Workspace
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: Workspace or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.loganalytics.models.Workspace or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    # Expand the operation's URL template with the serialized path values.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(
        self.update.metadata['url'], **path_format_arguments)

    # The query string carries only the API version.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str')}

    # Assemble request headers; optional entries depend on client config.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Serialize the Workspace payload and issue the PATCH.
    body_content = self._serialize.body(parameters, 'Workspace')
    request = self._client.patch(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # A 200 body deserializes to the updated Workspace model.
    result = None
    if response.status_code == 200:
        result = self._deserialize('Workspace', response)

    if raw:
        return ClientRawResponse(result, response)
    return result
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}'}
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class WorkspacesOperations(object):
"""WorkspacesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
    """Bind the service client, its configuration, and the model
    serializer/deserializer used by every operation in this group.
    """
    self.config = config
    self._client = client
    self._serialize = serializer
    self._deserialize = deserializer
def list_link_targets(
        self, custom_headers=None, raw=False, **operation_config):
    """List the workspaces the current user administers that are not yet
    linked to an Azure Subscription. The subscriptionId parameter in the
    Url is ignored.

    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: list or ClientRawResponse if raw=true
    :rtype: list[~azure.mgmt.loganalytics.models.LinkTarget] or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-03-20"

    # Expand the operation's URL template with the serialized path values.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(
        self.list_link_targets.metadata['url'], **path_format_arguments)

    # The query string carries only the API version.
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str')}

    # Assemble request headers; optional entries depend on client config.
    header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and map any non-200 response to a CloudError.
    request = self._client.get(url, query_parameters)
    response = self._client.send(request, header_parameters, stream=False, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # A 200 body deserializes to a list of LinkTarget models.
    result = None
    if response.status_code == 200:
        result = self._deserialize('[LinkTarget]', response)

    if raw:
        return ClientRawResponse(result, response)
    return result
list_link_targets.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.OperationalInsights/linkTargets'}
def get_schema(
        self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
    """Retrieve the search schema of a Log Analytics workspace.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: Log Analytics workspace name
    :type workspace_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: SearchGetSchemaResponse or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.loganalytics.models.SearchGetSchemaResponse or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-03-20"

    # Expand the URL template with the validated path arguments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.get_schema.metadata['url'], **path_args)

    # Only the service API version rides on the query string.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}

    # Assemble request headers, honoring client configuration and any
    # caller-supplied overrides.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the POST and translate any unexpected status into CloudError.
    request = self._client.post(url, query)
    response = self._client.send(request, headers, stream=False, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # Status is guaranteed to be 200 at this point.
    deserialized = self._deserialize('SearchGetSchemaResponse', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
get_schema.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/schema'}
def _get_search_results_initial(
        self, resource_group_name, workspace_name, parameters, custom_headers=None, raw=False, **operation_config):
    """Initial request of the ``get_search_results`` long-running operation.

    Serializes *parameters* as the request body, POSTs it, and returns the
    deserialized body on 200.  On 202 (operation accepted, still running)
    the return value is None; the LRO poller in ``get_search_results``
    tracks completion from the raw response.
    """
    api_version = "2015-03-20"

    # Construct URL
    # URL template is shared with the public wrapper's metadata.
    url = self.get_search_results.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(parameters, 'SearchParameters')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    # 200 = completed synchronously, 202 = accepted/in progress.
    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('SearchResultsResponse', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def get_search_results(
        self, resource_group_name, workspace_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
    """Submit a search for a given workspace. The response will contain an id
    to track the search. User can use the id to poll the search status and
    get the full search result later if the search takes long time to
    finish. .

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: Log Analytics workspace name
    :type workspace_name: str
    :param parameters: The parameters required to execute a search query.
    :type parameters: ~azure.mgmt.loganalytics.models.SearchParameters
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns SearchResultsResponse
     or ClientRawResponse<SearchResultsResponse> if raw==True
    :rtype:
     ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.loganalytics.models.SearchResultsResponse]
     or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.loganalytics.models.SearchResultsResponse]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Fire the initial request with raw=True so the poller receives the
    # full HTTP response (status code and headers) to track progress.
    raw_result = self._get_search_results_initial(
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        parameters=parameters,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Invoked by the poller on the final response; honors the caller's
        # `raw` flag captured by this closure.
        deserialized = self._deserialize('SearchResultsResponse', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # polling=True -> default ARM polling strategy; False -> no polling;
    # any other value is treated as a caller-supplied polling object.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_search_results.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/search'}
def update_search_results(
        self, resource_group_name, workspace_name, id, custom_headers=None, raw=False, **operation_config):
    """Fetch refreshed results for a previously submitted search query.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: Log Analytics workspace name
    :type workspace_name: str
    :param id: The id of the search that will have results updated. You
     can get the id from the response of the GetResults call.
    :type id: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: SearchResultsResponse or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.loganalytics.models.SearchResultsResponse or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-03-20"

    # Expand the URL template with the validated path arguments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'id': self._serialize.url("id", id, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.update_search_results.metadata['url'], **path_args)

    # Only the service API version rides on the query string.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}

    # Assemble request headers, honoring client configuration and any
    # caller-supplied overrides.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the POST and translate any unexpected status into CloudError.
    request = self._client.post(url, query)
    response = self._client.send(request, headers, stream=False, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # Status is guaranteed to be 200 at this point.
    deserialized = self._deserialize('SearchResultsResponse', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
update_search_results.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/search/{id}'}
def _purge_initial(
        self, resource_group_name, workspace_name, table, filters, custom_headers=None, raw=False, **operation_config):
    """Initial request of the ``purge`` long-running operation.

    Wraps *table* and *filters* into a WorkspacePurgeBody, POSTs it, and
    deserializes the body: 200 yields a WorkspacePurgeStatusResponse,
    202 (accepted, purge in progress) yields a WorkspacePurgeResponse.
    """
    # Build the request payload model from the flattened arguments.
    body = models.WorkspacePurgeBody(table=table, filters=filters)

    api_version = "2015-03-20"

    # Construct URL
    url = self.purge.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str')
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}
    header_parameters['Content-Type'] = 'application/json; charset=utf-8'
    if self.config.generate_client_request_id:
        header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        header_parameters.update(custom_headers)
    if self.config.accept_language is not None:
        header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Construct body
    body_content = self._serialize.body(body, 'WorkspacePurgeBody')

    # Construct and send request
    request = self._client.post(url, query_parameters)
    response = self._client.send(
        request, header_parameters, body_content, stream=False, **operation_config)

    # 200 = completed synchronously, 202 = accepted/in progress.
    if response.status_code not in [200, 202]:
        exp = CloudError(response)
        exp.request_id = response.headers.get('x-ms-request-id')
        raise exp

    deserialized = None

    if response.status_code == 200:
        deserialized = self._deserialize('WorkspacePurgeStatusResponse', response)
    if response.status_code == 202:
        deserialized = self._deserialize('WorkspacePurgeResponse', response)

    if raw:
        client_raw_response = ClientRawResponse(deserialized, response)
        return client_raw_response

    return deserialized
def purge(
        self, resource_group_name, workspace_name, table, filters, custom_headers=None, raw=False, polling=True, **operation_config):
    """Purges data in an Log Analytics workspace by a set of user-defined
    filters.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: Log Analytics workspace name
    :type workspace_name: str
    :param table: Table from which to purge data.
    :type table: str
    :param filters: The set of columns and filters (queries) to run over
     them to purge the resulting data.
    :type filters:
     list[~azure.mgmt.loganalytics.models.WorkspacePurgeBodyFilters]
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: The poller return type is ClientRawResponse, the
     direct response alongside the deserialized response
    :param polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :return: An instance of LROPoller that returns object or
     ClientRawResponse<object> if raw==True
    :rtype: ~msrestazure.azure_operation.AzureOperationPoller[object] or
     ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[object]]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # Fire the initial request with raw=True so the poller receives the
    # full HTTP response (status code and headers) to track progress.
    raw_result = self._purge_initial(
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
        table=table,
        filters=filters,
        custom_headers=custom_headers,
        raw=True,
        **operation_config
    )

    def get_long_running_output(response):
        # Invoked by the poller on the final response; honors the caller's
        # `raw` flag captured by this closure.
        deserialized = self._deserialize('object', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    lro_delay = operation_config.get(
        'long_running_operation_timeout',
        self.config.long_running_operation_timeout)
    # polling=True -> default ARM polling strategy; False -> no polling;
    # any other value is treated as a caller-supplied polling object.
    if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
purge.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/purge'}
def disable_intelligence_pack(
        self, resource_group_name, workspace_name, intelligence_pack_name, custom_headers=None, raw=False, **operation_config):
    """Turn off an intelligence pack on a workspace.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: Name of the Log Analytics Workspace.
    :type workspace_name: str
    :param intelligence_pack_name: The name of the intelligence pack to be
     disabled.
    :type intelligence_pack_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    # Expand the URL template with the validated path arguments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'intelligencePackName': self._serialize.url("intelligence_pack_name", intelligence_pack_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.disable_intelligence_pack.metadata['url'], **path_args)

    # Only the service API version rides on the query string.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}

    # Assemble request headers, honoring client configuration and any
    # caller-supplied overrides.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the POST and translate any unexpected status into CloudError.
    request = self._client.post(url, query)
    response = self._client.send(request, headers, stream=False, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # No response body to deserialize; surface the raw response on demand.
    if raw:
        return ClientRawResponse(None, response)
disable_intelligence_pack.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/intelligencePacks/{intelligencePackName}/Disable'}
def enable_intelligence_pack(
        self, resource_group_name, workspace_name, intelligence_pack_name, custom_headers=None, raw=False, **operation_config):
    """Turn on an intelligence pack on a workspace.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: Name of the Log Analytics Workspace.
    :type workspace_name: str
    :param intelligence_pack_name: The name of the intelligence pack to be
     enabled.
    :type intelligence_pack_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: None or ClientRawResponse if raw=true
    :rtype: None or ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    # Expand the URL template with the validated path arguments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'intelligencePackName': self._serialize.url("intelligence_pack_name", intelligence_pack_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.enable_intelligence_pack.metadata['url'], **path_args)

    # Only the service API version rides on the query string.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}

    # Assemble request headers, honoring client configuration and any
    # caller-supplied overrides.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the POST and translate any unexpected status into CloudError.
    request = self._client.post(url, query)
    response = self._client.send(request, headers, stream=False, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # No response body to deserialize; surface the raw response on demand.
    if raw:
        return ClientRawResponse(None, response)
enable_intelligence_pack.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/intelligencePacks/{intelligencePackName}/Enable'}
def list_intelligence_packs(
        self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
    """Return every available intelligence pack for a workspace along with
    its enabled/disabled state.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: Name of the Log Analytics Workspace.
    :type workspace_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: list or ClientRawResponse if raw=true
    :rtype: list[~azure.mgmt.loganalytics.models.IntelligencePack] or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    # Expand the URL template with the validated path arguments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.list_intelligence_packs.metadata['url'], **path_args)

    # Only the service API version rides on the query string.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}

    # Assemble request headers, honoring client configuration and any
    # caller-supplied overrides.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the GET and translate any unexpected status into CloudError.
    request = self._client.get(url, query)
    response = self._client.send(request, headers, stream=False, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # Status is guaranteed to be 200 at this point.
    deserialized = self._deserialize('[IntelligencePack]', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
list_intelligence_packs.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/intelligencePacks'}
def get_shared_keys(
        self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
    """Retrieve the shared keys of a workspace.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: Name of the Log Analytics Workspace.
    :type workspace_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: SharedKeys or ClientRawResponse if raw=true
    :rtype: ~azure.mgmt.loganalytics.models.SharedKeys or
     ~msrest.pipeline.ClientRawResponse
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    # Expand the URL template with the validated path arguments.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
        'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
    }
    url = self._client.format_url(self.get_shared_keys.metadata['url'], **path_args)

    # Only the service API version rides on the query string.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}

    # Assemble request headers, honoring client configuration and any
    # caller-supplied overrides.
    headers = {'Content-Type': 'application/json; charset=utf-8'}
    if self.config.generate_client_request_id:
        headers['x-ms-client-request-id'] = str(uuid.uuid1())
    if custom_headers:
        headers.update(custom_headers)
    if self.config.accept_language is not None:
        headers['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

    # Issue the POST and translate any unexpected status into CloudError.
    request = self._client.post(url, query)
    response = self._client.send(request, headers, stream=False, **operation_config)

    if response.status_code not in [200]:
        error = CloudError(response)
        error.request_id = response.headers.get('x-ms-request-id')
        raise error

    # Status is guaranteed to be 200 at this point.
    deserialized = self._deserialize('SharedKeys', response)

    if raw:
        return ClientRawResponse(deserialized, response)

    return deserialized
get_shared_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/sharedKeys'}
def list_usages(
        self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
    """Gets a list of usage metrics for a workspace.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of UsageMetric
    :rtype:
     ~azure.mgmt.loganalytics.models.UsageMetricPaged[~azure.mgmt.loganalytics.models.UsageMetric]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    def internal_paging(next_link=None, raw=False):
        # Fetch one page of results.  On the first call (no next_link)
        # the URL is built from the operation metadata; subsequent pages
        # follow the service-provided continuation link verbatim.
        if not next_link:
            # Construct URL
            url = self.list_usages.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        else:
            # Continuation link already embeds any query parameters.
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response
    # The paged collection fetches pages lazily via internal_paging.
    deserialized = models.UsageMetricPaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        client_raw_response = models.UsageMetricPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
list_usages.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/usages'}
def list_management_groups(
        self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
    """Gets a list of management groups connected to a workspace.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param workspace_name: The name of the workspace.
    :type workspace_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of ManagementGroup
    :rtype:
     ~azure.mgmt.loganalytics.models.ManagementGroupPaged[~azure.mgmt.loganalytics.models.ManagementGroup]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    def internal_paging(next_link=None, raw=False):
        # Fetch one page of results.  On the first call (no next_link)
        # the URL is built from the operation metadata; subsequent pages
        # follow the service-provided continuation link verbatim.
        if not next_link:
            # Construct URL
            url = self.list_management_groups.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        else:
            # Continuation link already embeds any query parameters.
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response
    # The paged collection fetches pages lazily via internal_paging.
    deserialized = models.ManagementGroupPaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        client_raw_response = models.ManagementGroupPaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
list_management_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/managementGroups'}
def list_by_resource_group(
        self, resource_group_name, custom_headers=None, raw=False, **operation_config):
    """Gets workspaces in a resource group.

    :param resource_group_name: The name of the resource group to get. The
     name is case insensitive.
    :type resource_group_name: str
    :param dict custom_headers: headers that will be added to the request
    :param bool raw: returns the direct response alongside the
     deserialized response
    :param operation_config: :ref:`Operation configuration
     overrides<msrest:optionsforoperations>`.
    :return: An iterator like instance of Workspace
    :rtype:
     ~azure.mgmt.loganalytics.models.WorkspacePaged[~azure.mgmt.loganalytics.models.Workspace]
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    api_version = "2015-11-01-preview"

    def internal_paging(next_link=None, raw=False):
        # Fetch one page of results.  On the first call (no next_link)
        # the URL is built from the operation metadata; subsequent pages
        # follow the service-provided continuation link verbatim.
        if not next_link:
            # Construct URL
            url = self.list_by_resource_group.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
            }
            url = self._client.format_url(url, **path_format_arguments)

            # Construct parameters
            query_parameters = {}
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        else:
            # Continuation link already embeds any query parameters.
            url = next_link
            query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(
            request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        return response

    # Deserialize response
    # The paged collection fetches pages lazily via internal_paging.
    deserialized = models.WorkspacePaged(internal_paging, self._deserialize.dependencies)

    if raw:
        header_dict = {}
        client_raw_response = models.WorkspacePaged(internal_paging, self._deserialize.dependencies, header_dict)
        return client_raw_response

    return deserialized
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces'}
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets the workspaces in a subscription.

        NOTE: the method name shadows the ``list`` builtin inside this class
        body; it is part of the generated API surface and must not be renamed.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of Workspace
        :rtype:
         ~azure.mgmt.loganalytics.models.WorkspacePaged[~azure.mgmt.loganalytics.models.Workspace]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        api_version = "2015-11-01-preview"
        # Page fetcher: first call builds the subscription-scoped URL,
        # subsequent calls follow the server's next_link verbatim.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                # Correlation id for client-side request tracing.
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.WorkspacePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            # NOTE(review): header_dict is created empty and never populated
            # here -- confirm raw-header handling in the Paged base class.
            header_dict = {}
            client_raw_response = models.WorkspacePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.OperationalInsights/workspaces'}
def _create_or_update_initial(
self, resource_group_name, workspace_name, parameters, custom_headers=None, raw=False, **operation_config):
api_version = "2015-11-01-preview"
# Construct URL
url = self.create_or_update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'Workspace')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Workspace', response)
if response.status_code == 201:
deserialized = self._deserialize('Workspace', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
    def create_or_update(
            self, resource_group_name, workspace_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
        """Create or update a workspace.

        :param resource_group_name: The resource group name of the workspace.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param parameters: The parameters required to create or update a
         workspace.
        :type parameters: ~azure.mgmt.loganalytics.models.Workspace
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns Workspace or
         ClientRawResponse<Workspace> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.loganalytics.models.Workspace]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.loganalytics.models.Workspace]]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the long-running operation. raw=True so the poller receives
        # the full initial HTTP response (status/headers) to drive polling.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            parameters=parameters,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )
        # Invoked by the poller once the operation finishes, to turn the final
        # response into the caller-facing result (honoring the outer `raw`).
        def get_long_running_output(response):
            deserialized = self._deserialize('Workspace', response)
            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response
            return deserialized
        # Poll interval: a per-call override wins over the client-wide default.
        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}'}
def delete(
self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
"""Deletes a workspace instance.
:param resource_group_name: The resource group name of the workspace.
:type resource_group_name: str
:param workspace_name: Name of the Log Analytics Workspace.
:type workspace_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-11-01-preview"
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}'}
def get(
self, resource_group_name, workspace_name, custom_headers=None, raw=False, **operation_config):
"""Gets a workspace instance.
:param resource_group_name: The resource group name of the workspace.
:type resource_group_name: str
:param workspace_name: Name of the Log Analytics Workspace.
:type workspace_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Workspace or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.loganalytics.models.Workspace or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-11-01-preview"
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Workspace', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}'}
def update(
self, resource_group_name, workspace_name, parameters, custom_headers=None, raw=False, **operation_config):
"""Updates a workspace.
:param resource_group_name: The resource group name of the workspace.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param parameters: The parameters required to patch a workspace.
:type parameters: ~azure.mgmt.loganalytics.models.Workspace
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: Workspace or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.loganalytics.models.Workspace or
~msrest.pipeline.ClientRawResponse
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
api_version = "2015-11-01-preview"
# Construct URL
url = self.update.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=63, min_length=4, pattern=r'^[A-Za-z0-9][A-Za-z0-9-]+[A-Za-z0-9]$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'Workspace')
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Workspace', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}'}
| 15,513 | 0 | 325 |
89803101cd22e318a3f2888d7a654ab6d1db616b | 237 | py | Python | utils/file_related.py | ishine/ppg-vc | b59cb9862cf4b82a3bdb589950e25cab85fc9b03 | [
"Apache-2.0"
] | 133 | 2021-06-12T04:44:36.000Z | 2022-03-31T09:42:26.000Z | utils/file_related.py | zhouyh-jlu/ppg-vc | b59cb9862cf4b82a3bdb589950e25cab85fc9b03 | [
"Apache-2.0"
] | 20 | 2021-06-14T20:15:49.000Z | 2022-02-18T09:05:00.000Z | utils/file_related.py | zhouyh-jlu/ppg-vc | b59cb9862cf4b82a3bdb589950e25cab85fc9b03 | [
"Apache-2.0"
] | 41 | 2021-06-12T05:43:50.000Z | 2022-03-27T07:56:28.000Z | import numpy as np
import torch
| 21.545455 | 70 | 0.704641 | import numpy as np
import torch
def load_filepaths_and_text(filename, split="|"):
    """Read a UTF-8 metadata file and split each stripped line on *split*.

    Returns a list of lists, one entry per line -- e.g. ``[[path, text], ...]``
    for the common ``path|text`` layout.
    """
    with open(filename, encoding='utf-8') as handle:
        return [row.strip().split(split) for row in handle]
| 177 | 0 | 24 |
3e2e8941c27d3a24c5b1554248525566a0a63029 | 765 | py | Python | softlearning/scripts/console_scripts.py | hanglai/bmpo | d065cf195b942c3e534d4c789a0982e0ec09957c | [
"MIT"
] | 19 | 2020-09-02T05:58:09.000Z | 2021-08-23T11:03:00.000Z | softlearning/scripts/console_scripts.py | hanglai/bmpo | d065cf195b942c3e534d4c789a0982e0ec09957c | [
"MIT"
] | 4 | 2020-07-19T13:57:56.000Z | 2021-11-10T19:42:58.000Z | softlearning/scripts/console_scripts.py | apexrl/bmpo | d065cf195b942c3e534d4c789a0982e0ec09957c | [
"MIT"
] | 3 | 2020-09-28T11:21:24.000Z | 2020-12-28T23:27:25.000Z | """A command line interface that exposes softlearning examples to user.
run_example_* methods, which run the experiments by invoking
`tune.run_experiments` function.
"""
import click
import sys
sys.path.append('./')
from examples.instrument import run_example_local
# NOTE: the function bodies for this CLI were missing here (decorators were
# attached to nothing, which is a SyntaxError); restored from the intact
# duplicate of this module below.
@click.group()
def cli():
    # Root click command group; subcommands are registered via cli.add_command().
    pass
@cli.command(
    name='run_local',
    context_settings={'ignore_unknown_options': True})
@click.argument("example_module_name", required=True, type=str)
@click.argument('example_argv', nargs=-1, type=click.UNPROCESSED)
def run_example_local_cmd(example_module_name, example_argv):
    # Unknown options are accepted and forwarded verbatim to the example runner.
    run_example_local(example_module_name, example_argv)
cli.add_command(run_example_local_cmd)
def main():
    # Console-script entry point; delegates to the click group.
    return cli()
if __name__ == "__main__":
    main()
| 20.131579 | 71 | 0.754248 | """A command line interface that exposes softlearning examples to user.
run_example_* methods, which run the experiments by invoking
`tune.run_experiments` function.
"""
import click
import sys
sys.path.append('./')
from examples.instrument import run_example_local
@click.group()
def cli():
    # Root click command group; subcommands are attached below via
    # cli.add_command(). (Kept as a bare group -- a docstring here would
    # become the CLI help text.)
    pass
# 'run_local' runs one example module locally. ignore_unknown_options lets
# arbitrary example flags pass through click untouched.
@cli.command(
    name='run_local',
    context_settings={'ignore_unknown_options': True})
@click.argument("example_module_name", required=True, type=str)
@click.argument('example_argv', nargs=-1, type=click.UNPROCESSED)
def run_example_local_cmd(example_module_name, example_argv):
    # Forward the module name and its raw argv straight to the local runner.
    run_example_local(example_module_name, example_argv)
cli.add_command(run_example_local_cmd)
def main():
    # Console-script entry point; delegates to the click group.
    return cli()
if __name__ == "__main__":
    main()
| 102 | 0 | 67 |
9687324a6d1d4c0aad3ca95660de57e524efe185 | 510 | py | Python | students/migrations/0004_auto_20210801_0618.py | yamako-tech/yamako-tech-web50-projects-2020-x-capstone | 5ff4faaa2cf1b3489c554232044dfc532213a0a7 | [
"MIT"
] | null | null | null | students/migrations/0004_auto_20210801_0618.py | yamako-tech/yamako-tech-web50-projects-2020-x-capstone | 5ff4faaa2cf1b3489c554232044dfc532213a0a7 | [
"MIT"
] | null | null | null | students/migrations/0004_auto_20210801_0618.py | yamako-tech/yamako-tech-web50-projects-2020-x-capstone | 5ff4faaa2cf1b3489c554232044dfc532213a0a7 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-07-31 21:18
from django.db import migrations, models
| 22.173913 | 50 | 0.568627 | # Generated by Django 3.2.5 on 2021-07-31 21:18
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (Django 3.2.5): orders Textbook rows by
    # descending page and allows 'page' to be left blank in forms.
    dependencies = [
        ('students', '0003_auto_20210731_2009'),
    ]
    operations = [
        # Default queryset ordering: highest page number first.
        migrations.AlterModelOptions(
            name='textbook',
            options={'ordering': ['-page']},
        ),
        # blank=True is form-level validation only; the column's NULL
        # constraint is unchanged (null is not set here).
        migrations.AlterField(
            model_name='textbook',
            name='page',
            field=models.IntegerField(blank=True),
        ),
    ]
| 0 | 396 | 23 |
db3c24c12d152e2453312bcbf65b95284e2451dd | 9,495 | py | Python | tools/living_resources.py | osvenskan/data_rescue_D62CD1E5 | ebcadde2f0046bcfbd3f76166655c56d254e13fc | [
"BSD-3-Clause"
] | null | null | null | tools/living_resources.py | osvenskan/data_rescue_D62CD1E5 | ebcadde2f0046bcfbd3f76166655c56d254e13fc | [
"BSD-3-Clause"
] | null | null | null | tools/living_resources.py | osvenskan/data_rescue_D62CD1E5 | ebcadde2f0046bcfbd3f76166655c56d254e13fc | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Python imports
import collections
# Project imports
import util
import util_date
NAMESPACE = 'LivingResources'
DATE_RANGE = util_date.DATES[NAMESPACE]
"""
LivingResources data has 3 top-level categories (programs) -- Tidal Plankton Data, Tidal Benthic
Data, and Nontidal Benthic Data.
"""
# https://data.chesapeakebay.net/api.json/LivingResources/Programs
programs = util.download_and_jsonify(NAMESPACE, 'Programs')
# Sort so the comparison against expected_program_ids below is
# order-insensitive.
program_ids = sorted([d['ProgramId'] for d in programs])
# Sample response:
# [{
#     "ProgramId": "TidalPlankton",
#     "ProgramIdentifier": "TPD",
#     "ProgramName": "Tidal Plankton Data"
# }, {
#     "ProgramId": "TidalBenthic",
#     "ProgramIdentifier": "TBD",
#     "ProgramName": "Tidal Benthic Data"
# }, {
#     "ProgramId": "NontidalBenthic",
#     "ProgramIdentifier": "NBD",
#     "ProgramName": "Nontidal Benthic Data"
# }]
# Each of the 3 programs requires some study to understand and download properly. I doubt
# they'll add/delete/change programs, but if they do, this code might break. That being the case, I
# check here to ensure that the programs are what I expect them to be, and fail loudly if not.
expected_program_ids = sorted(('TidalPlankton', 'TidalBenthic', 'NontidalBenthic'))
if program_ids != expected_program_ids:
    print('Received programs: ' + ', '.join(program_ids))
    print('Expected programs: ' + ', '.join(expected_program_ids))
    raise ValueError('Unexpected or missing program(s)')
# Each of the 3 programs has a number of different data types. They all behave the same, except
# for /TidalPlankton/Station which requires special handling. I handle it here.
# https://data.chesapeakebay.net/api.json/LivingResources/TidalPlankton/GeographicalAttributes
geographical_types = util.download_and_jsonify(NAMESPACE, 'TidalPlankton', 'GeographicalAttributes')
# The last two elements of the DataTypes URL seem backwards to me but that's what they are.
# https://data.chesapeakebay.net/api.json/LivingResources/DataTypes/TidalPlankton
data_types = util.download_and_jsonify(NAMESPACE, 'DataTypes', 'TidalPlankton')
data_type_ids = [d['DataTypeId'] for d in data_types]
# For TidalPlankton, geographical_types is a dicts of lists keyed by data type. Simplify to
# a plain {data_type_id: [geo_type_id, ...]} mapping.
# ref: LivingResources.js (line 67)
temp_geographical_types = collections.defaultdict(list)
for data_type_id in data_type_ids:
    temp_geographical_types[data_type_id] = [d['GeoTypeId'] for
                                             d in geographical_types[data_type_id]]
geographical_types = temp_geographical_types
# geographical_types is now a dict like this --
# {'MonitorEvent': ['HUC8', 'HUC12', 'FIPS', 'CBSeg2003', 'Station'],
#  'Reported': ['HUC8', 'HUC12', 'FIPS', 'CBSeg2003', 'Station'],
#  'Station': ['HUC8', 'HUC12', 'FIPS', 'CBSeg2003', 'SegmentShed2009', 'Station']}
# First handle program == TidalPlankton, geo type == Station
for geographical_type_id in geographical_types['Station']:
    attributes = util.download_and_jsonify(NAMESPACE, 'TidalPlankton', 'Station',
                                           geographical_type_id)
    attribute_ids = util.extract_attribute_ids(geographical_type_id, attributes)
    # map() yields a single-use iterator; it is consumed exactly once below.
    attribute_ids = map(str, attribute_ids)
    if geographical_type_id == 'FIPS':
        # FIPS works differently than other geo types (why?) and requires POSTing data rather
        # than simple GETs.
        for attribute_id in attribute_ids:
            # e.g. https://data.chesapeakebay.net/api.JSON/LivingResources/TidalPlankton/Station/FIPS
            util.post_single_attribute_id(attribute_id, NAMESPACE, 'TidalPlankton', 'Station',
                                          geographical_type_id)
    else:
        # e.g. https://data.chesapeakebay.net/api.json/LivingResources/TidalPlankton/Station/HUC8/
        for attribute_id in attribute_ids:
            # e.g. https://data.chesapeakebay.net/api.JSON/LivingResources/TidalPlankton/Station/HUC8/20
            util.download(NAMESPACE, 'TidalPlankton', 'Station', geographical_type_id, attribute_id)
# Remove Station since it was handled above
data_type_ids = [data_type_id for data_type_id in data_type_ids if data_type_id != 'Station']
# https://data.chesapeakebay.net/api.json/LivingResources/Projects/TidalPlankton
projects = util.download_and_jsonify(NAMESPACE, 'Projects', 'TidalPlankton')
project_ids = [str(d['ProjectId']) for d in projects]
# At this point, data_type_ids should be just ('MonitorEvent', 'Reported')
# Download every (data type, project, geo type, attribute) combination over
# the program-wide date range.
for data_type_id in data_type_ids:
    for project_id in project_ids:
        for geographical_type_id in geographical_types[data_type_id]:
            # e.g. https://data.chesapeakebay.net/api.JSON/LivingResources/TidalPlankton/MonitorEvent/1-16-1984/4-4-2017/9/HUC12
            attributes = util.download_and_jsonify(NAMESPACE,
                                                   'TidalPlankton',
                                                   data_type_id,
                                                   DATE_RANGE.start.url_format,
                                                   DATE_RANGE.end.url_format,
                                                   project_id,
                                                   geographical_type_id)
            attribute_ids = util.extract_attribute_ids(geographical_type_id, attributes)
            # Single-use iterator, consumed by the loop below.
            attribute_ids = map(str, attribute_ids)
            for attribute_id in attribute_ids:
                # e.g. https://data.chesapeakebay.net/api.JSON/LivingResources/TidalPlankton/MonitorEvent/1-16-1984/4-4-2017/9/HUC12/767
                util.download(NAMESPACE,
                              'TidalPlankton',
                              data_type_id,
                              DATE_RANGE.start.url_format,
                              DATE_RANGE.end.url_format,
                              project_id,
                              geographical_type_id,
                              attribute_id)
# OK, that takes care of all 3 TidalPlankton data types. Remove TidalPlankton since it's been
# handled.
program_ids = [program_id for program_id in program_ids if program_id != 'TidalPlankton']
# And now for the remaining programs. At this point, program_ids should be just this --
# ('NontidalBenthic', 'TidalBenthic')
for program_id in program_ids:
    print('Starting program {}...'.format(program_id))
    # e.g. https://data.chesapeakebay.net/api.json/LivingResources/DataTypes/TidalBenthic
    data_types = util.download_and_jsonify(NAMESPACE, 'DataTypes', program_id)
    data_type_ids = [d['DataTypeId'] for d in data_types]
    # e.g. https://data.chesapeakebay.net/api.json/LivingResources/NontidalBenthic/GeographicalAttributes
    geographical_types = util.download_and_jsonify(NAMESPACE, program_id, 'GeographicalAttributes')
    geographical_type_ids = [d['GeoTypeId'] for d in geographical_types]
    for data_type_id in data_type_ids:
        print('Starting program {}, data type {}...'.format(program_id, data_type_id))
        # e.g. https://data.chesapeakebay.net/api.json/LivingResources/Projects/TidalBenthic
        projects = util.download_and_jsonify(NAMESPACE, 'Projects', program_id)
        project_ids = [str(d['ProjectId']) for d in projects]
        for project_id in project_ids:
            params = (program_id, data_type_id, project_id)
            print('Starting program {}, data type {}, project {}...'.format(*params))
            for geographical_type_id in geographical_type_ids:
                params = (program_id, data_type_id, project_id, geographical_type_id)
                print('Starting program {}, data type {}, project {}, geo type {}...'.format(*params))
                # geographical_type_id is something like HUC8, FIPS, etc.
                # https://data.chesapeakebay.net/api.JSON/LivingResources/TidalPlankton/MonitorEvent/4-4-2012/4-4-2017/17/HUC12
                attributes = util.download_and_jsonify(NAMESPACE,
                                                       program_id,
                                                       data_type_id,
                                                       DATE_RANGE.start.url_format,
                                                       DATE_RANGE.end.url_format,
                                                       project_id,
                                                       geographical_type_id)
                attribute_ids = util.extract_attribute_ids(geographical_type_id, attributes)
                # Single-use iterator, consumed by the loop below.
                attribute_ids = map(str, attribute_ids)
                for attribute_id in attribute_ids:
                    # TidalBenthic/WaterQuality/Station is broken on the server
                    # side; skip it for now. (The original also tested
                    # NAMESPACE == 'LivingResources', which is always true --
                    # NAMESPACE is the module constant defined at the top.)
                    if (program_id == 'TidalBenthic') and (data_type_id == 'WaterQuality') and \
                       (geographical_type_id == 'Station'):
                        continue
                    # e.g. https://data.chesapeakebay.net/api.JSON/LivingResources/TidalPlankton/MonitorEvent/4-4-2012/4-4-2017/17/HUC12/781
                    util.download(NAMESPACE,
                                  program_id,
                                  data_type_id,
                                  DATE_RANGE.start.url_format,
                                  DATE_RANGE.end.url_format,
                                  project_id,
                                  geographical_type_id,
                                  attribute_id)
| 54.568966 | 144 | 0.627278 | #!/usr/bin/env python
# Python imports
import collections
# Project imports
import util
import util_date
NAMESPACE = 'LivingResources'
# Fixed date window requested for every dated endpoint below.
DATE_RANGE = util_date.DATES[NAMESPACE]
"""
LivingResources data has 3 top-level categories (programs) -- Tidal Plankton Data, Tidal Benthic
Data, and Nontidal Benthic Data.
"""
# https://data.chesapeakebay.net/api.json/LivingResources/Programs
programs = util.download_and_jsonify(NAMESPACE, 'Programs')
# Sorted so the comparison against expected_program_ids below is order-independent.
program_ids = sorted([d['ProgramId'] for d in programs])
# [{
#     "ProgramId": "TidalPlankton",
#     "ProgramIdentifier": "TPD",
#     "ProgramName": "Tidal Plankton Data"
# }, {
#     "ProgramId": "TidalBenthic",
#     "ProgramIdentifier": "TBD",
#     "ProgramName": "Tidal Benthic Data"
# }, {
#     "ProgramId": "NontidalBenthic",
#     "ProgramIdentifier": "NBD",
#     "ProgramName": "Nontidal Benthic Data"
# }]
# Each of the 3 programs requires some study to understand and download properly. I doubt
# they'll add/delete/change programs, but if they do, this code might break. That being the case, I
# check here to ensure that the programs are what I expect them to be, and fail loudly if not.
expected_program_ids = sorted(('TidalPlankton', 'TidalBenthic', 'NontidalBenthic'))
if program_ids != expected_program_ids:
    print('Received programs: ' + ', '.join(program_ids))
    print('Expected programs: ' + ', '.join(expected_program_ids))
    raise ValueError('Unexpected or missing program(s)')
# Each of the 3 programs has a number of different data types. They all behave the same, except
# for /TidalPlankton/Station which requires special handling. I handle it here.
# https://data.chesapeakebay.net/api.json/LivingResources/TidalPlankton/GeographicalAttributes
geographical_types = util.download_and_jsonify(NAMESPACE, 'TidalPlankton', 'GeographicalAttributes')
# The last two elements of the DataTypes URL seem backwards to me but that's what they are.
# https://data.chesapeakebay.net/api.json/LivingResources/DataTypes/TidalPlankton
data_types = util.download_and_jsonify(NAMESPACE, 'DataTypes', 'TidalPlankton')
data_type_ids = [d['DataTypeId'] for d in data_types]
# For TidalPlankton, geographical_types is a dicts of lists keyed by data type. Simplify.
# ref: LivingResources.js (line 67)
temp_geographical_types = collections.defaultdict(list)
for data_type_id in data_type_ids:
    temp_geographical_types[data_type_id] = [d['GeoTypeId'] for
                                             d in geographical_types[data_type_id]]
geographical_types = temp_geographical_types
# geographical_types is now a dict like this --
# {'MonitorEvent': ['HUC8', 'HUC12', 'FIPS', 'CBSeg2003', 'Station'],
#  'Reported': ['HUC8', 'HUC12', 'FIPS', 'CBSeg2003', 'Station'],
#  'Station': ['HUC8', 'HUC12', 'FIPS', 'CBSeg2003', 'SegmentShed2009', 'Station']}
# First handle program == TidalPlankton, geo type == Station
for geographical_type_id in geographical_types['Station']:
    attributes = util.download_and_jsonify(NAMESPACE, 'TidalPlankton', 'Station',
                                           geographical_type_id)
    # Attribute ids are turned into strings because they are spliced into URLs.
    # (NOTE(review): map() returns a list on Python 2, which this file targets --
    # on Python 3 this would be a one-shot iterator.)
    attribute_ids = util.extract_attribute_ids(geographical_type_id, attributes)
    attribute_ids = map(str, attribute_ids)
    if geographical_type_id == 'FIPS':
        # FIPS works differently than other geo types (why?) and requires POSTing data rather
        # than simple GETs.
        for attribute_id in attribute_ids:
            # e.g. https://data.chesapeakebay.net/api.JSON/LivingResources/TidalPlankton/Station/FIPS
            util.post_single_attribute_id(attribute_id, NAMESPACE, 'TidalPlankton', 'Station',
                                          geographical_type_id)
    else:
        # e.g. https://data.chesapeakebay.net/api.json/LivingResources/TidalPlankton/Station/HUC8/
        for attribute_id in attribute_ids:
            # e.g. https://data.chesapeakebay.net/api.JSON/LivingResources/TidalPlankton/Station/HUC8/20
            util.download(NAMESPACE, 'TidalPlankton', 'Station', geographical_type_id, attribute_id)
# Remove Station since it was handled above
data_type_ids = [data_type_id for data_type_id in data_type_ids if data_type_id != 'Station']
# https://data.chesapeakebay.net/api.json/LivingResources/Projects/TidalPlankton
projects = util.download_and_jsonify(NAMESPACE, 'Projects', 'TidalPlankton')
project_ids = [str(d['ProjectId']) for d in projects]
# At this point, data_type_ids should be just ('MonitorEvent', 'Reported')
# Enumerate every (data type, project, geo type, attribute) combination and
# download each one; download_and_jsonify lists the attributes available for
# a combination, and util.download fetches the actual data for one attribute.
for data_type_id in data_type_ids:
    for project_id in project_ids:
        for geographical_type_id in geographical_types[data_type_id]:
            # e.g. https://data.chesapeakebay.net/api.JSON/LivingResources/TidalPlankton/MonitorEvent/1-16-1984/4-4-2017/9/HUC12
            attributes = util.download_and_jsonify(NAMESPACE,
                                                   'TidalPlankton',
                                                   data_type_id,
                                                   DATE_RANGE.start.url_format,
                                                   DATE_RANGE.end.url_format,
                                                   project_id,
                                                   geographical_type_id)
            attribute_ids = util.extract_attribute_ids(geographical_type_id, attributes)
            attribute_ids = map(str, attribute_ids)
            for attribute_id in attribute_ids:
                # e.g. https://data.chesapeakebay.net/api.JSON/LivingResources/TidalPlankton/MonitorEvent/1-16-1984/4-4-2017/9/HUC12/767
                util.download(NAMESPACE,
                              'TidalPlankton',
                              data_type_id,
                              DATE_RANGE.start.url_format,
                              DATE_RANGE.end.url_format,
                              project_id,
                              geographical_type_id,
                              attribute_id)
# OK, that takes care of all 3 TidalPlankton data types. Remove TidalPlankton since it's been
# handled.
program_ids = [program_id for program_id in program_ids if program_id != 'TidalPlankton']
# And now for the remaining programs. At this point, program_ids should be just this --
# ('NontidalBenthic', 'TidalBenthic')
for program_id in program_ids:
    print('Starting program {}...'.format(program_id))
    # e.g. https://data.chesapeakebay.net/api.json/LivingResources/DataTypes/TidalBenthic
    data_types = util.download_and_jsonify(NAMESPACE, 'DataTypes', program_id)
    data_type_ids = [d['DataTypeId'] for d in data_types]
    # e.g. https://data.chesapeakebay.net/api.json/LivingResources/NontidalBenthic/GeographicalAttributes
    geographical_types = util.download_and_jsonify(NAMESPACE, program_id, 'GeographicalAttributes')
    geographical_type_ids = [d['GeoTypeId'] for d in geographical_types]
    for data_type_id in data_type_ids:
        print('Starting program {}, data type {}...'.format(program_id, data_type_id))
        # e.g. https://data.chesapeakebay.net/api.json/LivingResources/Projects/TidalBenthic
        # NOTE(review): the Projects list is re-downloaded on every data type
        # iteration even though it only depends on program_id; it could be
        # hoisted one loop out.
        projects = util.download_and_jsonify(NAMESPACE, 'Projects', program_id)
        project_ids = [str(d['ProjectId']) for d in projects]
        for project_id in project_ids:
            params = (program_id, data_type_id, project_id)
            print('Starting program {}, data type {}, project {}...'.format(*params))
            for geographical_type_id in geographical_type_ids:
                params = (program_id, data_type_id, project_id, geographical_type_id)
                print('Starting program {}, data type {}, project {}, geo type {}...'.format(*params))
                # geographical_type_id is something like HUC8, FIPS, etc.
                # https://data.chesapeakebay.net/api.JSON/LivingResources/TidalPlankton/MonitorEvent/4-4-2012/4-4-2017/17/HUC12
                attributes = util.download_and_jsonify(NAMESPACE,
                                                       program_id,
                                                       data_type_id,
                                                       DATE_RANGE.start.url_format,
                                                       DATE_RANGE.end.url_format,
                                                       project_id,
                                                       geographical_type_id)
                attribute_ids = util.extract_attribute_ids(geographical_type_id, attributes)
                attribute_ids = map(str, attribute_ids)
                for attribute_id in attribute_ids:
                    # NOTE(review): NAMESPACE is a constant equal to
                    # 'LivingResources', so the first clause of this test is
                    # always true here; it looks copied from a shared template.
                    if (NAMESPACE == 'LivingResources') and (program_id == 'TidalBenthic') and \
                            (data_type_id == 'WaterQuality') and (geographical_type_id == 'Station'):
                        # This is broken on the server side, skip for now.
                        pass
                    else:
                        # e.g. https://data.chesapeakebay.net/api.JSON/LivingResources/TidalPlankton/MonitorEvent/4-4-2012/4-4-2017/17/HUC12/781
                        util.download(NAMESPACE,
                                      program_id,
                                      data_type_id,
                                      DATE_RANGE.start.url_format,
                                      DATE_RANGE.end.url_format,
                                      project_id,
                                      geographical_type_id,
                                      attribute_id)
| 0 | 0 | 0 |
6eabdf90cb21e1477bd6afe8491180df9c31d5b2 | 2,229 | py | Python | server.py | schollz/twittermatic | b66cf1142b77788d485c8f0efed47aa1e41150cf | [
"Unlicense",
"MIT"
] | 8 | 2015-10-09T15:37:33.000Z | 2022-01-03T10:15:21.000Z | server.py | schollz/twittermatic | b66cf1142b77788d485c8f0efed47aa1e41150cf | [
"Unlicense",
"MIT"
] | 9 | 2015-10-09T21:03:03.000Z | 2016-03-12T14:00:20.000Z | server.py | schollz/twittermatic | b66cf1142b77788d485c8f0efed47aa1e41150cf | [
"Unlicense",
"MIT"
] | 3 | 2015-10-08T19:40:49.000Z | 2019-12-17T19:16:37.000Z | import csv
import json
from flask import Flask
from flask import Response
from flask import request
from flask import render_template
from flask.ext.triangle import Triangle
import data.database_commands as database_commands
app = Flask(__name__)
Triangle(app)
@app.route("/")
@app.route("/tweets")
# @app.route("/tweets")
# def root():
# handle = request.args.get('handle')
# tweets = database_commands.get_tweet_by_handle(handle)
# data = 'handle,timestamp,data_type,data_id,status,tweet_text<br>'
# for tweet in tweets:
# data += str(tweet.twitter_handle) + ","
# data += str(tweet.tweet_time) + ","
# data += str(tweet.data_type) + ","
# data += str(tweet.data_id) + ","
# data += str(tweet.status) + ","
# data += str(tweet.tweet_text)
# data += '<br>'
# return data
@app.route("/export")
if __name__ == '__main__':
app.run(
host= '0.0.0.0',
debug=True,
port=8080
)
| 28.576923 | 71 | 0.612831 | import csv
import json
from flask import Flask
from flask import Response
from flask import request
from flask import render_template
from flask.ext.triangle import Triangle
import data.database_commands as database_commands
app = Flask(__name__)
Triangle(app)
@app.route("/")
def index():
    """Render the configured Twitter handles as a simple HTML line list.

    Reads the ``handles`` array from ``server.json`` in the working
    directory and joins the entries with ``<br>`` tags.
    """
    # Use a context manager so the config file handle is closed promptly;
    # the original passed an open() result straight to json.load and left
    # closing to the garbage collector.
    with open('server.json', 'r') as config_file:
        handles = json.load(config_file)['handles']
    return ''.join(handle + '<br>' for handle in handles)
@app.route("/tweets")
def tweets():
    """Render the stored tweets for the ``?handle=<name>`` query parameter
    using the ``index.html`` template.
    """
    handle = request.args.get('handle')
    rows = []
    for tweet in database_commands.get_tweet_by_handle(handle):
        rows.append({
            "twitter_handle": str(tweet.twitter_handle),
            "tweet_time": str(tweet.tweet_time),
            "data_type": str(tweet.data_type),
            "data_id": str(tweet.data_id),
            "status": str(tweet.status),
            "tweet_text": str(tweet.tweet_text),
        })
    return render_template('index.html', tweets=rows, title=handle)
# @app.route("/tweets")
# def root():
# handle = request.args.get('handle')
# tweets = database_commands.get_tweet_by_handle(handle)
# data = 'handle,timestamp,data_type,data_id,status,tweet_text<br>'
# for tweet in tweets:
# data += str(tweet.twitter_handle) + ","
# data += str(tweet.tweet_time) + ","
# data += str(tweet.data_type) + ","
# data += str(tweet.data_id) + ","
# data += str(tweet.status) + ","
# data += str(tweet.tweet_text)
# data += '<br>'
# return data
@app.route("/export")
def export():
    """Export the tweets for ``?handle=<name>`` as a CSV response.

    Column order matches the header row:
    handle, timestamp, data_type, data_id, status, tweet_text.
    """
    handle = request.args.get('handle')
    tweets = database_commands.get_tweet_by_handle(handle)
    data = 'handle,timestamp,data_type,data_id,status,tweet_text\n'
    for tweet in tweets:
        # BUGFIX: join the fields with commas so each row matches the
        # comma-separated header -- the original concatenated the six
        # values with no separator at all, producing unparseable rows.
        data += ','.join([
            str(tweet.twitter_handle),
            str(tweet.tweet_time),
            str(tweet.data_type),
            str(tweet.data_id),
            str(tweet.status),
            str(tweet.tweet_text),
        ])
        data += '\n'
    return Response(data, mimetype='text/csv')
if __name__ == '__main__':
    # Development entry point: serve on all interfaces on port 8080 with the
    # interactive debugger enabled (debug=True is not safe for production).
    app.run(
        host= '0.0.0.0',
        debug=True,
        port=8080
    )
| 1,174 | 0 | 66 |
40d1a3102a6c8c45938443c8b532d5d246c9d3f2 | 2,347 | py | Python | src/simmate/calculators/vasp/error_handlers/rotation_matrix_nonint.py | laurenmm/simmate-1 | c06b94c46919b01cda50f78221ad14f75c100a14 | [
"BSD-3-Clause"
] | 9 | 2021-12-21T02:58:21.000Z | 2022-01-25T14:00:06.000Z | src/simmate/calculators/vasp/error_handlers/rotation_matrix_nonint.py | laurenmm/simmate-1 | c06b94c46919b01cda50f78221ad14f75c100a14 | [
"BSD-3-Clause"
] | 51 | 2022-01-01T15:59:58.000Z | 2022-03-26T21:25:42.000Z | src/simmate/calculators/vasp/error_handlers/rotation_matrix_nonint.py | laurenmm/simmate-1 | c06b94c46919b01cda50f78221ad14f75c100a14 | [
"BSD-3-Clause"
] | 7 | 2022-01-01T03:44:32.000Z | 2022-03-29T19:59:27.000Z | # -*- coding: utf-8 -*-
import os
import json
from simmate.workflow_engine import ErrorHandler
from simmate.calculators.vasp.inputs import Incar
class RotationNonIntMatrix(ErrorHandler):
"""
This a simple error handler that is active when VASP finds an issue with the
rotation matrix.
"""
is_monitor = True
filename_to_check = "vasp.out"
possible_error_messages = [
"Found some non-integer element in rotation matrix",
"SGRCON",
]
| 33.528571 | 83 | 0.637836 | # -*- coding: utf-8 -*-
import os
import json
from simmate.workflow_engine import ErrorHandler
from simmate.calculators.vasp.inputs import Incar
class RotationNonIntMatrix(ErrorHandler):
    """
    This is a simple error handler that is active when VASP finds an issue
    with the rotation matrix.

    Corrections are attempted in order: first switch to a gamma-centered
    k-mesh (KGAMMA=True), then disable symmetry (ISYM=0). A third
    occurrence raises, since there is nothing left to try.
    """

    # Runs while the calculation is in progress (not only at the end).
    is_monitor = True

    # Output file scanned for the messages below.
    filename_to_check = "vasp.out"
    possible_error_messages = [
        "Found some non-integer element in rotation matrix",
        "SGRCON",
    ]

    def correct(self, directory: str) -> str:
        """Apply the next correction for this error in *directory* and
        return a short description of what was changed.

        Raises an Exception once both known fixes have been attempted.
        """
        # load the INCAR file to view the current settings
        incar_filename = os.path.join(directory, "INCAR")
        incar = Incar.from_file(incar_filename)

        # load the error-count file if it exists
        error_count_filename = os.path.join(directory, "simmate_error_counts.json")
        if os.path.exists(error_count_filename):
            with open(error_count_filename) as error_count_file:
                error_counts = json.load(error_count_file)
        # otherwise we are starting with an empty dictionary
        else:
            error_counts = {}

        # The fix applied depends on how many times we've already tried to
        # correct this error. (NOTE(review): the counter key is "brmix",
        # apparently inherited from the handler this was copied from; kept
        # as-is so existing error-count files keep working.)
        error_counts["brmix"] = error_counts.get("brmix", 0)

        # Our first attempt to fix this error is to switch to a gamma-centered mesh
        if error_counts["brmix"] == 0:
            incar["KGAMMA"] = True
            correction = "switched KGAMMA to True"
        # our second attempt turns symmetry off
        elif error_counts["brmix"] == 1:
            incar["ISYM"] = 0
            # BUGFIX: this branch previously reported "switched KGAMMA to
            # True" even though it sets ISYM.
            correction = "switched ISYM to 0"
        # if the two attempts above didn't work, we give up by raising an error
        else:
            raise Exception(
                "Exceeded maximum corrections for RotationNonIntMatrix error."
            )

        # BUGFIX: record that a correction was attempted. The count was
        # previously never incremented, so the second fix and the give-up
        # branch above were unreachable.
        error_counts["brmix"] += 1

        # rewrite the INCAR with new settings
        incar.to_file(incar_filename)

        # rewrite the new error count file
        with open(error_count_filename, "w") as file:
            json.dump(error_counts, file)

        # now return the correction made for logging
        return correction
| 1,836 | 0 | 27 |
9ed1895ea4a827b4694e3d99b715c2f22204199e | 776 | py | Python | darshan-util/pydarshan/darshan/cli/to_json.py | gaocegege/darshan | 2d54cd8ec96d26db23e9ca421df48d2031a4c55e | [
"mpich2"
] | null | null | null | darshan-util/pydarshan/darshan/cli/to_json.py | gaocegege/darshan | 2d54cd8ec96d26db23e9ca421df48d2031a4c55e | [
"mpich2"
] | null | null | null | darshan-util/pydarshan/darshan/cli/to_json.py | gaocegege/darshan | 2d54cd8ec96d26db23e9ca421df48d2031a4c55e | [
"mpich2"
] | null | null | null | import sys
import argparse
import darshan
if __name__ == "__main__":
main()
| 21.555556 | 95 | 0.668814 | import sys
import argparse
import darshan
def setup_parser(parser=None):
# setup nested actions/subcommands?
#actions = parser.add_subparsers(dest='api')
# setup arguments
parser.add_argument('input', help='darshan log file', nargs='?', default='example.darshan')
parser.add_argument('--verbose', help='', action='store_true')
parser.add_argument('--debug', help='', action='store_true')
def main(args=None):
if args is None:
parser = argparse.ArgumentParser(description='')
setup_parser(parser)
args = parser.parse_args()
if args.debug:
print(args)
report = darshan.DarshanReport(args.input, read_all=True) # Default behavior
print(report.to_json())
if __name__ == "__main__":
main()
| 643 | 0 | 46 |
3acc55ec7cebd9acbc300172a3e236a44fad366b | 4,826 | py | Python | crownstone_cloud/cloud.py | crownstone/crownstone-lib-python-cloud | c6a48ab8c6990c5c99d136d9a8cf1df01a7c094d | [
"MIT"
] | 1 | 2020-05-29T23:45:15.000Z | 2020-05-29T23:45:15.000Z | crownstone_cloud/cloud.py | crownstone/crownstone-lib-python-cloud | c6a48ab8c6990c5c99d136d9a8cf1df01a7c094d | [
"MIT"
] | 3 | 2021-11-09T15:36:48.000Z | 2021-11-12T00:43:23.000Z | crownstone_cloud/cloud.py | crownstone/crownstone-lib-python-cloud | c6a48ab8c6990c5c99d136d9a8cf1df01a7c094d | [
"MIT"
] | 1 | 2021-11-09T15:33:01.000Z | 2021-11-09T15:33:01.000Z | """Main class for the Crownstone cloud cloud."""
from __future__ import annotations
import asyncio
import logging
import aiohttp
from crownstone_cloud.cloud_models.crownstones import Crownstone
from crownstone_cloud.cloud_models.spheres import Spheres
from crownstone_cloud.exceptions import CrownstoneNotFoundError
from crownstone_cloud.helpers.conversion import password_to_hash
from crownstone_cloud.helpers.requests import RequestHandler
_LOGGER = logging.getLogger(__name__)
class CrownstoneCloud:
"""Create a Crownstone cloud instance."""
cloud_data: Spheres
access_token: str
async def async_initialize(self) -> None:
"""
Login to Crownstone API & synchronize all cloud data.
This method is a coroutine.
"""
# Login
login_response = await self.request_handler.request_login(self.login_data)
# Save access token & create cloud data object
self.access_token = login_response["id"]
self.cloud_data = Spheres(self, login_response["userId"])
_LOGGER.debug("Login to Crownstone Cloud successful")
# Synchronize data
await self.async_synchronize()
async def async_synchronize(self) -> None:
"""
Sync all data from cloud.
This method is a coroutine.
"""
_LOGGER.debug("Initiating all cloud data")
# get the sphere data for this user_id
await self.cloud_data.async_update_sphere_data()
# get the data from the sphere attributes
for sphere in self.cloud_data:
await asyncio.gather(
sphere.async_update_sphere_presence(),
sphere.crownstones.async_update_crownstone_data(),
sphere.locations.async_update_location_data(),
sphere.locations.async_update_location_presence(),
sphere.users.async_update_user_data(),
)
_LOGGER.debug("Cloud data successfully initialized")
def get_crownstone(
self, crownstone_name: str, sphere_id: str | None = None
) -> Crownstone:
"""
Get a Crownstone object by providing the name and optionally a sphere id.
:param crownstone_name: Name of the Crownstone.
:param sphere_id: Sphere id that should match.
:return: Crownstone object.
"""
for sphere in self.cloud_data:
if sphere_id is not None:
if sphere.cloud_id != sphere_id:
continue
for crownstone in sphere.crownstones:
if crownstone.name == crownstone_name:
return crownstone
raise CrownstoneNotFoundError from None
def get_crownstone_by_id(
self, crownstone_id: str, sphere_id: str | None = None
) -> Crownstone:
"""
Get a Crownstone object by providing the id and optionally a sphere id.
:param crownstone_id: The cloud id of the Crownstone.
:param sphere_id: Sphere id that should match.
:return: Crownstone object.
"""
for sphere in self.cloud_data:
if sphere_id is not None:
if sphere.cloud_id != sphere_id:
continue
for crownstone in sphere.crownstones:
if crownstone.cloud_id == crownstone_id:
return crownstone
raise CrownstoneNotFoundError from None
def get_crownstone_by_uid(
self, crownstone_uid: int, sphere_id: str | None = None
) -> Crownstone:
"""
Get a Crownstone object by providing the uid and optionally a sphere id.
:param crownstone_uid: The unique id of the Crownstone.
:param sphere_id: Sphere id that should match.
:return: Crownstone object.
"""
for sphere in self.cloud_data:
if sphere_id is not None:
if sphere.cloud_id != sphere_id:
continue
for crownstone in sphere.crownstones:
if crownstone.unique_id == crownstone_uid:
return crownstone
raise CrownstoneNotFoundError from None
async def async_close_session(self) -> None:
"""
Close the aiohttp clientsession after all requests are done.
The session should always be closed when the program ends.
When there's an external clientsession in use, DON'T use this method.
This method is a coroutine.
"""
await self.request_handler.client_session.close()
| 33.513889 | 82 | 0.640075 | """Main class for the Crownstone cloud cloud."""
from __future__ import annotations
import asyncio
import logging
import aiohttp
from crownstone_cloud.cloud_models.crownstones import Crownstone
from crownstone_cloud.cloud_models.spheres import Spheres
from crownstone_cloud.exceptions import CrownstoneNotFoundError
from crownstone_cloud.helpers.conversion import password_to_hash
from crownstone_cloud.helpers.requests import RequestHandler
_LOGGER = logging.getLogger(__name__)
class CrownstoneCloud:
    """Create a Crownstone cloud instance."""

    cloud_data: Spheres
    access_token: str

    def __init__(
        self,
        email: str,
        password: str,
        clientsession: aiohttp.ClientSession | None = None,
    ) -> None:
        """
        Initialize the cloud instance.

        :param email: Crownstone account email address.
        :param password: Crownstone account password (hashed before sending).
        :param clientsession: Optional external aiohttp ClientSession to reuse.
        """
        self.request_handler = RequestHandler(self, clientsession)
        self.login_data = {"email": email, "password": password_to_hash(password)}

    async def async_initialize(self) -> None:
        """
        Login to Crownstone API & synchronize all cloud data.

        This method is a coroutine.
        """
        # Login
        login_response = await self.request_handler.request_login(self.login_data)
        # Save access token & create cloud data object
        self.access_token = login_response["id"]
        self.cloud_data = Spheres(self, login_response["userId"])
        _LOGGER.debug("Login to Crownstone Cloud successful")
        # Synchronize data
        await self.async_synchronize()

    async def async_synchronize(self) -> None:
        """
        Sync all data from cloud.

        This method is a coroutine.
        """
        _LOGGER.debug("Initiating all cloud data")
        # get the sphere data for this user_id
        await self.cloud_data.async_update_sphere_data()
        # get the data from the sphere attributes
        for sphere in self.cloud_data:
            await asyncio.gather(
                sphere.async_update_sphere_presence(),
                sphere.crownstones.async_update_crownstone_data(),
                sphere.locations.async_update_location_data(),
                sphere.locations.async_update_location_presence(),
                sphere.users.async_update_user_data(),
            )
        _LOGGER.debug("Cloud data successfully initialized")

    def _iter_crownstones(self, sphere_id: str | None = None):
        """
        Yield every known Crownstone, optionally restricted to one sphere.

        Shared traversal used by the public lookup methods below, which
        previously each duplicated this nested loop.
        """
        for sphere in self.cloud_data:
            if sphere_id is not None and sphere.cloud_id != sphere_id:
                continue
            for crownstone in sphere.crownstones:
                yield crownstone

    def get_crownstone(
        self, crownstone_name: str, sphere_id: str | None = None
    ) -> Crownstone:
        """
        Get a Crownstone object by providing the name and optionally a sphere id.

        :param crownstone_name: Name of the Crownstone.
        :param sphere_id: Sphere id that should match.
        :return: Crownstone object.
        """
        for crownstone in self._iter_crownstones(sphere_id):
            if crownstone.name == crownstone_name:
                return crownstone
        raise CrownstoneNotFoundError from None

    def get_crownstone_by_id(
        self, crownstone_id: str, sphere_id: str | None = None
    ) -> Crownstone:
        """
        Get a Crownstone object by providing the id and optionally a sphere id.

        :param crownstone_id: The cloud id of the Crownstone.
        :param sphere_id: Sphere id that should match.
        :return: Crownstone object.
        """
        for crownstone in self._iter_crownstones(sphere_id):
            if crownstone.cloud_id == crownstone_id:
                return crownstone
        raise CrownstoneNotFoundError from None

    def get_crownstone_by_uid(
        self, crownstone_uid: int, sphere_id: str | None = None
    ) -> Crownstone:
        """
        Get a Crownstone object by providing the uid and optionally a sphere id.

        :param crownstone_uid: The unique id of the Crownstone.
        :param sphere_id: Sphere id that should match.
        :return: Crownstone object.
        """
        for crownstone in self._iter_crownstones(sphere_id):
            if crownstone.unique_id == crownstone_uid:
                return crownstone
        raise CrownstoneNotFoundError from None

    async def async_close_session(self) -> None:
        """
        Close the aiohttp clientsession after all requests are done.

        The session should always be closed when the program ends.
        When there's an external clientsession in use, DON'T use this method.

        This method is a coroutine.
        """
        await self.request_handler.client_session.close()
| 274 | 0 | 27 |
3f157bc25feb7dd79f2e1fa5015bd441d9c0eef3 | 8,773 | py | Python | corehq/apps/locations/util.py | bglar/commcare-hq | 972129fc26864c08c7bef07874bd2a7218550bff | [
"BSD-3-Clause"
] | 1 | 2017-02-10T03:14:51.000Z | 2017-02-10T03:14:51.000Z | corehq/apps/locations/util.py | bglar/commcare-hq | 972129fc26864c08c7bef07874bd2a7218550bff | [
"BSD-3-Clause"
] | null | null | null | corehq/apps/locations/util.py | bglar/commcare-hq | 972129fc26864c08c7bef07874bd2a7218550bff | [
"BSD-3-Clause"
] | null | null | null | from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.products.models import Product
from corehq.apps.locations.models import Location, SQLLocation
from corehq.apps.domain.models import Domain
from corehq.util.quickcache import quickcache
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.excel import flatten_json, json_to_headers
from couchdbkit import ResourceNotFound
from dimagi.utils.couch.loosechange import map_reduce
from couchexport.writers import Excel2007ExportWriter
from StringIO import StringIO
from corehq.apps.consumption.shortcuts import get_loaded_default_monthly_consumption, build_consumption_dict
def load_locs_json(domain, selected_loc_id=None, include_archived=False):
"""initialize a json location tree for drill-down controls on
the client. tree is only partially initialized and branches
will be filled in on the client via ajax.
what is initialized:
* all top level locs
* if a 'selected' loc is provided, that loc and its complete
ancestry
"""
loc_json = [
loc_to_json(loc) for loc in
SQLLocation.root_locations(
domain, include_archive_ancestors=include_archived
)
]
# if a location is selected, we need to pre-populate its location hierarchy
# so that the data is available client-side to pre-populate the drop-downs
if selected_loc_id:
selected = SQLLocation.objects.get(
domain=domain,
location_id=selected_loc_id
)
lineage = selected.get_ancestors()
parent = {'children': loc_json}
for loc in lineage:
# find existing entry in the json tree that corresponds to this loc
this_loc = [k for k in parent['children'] if k['uuid'] == loc.location_id][0]
this_loc['children'] = [
loc_to_json(loc) for loc in
loc.child_locations(include_archive_ancestors=include_archived)
]
parent = this_loc
return loc_json
def parent_child(domain):
"""
Returns a dict mapping from a location type to its possible
child types
"""
return map_reduce(lambda (k, v): [(p, k) for p in v], data=dict(location_hierarchy_config(domain)).iteritems())
@quickcache(['domain'], timeout=60)
def write_to_file(locations):
"""
locations = [
('loc_type1', {
'headers': ['header1', 'header2', ...]
'rows': [
{
'header1': val1
'header2': val2
},
{...},
]
})
]
"""
outfile = StringIO()
writer = Excel2007ExportWriter()
header_table = [(loc_type, [tab['headers']]) for loc_type, tab in locations]
writer.open(header_table=header_table, file=outfile)
for loc_type, tab in locations:
headers = tab['headers']
tab_rows = [[row.get(header, '') for header in headers]
for row in tab['rows']]
writer.write([(loc_type, tab_rows)])
writer.close()
return outfile.getvalue()
| 34.952191 | 115 | 0.640032 | from corehq.apps.commtrack.models import SupplyPointCase
from corehq.apps.products.models import Product
from corehq.apps.locations.models import Location, SQLLocation
from corehq.apps.domain.models import Domain
from corehq.util.quickcache import quickcache
from dimagi.utils.decorators.memoized import memoized
from dimagi.utils.excel import flatten_json, json_to_headers
from couchdbkit import ResourceNotFound
from dimagi.utils.couch.loosechange import map_reduce
from couchexport.writers import Excel2007ExportWriter
from StringIO import StringIO
from corehq.apps.consumption.shortcuts import get_loaded_default_monthly_consumption, build_consumption_dict
def load_locs_json(domain, selected_loc_id=None, include_archived=False):
    """Build the (partial) json location tree used by the client-side
    drill-down controls.

    Only the top-level locations are initialized; deeper branches are
    fetched lazily via ajax. Exception: when ``selected_loc_id`` is given,
    that location's complete ancestry (with each ancestor's children) is
    pre-populated so the drop-downs can be filled immediately.
    """
    def _node(location):
        # Minimal dict shape the client-side tree widget expects.
        return {
            'name': location.name,
            'location_type': location.location_type,
            'uuid': location.location_id,
            'is_archived': location.is_archived,
        }

    loc_json = [
        _node(root) for root in SQLLocation.root_locations(
            domain, include_archive_ancestors=include_archived
        )
    ]

    if selected_loc_id:
        selected = SQLLocation.objects.get(
            domain=domain,
            location_id=selected_loc_id
        )
        # Walk down the lineage from the root, expanding each ancestor's
        # children into the tree built so far.
        parent = {'children': loc_json}
        for ancestor in selected.get_ancestors():
            matches = [node for node in parent['children']
                       if node['uuid'] == ancestor.location_id]
            this_loc = matches[0]
            this_loc['children'] = [
                _node(child) for child in
                ancestor.child_locations(include_archive_ancestors=include_archived)
            ]
            parent = this_loc

    return loc_json
def location_hierarchy_config(domain):
    """Return (location type name, allowed parent types) pairs for *domain*,
    with empty parent entries normalized to None.
    """
    config = []
    for loc_type in Domain.get_by_name(domain).location_types:
        parents = [parent or None for parent in loc_type.allowed_parents]
        config.append((loc_type.name, parents))
    return config
def defined_location_types(domain):
    """List the location type names configured for *domain*."""
    return [name for name, _parents in location_hierarchy_config(domain)]
def parent_child(domain):
    """
    Returns a dict mapping from a location type to its possible
    child types
    """
    hierarchy = dict(location_hierarchy_config(domain))
    # Invert the (child -> parents) mapping into (parent -> children).
    return map_reduce(lambda kv: [(parent, kv[0]) for parent in kv[1]],
                      data=hierarchy.iteritems())
def allowed_child_types(domain, parent):
    """Return the location types that may be created under *parent*
    (a falsy parent means top-level types).
    """
    if parent:
        parent_type = parent.location_type
    else:
        parent_type = None
    return parent_child(domain).get(parent_type, [])
def lookup_by_property(domain, prop_name, val, scope, root=None):
    """Return the set of location ids whose *prop_name* equals *val*,
    scoped to 'global', 'descendant' (of *root*), or 'child' (of *root*).
    Only 'site_code' currently has a couch index.
    """
    if root and not isinstance(root, basestring):
        root = root._id

    if prop_name != 'site_code':
        # this was to be backwards compatible with the api
        # if this ever comes up, please take a moment to decide whether it's
        # worth changing the API to raise a less nonsensical error
        # (or change this function to not sound so general!)
        raise ResourceNotFound('missing prop_index_%s' % prop_name)
    index_view = 'locations/prop_index_site_code'

    startkey = [domain, val]
    if scope == 'global':
        startkey.append(None)
    elif scope == 'descendant':
        startkey.append(root)
    elif scope == 'child':
        startkey.extend([root, 1])
    else:
        raise ValueError('invalid scope type')

    rows = Location.get_db().view(
        index_view, startkey=startkey, endkey=startkey + [{}])
    return set(row['id'] for row in rows)
@quickcache(['domain'], timeout=60)
def get_location_data_model(domain):
    """Fetch (creating on first use) the custom data-fields definition that
    applies to locations in *domain*. Cached for 60 seconds per domain.
    """
    # Imported locally to avoid circular imports at module load time.
    from .views import LocationFieldsView
    from corehq.apps.custom_data_fields import CustomDataFieldsDefinition

    field_type = LocationFieldsView.field_type
    return CustomDataFieldsDefinition.get_or_create(domain, field_type)
class LocationExporter(object):
    """Builds the per-location-type tabs of a locations Excel export,
    optionally including per-product default consumption columns.
    """

    def __init__(self, domain, include_consumption=False):
        self.domain = domain
        self.domain_obj = Domain.get_by_name(domain)
        self.include_consumption_flag = include_consumption
        self.data_model = get_location_data_model(domain)

    @property
    @memoized
    def consumption_dict(self):
        # Memoized: built once per exporter instance.
        return build_consumption_dict(self.domain)

    @property
    @memoized
    def include_consumption(self):
        # Consumption columns are included only when the caller asked for
        # them AND the domain uses individual consumption defaults.
        # NOTE: as a side effect, the first access initializes the
        # products/supply-point/administrative-type caches used by
        # get_consumption() and _loc_type_dict().
        if bool(
            self.include_consumption_flag and
            self.domain_obj.commtrack_settings.individual_consumption_defaults
        ):
            # we'll be needing these, so init 'em:
            self.products = Product.by_domain(self.domain)
            self.product_codes = [p.code for p in self.products]
            self.supply_point_map = SupplyPointCase.get_location_map_by_domain(self.domain)
            self.administrative_types = {
                lt.name for lt in self.domain_obj.location_types
                if lt.administrative
            }
            return True
        return False

    def get_consumption(self, loc):
        """Return {product code: default monthly consumption} for *loc*,
        or {} when consumption does not apply to this location.
        """
        if (
            not self.include_consumption or
            loc.location_type in self.administrative_types or
            not self.consumption_dict
        ):
            return {}
        if loc._id in self.supply_point_map:
            sp_id = self.supply_point_map[loc._id]
        else:
            # this only happens if the supply point case did
            # not already exist
            sp_id = SupplyPointCase.get_or_create_by_location(loc)._id
        return {
            p.code: get_loaded_default_monthly_consumption(
                self.consumption_dict,
                self.domain,
                p._id,
                loc.location_type,
                sp_id
            ) or ''
            for p in self.products
        }

    def _loc_type_dict(self, loc_type):
        """Build one export tab: (loc_type, {'headers': [...], 'rows': [...]})
        covering every location of that type in the domain.
        """
        uncategorized_keys = set()
        tab_rows = []
        for loc in Location.filter_by_type(self.domain, loc_type):
            model_data, uncategorized_data = \
                self.data_model.get_model_and_uncategorized(loc.metadata)
            uncategorized_keys.update(uncategorized_data.keys())
            loc_dict = {
                'site_code': loc.site_code,
                'name': loc.name,
                'parent_site_code': loc.parent.site_code if loc.parent else '',
                'latitude': loc.latitude or '',
                'longitude': loc.longitude or '',
                'data': model_data,
                'uncategorized_data': uncategorized_data,
                'consumption': self.get_consumption(loc),
            }
            # Nested dicts are flattened into dotted column names.
            tab_rows.append(dict(flatten_json(loc_dict)))
        tab_headers = ['site_code', 'name', 'parent_site_code', 'latitude', 'longitude']

        def _extend_headers(prefix, headers):
            # Append flattened "<prefix>: <header>" columns for one group.
            tab_headers.extend(json_to_headers(
                {prefix: {header: None for header in headers}}
            ))
        _extend_headers('data', (f.slug for f in self.data_model.fields))
        _extend_headers('uncategorized_data', uncategorized_keys)
        # NOTE(review): this checks include_consumption_flag directly, so it
        # assumes include_consumption has already been evaluated (it is, via
        # get_consumption above) before administrative_types/product_codes
        # are read -- confirm if reordering.
        if self.include_consumption_flag and loc_type not in self.administrative_types:
            _extend_headers('consumption', self.product_codes)
        return (loc_type, {
            'headers': tab_headers,
            'rows': tab_rows,
        })

    def get_export_dict(self):
        # One tab per configured location type, in domain config order.
        return [self._loc_type_dict(loc_type.name)
                for loc_type in self.domain_obj.location_types]
def dump_locations(response, domain, include_consumption=False):
    """Write an Excel export of `domain`'s locations into `response`.

    :param response: file-like object (e.g. an HttpResponse) written to.
    :param domain: domain name to export.
    :param include_consumption: include per-location consumption columns
        when the domain's settings allow it.
    """
    exporter = LocationExporter(domain, include_consumption=include_consumption)
    result = write_to_file(exporter.get_export_dict())
    response.write(result)
def write_to_file(locations):
    """Serialize location sheets into an in-memory Excel 2007 workbook.

    `locations` is a list of ``(loc_type, tab)`` pairs where each tab is::

        {
            'headers': ['header1', 'header2', ...],
            'rows': [
                {'header1': val1, 'header2': val2},
                {...},
            ],
        }

    Missing header keys in a row become empty cells.  Returns the binary
    workbook contents.
    """
    outfile = StringIO()
    writer = Excel2007ExportWriter()
    # Declare every sheet and its header row up front.
    writer.open(
        header_table=[(sheet, [tab['headers']]) for sheet, tab in locations],
        file=outfile,
    )
    # Then emit the data rows sheet by sheet, in header order.
    for sheet, tab in locations:
        columns = tab['headers']
        sheet_rows = [
            [row.get(column, '') for column in columns]
            for row in tab['rows']
        ]
        writer.write([(sheet, sheet_rows)])
    writer.close()
    return outfile.getvalue()
| 5,236 | 227 | 186 |
b784ed5536e878aab0baf0382c465a053b11a1b1 | 6,704 | py | Python | google/cloud/forseti/enforcer/enforcer.py | mitsuo0114/forseti-security | a21dc6b7a7420a60f02c1a4bdfbab9e101291dd2 | [
"Apache-2.0"
] | 1 | 2018-10-06T23:16:59.000Z | 2018-10-06T23:16:59.000Z | google/cloud/forseti/enforcer/enforcer.py | mitsuo0114/forseti-security | a21dc6b7a7420a60f02c1a4bdfbab9e101291dd2 | [
"Apache-2.0"
] | null | null | null | google/cloud/forseti/enforcer/enforcer.py | mitsuo0114/forseti-security | a21dc6b7a7420a60f02c1a4bdfbab9e101291dd2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Enforcer runner.
Usage for enforcing a single project's firewall:
$ forseti_enforcer --enforce_project <project_id> \\
--policy_file <policy file path>
"""
import argparse
import sys
import threading
from google.cloud.forseti.common.util import file_loader
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.enforcer import batch_enforcer
from google.cloud.forseti.enforcer import enforcer_log_pb2
LOGGER = logger.get_logger(__name__)
class Error(Exception):
    """Base error class for the module.

    Catching ``Error`` catches every enforcer-specific exception.
    """


class InvalidParsedPolicyFileError(Error):
    """An invalid policy file was parsed.

    Raised when a parsed policy is not the expected list of firewall
    resources.
    """
def initialize_batch_enforcer(global_configs, concurrent_threads,
                              max_write_threads, max_running_operations,
                              dry_run):
    """Build and return a BatchFirewallEnforcer ready to run.

    Args:
        global_configs (dict): Global configurations.
        concurrent_threads (str): Number of parallel enforcement threads to
            execute.
        max_write_threads (str): Cap on enforcement threads concurrently
            updating project firewalls; falsy disables the cap.
        max_running_operations (str): [DEPRECATED] Maximum write operations
            per enforcement thread; only triggers a warning when set.
        dry_run (boolean): When True, only log what would have been done
            without applying any modifications.

    Returns:
        BatchFirewallEnforcer: A BatchFirewallEnforcer instance.
    """
    if max_running_operations:
        LOGGER.warn('Deprecated argument max_running_operations set.')

    # The semaphore throttles how many projects are written at once.
    project_sema = None
    if max_write_threads:
        project_sema = threading.BoundedSemaphore(value=max_write_threads)

    return batch_enforcer.BatchFirewallEnforcer(
        global_configs=global_configs,
        dry_run=dry_run,
        concurrent_workers=concurrent_threads,
        project_sema=project_sema)
def enforce_single_project(enforcer, project_id, policy_filename):
    """Run the enforcer against exactly one project.

    Args:
        enforcer (BatchFirewallEnforcer): An instance of the
            batch_enforcer.BatchFirewallEnforcer class.
        project_id (str): The project to enforce.
        policy_filename (str): Json encoded file holding the firewall
            policy (a list of Firewall resources).

    Raises:
        InvalidParsedPolicyFileError: When the policy file can't be parsed.

    Returns:
        EnforcerLogProto: A instance of the proto.
    """
    policy = file_loader.read_and_parse_file(policy_filename)
    if not isinstance(policy, list):
        raise InvalidParsedPolicyFileError(
            'Invalid parsed policy file: found %s expected list' %
            type(policy))

    enforcer_results = enforcer.run([(project_id, policy)])

    # Annotate every result with its source policy and run context.
    for result in enforcer_results.results:
        result.gce_firewall_enforcement.policy_path = policy_filename
        result.run_context = enforcer_log_pb2.ENFORCER_ONE_PROJECT

    return enforcer_results
def main():
    """The main entry point for Forseti Security Enforcer runner."""
    # Command line flag definitions.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        '--forseti_config',
        default='/home/ubuntu/forseti-security/configs/'
                'forseti_conf_server.yaml',
        help='Fully qualified path and filename of the Forseti config file.')
    arg_parser.add_argument(
        '--enforce_project', default=None,
        help='A single projectId to enforce the firewall on.'
             ' Must be used with the policy_file flag.')
    arg_parser.add_argument(
        '--policy_file', default=None,
        help='A json encoded policy file to enforce,'
             ' must contain a list of Firewall resources to'
             'apply to the project. If in a GCS bucket, '
             'include full path, e.g. '
             '"gs://<bucketname>/path/to/file".')
    arg_parser.add_argument(
        '--dry_run', default=False,
        help='If True will simulate the changes and not change'
             'any policies.')
    arg_parser.add_argument(
        '--concurrent_threads', default=10,
        help='The number concurrent worker threads to use.')
    arg_parser.add_argument(
        '--maximum_firewall_write_operations', default=10,
        help='The maximum number of in flight write operations'
             'on project firewalls. Each running thread is '
             'allowed up to this many running operations, '
             'so to limit the over all number of operations, '
             'limit the number of write threads using the'
             ' maximum_project_writer_threads flag.')
    arg_parser.add_argument(
        '--maximum_project_writer_threads', default=1,
        help='The maximum number of projects with active write '
             'operations on project firewalls.')
    flags = vars(arg_parser.parse_args())

    # Load the server configuration; bail out on any failure.
    forseti_config = flags['forseti_config']
    if forseti_config is None:
        LOGGER.error('Path to Forseti Security config needs to be specified.')
        sys.exit()
    try:
        configs = file_loader.read_and_parse_file(forseti_config)
    except IOError:
        LOGGER.exception('Unable to open Forseti Security config file. '
                         'Please check your path and filename and try again.')
        sys.exit()
    global_configs = configs.get('global')

    enforcer = initialize_batch_enforcer(
        global_configs, flags['concurrent_threads'],
        flags['maximum_project_writer_threads'],
        flags['maximum_firewall_write_operations'],
        flags['dry_run']
    )

    # Single-project mode requires both flags; batch mode is a stub.
    # NOTE: Python 2 print statements -- this module targets Python 2.
    if flags['enforce_project'] and flags['policy_file']:
        enforcer_results = enforce_single_project(enforcer,
                                                  flags['enforce_project'],
                                                  flags['policy_file'])
        print enforcer_results
    else:
        print 'Batch mode not implemented yet.'


if __name__ == '__main__':
    main()
| 34.379487 | 80 | 0.676909 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Enforcer runner.
Usage for enforcing a single project's firewall:
$ forseti_enforcer --enforce_project <project_id> \\
--policy_file <policy file path>
"""
import argparse
import sys
import threading
from google.cloud.forseti.common.util import file_loader
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.enforcer import batch_enforcer
from google.cloud.forseti.enforcer import enforcer_log_pb2
LOGGER = logger.get_logger(__name__)
class Error(Exception):
"""Base error class for the module."""
class InvalidParsedPolicyFileError(Error):
"""An invalid policy file was parsed."""
def initialize_batch_enforcer(global_configs, concurrent_threads,
max_write_threads, max_running_operations,
dry_run):
"""Initialize and return a BatchFirewallEnforcer object.
Args:
global_configs (dict): Global configurations.
concurrent_threads (str): The number of parallel enforcement threads to
execute.
max_write_threads (str): The maximum number of enforcement threads that
can be actively updating project firewalls.
max_running_operations (str): [DEPRECATED] The maximum number of write
operations per enforcement thread.
dry_run (boolean): If True, will simply log what action would have been
taken without actually applying any modifications.
Returns:
BatchFirewallEnforcer: A BatchFirewallEnforcer instance.
"""
if max_running_operations:
LOGGER.warn('Deprecated argument max_running_operations set.')
if max_write_threads:
project_sema = threading.BoundedSemaphore(value=max_write_threads)
else:
project_sema = None
enforcer = batch_enforcer.BatchFirewallEnforcer(
global_configs=global_configs,
dry_run=dry_run,
concurrent_workers=concurrent_threads,
project_sema=project_sema)
return enforcer
def enforce_single_project(enforcer, project_id, policy_filename):
"""Runs the enforcer on a single project.
Args:
enforcer (BatchFirewallEnforcer): An instance of the
batch_enforcer.BatchFirewallEnforcer class.
project_id (str): The project to enforce.
policy_filename (str): The json encoded file to read the firewall policy
from.
Raises:
InvalidParsedPolicyFileError: When the policy file can't be parsed.
Returns:
EnforcerLogProto: A instance of the proto.
"""
policy = file_loader.read_and_parse_file(policy_filename)
if not isinstance(policy, list):
raise InvalidParsedPolicyFileError(
'Invalid parsed policy file: found %s expected list' %
type(policy))
project_policies = [(project_id, policy)]
enforcer_results = enforcer.run(project_policies)
for result in enforcer_results.results:
result.gce_firewall_enforcement.policy_path = policy_filename
result.run_context = enforcer_log_pb2.ENFORCER_ONE_PROJECT
return enforcer_results
def main():
"""The main entry point for Forseti Security Enforcer runner."""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
'--forseti_config',
default='/home/ubuntu/forseti-security/configs/'
'forseti_conf_server.yaml',
help='Fully qualified path and filename of the Forseti config file.')
arg_parser.add_argument(
'--enforce_project', default=None,
help='A single projectId to enforce the firewall on.'
' Must be used with the policy_file flag.')
arg_parser.add_argument(
'--policy_file', default=None,
help='A json encoded policy file to enforce,'
' must contain a list of Firewall resources to'
'apply to the project. If in a GCS bucket, '
'include full path, e.g. '
'"gs://<bucketname>/path/to/file".')
arg_parser.add_argument(
'--dry_run', default=False,
help='If True will simulate the changes and not change'
'any policies.')
arg_parser.add_argument(
'--concurrent_threads', default=10,
help='The number concurrent worker threads to use.')
arg_parser.add_argument(
'--maximum_firewall_write_operations', default=10,
help='The maximum number of in flight write operations'
'on project firewalls. Each running thread is '
'allowed up to this many running operations, '
'so to limit the over all number of operations, '
'limit the number of write threads using the'
' maximum_project_writer_threads flag.')
arg_parser.add_argument(
'--maximum_project_writer_threads', default=1,
help='The maximum number of projects with active write '
'operations on project firewalls.')
flags = vars(arg_parser.parse_args())
forseti_config = flags['forseti_config']
if forseti_config is None:
LOGGER.error('Path to Forseti Security config needs to be specified.')
sys.exit()
try:
configs = file_loader.read_and_parse_file(forseti_config)
except IOError:
LOGGER.exception('Unable to open Forseti Security config file. '
'Please check your path and filename and try again.')
sys.exit()
global_configs = configs.get('global')
enforcer = initialize_batch_enforcer(
global_configs, flags['concurrent_threads'],
flags['maximum_project_writer_threads'],
flags['maximum_firewall_write_operations'],
flags['dry_run']
)
if flags['enforce_project'] and flags['policy_file']:
enforcer_results = enforce_single_project(enforcer,
flags['enforce_project'],
flags['policy_file'])
print enforcer_results
else:
print 'Batch mode not implemented yet.'
if __name__ == '__main__':
main()
| 0 | 0 | 0 |
cebe158fa96d11636d7e608ec07e33216f13dd64 | 368 | py | Python | admin_tools/urls.py | asherf/django-admin-tools | 26a993545de7d68286be56ac640fe12acf1a1abe | [
"MIT"
] | 1 | 2020-04-06T23:21:17.000Z | 2020-04-06T23:21:17.000Z | admin_tools/urls.py | asherf/django-admin-tools | 26a993545de7d68286be56ac640fe12acf1a1abe | [
"MIT"
] | 6 | 2020-03-25T16:58:30.000Z | 2021-06-10T19:55:55.000Z | admin_tools/urls.py | asherf/django-admin-tools | 26a993545de7d68286be56ac640fe12acf1a1abe | [
"MIT"
] | 1 | 2020-02-24T22:34:17.000Z | 2020-02-24T22:34:17.000Z | from django.conf import settings
from django.conf.urls import url, include
urlpatterns = []
if 'admin_tools.menu' in settings.INSTALLED_APPS:
urlpatterns.append(url(r'^menu/', include('admin_tools.menu.urls')))
if 'admin_tools.dashboard' in settings.INSTALLED_APPS:
urlpatterns.append(
url(r'^dashboard/', include('admin_tools.dashboard.urls'))
)
| 33.454545 | 72 | 0.73913 | from django.conf import settings
from django.conf.urls import url, include
urlpatterns = []
if 'admin_tools.menu' in settings.INSTALLED_APPS:
urlpatterns.append(url(r'^menu/', include('admin_tools.menu.urls')))
if 'admin_tools.dashboard' in settings.INSTALLED_APPS:
urlpatterns.append(
url(r'^dashboard/', include('admin_tools.dashboard.urls'))
)
| 0 | 0 | 0 |
5a22efca0450afe45817960d54602b633e1343fa | 622 | py | Python | app/models/__init__.py | dandye/DjanGoat | 72beb30afe3ddd5b31ce74a5d3b9da61d2c5df1d | [
"MIT"
] | 65 | 2017-08-18T15:12:03.000Z | 2021-08-14T16:50:07.000Z | app/models/__init__.py | dandye/DjanGoat | 72beb30afe3ddd5b31ce74a5d3b9da61d2c5df1d | [
"MIT"
] | 83 | 2017-11-28T21:45:20.000Z | 2021-11-02T18:52:52.000Z | app/models/__init__.py | dandye/DjanGoat | 72beb30afe3ddd5b31ce74a5d3b9da61d2c5df1d | [
"MIT"
] | 71 | 2017-08-17T14:58:01.000Z | 2022-02-02T17:09:49.000Z | from app.models.Note.note import Note
from app.models.Message.message import Message
from app.models.User.user import User
from app.models.Analytics.analytics import Analytics
from app.models.Benefits.benefits import Benefits
from app.models.KeyManagement.key_management import KeyManagement
from app.models.PaidTimeOff.paid_time_off import PaidTimeOff
from app.models.Pay.pay import Pay
from app.models.Performance.performance import Performance
from app.models.Retirement.retirement import Retirement
from app.models.Schedule.schedule import Schedule
from app.models.WorkInfo.work_info import WorkInfo
import app.signals | 47.846154 | 65 | 0.861736 | from app.models.Note.note import Note
from app.models.Message.message import Message
from app.models.User.user import User
from app.models.Analytics.analytics import Analytics
from app.models.Benefits.benefits import Benefits
from app.models.KeyManagement.key_management import KeyManagement
from app.models.PaidTimeOff.paid_time_off import PaidTimeOff
from app.models.Pay.pay import Pay
from app.models.Performance.performance import Performance
from app.models.Retirement.retirement import Retirement
from app.models.Schedule.schedule import Schedule
from app.models.WorkInfo.work_info import WorkInfo
import app.signals | 0 | 0 | 0 |
95b23018af8b6d6db6b5511ee2ad613d8f7f0859 | 32,798 | py | Python | resources/libraries/python/DUTSetup.py | preym17/csit | 3151c98618c78e3782e48bbe4d9c8f906c126f69 | [
"Apache-2.0"
] | null | null | null | resources/libraries/python/DUTSetup.py | preym17/csit | 3151c98618c78e3782e48bbe4d9c8f906c126f69 | [
"Apache-2.0"
] | null | null | null | resources/libraries/python/DUTSetup.py | preym17/csit | 3151c98618c78e3782e48bbe4d9c8f906c126f69 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DUT setup library."""
from robot.api import logger
from resources.libraries.python.Constants import Constants
from resources.libraries.python.ssh import SSH, exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
class DUTSetup(object):
"""Contains methods for setting up DUTs."""
    @staticmethod
    def get_service_logs(node, service):
        """Get specific service unit logs from node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        """
        # Containers are supervised by supervisord; bare hosts use systemd.
        if DUTSetup.running_in_container(node):
            command = ('echo $(< /var/log/supervisord.log);'
                       'echo $(< /tmp/*supervisor*.log)')
        else:
            # Only show journal entries since the unit last became active.
            command = ('journalctl --no-pager --unit={name} '
                       '--since="$(echo `systemctl show -p '
                       'ActiveEnterTimestamp {name}` | '
                       'awk \'{{print $2 $3}}\')"'.
                       format(name=service))
        message = 'Node {host} failed to get logs from unit {name}'.\
            format(host=node['host'], name=service)
        exec_cmd_no_error(node, command, timeout=30, sudo=True,
                          message=message)
@staticmethod
def get_service_logs_on_all_duts(nodes, service):
"""Get specific service unit logs from all DUTs.
:param nodes: Nodes in the topology.
:param service: Service unit name.
:type nodes: dict
:type service: str
"""
for node in nodes.values():
if node['type'] == NodeType.DUT:
DUTSetup.get_service_logs(node, service)
    @staticmethod
    def restart_service(node, service):
        """Restart the named service on node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        """
        # Containers are managed by supervisord, bare hosts by the init
        # system.
        if DUTSetup.running_in_container(node):
            command = 'supervisorctl restart {name}'.format(name=service)
        else:
            command = 'service {name} restart'.format(name=service)
        message = 'Node {host} failed to restart service {name}'.\
            format(host=node['host'], name=service)
        exec_cmd_no_error(
            node, command, timeout=180, sudo=True, message=message)
        # Collect the unit's logs right after the action for debugging.
        DUTSetup.get_service_logs(node, service)

    @staticmethod
    def restart_service_on_all_duts(nodes, service):
        """Restart the named service on all DUTs.

        :param nodes: Nodes in the topology.
        :param service: Service unit name.
        :type nodes: dict
        :type service: str
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                DUTSetup.restart_service(node, service)

    @staticmethod
    def start_service(node, service):
        """Start up the named service on node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        """
        # TODO: change command to start once all parent function updated.
        # NOTE(review): 'restart' is used deliberately for now (see TODO),
        # so starting an already-running service bounces it.
        if DUTSetup.running_in_container(node):
            command = 'supervisorctl restart {name}'.format(name=service)
        else:
            command = 'service {name} restart'.format(name=service)
        message = 'Node {host} failed to start service {name}'.\
            format(host=node['host'], name=service)
        exec_cmd_no_error(
            node, command, timeout=180, sudo=True, message=message)
        DUTSetup.get_service_logs(node, service)

    @staticmethod
    def start_service_on_all_duts(nodes, service):
        """Start up the named service on all DUTs.

        :param nodes: Nodes in the topology.
        :param service: Service unit name.
        :type nodes: dict
        :type service: str
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                DUTSetup.start_service(node, service)

    @staticmethod
    def stop_service(node, service):
        """Stop the named service on node.

        :param node: Node in the topology.
        :param service: Service unit name.
        :type node: dict
        :type service: str
        """
        if DUTSetup.running_in_container(node):
            command = 'supervisorctl stop {name}'.format(name=service)
        else:
            command = 'service {name} stop'.format(name=service)
        message = 'Node {host} failed to stop service {name}'.\
            format(host=node['host'], name=service)
        exec_cmd_no_error(
            node, command, timeout=180, sudo=True, message=message)
        DUTSetup.get_service_logs(node, service)

    @staticmethod
    def stop_service_on_all_duts(nodes, service):
        """Stop the named service on all DUTs.

        :param nodes: Nodes in the topology.
        :param service: Service unit name.
        :type nodes: dict
        :type service: str
        """
        for node in nodes.values():
            if node['type'] == NodeType.DUT:
                DUTSetup.stop_service(node, service)
    @staticmethod
    def get_vpp_pid(node):
        """Get PID of running VPP process.

        Retries up to three times while no 'vpp' process is found.

        :param node: DUT node.
        :type node: dict
        :returns: A single PID as int, a list of ints when several VPP
            processes run, or None when none is found after retries.
        :rtype: int, list of int, or None
        :raises RuntimeError: If it is not possible to get the PID.
        """
        ssh = SSH()
        ssh.connect(node)
        for i in range(3):
            logger.trace('Try {}: Get VPP PID'.format(i))
            ret_code, stdout, stderr = ssh.exec_command('pidof vpp')
            if int(ret_code):
                raise RuntimeError('Not possible to get PID of VPP process '
                                   'on node: {0}\n {1}'.
                                   format(node['host'], stdout + stderr))
            pid_list = stdout.split()
            if len(pid_list) == 1:
                return int(stdout)
            elif not pid_list:
                # Nothing yet -- retry; VPP may still be starting up.
                logger.debug("No VPP PID found on node {0}".
                             format(node['host']))
                continue
            else:
                logger.debug("More then one VPP PID found on node {0}".
                             format(node['host']))
                return [int(pid) for pid in pid_list]
        return None
@staticmethod
def get_vpp_pids(nodes):
"""Get PID of running VPP process on all DUTs.
:param nodes: DUT nodes.
:type nodes: dict
:returns: PIDs
:rtype: dict
"""
pids = dict()
for node in nodes.values():
if node['type'] == NodeType.DUT:
pids[node['host']] = DUTSetup.get_vpp_pid(node)
return pids
    @staticmethod
    def crypto_device_verify(node, crypto_type, numvfs, force_init=False):
        """Verify if Crypto QAT device virtual functions are initialized on
        all DUTs. If parameter force initialization is set to True, then try
        to initialize or remove VFs on QAT.

        :param node: DUT node.
        :param crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
        :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
        :param force_init: If True then try to initialize to specific value.
        :type node: dict
        :type crypto_type: string
        :type numvfs: int
        :type force_init: bool
        :returns: nothing
        :raises RuntimeError: If QAT VFs are not created and force init is set
            to False.
        """
        pci_addr = Topology.get_cryptodev(node)
        sriov_numvfs = DUTSetup.get_sriov_numvfs(node, pci_addr)
        if sriov_numvfs != numvfs:
            if force_init:
                # QAT is not initialized and we want to initialize with numvfs
                DUTSetup.crypto_device_init(node, crypto_type, numvfs)
            else:
                raise RuntimeError('QAT device failed to create VFs on {host}'.
                                   format(host=node['host']))

    @staticmethod
    def crypto_device_init(node, crypto_type, numvfs):
        """Init Crypto QAT device virtual functions on DUT.

        :param node: DUT node.
        :param crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
        :param numvfs: Number of VFs to initialize, 0 - disable the VFs.
        :type node: dict
        :type crypto_type: string
        :type numvfs: int
        :returns: nothing
        :raises RuntimeError: If failed to stop VPP or QAT failed to
            initialize.
        """
        # Map the supported device types to their kernel module and driver.
        if crypto_type == "HW_DH895xcc":
            kernel_mod = "qat_dh895xcc"
            kernel_drv = "dh895xcc"
        elif crypto_type == "HW_C3xxx":
            kernel_mod = "qat_c3xxx"
            kernel_drv = "c3xxx"
        else:
            raise RuntimeError('Unsupported crypto device type on {host}'.
                               format(host=node['host']))
        pci_addr = Topology.get_cryptodev(node)
        # QAT device must be re-bound to kernel driver before initialization.
        DUTSetup.verify_kernel_module(node, kernel_mod, force_load=True)
        # Stop VPP to prevent deadlock.
        DUTSetup.stop_service(node, Constants.VPP_UNIT)
        current_driver = DUTSetup.get_pci_dev_driver(
            node, pci_addr.replace(':', r'\:'))
        if current_driver is not None:
            DUTSetup.pci_driver_unbind(node, pci_addr)
        # Bind to kernel driver.
        DUTSetup.pci_driver_bind(node, pci_addr, kernel_drv)
        # Initialize QAT VFs.
        if numvfs > 0:
            DUTSetup.set_sriov_numvfs(node, pci_addr, numvfs)
    @staticmethod
    def get_virtfn_pci_addr(node, pf_pci_addr, vf_id):
        """Get PCI address of Virtual Function.

        :param node: DUT node.
        :param pf_pci_addr: Physical Function PCI address.
        :param vf_id: Virtual Function number.
        :type node: dict
        :type pf_pci_addr: str
        :type vf_id: int
        :returns: Virtual Function PCI address.
        :rtype: str
        :raises RuntimeError: If failed to get Virtual Function PCI address.
        """
        # The virtfn<N> symlink in the PF's sysfs entry points at the VF
        # device directory; its basename is the VF's PCI address.
        command = "sh -c "\
            "'basename $(readlink /sys/bus/pci/devices/{pci}/virtfn{vf_id})'".\
            format(pci=pf_pci_addr, vf_id=vf_id)
        message = 'Failed to get virtual function PCI address.'
        stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
                                      message=message)
        return stdout.strip()

    @staticmethod
    def get_sriov_numvfs(node, pf_pci_addr):
        """Get number of SR-IOV VFs.

        :param node: DUT node.
        :param pf_pci_addr: Physical Function PCI device address.
        :type node: dict
        :type pf_pci_addr: str
        :returns: Number of VFs.
        :rtype: int
        :raises RuntimeError: If PCI device is not SR-IOV capable.
        """
        command = 'cat /sys/bus/pci/devices/{pci}/sriov_numvfs'.\
            format(pci=pf_pci_addr.replace(':', r'\:'))
        message = 'PCI device {pci} is not a SR-IOV device.'.\
            format(pci=pf_pci_addr)
        for _ in range(3):
            stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
                                          message=message)
            try:
                sriov_numvfs = int(stdout)
            except ValueError:
                # NOTE(review): after three failed parses this falls through
                # to an implicit None return; callers compare the result to
                # the expected VF count, so None triggers (re)initialization.
                logger.trace('Reading sriov_numvfs info failed on {host}'.
                             format(host=node['host']))
            else:
                return sriov_numvfs

    @staticmethod
    def set_sriov_numvfs(node, pf_pci_addr, numvfs=0):
        """Init or reset SR-IOV virtual functions by setting its number on PCI
        device on DUT. Setting to zero removes all VFs.

        :param node: DUT node.
        :param pf_pci_addr: Physical Function PCI device address.
        :param numvfs: Number of VFs to initialize, 0 - removes the VFs.
        :type node: dict
        :type pf_pci_addr: str
        :type numvfs: int
        :raises RuntimeError: Failed to create VFs on PCI.
        """
        command = "sh -c "\
            "'echo {num} | tee /sys/bus/pci/devices/{pci}/sriov_numvfs'".\
            format(num=numvfs, pci=pf_pci_addr.replace(':', r'\:'))
        message = 'Failed to create {num} VFs on {pci} device on {host}'.\
            format(num=numvfs, pci=pf_pci_addr, host=node['host'])
        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)
    @staticmethod
    def pci_driver_unbind(node, pci_addr):
        """Unbind PCI device from current driver on node.

        :param node: DUT node.
        :param pci_addr: PCI device address.
        :type node: dict
        :type pci_addr: str
        :raises RuntimeError: If PCI device unbind failed.
        """
        command = "sh -c "\
            "'echo {pci} | tee /sys/bus/pci/devices/{pcie}/driver/unbind'".\
            format(pci=pci_addr, pcie=pci_addr.replace(':', r'\:'))
        message = 'Failed to unbind PCI device {pci} on {host}'.\
            format(pci=pci_addr, host=node['host'])
        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)

    @staticmethod
    def pci_driver_bind(node, pci_addr, driver):
        """Bind PCI device to driver on node.

        Uses the sysfs driver_override mechanism: the override is set so the
        intended driver claims the device on bind, then cleared again.

        :param node: DUT node.
        :param pci_addr: PCI device address.
        :param driver: Driver to bind.
        :type node: dict
        :type pci_addr: str
        :type driver: str
        :raises RuntimeError: If PCI device bind failed.
        """
        message = 'Failed to bind PCI device {pci} to {driver} on host {host}'.\
            format(pci=pci_addr, driver=driver, host=node['host'])
        command = "sh -c "\
            "'echo {driver} | tee /sys/bus/pci/devices/{pci}/driver_override'".\
            format(driver=driver, pci=pci_addr.replace(':', r'\:'))
        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)
        command = "sh -c "\
            "'echo {pci} | tee /sys/bus/pci/drivers/{driver}/bind'".\
            format(pci=pci_addr, driver=driver)
        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)
        # Clear the override so future (re)binds are not constrained.
        command = "sh -c "\
            "'echo | tee /sys/bus/pci/devices/{pci}/driver_override'".\
            format(pci=pci_addr.replace(':', r'\:'))
        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)

    @staticmethod
    def pci_vf_driver_unbind(node, pf_pci_addr, vf_id):
        """Unbind Virtual Function from driver on node.

        :param node: DUT node.
        :param pf_pci_addr: PCI device address.
        :param vf_id: Virtual Function ID.
        :type node: dict
        :type pf_pci_addr: str
        :type vf_id: int
        :raises RuntimeError: If Virtual Function unbind failed.
        """
        vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
        vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
            format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)
        command = "sh -c "\
            "'echo {vf_pci_addr} | tee {vf_path}/driver/unbind'".\
            format(vf_pci_addr=vf_pci_addr, vf_path=vf_path)
        message = 'Failed to unbind VF {vf_pci_addr} to on {host}'.\
            format(vf_pci_addr=vf_pci_addr, host=node['host'])
        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)

    @staticmethod
    def pci_vf_driver_bind(node, pf_pci_addr, vf_id, driver):
        """Bind Virtual Function to driver on node.

        Same driver_override set / bind / clear sequence as
        pci_driver_bind, applied to the VF's sysfs path.

        :param node: DUT node.
        :param pf_pci_addr: PCI device address.
        :param vf_id: Virtual Function ID.
        :param driver: Driver to bind.
        :type node: dict
        :type pf_pci_addr: str
        :type vf_id: int
        :type driver: str
        :raises RuntimeError: If PCI device bind failed.
        """
        vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
        vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
            format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)
        message = 'Failed to bind VF {vf_pci_addr} to {driver} on {host}'.\
            format(vf_pci_addr=vf_pci_addr, driver=driver, host=node['host'])
        command = "sh -c "\
            "'echo {driver} | tee {vf_path}/driver_override'".\
            format(driver=driver, vf_path=vf_path)
        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)
        command = "sh -c "\
            "'echo {vf_pci_addr} | tee /sys/bus/pci/drivers/{driver}/bind'".\
            format(vf_pci_addr=vf_pci_addr, driver=driver)
        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)
        command = "sh -c "\
            "'echo | tee {vf_path}/driver_override'".\
            format(vf_path=vf_path)
        exec_cmd_no_error(node, command, timeout=120, sudo=True,
                          message=message)
@staticmethod
def get_pci_dev_driver(node, pci_addr):
"""Get current PCI device driver on node.
.. note::
# lspci -vmmks 0000:00:05.0
Slot: 00:05.0
Class: Ethernet controller
Vendor: Red Hat, Inc
Device: Virtio network device
SVendor: Red Hat, Inc
SDevice: Device 0001
PhySlot: 5
Driver: virtio-pci
:param node: DUT node.
:param pci_addr: PCI device address.
:type node: dict
:type pci_addr: str
:returns: Driver or None
:raises RuntimeError: If PCI rescan or lspci command execution failed.
:raises RuntimeError: If it is not possible to get the interface driver
information from the node.
"""
ssh = SSH()
ssh.connect(node)
for i in range(3):
logger.trace('Try number {0}: Get PCI device driver'.format(i))
cmd = 'lspci -vmmks {0}'.format(pci_addr)
ret_code, stdout, _ = ssh.exec_command(cmd)
if int(ret_code):
raise RuntimeError("'{0}' failed on '{1}'"
.format(cmd, node['host']))
for line in stdout.splitlines():
if not line:
continue
name = None
value = None
try:
name, value = line.split("\t", 1)
except ValueError:
if name == "Driver:":
return None
if name == 'Driver:':
return value
if i < 2:
logger.trace('Driver for PCI device {} not found, executing '
'pci rescan and retrying'.format(pci_addr))
cmd = 'sh -c "echo 1 > /sys/bus/pci/rescan"'
ret_code, _, _ = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
raise RuntimeError("'{0}' failed on '{1}'"
.format(cmd, node['host']))
return None
@staticmethod
def verify_kernel_module(node, module, force_load=False):
"""Verify if kernel module is loaded on node. If parameter force
load is set to True, then try to load the modules.
:param node: Node.
:param module: Module to verify.
:param force_load: If True then try to load module.
:type node: dict
:type module: str
:type force_load: bool
:raises RuntimeError: If module is not loaded or failed to load.
"""
command = 'grep -w {module} /proc/modules'.format(module=module)
message = 'Kernel module {module} is not loaded on host {host}'.\
format(module=module, host=node['host'])
try:
exec_cmd_no_error(node, command, timeout=30, sudo=False,
message=message)
except RuntimeError:
if force_load:
# Module is not loaded and we want to load it
DUTSetup.load_kernel_module(node, module)
else:
raise
@staticmethod
def verify_kernel_module_on_all_duts(nodes, module, force_load=False):
"""Verify if kernel module is loaded on all DUTs. If parameter force
load is set to True, then try to load the modules.
:param node: DUT nodes.
:param module: Module to verify.
:param force_load: If True then try to load module.
:type node: dict
:type module: str
:type force_load: bool
"""
for node in nodes.values():
if node['type'] == NodeType.DUT:
DUTSetup.verify_kernel_module(node, module, force_load)
@staticmethod
def verify_uio_driver_on_all_duts(nodes):
"""Verify if uio driver kernel module is loaded on all DUTs. If module
is not present it will try to load it.
:param node: DUT nodes.
:type node: dict
"""
for node in nodes.values():
if node['type'] == NodeType.DUT:
uio_driver = Topology.get_uio_driver(node)
DUTSetup.verify_kernel_module(node, uio_driver, force_load=True)
@staticmethod
def load_kernel_module(node, module):
"""Load kernel module on node.
:param node: DUT node.
:param module: Module to load.
:type node: dict
:type module: str
:returns: nothing
:raises RuntimeError: If loading failed.
"""
command = 'modprobe {module}'.format(module=module)
message = 'Failed to load {module} on host {host}'.\
format(module=module, host=node['host'])
exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
@staticmethod
def install_vpp_on_all_duts(nodes, vpp_pkg_dir):
"""Install VPP on all DUT nodes. Start the VPP service in case of
systemd is not available or does not support autostart.
:param nodes: Nodes in the topology.
:param vpp_pkg_dir: Path to directory where VPP packages are stored.
:type nodes: dict
:type vpp_pkg_dir: str
:raises RuntimeError: If failed to remove or install VPP.
"""
for node in nodes.values():
message = 'Failed to install VPP on host {host}!'.\
format(host=node['host'])
if node['type'] == NodeType.DUT:
command = 'ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true'
exec_cmd_no_error(node, command, sudo=True)
command = '. /etc/lsb-release; echo "${DISTRIB_ID}"'
stdout, _ = exec_cmd_no_error(node, command)
if stdout.strip() == 'Ubuntu':
exec_cmd_no_error(node, 'apt-get purge -y "*vpp*" || true',
timeout=120, sudo=True)
exec_cmd_no_error(node, 'dpkg -i --force-all {dir}*.deb'.
format(dir=vpp_pkg_dir), timeout=120,
sudo=True, message=message)
exec_cmd_no_error(node, 'dpkg -l | grep vpp', sudo=True)
if DUTSetup.running_in_container(node):
DUTSetup.restart_service(node, Constants.VPP_UNIT)
else:
exec_cmd_no_error(node, 'yum -y remove "*vpp*" || true',
timeout=120, sudo=True)
exec_cmd_no_error(node, 'rpm -ivh {dir}*.rpm'.
format(dir=vpp_pkg_dir), timeout=120,
sudo=True, message=message)
exec_cmd_no_error(node, 'rpm -qai *vpp*', sudo=True)
DUTSetup.restart_service(node, Constants.VPP_UNIT)
@staticmethod
def running_in_container(node):
"""This method tests if topology node is running inside container.
:param node: Topology node.
:type node: dict
:returns: True if running in docker container, false if not or failed
to detect.
:rtype: bool
"""
command = "fgrep docker /proc/1/cgroup"
message = 'Failed to get cgroup settings.'
try:
exec_cmd_no_error(node, command, timeout=30, sudo=False,
message=message)
except RuntimeError:
return False
return True
@staticmethod
def get_docker_mergeddir(node, uuid):
"""Get Docker overlay for MergedDir diff.
:param node: DUT node.
:param uuid: Docker UUID.
:type node: dict
:type uuid: str
:returns: Docker container MergedDir.
:rtype: str
:raises RuntimeError: If getting output failed.
"""
command = "docker inspect --format='"\
"{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}".format(uuid=uuid)
message = 'Failed to get directory of {uuid} on host {host}'.\
format(uuid=uuid, host=node['host'])
stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
return stdout.strip()
@staticmethod
def get_huge_page_size(node):
"""Get default size of huge pages in system.
:param node: Node in the topology.
:type node: dict
:returns: Default size of free huge pages in system.
:rtype: int
:raises RuntimeError: If reading failed for three times.
"""
ssh = SSH()
ssh.connect(node)
for _ in range(3):
ret_code, stdout, _ = ssh.exec_command_sudo(
"grep Hugepagesize /proc/meminfo | awk '{ print $2 }'")
if ret_code == 0:
try:
huge_size = int(stdout)
except ValueError:
logger.trace('Reading huge page size information failed')
else:
break
else:
raise RuntimeError('Getting huge page size information failed.')
return huge_size
@staticmethod
def get_huge_page_free(node, huge_size):
"""Get number of free huge pages in system.
:param node: Node in the topology.
:param huge_size: Size of hugepages.
:type node: dict
:type huge_size: int
:returns: Number of free huge pages in system.
:rtype: int
:raises RuntimeError: If reading failed for three times.
"""
# TODO: add numa aware option
ssh = SSH()
ssh.connect(node)
for _ in range(3):
ret_code, stdout, _ = ssh.exec_command_sudo(
'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/free_hugepages'.
format(huge_size))
if ret_code == 0:
try:
huge_free = int(stdout)
except ValueError:
logger.trace('Reading free huge pages information failed')
else:
break
else:
raise RuntimeError('Getting free huge pages information failed.')
return huge_free
@staticmethod
def get_huge_page_total(node, huge_size):
"""Get total number of huge pages in system.
:param node: Node in the topology.
:param huge_size: Size of hugepages.
:type node: dict
:type huge_size: int
:returns: Total number of huge pages in system.
:rtype: int
:raises RuntimeError: If reading failed for three times.
"""
# TODO: add numa aware option
ssh = SSH()
ssh.connect(node)
for _ in range(3):
ret_code, stdout, _ = ssh.exec_command_sudo(
'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/nr_hugepages'.
format(huge_size))
if ret_code == 0:
try:
huge_total = int(stdout)
except ValueError:
logger.trace('Reading total huge pages information failed')
else:
break
else:
raise RuntimeError('Getting total huge pages information failed.')
return huge_total
    @staticmethod
    def check_huge_page(node, huge_mnt, mem_size, allocate=False):
        """Check if there is enough HugePages in system. If allocate is set to
        true, try to allocate more HugePages.
        :param node: Node in the topology.
        :param huge_mnt: HugePage mount point.
        :param mem_size: Requested memory in MB.
        :param allocate: Whether to allocate more memory if not enough.
        :type node: dict
        :type huge_mnt: str
        :type mem_size: str
        :type allocate: bool
        :raises RuntimeError: Mounting hugetlbfs failed or not enough HugePages
            or increasing map count failed.
        """
        # NOTE(review): mem_size is documented as str but used in arithmetic
        # below (mem_size * 1024); callers presumably pass an int — confirm.
        # TODO: split function into smaller parts.
        ssh = SSH()
        ssh.connect(node)
        # Get huge pages information
        huge_size = DUTSetup.get_huge_page_size(node)
        huge_free = DUTSetup.get_huge_page_free(node, huge_size)
        huge_total = DUTSetup.get_huge_page_total(node, huge_size)
        # Check if memory reqested is available on host
        # (huge_size is in kB, mem_size in MB, hence the * 1024).
        if (mem_size * 1024) > (huge_free * huge_size):
            # If we want to allocate hugepage dynamically
            if allocate:
                mem_needed = (mem_size * 1024) - (huge_free * huge_size)
                # Request double the missing pages on top of the current total.
                huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total
                max_map_count = huge_to_allocate*4
                # Increase maximum number of memory map areas a process may have
                ret_code, _, _ = ssh.exec_command_sudo(
                    'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.
                    format(max_map_count))
                if int(ret_code) != 0:
                    raise RuntimeError('Increase map count failed on {host}'.
                                       format(host=node['host']))
                # Increase hugepage count
                ret_code, _, _ = ssh.exec_command_sudo(
                    'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.
                    format(huge_to_allocate))
                if int(ret_code) != 0:
                    raise RuntimeError('Mount huge pages failed on {host}'.
                                       format(host=node['host']))
            # If we do not want to allocate dynamicaly end with error
            else:
                raise RuntimeError('Not enough free huge pages: {0}, {1} MB'.
                                   format(huge_free, huge_free * huge_size))
        # Check if huge pages mount point exist
        has_huge_mnt = False
        ret_code, stdout, _ = ssh.exec_command('cat /proc/mounts')
        if int(ret_code) == 0:
            for line in stdout.splitlines():
                # Try to find something like:
                # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
                mount = line.split()
                if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
                    has_huge_mnt = True
                    break
        # If huge page mount point not exist create one
        if not has_huge_mnt:
            ret_code, _, _ = ssh.exec_command_sudo(
                'mkdir -p {mnt}'.format(mnt=huge_mnt))
            if int(ret_code) != 0:
                raise RuntimeError('Create mount dir failed on {host}'.
                                   format(host=node['host']))
            # Page size is hard-coded to 2048k here regardless of huge_size.
            ret_code, _, _ = ssh.exec_command_sudo(
                'mount -t hugetlbfs -o pagesize=2048k none {mnt}'.
                format(mnt=huge_mnt))
            if int(ret_code) != 0:
                raise RuntimeError('Mount huge pages failed on {host}'.
                                   format(host=node['host']))
| 38.048724 | 80 | 0.566071 | # Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DUT setup library."""
from robot.api import logger
from resources.libraries.python.Constants import Constants
from resources.libraries.python.ssh import SSH, exec_cmd_no_error
from resources.libraries.python.topology import NodeType, Topology
class DUTSetup(object):
"""Contains methods for setting up DUTs."""
@staticmethod
def get_service_logs(node, service):
"""Get specific service unit logs from node.
:param node: Node in the topology.
:param service: Service unit name.
:type node: dict
:type service: str
"""
if DUTSetup.running_in_container(node):
command = ('echo $(< /var/log/supervisord.log);'
'echo $(< /tmp/*supervisor*.log)')
else:
command = ('journalctl --no-pager --unit={name} '
'--since="$(echo `systemctl show -p '
'ActiveEnterTimestamp {name}` | '
'awk \'{{print $2 $3}}\')"'.
format(name=service))
message = 'Node {host} failed to get logs from unit {name}'.\
format(host=node['host'], name=service)
exec_cmd_no_error(node, command, timeout=30, sudo=True,
message=message)
@staticmethod
def get_service_logs_on_all_duts(nodes, service):
"""Get specific service unit logs from all DUTs.
:param nodes: Nodes in the topology.
:param service: Service unit name.
:type nodes: dict
:type service: str
"""
for node in nodes.values():
if node['type'] == NodeType.DUT:
DUTSetup.get_service_logs(node, service)
@staticmethod
def restart_service(node, service):
"""Restart the named service on node.
:param node: Node in the topology.
:param service: Service unit name.
:type node: dict
:type service: str
"""
if DUTSetup.running_in_container(node):
command = 'supervisorctl restart {name}'.format(name=service)
else:
command = 'service {name} restart'.format(name=service)
message = 'Node {host} failed to restart service {name}'.\
format(host=node['host'], name=service)
exec_cmd_no_error(
node, command, timeout=180, sudo=True, message=message)
DUTSetup.get_service_logs(node, service)
@staticmethod
def restart_service_on_all_duts(nodes, service):
"""Restart the named service on all DUTs.
:param node: Nodes in the topology.
:param service: Service unit name.
:type node: dict
:type service: str
"""
for node in nodes.values():
if node['type'] == NodeType.DUT:
DUTSetup.restart_service(node, service)
@staticmethod
def start_service(node, service):
"""Start up the named service on node.
:param node: Node in the topology.
:param service: Service unit name.
:type node: dict
:type service: str
"""
# TODO: change command to start once all parent function updated.
if DUTSetup.running_in_container(node):
command = 'supervisorctl restart {name}'.format(name=service)
else:
command = 'service {name} restart'.format(name=service)
message = 'Node {host} failed to start service {name}'.\
format(host=node['host'], name=service)
exec_cmd_no_error(
node, command, timeout=180, sudo=True, message=message)
DUTSetup.get_service_logs(node, service)
@staticmethod
def start_service_on_all_duts(nodes, service):
"""Start up the named service on all DUTs.
:param node: Nodes in the topology.
:param service: Service unit name.
:type node: dict
:type service: str
"""
for node in nodes.values():
if node['type'] == NodeType.DUT:
DUTSetup.start_service(node, service)
@staticmethod
def stop_service(node, service):
"""Stop the named service on node.
:param node: Node in the topology.
:param service: Service unit name.
:type node: dict
:type service: str
"""
if DUTSetup.running_in_container(node):
command = 'supervisorctl stop {name}'.format(name=service)
else:
command = 'service {name} stop'.format(name=service)
message = 'Node {host} failed to stop service {name}'.\
format(host=node['host'], name=service)
exec_cmd_no_error(
node, command, timeout=180, sudo=True, message=message)
DUTSetup.get_service_logs(node, service)
@staticmethod
def stop_service_on_all_duts(nodes, service):
"""Stop the named service on all DUTs.
:param node: Nodes in the topology.
:param service: Service unit name.
:type node: dict
:type service: str
"""
for node in nodes.values():
if node['type'] == NodeType.DUT:
DUTSetup.stop_service(node, service)
    @staticmethod
    def get_vpp_pid(node):
        """Get PID of running VPP process.
        :param node: DUT node.
        :type node: dict
        :returns: PID
        :rtype: int
        :raises RuntimeError: If it is not possible to get the PID.
        """
        # NOTE(review): actual return type is polymorphic — int for a single
        # PID, list of ints for several, None if no VPP process is found after
        # three tries.  Callers must handle all three cases.
        ssh = SSH()
        ssh.connect(node)
        for i in range(3):
            logger.trace('Try {}: Get VPP PID'.format(i))
            ret_code, stdout, stderr = ssh.exec_command('pidof vpp')
            if int(ret_code):
                raise RuntimeError('Not possible to get PID of VPP process '
                                   'on node: {0}\n {1}'.
                                   format(node['host'], stdout + stderr))
            pid_list = stdout.split()
            if len(pid_list) == 1:
                # Exactly one VPP instance running.
                return int(stdout)
            elif not pid_list:
                # VPP may still be starting; retry.
                logger.debug("No VPP PID found on node {0}".
                             format(node['host']))
                continue
            else:
                logger.debug("More then one VPP PID found on node {0}".
                             format(node['host']))
                return [int(pid) for pid in pid_list]
        return None
@staticmethod
def get_vpp_pids(nodes):
"""Get PID of running VPP process on all DUTs.
:param nodes: DUT nodes.
:type nodes: dict
:returns: PIDs
:rtype: dict
"""
pids = dict()
for node in nodes.values():
if node['type'] == NodeType.DUT:
pids[node['host']] = DUTSetup.get_vpp_pid(node)
return pids
@staticmethod
def crypto_device_verify(node, crypto_type, numvfs, force_init=False):
"""Verify if Crypto QAT device virtual functions are initialized on all
DUTs. If parameter force initialization is set to True, then try to
initialize or remove VFs on QAT.
:param node: DUT node.
:crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
:param numvfs: Number of VFs to initialize, 0 - disable the VFs.
:param force_init: If True then try to initialize to specific value.
:type node: dict
:type crypto_type: string
:type numvfs: int
:type force_init: bool
:returns: nothing
:raises RuntimeError: If QAT VFs are not created and force init is set
to False.
"""
pci_addr = Topology.get_cryptodev(node)
sriov_numvfs = DUTSetup.get_sriov_numvfs(node, pci_addr)
if sriov_numvfs != numvfs:
if force_init:
# QAT is not initialized and we want to initialize with numvfs
DUTSetup.crypto_device_init(node, crypto_type, numvfs)
else:
raise RuntimeError('QAT device failed to create VFs on {host}'.
format(host=node['host']))
@staticmethod
def crypto_device_init(node, crypto_type, numvfs):
"""Init Crypto QAT device virtual functions on DUT.
:param node: DUT node.
:crypto_type: Crypto device type - HW_DH895xcc or HW_C3xxx.
:param numvfs: Number of VFs to initialize, 0 - disable the VFs.
:type node: dict
:type crypto_type: string
:type numvfs: int
:returns: nothing
:raises RuntimeError: If failed to stop VPP or QAT failed to initialize.
"""
if crypto_type == "HW_DH895xcc":
kernel_mod = "qat_dh895xcc"
kernel_drv = "dh895xcc"
elif crypto_type == "HW_C3xxx":
kernel_mod = "qat_c3xxx"
kernel_drv = "c3xxx"
else:
raise RuntimeError('Unsupported crypto device type on {host}'.
format(host=node['host']))
pci_addr = Topology.get_cryptodev(node)
# QAT device must be re-bound to kernel driver before initialization.
DUTSetup.verify_kernel_module(node, kernel_mod, force_load=True)
# Stop VPP to prevent deadlock.
DUTSetup.stop_service(node, Constants.VPP_UNIT)
current_driver = DUTSetup.get_pci_dev_driver(
node, pci_addr.replace(':', r'\:'))
if current_driver is not None:
DUTSetup.pci_driver_unbind(node, pci_addr)
# Bind to kernel driver.
DUTSetup.pci_driver_bind(node, pci_addr, kernel_drv)
# Initialize QAT VFs.
if numvfs > 0:
DUTSetup.set_sriov_numvfs(node, pci_addr, numvfs)
@staticmethod
def get_virtfn_pci_addr(node, pf_pci_addr, vf_id):
"""Get PCI address of Virtual Function.
:param node: DUT node.
:param pf_pci_addr: Physical Function PCI address.
:param vf_id: Virtual Function number.
:type node: dict
:type pf_pci_addr: str
:type vf_id: int
:returns: Virtual Function PCI address.
:rtype: int
:raises RuntimeError: If failed to get Virtual Function PCI address.
"""
command = "sh -c "\
"'basename $(readlink /sys/bus/pci/devices/{pci}/virtfn{vf_id})'".\
format(pci=pf_pci_addr, vf_id=vf_id)
message = 'Failed to get virtual function PCI address.'
stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
message=message)
return stdout.strip()
@staticmethod
def get_sriov_numvfs(node, pf_pci_addr):
"""Get number of SR-IOV VFs.
:param node: DUT node.
:param pf_pci_addr: Physical Function PCI device address.
:type node: dict
:type pf_pci_addr: str
:returns: Number of VFs.
:rtype: int
:raises RuntimeError: If PCI device is not SR-IOV capable.
"""
command = 'cat /sys/bus/pci/devices/{pci}/sriov_numvfs'.\
format(pci=pf_pci_addr.replace(':', r'\:'))
message = 'PCI device {pci} is not a SR-IOV device.'.\
format(pci=pf_pci_addr)
for _ in range(3):
stdout, _ = exec_cmd_no_error(node, command, timeout=30, sudo=True,
message=message)
try:
sriov_numvfs = int(stdout)
except ValueError:
logger.trace('Reading sriov_numvfs info failed on {host}'.
format(host=node['host']))
else:
return sriov_numvfs
@staticmethod
def set_sriov_numvfs(node, pf_pci_addr, numvfs=0):
"""Init or reset SR-IOV virtual functions by setting its number on PCI
device on DUT. Setting to zero removes all VFs.
:param node: DUT node.
:param pf_pci_addr: Physical Function PCI device address.
:param numvfs: Number of VFs to initialize, 0 - removes the VFs.
:type node: dict
:type pf_pci_addr: str
:type numvfs: int
:raises RuntimeError: Failed to create VFs on PCI.
"""
command = "sh -c "\
"'echo {num} | tee /sys/bus/pci/devices/{pci}/sriov_numvfs'".\
format(num=numvfs, pci=pf_pci_addr.replace(':', r'\:'))
message = 'Failed to create {num} VFs on {pci} device on {host}'.\
format(num=numvfs, pci=pf_pci_addr, host=node['host'])
exec_cmd_no_error(node, command, timeout=120, sudo=True,
message=message)
@staticmethod
def pci_driver_unbind(node, pci_addr):
"""Unbind PCI device from current driver on node.
:param node: DUT node.
:param pci_addr: PCI device address.
:type node: dict
:type pci_addr: str
:raises RuntimeError: If PCI device unbind failed.
"""
command = "sh -c "\
"'echo {pci} | tee /sys/bus/pci/devices/{pcie}/driver/unbind'".\
format(pci=pci_addr, pcie=pci_addr.replace(':', r'\:'))
message = 'Failed to unbind PCI device {pci} on {host}'.\
format(pci=pci_addr, host=node['host'])
exec_cmd_no_error(node, command, timeout=120, sudo=True,
message=message)
@staticmethod
def pci_driver_bind(node, pci_addr, driver):
"""Bind PCI device to driver on node.
:param node: DUT node.
:param pci_addr: PCI device address.
:param driver: Driver to bind.
:type node: dict
:type pci_addr: str
:type driver: str
:raises RuntimeError: If PCI device bind failed.
"""
message = 'Failed to bind PCI device {pci} to {driver} on host {host}'.\
format(pci=pci_addr, driver=driver, host=node['host'])
command = "sh -c "\
"'echo {driver} | tee /sys/bus/pci/devices/{pci}/driver_override'".\
format(driver=driver, pci=pci_addr.replace(':', r'\:'))
exec_cmd_no_error(node, command, timeout=120, sudo=True,
message=message)
command = "sh -c "\
"'echo {pci} | tee /sys/bus/pci/drivers/{driver}/bind'".\
format(pci=pci_addr, driver=driver)
exec_cmd_no_error(node, command, timeout=120, sudo=True,
message=message)
command = "sh -c "\
"'echo | tee /sys/bus/pci/devices/{pci}/driver_override'".\
format(pci=pci_addr.replace(':', r'\:'))
exec_cmd_no_error(node, command, timeout=120, sudo=True,
message=message)
@staticmethod
def pci_vf_driver_unbind(node, pf_pci_addr, vf_id):
"""Unbind Virtual Function from driver on node.
:param node: DUT node.
:param pf_pci_addr: PCI device address.
:param vf_id: Virtual Function ID.
:type node: dict
:type pf_pci_addr: str
:type vf_id: int
:raises RuntimeError: If Virtual Function unbind failed.
"""
vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)
command = "sh -c "\
"'echo {vf_pci_addr} | tee {vf_path}/driver/unbind'".\
format(vf_pci_addr=vf_pci_addr, vf_path=vf_path)
message = 'Failed to unbind VF {vf_pci_addr} to on {host}'.\
format(vf_pci_addr=vf_pci_addr, host=node['host'])
exec_cmd_no_error(node, command, timeout=120, sudo=True,
message=message)
@staticmethod
def pci_vf_driver_bind(node, pf_pci_addr, vf_id, driver):
"""Bind Virtual Function to driver on node.
:param node: DUT node.
:param pf_pci_addr: PCI device address.
:param vf_id: Virtual Function ID.
:param driver: Driver to bind.
:type node: dict
:type pf_pci_addr: str
:type vf_id: int
:type driver: str
:raises RuntimeError: If PCI device bind failed.
"""
vf_pci_addr = DUTSetup.get_virtfn_pci_addr(node, pf_pci_addr, vf_id)
vf_path = "/sys/bus/pci/devices/{pf_pci_addr}/virtfn{vf_id}".\
format(pf_pci_addr=pf_pci_addr.replace(':', r'\:'), vf_id=vf_id)
message = 'Failed to bind VF {vf_pci_addr} to {driver} on {host}'.\
format(vf_pci_addr=vf_pci_addr, driver=driver, host=node['host'])
command = "sh -c "\
"'echo {driver} | tee {vf_path}/driver_override'".\
format(driver=driver, vf_path=vf_path)
exec_cmd_no_error(node, command, timeout=120, sudo=True,
message=message)
command = "sh -c "\
"'echo {vf_pci_addr} | tee /sys/bus/pci/drivers/{driver}/bind'".\
format(vf_pci_addr=vf_pci_addr, driver=driver)
exec_cmd_no_error(node, command, timeout=120, sudo=True,
message=message)
command = "sh -c "\
"'echo | tee {vf_path}/driver_override'".\
format(vf_path=vf_path)
exec_cmd_no_error(node, command, timeout=120, sudo=True,
message=message)
@staticmethod
def get_pci_dev_driver(node, pci_addr):
"""Get current PCI device driver on node.
.. note::
# lspci -vmmks 0000:00:05.0
Slot: 00:05.0
Class: Ethernet controller
Vendor: Red Hat, Inc
Device: Virtio network device
SVendor: Red Hat, Inc
SDevice: Device 0001
PhySlot: 5
Driver: virtio-pci
:param node: DUT node.
:param pci_addr: PCI device address.
:type node: dict
:type pci_addr: str
:returns: Driver or None
:raises RuntimeError: If PCI rescan or lspci command execution failed.
:raises RuntimeError: If it is not possible to get the interface driver
information from the node.
"""
ssh = SSH()
ssh.connect(node)
for i in range(3):
logger.trace('Try number {0}: Get PCI device driver'.format(i))
cmd = 'lspci -vmmks {0}'.format(pci_addr)
ret_code, stdout, _ = ssh.exec_command(cmd)
if int(ret_code):
raise RuntimeError("'{0}' failed on '{1}'"
.format(cmd, node['host']))
for line in stdout.splitlines():
if not line:
continue
name = None
value = None
try:
name, value = line.split("\t", 1)
except ValueError:
if name == "Driver:":
return None
if name == 'Driver:':
return value
if i < 2:
logger.trace('Driver for PCI device {} not found, executing '
'pci rescan and retrying'.format(pci_addr))
cmd = 'sh -c "echo 1 > /sys/bus/pci/rescan"'
ret_code, _, _ = ssh.exec_command_sudo(cmd)
if int(ret_code) != 0:
raise RuntimeError("'{0}' failed on '{1}'"
.format(cmd, node['host']))
return None
@staticmethod
def verify_kernel_module(node, module, force_load=False):
"""Verify if kernel module is loaded on node. If parameter force
load is set to True, then try to load the modules.
:param node: Node.
:param module: Module to verify.
:param force_load: If True then try to load module.
:type node: dict
:type module: str
:type force_load: bool
:raises RuntimeError: If module is not loaded or failed to load.
"""
command = 'grep -w {module} /proc/modules'.format(module=module)
message = 'Kernel module {module} is not loaded on host {host}'.\
format(module=module, host=node['host'])
try:
exec_cmd_no_error(node, command, timeout=30, sudo=False,
message=message)
except RuntimeError:
if force_load:
# Module is not loaded and we want to load it
DUTSetup.load_kernel_module(node, module)
else:
raise
@staticmethod
def verify_kernel_module_on_all_duts(nodes, module, force_load=False):
"""Verify if kernel module is loaded on all DUTs. If parameter force
load is set to True, then try to load the modules.
:param node: DUT nodes.
:param module: Module to verify.
:param force_load: If True then try to load module.
:type node: dict
:type module: str
:type force_load: bool
"""
for node in nodes.values():
if node['type'] == NodeType.DUT:
DUTSetup.verify_kernel_module(node, module, force_load)
@staticmethod
def verify_uio_driver_on_all_duts(nodes):
"""Verify if uio driver kernel module is loaded on all DUTs. If module
is not present it will try to load it.
:param node: DUT nodes.
:type node: dict
"""
for node in nodes.values():
if node['type'] == NodeType.DUT:
uio_driver = Topology.get_uio_driver(node)
DUTSetup.verify_kernel_module(node, uio_driver, force_load=True)
@staticmethod
def load_kernel_module(node, module):
"""Load kernel module on node.
:param node: DUT node.
:param module: Module to load.
:type node: dict
:type module: str
:returns: nothing
:raises RuntimeError: If loading failed.
"""
command = 'modprobe {module}'.format(module=module)
message = 'Failed to load {module} on host {host}'.\
format(module=module, host=node['host'])
exec_cmd_no_error(node, command, timeout=30, sudo=True, message=message)
@staticmethod
def install_vpp_on_all_duts(nodes, vpp_pkg_dir):
"""Install VPP on all DUT nodes. Start the VPP service in case of
systemd is not available or does not support autostart.
:param nodes: Nodes in the topology.
:param vpp_pkg_dir: Path to directory where VPP packages are stored.
:type nodes: dict
:type vpp_pkg_dir: str
:raises RuntimeError: If failed to remove or install VPP.
"""
for node in nodes.values():
message = 'Failed to install VPP on host {host}!'.\
format(host=node['host'])
if node['type'] == NodeType.DUT:
command = 'ln -s /dev/null /etc/sysctl.d/80-vpp.conf || true'
exec_cmd_no_error(node, command, sudo=True)
command = '. /etc/lsb-release; echo "${DISTRIB_ID}"'
stdout, _ = exec_cmd_no_error(node, command)
if stdout.strip() == 'Ubuntu':
exec_cmd_no_error(node, 'apt-get purge -y "*vpp*" || true',
timeout=120, sudo=True)
exec_cmd_no_error(node, 'dpkg -i --force-all {dir}*.deb'.
format(dir=vpp_pkg_dir), timeout=120,
sudo=True, message=message)
exec_cmd_no_error(node, 'dpkg -l | grep vpp', sudo=True)
if DUTSetup.running_in_container(node):
DUTSetup.restart_service(node, Constants.VPP_UNIT)
else:
exec_cmd_no_error(node, 'yum -y remove "*vpp*" || true',
timeout=120, sudo=True)
exec_cmd_no_error(node, 'rpm -ivh {dir}*.rpm'.
format(dir=vpp_pkg_dir), timeout=120,
sudo=True, message=message)
exec_cmd_no_error(node, 'rpm -qai *vpp*', sudo=True)
DUTSetup.restart_service(node, Constants.VPP_UNIT)
@staticmethod
def running_in_container(node):
"""This method tests if topology node is running inside container.
:param node: Topology node.
:type node: dict
:returns: True if running in docker container, false if not or failed
to detect.
:rtype: bool
"""
command = "fgrep docker /proc/1/cgroup"
message = 'Failed to get cgroup settings.'
try:
exec_cmd_no_error(node, command, timeout=30, sudo=False,
message=message)
except RuntimeError:
return False
return True
@staticmethod
def get_docker_mergeddir(node, uuid):
"""Get Docker overlay for MergedDir diff.
:param node: DUT node.
:param uuid: Docker UUID.
:type node: dict
:type uuid: str
:returns: Docker container MergedDir.
:rtype: str
:raises RuntimeError: If getting output failed.
"""
command = "docker inspect --format='"\
"{{{{.GraphDriver.Data.MergedDir}}}}' {uuid}".format(uuid=uuid)
message = 'Failed to get directory of {uuid} on host {host}'.\
format(uuid=uuid, host=node['host'])
stdout, _ = exec_cmd_no_error(node, command, sudo=True, message=message)
return stdout.strip()
@staticmethod
def get_huge_page_size(node):
"""Get default size of huge pages in system.
:param node: Node in the topology.
:type node: dict
:returns: Default size of free huge pages in system.
:rtype: int
:raises RuntimeError: If reading failed for three times.
"""
ssh = SSH()
ssh.connect(node)
for _ in range(3):
ret_code, stdout, _ = ssh.exec_command_sudo(
"grep Hugepagesize /proc/meminfo | awk '{ print $2 }'")
if ret_code == 0:
try:
huge_size = int(stdout)
except ValueError:
logger.trace('Reading huge page size information failed')
else:
break
else:
raise RuntimeError('Getting huge page size information failed.')
return huge_size
@staticmethod
def get_huge_page_free(node, huge_size):
"""Get number of free huge pages in system.
:param node: Node in the topology.
:param huge_size: Size of hugepages.
:type node: dict
:type huge_size: int
:returns: Number of free huge pages in system.
:rtype: int
:raises RuntimeError: If reading failed for three times.
"""
# TODO: add numa aware option
ssh = SSH()
ssh.connect(node)
for _ in range(3):
ret_code, stdout, _ = ssh.exec_command_sudo(
'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/free_hugepages'.
format(huge_size))
if ret_code == 0:
try:
huge_free = int(stdout)
except ValueError:
logger.trace('Reading free huge pages information failed')
else:
break
else:
raise RuntimeError('Getting free huge pages information failed.')
return huge_free
@staticmethod
def get_huge_page_total(node, huge_size):
"""Get total number of huge pages in system.
:param node: Node in the topology.
:param huge_size: Size of hugepages.
:type node: dict
:type huge_size: int
:returns: Total number of huge pages in system.
:rtype: int
:raises RuntimeError: If reading failed for three times.
"""
# TODO: add numa aware option
ssh = SSH()
ssh.connect(node)
for _ in range(3):
ret_code, stdout, _ = ssh.exec_command_sudo(
'cat /sys/kernel/mm/hugepages/hugepages-{0}kB/nr_hugepages'.
format(huge_size))
if ret_code == 0:
try:
huge_total = int(stdout)
except ValueError:
logger.trace('Reading total huge pages information failed')
else:
break
else:
raise RuntimeError('Getting total huge pages information failed.')
return huge_total
    @staticmethod
    def check_huge_page(node, huge_mnt, mem_size, allocate=False):
        """Check if there is enough HugePages in system. If allocate is set to
        true, try to allocate more HugePages.
        :param node: Node in the topology.
        :param huge_mnt: HugePage mount point.
        :param mem_size: Requested memory in MB.
        :param allocate: Whether to allocate more memory if not enough.
        :type node: dict
        :type huge_mnt: str
        :type mem_size: str
        :type allocate: bool
        :raises RuntimeError: Mounting hugetlbfs failed or not enough HugePages
        or increasing map count failed.
        """
        # NOTE(review): mem_size is documented as str but is used in integer
        # arithmetic below (mem_size * 1024); presumably callers pass an int
        # here -- confirm against call sites.
        # TODO: split function into smaller parts.
        ssh = SSH()
        ssh.connect(node)
        # Get huge pages information
        huge_size = DUTSetup.get_huge_page_size(node)
        huge_free = DUTSetup.get_huge_page_free(node, huge_size)
        huge_total = DUTSetup.get_huge_page_total(node, huge_size)
        # Check if memory reqested is available on host
        # (mem_size is in MB, huge_size in kB, hence the * 1024 conversion).
        if (mem_size * 1024) > (huge_free * huge_size):
            # If we want to allocate hugepage dynamically
            if allocate:
                # Allocate twice the missing page count on top of the current
                # total to leave some headroom.
                mem_needed = (mem_size * 1024) - (huge_free * huge_size)
                huge_to_allocate = ((mem_needed / huge_size) * 2) + huge_total
                max_map_count = huge_to_allocate*4
                # Increase maximum number of memory map areas a process may have
                ret_code, _, _ = ssh.exec_command_sudo(
                    'echo "{0}" | sudo tee /proc/sys/vm/max_map_count'.
                    format(max_map_count))
                if int(ret_code) != 0:
                    raise RuntimeError('Increase map count failed on {host}'.
                                       format(host=node['host']))
                # Increase hugepage count
                ret_code, _, _ = ssh.exec_command_sudo(
                    'echo "{0}" | sudo tee /proc/sys/vm/nr_hugepages'.
                    format(huge_to_allocate))
                if int(ret_code) != 0:
                    raise RuntimeError('Mount huge pages failed on {host}'.
                                       format(host=node['host']))
            # If we do not want to allocate dynamicaly end with error
            else:
                raise RuntimeError('Not enough free huge pages: {0}, {1} MB'.
                                   format(huge_free, huge_free * huge_size))
        # Check if huge pages mount point exist
        has_huge_mnt = False
        ret_code, stdout, _ = ssh.exec_command('cat /proc/mounts')
        if int(ret_code) == 0:
            for line in stdout.splitlines():
                # Try to find something like:
                # none /mnt/huge hugetlbfs rw,relatime,pagesize=2048k 0 0
                mount = line.split()
                if mount[2] == 'hugetlbfs' and mount[1] == huge_mnt:
                    has_huge_mnt = True
                    break
        # If huge page mount point not exist create one
        if not has_huge_mnt:
            ret_code, _, _ = ssh.exec_command_sudo(
                'mkdir -p {mnt}'.format(mnt=huge_mnt))
            if int(ret_code) != 0:
                raise RuntimeError('Create mount dir failed on {host}'.
                                   format(host=node['host']))
            # Mount with a fixed 2048k page size regardless of huge_size read
            # above -- presumably all supported systems use 2M pages; verify.
            ret_code, _, _ = ssh.exec_command_sudo(
                'mount -t hugetlbfs -o pagesize=2048k none {mnt}'.
                format(mnt=huge_mnt))
            if int(ret_code) != 0:
                raise RuntimeError('Mount huge pages failed on {host}'.
                                   format(host=node['host']))
| 0 | 0 | 0 |
ce31817538eccc99f0a4ef09f78d47d2ed58961a | 313 | py | Python | tests/test_card.py | joumaico/coreutil | 9388e7426df96175f9a30d841b4c280179f37633 | [
"MIT"
] | null | null | null | tests/test_card.py | joumaico/coreutil | 9388e7426df96175f9a30d841b4c280179f37633 | [
"MIT"
] | null | null | null | tests/test_card.py | joumaico/coreutil | 9388e7426df96175f9a30d841b4c280179f37633 | [
"MIT"
] | null | null | null | import coreutil
import unittest
| 28.454545 | 66 | 0.696486 | import coreutil
import unittest
class TestCard(unittest.TestCase):
def test_credit(self):
card = coreutil.card.credit('378282246310005')
self.assertTrue(card.isvalid())
self.assertEqual(card.network().short, 'amex')
self.assertEqual(card.network().brand, 'American Express')
| 218 | 13 | 49 |
21e754d69abc89769daf7e31f441ea29a015848c | 171 | py | Python | book_crawler/run.py | zheng-zy/book_crawler | a092607a097986e9cd242809066e0948e64d8bcb | [
"Apache-2.0"
] | null | null | null | book_crawler/run.py | zheng-zy/book_crawler | a092607a097986e9cd242809066e0948e64d8bcb | [
"Apache-2.0"
] | null | null | null | book_crawler/run.py | zheng-zy/book_crawler | a092607a097986e9cd242809066e0948e64d8bcb | [
"Apache-2.0"
] | null | null | null | #!usr/bin/env python
# coding=utf-8
# Created by zhezhiyong@163.com on 2016/11/16.
"""
启动
"""
from scrapy import cmdline
cmdline.execute("scrapy crawl biquge".split())
| 14.25 | 46 | 0.707602 | #!usr/bin/env python
# coding=utf-8
# Created by zhezhiyong@163.com on 2016/11/16.
"""
启动
"""
from scrapy import cmdline
cmdline.execute("scrapy crawl biquge".split())
| 0 | 0 | 0 |
54e66a5e54bee3047e85fcfaef77f2b1ac20bf82 | 302 | py | Python | pykage/setup.py | antoineB24/pykage | 472bd9b80f93b3a1c61851370915b843ff2719c9 | [
"MIT"
] | null | null | null | pykage/setup.py | antoineB24/pykage | 472bd9b80f93b3a1c61851370915b843ff2719c9 | [
"MIT"
] | null | null | null | pykage/setup.py | antoineB24/pykage | 472bd9b80f93b3a1c61851370915b843ff2719c9 | [
"MIT"
] | null | null | null | from cx_Freeze import setup, Executable
files = {"include_files": [
"command/",
], "packages": []}
setup(
name="pykage",
version="0.1.5",
description="npmjs en python",
options={'build_exe': files},
executables=[Executable("pykage.py", base=None)])
| 23.230769 | 50 | 0.576159 | from cx_Freeze import setup, Executable
files = {"include_files": [
"command/",
], "packages": []}
setup(
name="pykage",
version="0.1.5",
description="npmjs en python",
options={'build_exe': files},
executables=[Executable("pykage.py", base=None)])
| 0 | 0 | 0 |
f59eeebd98b7f8a8cf9843ade228600b961393ee | 1,454 | py | Python | src/onegov/core/security/roles.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/core/security/roles.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/core/security/roles.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | from onegov.core.framework import Framework
from onegov.core.security import Public, Personal, Private, Secret
@Framework.setting_section(section="roles")
def get_roles_setting():
""" Returns the default roles available to onegov.core applications.
Applications building on onegov.core may add more roles and permissions,
or replace the existing ones entirely, though it's not something that
one should do carelessly.
The default roles are:
**admin**
Has access to everything
**editor**
Has access to most things
**member**
Has access their own data. Be careful though, core doesn't know about
personal data, so this is just a role to implement registered users.
As with all permissions, making sure the right information is shown
is up to the applications.
**anonymous**
Has access to public things
"""
return {
# the admin role has access to everything
'admin': set((
Public,
Private,
Personal,
Secret
)),
# the editor can do most things
'editor': set((
Public,
Private,
Personal,
)),
# registered users can do a few things
'member': set((
Public,
Personal,
)),
# the public has some access
'anonymous': set((
Public,
))
}
| 26.436364 | 77 | 0.591472 | from onegov.core.framework import Framework
from onegov.core.security import Public, Personal, Private, Secret
@Framework.setting_section(section="roles")
def get_roles_setting():
""" Returns the default roles available to onegov.core applications.
Applications building on onegov.core may add more roles and permissions,
or replace the existing ones entirely, though it's not something that
one should do carelessly.
The default roles are:
**admin**
Has access to everything
**editor**
Has access to most things
**member**
Has access their own data. Be careful though, core doesn't know about
personal data, so this is just a role to implement registered users.
As with all permissions, making sure the right information is shown
is up to the applications.
**anonymous**
Has access to public things
"""
return {
# the admin role has access to everything
'admin': set((
Public,
Private,
Personal,
Secret
)),
# the editor can do most things
'editor': set((
Public,
Private,
Personal,
)),
# registered users can do a few things
'member': set((
Public,
Personal,
)),
# the public has some access
'anonymous': set((
Public,
))
}
| 0 | 0 | 0 |
84900ee8ef51e507d9d9ff1b73f2ced60e75a3c0 | 732 | py | Python | pkgcore/test/cache/test_flat_hash.py | pombreda/pkgcore | b438fc573af1a031d7ce12adbbf299bab5338451 | [
"BSD-3-Clause"
] | 1 | 2021-07-05T13:10:18.000Z | 2021-07-05T13:10:18.000Z | pkgcore/test/cache/test_flat_hash.py | vapier/pkgcore | 35a7e4f4f0fc61dd9c4dc72d35a57e2e9d5b832f | [
"BSD-3-Clause"
] | 8 | 2015-03-24T14:21:44.000Z | 2015-03-24T14:21:44.000Z | pkgcore/test/cache/test_flat_hash.py | vapier/pkgcore | 35a7e4f4f0fc61dd9c4dc72d35a57e2e9d5b832f | [
"BSD-3-Clause"
] | null | null | null | # Copyright: 2006 Brian Harring <ferringb@gmail.com>
# License: GPL2/BSD
from snakeoil.test.mixins import TempDirMixin
from pkgcore.cache import flat_hash
from pkgcore.test.cache import util, test_base
| 27.111111 | 71 | 0.703552 | # Copyright: 2006 Brian Harring <ferringb@gmail.com>
# License: GPL2/BSD
from snakeoil.test.mixins import TempDirMixin
from pkgcore.cache import flat_hash
from pkgcore.test.cache import util, test_base
class db(flat_hash.database):
    """flat_hash database that stamps each stored entry with the shared test
    checksum object and strips the checksum marker again on read."""
    def __setitem__(self, cpv, data):
        # Tag the entry with the test suite's checksum object before storing.
        data['_chf_'] = test_base._chf_obj
        return flat_hash.database.__setitem__(self, cpv, data)
    def __getitem__(self, cpv):
        # Python 2 style iteritems() -- this module predates py3 support.
        d = dict(flat_hash.database.__getitem__(self, cpv).iteritems())
        # Drop the per-backend checksum marker (key '_<chf_type>_') so the
        # caller only sees the raw cached data.
        d.pop('_%s_' % self.chf_type, None)
        return d
class TestFlatHash(util.GenericCacheMixin, TempDirMixin):
    """Run the generic cache test suite against the flat_hash backend."""
    def get_db(self, readonly=False):
        # Back the cache with the temporary directory from TempDirMixin.
        return db(self.dir,
            auxdbkeys=self.cache_keys, readonly=readonly)
3de6852662f7918708904ec09050295efde9728e | 3,616 | py | Python | rqalpha/utils/functools.py | tranzwalle/rqalphax | 5a85d5c8c9df988ace26b68852c9ec39ad2de6df | [
"Apache-2.0"
] | 1 | 2022-01-27T15:37:31.000Z | 2022-01-27T15:37:31.000Z | rqalpha/utils/functools.py | tranzwalle/rqalphax | 5a85d5c8c9df988ace26b68852c9ec39ad2de6df | [
"Apache-2.0"
] | 1 | 2021-12-21T08:14:51.000Z | 2021-12-21T08:14:51.000Z | rqalpha/utils/functools.py | tranzwalle/rqalphax | 5a85d5c8c9df988ace26b68852c9ec39ad2de6df | [
"Apache-2.0"
] | 1 | 2022-03-30T11:50:20.000Z | 2022-03-30T11:50:20.000Z | # -*- coding: utf-8 -*-
# 版权所有 2019 深圳米筐科技有限公司(下称“米筐科技”)
#
# 除非遵守当前许可,否则不得使用本软件。
#
# * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件):
# 遵守 Apache License 2.0(下称“Apache 2.0 许可”),
# 您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。
# 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。
#
# * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件):
# 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、
# 本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,
# 否则米筐科技有权追究相应的知识产权侵权责任。
# 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。
# 详细的授权流程,请联系 public@ricequant.com 获取。
from inspect import signature
from typing import Callable, Union, Iterable
from functools import wraps, lru_cache as origin_lru_cache
cached_functions = []
| 32.576577 | 104 | 0.625 | # -*- coding: utf-8 -*-
# 版权所有 2019 深圳米筐科技有限公司(下称“米筐科技”)
#
# 除非遵守当前许可,否则不得使用本软件。
#
# * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件):
# 遵守 Apache License 2.0(下称“Apache 2.0 许可”),
# 您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。
# 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。
#
# * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件):
# 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、
# 本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,
# 否则米筐科技有权追究相应的知识产权侵权责任。
# 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。
# 详细的授权流程,请联系 public@ricequant.com 获取。
from inspect import signature
from typing import Callable, Union, Iterable
from functools import wraps, lru_cache as origin_lru_cache
cached_functions = []
def lru_cache(*args, **kwargs):
    """Drop-in wrapper around functools.lru_cache that additionally records
    every decorated function in the module-level registry, so all caches can
    be invalidated at once later on."""
    def _register_and_wrap(func):
        cached = origin_lru_cache(*args, **kwargs)(func)
        cached_functions.append(cached)
        return cached
    return _register_and_wrap
def clear_all_cached_functions():
    """Invalidate every cache previously registered in cached_functions."""
    for cached in cached_functions:
        cached.cache_clear()
def instype_singledispatch(func):
    """Decorator implementing single-dispatch on the *instrument type* of the
    first argument (an order_book_id string or an Instrument object).

    Implementations are registered per INSTRUMENT_TYPE via the attached
    ``register`` attribute; calling the wrapper resolves the argument's
    instrument type (looking it up through the environment's data proxy when
    given a plain id) and forwards to the registered implementation.
    Raises RQInvalidArgument for unresolvable arguments, or
    RQApiNotSupportedError when no implementation is registered at all.
    """
    # Imports are local to avoid import cycles at module load time.
    from rqalpha.model.instrument import Instrument
    from rqalpha.const import INSTRUMENT_TYPE
    from rqalpha.utils.exception import RQInvalidArgument, RQApiNotSupportedError
    from rqalpha.utils.i18n import gettext as _
    # Closure state: instype -> implementation, plus a lazily resolved
    # data proxy shared by all dispatches of this wrapper.
    registry = {}
    data_proxy = None
    def rq_invalid_argument(arg):
        # Build the appropriate error: invalid argument if implementations
        # exist, otherwise "API not supported" (empty registry).
        if registry:
            return RQInvalidArgument(_(
                u"function {}: invalid {} argument, "
                u"expected an order_book_id or instrument with types {}, got {} (type: {})"
            ).format(funcname, argname, [getattr(i, "name", str(i)) for i in registry], arg, type(arg)))
        else:
            return RQApiNotSupportedError(_(
                "function {} is not supported, please check your account or mod config"
            ).format(funcname))
    # Uses this module's own lru_cache, so the dispatch cache is also
    # registered in cached_functions and cleared by
    # clear_all_cached_functions().
    @lru_cache(1024)
    def dispatch(id_or_ins):
        # Resolve the implementation for an instrument or order_book_id.
        nonlocal data_proxy
        if isinstance(id_or_ins, Instrument):
            instype = id_or_ins.type
        else:
            if not data_proxy:
                # Lazy import/lookup: the Environment singleton may not exist
                # when the decorator is applied.
                from rqalpha.environment import Environment
                data_proxy = Environment.get_instance().data_proxy
            ins = data_proxy.instruments(id_or_ins)
            if not ins:
                raise rq_invalid_argument(id_or_ins)
            instype = ins.type
        try:
            return registry[instype]
        except KeyError:
            raise rq_invalid_argument(id_or_ins)
    def register(instypes):
        # type: (Union[INSTRUMENT_TYPE, Iterable[INSTRUMENT_TYPE]]) -> Callable
        # Accept a single type or an iterable of types.
        if isinstance(instypes, str):
            instypes = [instypes]
        def register_wrapper(f):
            for instype in instypes:
                registry[instype] = f
            return f
        return register_wrapper
    @wraps(func)
    def wrapper(*args, **kwargs):
        # The dispatch key is the first positional argument, or the keyword
        # argument matching the wrapped function's first parameter name.
        if not args:
            try:
                arg = kwargs[argname]
            except KeyError:
                raise TypeError('{}() missing 1 required positional argument: \'{}\''.format(
                    funcname, argname
                ))
        else:
            arg = args[0]
        try:
            impl = dispatch(arg)
        except TypeError:
            # Unhashable argument (lru_cache requires hashable keys).
            raise rq_invalid_argument(arg)
        return impl(*args, **kwargs)
    funcname = getattr(func, '__name__', 'instype_singledispatch function')
    # First parameter name of the wrapped function, used for kwargs dispatch.
    argname = next(iter(signature(func).parameters))
    wrapper.register = register
    return wrapper
| 2,694 | 0 | 69 |
f09f90174f6ec9d109747da36951eb742860b8c9 | 14,593 | py | Python | bomeba0/templates/glycans.py | aloctavodia/bomeba0 | e212986d8ee60be1da91d63a7a889db14ec851c3 | [
"Apache-2.0"
] | null | null | null | bomeba0/templates/glycans.py | aloctavodia/bomeba0 | e212986d8ee60be1da91d63a7a889db14ec851c3 | [
"Apache-2.0"
] | 28 | 2017-06-01T15:46:33.000Z | 2021-07-01T18:28:36.000Z | bomeba0/templates/glycans.py | aloctavodia/bomeba0 | e212986d8ee60be1da91d63a7a889db14ec851c3 | [
"Apache-2.0"
] | 6 | 2017-09-30T13:26:08.000Z | 2022-02-13T10:01:18.000Z | from collections import namedtuple
import numpy as np
"""
Templates for glycan residues
"""
AA_info = namedtuple('AA_info', 'coords atom_names bonds bb sc offset')
BDP_info = AA_info(coords=np.array([[-14.69, 10.15, -18.15],
[-15.46, 11.47, -18.33],
[-16.33, 11.35, -19.46],
[-14.45, 12.62, -18.54],
[-15.15, 13.88, -18.62],
[-13.41, 12.66, -17.39],
[-12.36, 13.57, -17.75],
[-12.76, 11.28, -17.17],
[-11.74, 11.28, -16.03],
[-10.58, 11.62, -16.19],
[-12.26, 11.06, -14.85],
[-13.8, 10.27, -17.01],
[-14.09, 9.95, -19.04],
[-16.06, 11.66, -17.43],
[-13.92, 12.44, -19.49],
[-13.9, 12.98, -16.46],
[-12.19, 11.01, -18.07],
[-16.99, 10.64, -19.31],
[-14.59, 14.56, -19.03]]),
atom_names=['C1', 'C2', 'O2', 'C3', 'O3', 'C4', 'O4', 'C5',
'C6', 'O6A', 'O6B', 'OR', 'H1', 'H2', 'H3',
'H4', 'H5', 'H2o', 'H3o'],
bb=[],
sc=[],
bonds=[(0, 1), (0, 11), (0, 12), (1, 2), (1, 3), (1, 13),
(2, 17), (3, 4), (3, 5), (3, 14), (4, 18), (5, 6),
(5, 7), (5, 15), (7, 8), (7, 11), (7, 16), (8, 9),
(8, 10)],
offset=19)
NGA_info = AA_info(coords=np.array([[-15., 5.88, -16.15],
[-15.3, 7.39, -16.33],
[-16.63, 7.71, -15.78],
[-15.24, 7.7, -17.85],
[-15.52, 9.08, -18.1],
[-13.87, 7.26, -18.45],
[-12.73, 7.93, -17.9],
[-13.71, 5.74, -18.23],
[-12.4, 5.17, -18.83],
[-12.36, 3.77, -18.58],
[-17.03, 8.94, -15.35],
[-16.27, 9.85, -15.15],
[-18.54, 9.06, -15.17],
[-13.75, 5.49, -16.79],
[-17.3, 6.97, -15.77],
[-15.8, 5.34, -16.66],
[-14.56, 7.98, -15.81],
[-16.02, 7.14, -18.36],
[-13.91, 7.52, -19.51],
[-14.54, 5.23, -18.71],
[-19.05, 8.97, -16.13],
[-18.78, 10.05, -14.75],
[-18.91, 8.29, -14.49],
[-12.37, 5.36, -19.91],
[-11.52, 5.64, -18.37],
[-12.52, 7.65, -16.98],
[-12.35, 3.64, -17.61]]),
atom_names=['C1', 'C2', 'N2', 'C3', 'O3', 'C4', 'O4', 'C5',
'C6', 'O6', 'C7', 'O7', 'C8', 'OR', 'HN2', 'H1',
'H2', 'H3', 'H4', 'H5', 'H81', 'H82', 'H83',
'H6R', 'H6S', 'H4o', 'H6o'],
bb=[],
sc=[],
bonds=[(0, 1), (0, 13), (0, 15), (1, 2), (1, 3), (1, 16),
(2, 10), (2, 14), (3, 4), (3, 5), (3, 17), (5, 6),
(5, 7), (5, 18), (6, 25), (7, 8), (7, 13), (7, 19),
(8, 9), (8, 23), (8, 24), (9, 26), (10, 11), (10, 12),
(12, 20), (12, 21), (12, 22)],
offset=27)
AFL_info = AA_info(coords=np.array([[ 3.54, -4.94, -9.76],
[ 3.83, -3.48, -10.16],
[ 3.92, -2.65, -9.01],
[ 5.17, -3.41, -10.92],
[ 5.41, -2.05, -11.31],
[ 5.13, -4.33, -12.16],
[ 4.22, -3.76, -13.11],
[ 4.71, -5.76, -11.76],
[ 4.44, -6.66, -12.99],
[ 3.49, -5.74, -10.97],
[ 2.58, -5. , -9.26],
[ 3.03, -3.12, -10.81],
[ 5.98, -3.74, -10.26],
[ 6.13, -4.36, -12.61],
[ 5.5 , -6.22, -11.15],
[ 5.32, -6.68, -13.65],
[ 4.23, -7.68, -12.67],
[ 3.59, -6.29, -13.56],
[ 4.24, -1.79, -9.36],
[ 6.3 , -2.01, -11.67],
[ 4.27, -2.8 , -13. ]]),
atom_names = ['C1', 'C2', 'O2', 'C3', 'O3', 'C4', 'O4', 'C5',
'C6', 'OR', 'H1', 'H2', 'H3', 'H4', 'H5', 'H61',
'H62', 'H63', 'H2o', 'H3o', 'H4o'],
bb = [],
sc = [],
bonds = [(0, 1), (0, 9), (0, 10), (1, 2), (1, 3), (1, 11),
(2, 18), (3, 4), (3, 5), (3, 12), (4, 19), (5, 6),
(5, 7), (5, 13), (6, 20), (7, 8), (7, 9), (7, 14),
(8, 15), (8, 16), (8, 17)],
offset = 21)
NAG_info = AA_info(coords=np.array([[ 0.7 , -7.06, -8.68],
[ -0.47, -6.53, -7.82],
[ -0.6 , -7.33, -6.59],
[ -1.77, -6.52, -8.68],
[ -2.86, -5.83, -8.05],
[ -1.54, -5.79, -10.02],
[ -2.72, -5.91, -10.8 ],
[ -0.32, -6.38, -10.77],
[ -0.01, -5.62, -12.08],
[ 0.03, -4.21, -11.88],
[ -1.35, -6.97, -5.53],
[ -2.06, -6. , -5.51],
[ -1.26, -7.89, -4.34],
[ 0.83, -6.29, -9.9 ],
[ 0.53, -8.1 , -8.97],
[ -0.24, -5.51, -7.51],
[ -2.09, -7.55, -8.86],
[ -1.37, -4.73, -9.8 ],
[ -0.5 , -7.43, -11. ],
[ -0.35, -8.49, -4.36],
[ 0.04, -8.12, -6.49],
[ -2.13, -8.56, -4.33],
[ -1.25, -7.3 , -3.42],
[ -2.7 , -5.79, -7.09],
[ -0.78, -5.86, -12.82],
[ 0.96, -5.95, -12.45],
[ -3.45, -5.7 , -10.19],
[ -0.89, -3.9 , -11.9 ]]),
atom_names = ['C1', 'C2', 'N2', 'C3', 'O3', 'C4', 'O4', 'C5',
'C6', 'O6', 'C7', 'O7', 'C8', 'OR', 'H1', 'H2',
'H3', 'H4', 'H5', 'H81', 'H2n', 'H82', 'H83',
'H3o', 'H6R', 'H6S', 'H4o', 'H6o'],
bb = [],
sc = [],
bonds = [(0, 1), (0, 13), (0, 14), (1, 2), (1, 3), (1, 15),
(2, 10), (2, 20), (3, 4), (3, 5), (3, 16), (4, 23),
(5, 6), (5, 7), (5, 17), (6, 26), (7, 8), (7, 13),
(7, 18), (8, 9), (8, 24), (8, 25), (9, 27), (10, 11),
(10, 12), (12, 19), (12, 21), (12, 22)],
offset = 28)
MAG_info = AA_info(coords=np.array([[ 5.63, -8.87, -7.81],
[ 6.82, -9.65, -8.03],
[ 5.68, -7.48, -8.46],
[ 6.84, -6.71, -7.97],
[ 4.37, -6.71, -8.2 ],
[ 4.45, -5.46, -8.88],
[ 3.1 , -7.58, -8.52],
[ 1.94, -6.96, -7.95],
[ 3.26, -8.98, -7.84],
[ 2.14, -10.01, -8.14],
[ 1.97, -10.17, -9.54],
[ 7.69, -6.03, -8.75],
[ 7.65, -6.05, -9.98],
[ 8.75, -5.26, -8.01],
[ 7.14, -9.94, -9.4 ],
[ 4.48, -9.59, -8.28],
[ 7.47, -9.03, -9.9 ],
[ 6.26, -10.36, -9.91],
[ 7.95, -10.67, -9.41],
[ 5.54, -8.78, -6.72],
[ 5.79, -7.59, -9.54],
[ 4.3 , -6.47, -7.13],
[ 2.96, -7.7 , -9.6 ],
[ 3.31, -8.86, -6.76],
[ 9.47, -5.95, -7.57],
[ 6.93, -6.62, -6.96],
[ 8.29, -4.67, -7.22],
[ 9.27, -4.58, -8.69],
[ 1.21, -9.7 , -7.67],
[ 2.44, -10.98, -7.71],
[ 2.82, -10.46, -9.9 ]]),
atom_names = ['C1', 'O1', 'C2', 'N2', 'C3', 'O3', 'C4', 'O4',
'C5', 'C6', 'O6', 'C7', 'O7', 'C8', 'CO1', 'OR',
'HCO1', 'HCO2', 'HCO3', 'H1', 'H2', 'H3', 'H4',
'H5', 'H81', 'H2n', 'H82', 'H83', 'H6R', 'H6S',
'H6o'],
bb = [],
sc = [],
bonds = [(0, 1), (0, 2), (0, 15), (0, 19), (1, 14), (2, 3),
(2, 4), (2, 20), (3, 11), (3, 25), (4, 5), (4, 6),
(4, 21), (6, 7), (6, 8), (6, 22), (8, 9), (8, 15),
(8, 23), (9, 10), (9, 28), (9, 29), (10, 30), (11, 12),
(11, 13), (13, 24), (13, 26), (13, 27), (14, 16),
(14, 17), (14, 18)],
offset = 31)
B6D_info = AA_info(coords=np.array([[ 1.95e+01, 3.45e+01, 1.77e+00],
[ 1.85e+01, 3.49e+01, 2.70e+00],
[ 1.99e+01, 3.57e+01, 8.50e-01],
[ 1.88e+01, 3.62e+01, 1.03e-01],
[ 2.11e+01, 3.53e+01, -5.60e-02],
[ 2.16e+01, 3.64e+01, -7.50e-01],
[ 2.23e+01, 3.47e+01, 7.93e-01],
[ 2.35e+01, 3.43e+01, -1.70e-02],
[ 2.18e+01, 3.35e+01, 1.71e+00],
[ 2.29e+01, 3.30e+01, 2.67e+00],
[ 1.80e+01, 3.55e+01, -7.76e-01],
[ 1.82e+01, 3.44e+01, -1.05e+00],
[ 1.69e+01, 3.63e+01, -1.37e+00],
[ 2.48e+01, 3.32e+01, -1.65e+00],
[ 2.35e+01, 3.33e+01, -9.47e-01],
[ 2.25e+01, 3.26e+01, -1.18e+00],
[ 2.07e+01, 3.40e+01, 2.52e+00],
[ 1.91e+01, 3.36e+01, 1.20e+00],
[ 2.03e+01, 3.65e+01, 1.51e+00],
[ 2.08e+01, 3.45e+01, -7.86e-01],
[ 2.26e+01, 3.55e+01, 1.46e+00],
[ 2.14e+01, 3.27e+01, 1.11e+00],
[ 1.86e+01, 3.72e+01, 2.98e-01],
[ 2.24e+01, 3.23e+01, 3.35e+00],
[ 2.37e+01, 3.26e+01, 2.12e+00],
[ 2.33e+01, 3.38e+01, 3.27e+00],
[ 2.43e+01, 3.49e+01, 1.15e-01],
[ 1.84e+01, 3.42e+01, 3.32e+00],
[ 1.73e+01, 3.73e+01, -1.75e+00],
[ 1.61e+01, 3.65e+01, -6.10e-01],
[ 1.64e+01, 3.58e+01, -2.20e+00],
[ 2.55e+01, 3.27e+01, -9.86e-01],
[ 2.47e+01, 3.25e+01, -2.54e+00],
[ 2.52e+01, 3.41e+01, -1.96e+00]]),
atom_names = ['C1', 'O1', 'C2', 'N2', 'C3', 'O3', 'C4', 'N4', 'C5', 'C6', 'C7', 'O7', 'C8', 'C9', 'C10', 'O10', 'OR', 'H1', 'H2', 'H3', 'H4', 'H5', 'H2N', 'H61', 'H62', 'H63', 'H4N', 'H1o', 'H81', 'H82', 'H83', 'H91', 'H92', 'H93'],
bb = [],
sc = [],
bonds = [(0, 1), (0, 2), (0, 16), (0, 17), (1, 27), (2, 3), (2, 4), (2, 18), (3, 10), (3, 22), (4, 5), (4, 6), (4, 19), (6, 7), (6, 8), (6, 20), (7, 14), (7, 26), (8, 9), (8, 16), (8, 21), (9, 23), (9, 24), (9, 25), (10, 11), (10, 12), (12, 28), (12, 29), (12, 30), (13, 14), (13, 31), (13, 32), (13, 33), (14, 15)],
offset = 34)
A2G_info = AA_info(coords=np.array([[ 2.25e+01, 3.67e+01, -2.07e+00],
[ 2.33e+01, 3.80e+01, -2.32e+00],
[ 2.43e+01, 3.82e+01, -1.26e+00],
[ 2.23e+01, 3.92e+01, -2.39e+00],
[ 2.30e+01, 4.04e+01, -2.61e+00],
[ 2.12e+01, 3.90e+01, -3.45e+00],
[ 2.18e+01, 3.89e+01, -4.76e+00],
[ 2.05e+01, 3.76e+01, -3.14e+00],
[ 1.94e+01, 3.73e+01, -4.19e+00],
[ 1.87e+01, 3.61e+01, -3.81e+00],
[ 2.54e+01, 3.75e+01, -1.15e+00],
[ 2.57e+01, 3.65e+01, -1.90e+00],
[ 2.63e+01, 3.79e+01, -2.70e-02],
[ 2.14e+01, 3.66e+01, -3.09e+00],
[ 2.31e+01, 3.58e+01, -2.09e+00],
[ 2.38e+01, 3.79e+01, -3.28e+00],
[ 2.05e+01, 3.98e+01, -3.38e+00],
[ 2.00e+01, 3.77e+01, -2.16e+00],
[ 2.41e+01, 3.89e+01, -6.34e-01],
[ 1.99e+01, 3.72e+01, -5.17e+00],
[ 1.87e+01, 3.81e+01, -4.26e+00],
[ 2.58e+01, 3.78e+01, 9.29e-01],
[ 2.67e+01, 3.89e+01, -1.90e-01],
[ 2.72e+01, 3.72e+01, 2.90e-02],
[ 1.94e+01, 3.54e+01, -3.68e+00]]),
atom_names = ['C1', 'C2', 'N2', 'C3', 'O3', 'C4', 'O4', 'C5', 'C6', 'O6', 'C7', 'O7', 'C8', 'OR', 'H1', 'H2', 'H4', 'H5', 'H2N', 'H61', 'H62', 'H81', 'H82', 'H83', 'H6o'],
bb = [],
sc = [],
bonds = [(0, 1), (0, 13), (0, 14), (1, 2), (1, 3), (1, 15), (2, 10), (2, 18), (3, 4), (3, 5), (5, 6), (5, 7), (5, 16), (7, 8), (7, 13), (7, 17), (8, 9), (8, 19), (8, 20), (9, 24), (10, 11), (10, 12), (12, 21), (12, 22), (12, 23)],
offset = 25)
BGC_info = AA_info(coords=np.array([[ 20.45, 43.74, -11.36],
[ 19.72, 45.09, -11.42],
[ 19.65, 45.66, -10.11],
[ 18.29, 44.89, -11.94],
[ 17.63, 46.15, -12.08],
[ 18.32, 44.19, -13.32],
[ 16.97, 43.96, -13.73],
[ 19.11, 42.86, -13.22],
[ 19.33, 42.2 , -14.6 ],
[ 18.08, 41.74, -15.13],
[ 20.43, 43.13, -12.67],
[ 19.97, 43.07, -10.64],
[ 20.27, 45.77, -12.08],
[ 17.73, 44.27, -11.24],
[ 18.81, 44.85, -14.05],
[ 18.59, 42.16, -12.55],
[ 18.22, 41.54, -16.07],
[ 18.99, 46.38, -10.17],
[ 16.82, 45.99, -12.59],
[ 19.77, 42.93, -15.28],
[ 20.01, 41.36, -14.49],
[ 17. , 43.31, -14.46]]),
atom_names = ['C1', 'C2', 'O2', 'C3', 'O3', 'C4', 'O4', 'C5', 'C6', 'O6', 'OR', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'H2o', 'H3o', 'H6R', 'H6S', 'H4o'],
bb = [],
sc = [],
bonds = [(0, 1), (0, 10), (0, 11), (1, 2), (1, 3), (1, 12), (2, 17), (3, 4), (3, 5), (3, 13), (4, 18), (5, 6), (5, 7), (5, 14), (6, 21), (7, 8), (7, 10), (7, 15), (8, 9), (8, 19), (8, 20), (9, 16)],
offset = 22)
# Map one-letter glycan residue codes to their AA_info coordinate templates.
templates_gl = {'B': BDP_info, 'N': NGA_info,
                'A':AFL_info, 'G':NAG_info, 'M':MAG_info,
                'D':B6D_info, 'C':A2G_info, 'F':BGC_info}
# One-letter code -> three-letter PDB residue name for the glycan templates.
one_to_three_gl = {'B': 'BDP', 'N': 'NGA',
                   'A':'AFL', 'G':'NAG', 'M':'MAG',
                   'D':'B6D', 'C':'A2G', 'F':'BGC'}
# Inverse mapping: three-letter PDB residue name -> one-letter code.
three_to_one_gl = {val: key for key, val in one_to_three_gl.items()}
| 47.37987 | 329 | 0.299459 | from collections import namedtuple
import numpy as np
"""
Templates for glycan residues
"""
AA_info = namedtuple('AA_info', 'coords atom_names bonds bb sc offset')
BDP_info = AA_info(coords=np.array([[-14.69, 10.15, -18.15],
[-15.46, 11.47, -18.33],
[-16.33, 11.35, -19.46],
[-14.45, 12.62, -18.54],
[-15.15, 13.88, -18.62],
[-13.41, 12.66, -17.39],
[-12.36, 13.57, -17.75],
[-12.76, 11.28, -17.17],
[-11.74, 11.28, -16.03],
[-10.58, 11.62, -16.19],
[-12.26, 11.06, -14.85],
[-13.8, 10.27, -17.01],
[-14.09, 9.95, -19.04],
[-16.06, 11.66, -17.43],
[-13.92, 12.44, -19.49],
[-13.9, 12.98, -16.46],
[-12.19, 11.01, -18.07],
[-16.99, 10.64, -19.31],
[-14.59, 14.56, -19.03]]),
atom_names=['C1', 'C2', 'O2', 'C3', 'O3', 'C4', 'O4', 'C5',
'C6', 'O6A', 'O6B', 'OR', 'H1', 'H2', 'H3',
'H4', 'H5', 'H2o', 'H3o'],
bb=[],
sc=[],
bonds=[(0, 1), (0, 11), (0, 12), (1, 2), (1, 3), (1, 13),
(2, 17), (3, 4), (3, 5), (3, 14), (4, 18), (5, 6),
(5, 7), (5, 15), (7, 8), (7, 11), (7, 16), (8, 9),
(8, 10)],
offset=19)
NGA_info = AA_info(coords=np.array([[-15., 5.88, -16.15],
[-15.3, 7.39, -16.33],
[-16.63, 7.71, -15.78],
[-15.24, 7.7, -17.85],
[-15.52, 9.08, -18.1],
[-13.87, 7.26, -18.45],
[-12.73, 7.93, -17.9],
[-13.71, 5.74, -18.23],
[-12.4, 5.17, -18.83],
[-12.36, 3.77, -18.58],
[-17.03, 8.94, -15.35],
[-16.27, 9.85, -15.15],
[-18.54, 9.06, -15.17],
[-13.75, 5.49, -16.79],
[-17.3, 6.97, -15.77],
[-15.8, 5.34, -16.66],
[-14.56, 7.98, -15.81],
[-16.02, 7.14, -18.36],
[-13.91, 7.52, -19.51],
[-14.54, 5.23, -18.71],
[-19.05, 8.97, -16.13],
[-18.78, 10.05, -14.75],
[-18.91, 8.29, -14.49],
[-12.37, 5.36, -19.91],
[-11.52, 5.64, -18.37],
[-12.52, 7.65, -16.98],
[-12.35, 3.64, -17.61]]),
atom_names=['C1', 'C2', 'N2', 'C3', 'O3', 'C4', 'O4', 'C5',
'C6', 'O6', 'C7', 'O7', 'C8', 'OR', 'HN2', 'H1',
'H2', 'H3', 'H4', 'H5', 'H81', 'H82', 'H83',
'H6R', 'H6S', 'H4o', 'H6o'],
bb=[],
sc=[],
bonds=[(0, 1), (0, 13), (0, 15), (1, 2), (1, 3), (1, 16),
(2, 10), (2, 14), (3, 4), (3, 5), (3, 17), (5, 6),
(5, 7), (5, 18), (6, 25), (7, 8), (7, 13), (7, 19),
(8, 9), (8, 23), (8, 24), (9, 26), (10, 11), (10, 12),
(12, 20), (12, 21), (12, 22)],
offset=27)
AFL_info = AA_info(coords=np.array([[ 3.54, -4.94, -9.76],
[ 3.83, -3.48, -10.16],
[ 3.92, -2.65, -9.01],
[ 5.17, -3.41, -10.92],
[ 5.41, -2.05, -11.31],
[ 5.13, -4.33, -12.16],
[ 4.22, -3.76, -13.11],
[ 4.71, -5.76, -11.76],
[ 4.44, -6.66, -12.99],
[ 3.49, -5.74, -10.97],
[ 2.58, -5. , -9.26],
[ 3.03, -3.12, -10.81],
[ 5.98, -3.74, -10.26],
[ 6.13, -4.36, -12.61],
[ 5.5 , -6.22, -11.15],
[ 5.32, -6.68, -13.65],
[ 4.23, -7.68, -12.67],
[ 3.59, -6.29, -13.56],
[ 4.24, -1.79, -9.36],
[ 6.3 , -2.01, -11.67],
[ 4.27, -2.8 , -13. ]]),
atom_names = ['C1', 'C2', 'O2', 'C3', 'O3', 'C4', 'O4', 'C5',
'C6', 'OR', 'H1', 'H2', 'H3', 'H4', 'H5', 'H61',
'H62', 'H63', 'H2o', 'H3o', 'H4o'],
bb = [],
sc = [],
bonds = [(0, 1), (0, 9), (0, 10), (1, 2), (1, 3), (1, 11),
(2, 18), (3, 4), (3, 5), (3, 12), (4, 19), (5, 6),
(5, 7), (5, 13), (6, 20), (7, 8), (7, 9), (7, 14),
(8, 15), (8, 16), (8, 17)],
offset = 21)
NAG_info = AA_info(coords=np.array([[ 0.7 , -7.06, -8.68],
[ -0.47, -6.53, -7.82],
[ -0.6 , -7.33, -6.59],
[ -1.77, -6.52, -8.68],
[ -2.86, -5.83, -8.05],
[ -1.54, -5.79, -10.02],
[ -2.72, -5.91, -10.8 ],
[ -0.32, -6.38, -10.77],
[ -0.01, -5.62, -12.08],
[ 0.03, -4.21, -11.88],
[ -1.35, -6.97, -5.53],
[ -2.06, -6. , -5.51],
[ -1.26, -7.89, -4.34],
[ 0.83, -6.29, -9.9 ],
[ 0.53, -8.1 , -8.97],
[ -0.24, -5.51, -7.51],
[ -2.09, -7.55, -8.86],
[ -1.37, -4.73, -9.8 ],
[ -0.5 , -7.43, -11. ],
[ -0.35, -8.49, -4.36],
[ 0.04, -8.12, -6.49],
[ -2.13, -8.56, -4.33],
[ -1.25, -7.3 , -3.42],
[ -2.7 , -5.79, -7.09],
[ -0.78, -5.86, -12.82],
[ 0.96, -5.95, -12.45],
[ -3.45, -5.7 , -10.19],
[ -0.89, -3.9 , -11.9 ]]),
atom_names = ['C1', 'C2', 'N2', 'C3', 'O3', 'C4', 'O4', 'C5',
'C6', 'O6', 'C7', 'O7', 'C8', 'OR', 'H1', 'H2',
'H3', 'H4', 'H5', 'H81', 'H2n', 'H82', 'H83',
'H3o', 'H6R', 'H6S', 'H4o', 'H6o'],
bb = [],
sc = [],
bonds = [(0, 1), (0, 13), (0, 14), (1, 2), (1, 3), (1, 15),
(2, 10), (2, 20), (3, 4), (3, 5), (3, 16), (4, 23),
(5, 6), (5, 7), (5, 17), (6, 26), (7, 8), (7, 13),
(7, 18), (8, 9), (8, 24), (8, 25), (9, 27), (10, 11),
(10, 12), (12, 19), (12, 21), (12, 22)],
offset = 28)
MAG_info = AA_info(coords=np.array([[ 5.63, -8.87, -7.81],
[ 6.82, -9.65, -8.03],
[ 5.68, -7.48, -8.46],
[ 6.84, -6.71, -7.97],
[ 4.37, -6.71, -8.2 ],
[ 4.45, -5.46, -8.88],
[ 3.1 , -7.58, -8.52],
[ 1.94, -6.96, -7.95],
[ 3.26, -8.98, -7.84],
[ 2.14, -10.01, -8.14],
[ 1.97, -10.17, -9.54],
[ 7.69, -6.03, -8.75],
[ 7.65, -6.05, -9.98],
[ 8.75, -5.26, -8.01],
[ 7.14, -9.94, -9.4 ],
[ 4.48, -9.59, -8.28],
[ 7.47, -9.03, -9.9 ],
[ 6.26, -10.36, -9.91],
[ 7.95, -10.67, -9.41],
[ 5.54, -8.78, -6.72],
[ 5.79, -7.59, -9.54],
[ 4.3 , -6.47, -7.13],
[ 2.96, -7.7 , -9.6 ],
[ 3.31, -8.86, -6.76],
[ 9.47, -5.95, -7.57],
[ 6.93, -6.62, -6.96],
[ 8.29, -4.67, -7.22],
[ 9.27, -4.58, -8.69],
[ 1.21, -9.7 , -7.67],
[ 2.44, -10.98, -7.71],
[ 2.82, -10.46, -9.9 ]]),
atom_names = ['C1', 'O1', 'C2', 'N2', 'C3', 'O3', 'C4', 'O4',
'C5', 'C6', 'O6', 'C7', 'O7', 'C8', 'CO1', 'OR',
'HCO1', 'HCO2', 'HCO3', 'H1', 'H2', 'H3', 'H4',
'H5', 'H81', 'H2n', 'H82', 'H83', 'H6R', 'H6S',
'H6o'],
bb = [],
sc = [],
bonds = [(0, 1), (0, 2), (0, 15), (0, 19), (1, 14), (2, 3),
(2, 4), (2, 20), (3, 11), (3, 25), (4, 5), (4, 6),
(4, 21), (6, 7), (6, 8), (6, 22), (8, 9), (8, 15),
(8, 23), (9, 10), (9, 28), (9, 29), (10, 30), (11, 12),
(11, 13), (13, 24), (13, 26), (13, 27), (14, 16),
(14, 17), (14, 18)],
offset = 31)
B6D_info = AA_info(coords=np.array([[ 1.95e+01, 3.45e+01, 1.77e+00],
[ 1.85e+01, 3.49e+01, 2.70e+00],
[ 1.99e+01, 3.57e+01, 8.50e-01],
[ 1.88e+01, 3.62e+01, 1.03e-01],
[ 2.11e+01, 3.53e+01, -5.60e-02],
[ 2.16e+01, 3.64e+01, -7.50e-01],
[ 2.23e+01, 3.47e+01, 7.93e-01],
[ 2.35e+01, 3.43e+01, -1.70e-02],
[ 2.18e+01, 3.35e+01, 1.71e+00],
[ 2.29e+01, 3.30e+01, 2.67e+00],
[ 1.80e+01, 3.55e+01, -7.76e-01],
[ 1.82e+01, 3.44e+01, -1.05e+00],
[ 1.69e+01, 3.63e+01, -1.37e+00],
[ 2.48e+01, 3.32e+01, -1.65e+00],
[ 2.35e+01, 3.33e+01, -9.47e-01],
[ 2.25e+01, 3.26e+01, -1.18e+00],
[ 2.07e+01, 3.40e+01, 2.52e+00],
[ 1.91e+01, 3.36e+01, 1.20e+00],
[ 2.03e+01, 3.65e+01, 1.51e+00],
[ 2.08e+01, 3.45e+01, -7.86e-01],
[ 2.26e+01, 3.55e+01, 1.46e+00],
[ 2.14e+01, 3.27e+01, 1.11e+00],
[ 1.86e+01, 3.72e+01, 2.98e-01],
[ 2.24e+01, 3.23e+01, 3.35e+00],
[ 2.37e+01, 3.26e+01, 2.12e+00],
[ 2.33e+01, 3.38e+01, 3.27e+00],
[ 2.43e+01, 3.49e+01, 1.15e-01],
[ 1.84e+01, 3.42e+01, 3.32e+00],
[ 1.73e+01, 3.73e+01, -1.75e+00],
[ 1.61e+01, 3.65e+01, -6.10e-01],
[ 1.64e+01, 3.58e+01, -2.20e+00],
[ 2.55e+01, 3.27e+01, -9.86e-01],
[ 2.47e+01, 3.25e+01, -2.54e+00],
[ 2.52e+01, 3.41e+01, -1.96e+00]]),
atom_names = ['C1', 'O1', 'C2', 'N2', 'C3', 'O3', 'C4', 'N4', 'C5', 'C6', 'C7', 'O7', 'C8', 'C9', 'C10', 'O10', 'OR', 'H1', 'H2', 'H3', 'H4', 'H5', 'H2N', 'H61', 'H62', 'H63', 'H4N', 'H1o', 'H81', 'H82', 'H83', 'H91', 'H92', 'H93'],
bb = [],
sc = [],
bonds = [(0, 1), (0, 2), (0, 16), (0, 17), (1, 27), (2, 3), (2, 4), (2, 18), (3, 10), (3, 22), (4, 5), (4, 6), (4, 19), (6, 7), (6, 8), (6, 20), (7, 14), (7, 26), (8, 9), (8, 16), (8, 21), (9, 23), (9, 24), (9, 25), (10, 11), (10, 12), (12, 28), (12, 29), (12, 30), (13, 14), (13, 31), (13, 32), (13, 33), (14, 15)],
offset = 34)
A2G_info = AA_info(coords=np.array([[ 2.25e+01, 3.67e+01, -2.07e+00],
[ 2.33e+01, 3.80e+01, -2.32e+00],
[ 2.43e+01, 3.82e+01, -1.26e+00],
[ 2.23e+01, 3.92e+01, -2.39e+00],
[ 2.30e+01, 4.04e+01, -2.61e+00],
[ 2.12e+01, 3.90e+01, -3.45e+00],
[ 2.18e+01, 3.89e+01, -4.76e+00],
[ 2.05e+01, 3.76e+01, -3.14e+00],
[ 1.94e+01, 3.73e+01, -4.19e+00],
[ 1.87e+01, 3.61e+01, -3.81e+00],
[ 2.54e+01, 3.75e+01, -1.15e+00],
[ 2.57e+01, 3.65e+01, -1.90e+00],
[ 2.63e+01, 3.79e+01, -2.70e-02],
[ 2.14e+01, 3.66e+01, -3.09e+00],
[ 2.31e+01, 3.58e+01, -2.09e+00],
[ 2.38e+01, 3.79e+01, -3.28e+00],
[ 2.05e+01, 3.98e+01, -3.38e+00],
[ 2.00e+01, 3.77e+01, -2.16e+00],
[ 2.41e+01, 3.89e+01, -6.34e-01],
[ 1.99e+01, 3.72e+01, -5.17e+00],
[ 1.87e+01, 3.81e+01, -4.26e+00],
[ 2.58e+01, 3.78e+01, 9.29e-01],
[ 2.67e+01, 3.89e+01, -1.90e-01],
[ 2.72e+01, 3.72e+01, 2.90e-02],
[ 1.94e+01, 3.54e+01, -3.68e+00]]),
atom_names = ['C1', 'C2', 'N2', 'C3', 'O3', 'C4', 'O4', 'C5', 'C6', 'O6', 'C7', 'O7', 'C8', 'OR', 'H1', 'H2', 'H4', 'H5', 'H2N', 'H61', 'H62', 'H81', 'H82', 'H83', 'H6o'],
bb = [],
sc = [],
bonds = [(0, 1), (0, 13), (0, 14), (1, 2), (1, 3), (1, 15), (2, 10), (2, 18), (3, 4), (3, 5), (5, 6), (5, 7), (5, 16), (7, 8), (7, 13), (7, 17), (8, 9), (8, 19), (8, 20), (9, 24), (10, 11), (10, 12), (12, 21), (12, 22), (12, 23)],
offset = 25)
BGC_info = AA_info(coords=np.array([[ 20.45, 43.74, -11.36],
[ 19.72, 45.09, -11.42],
[ 19.65, 45.66, -10.11],
[ 18.29, 44.89, -11.94],
[ 17.63, 46.15, -12.08],
[ 18.32, 44.19, -13.32],
[ 16.97, 43.96, -13.73],
[ 19.11, 42.86, -13.22],
[ 19.33, 42.2 , -14.6 ],
[ 18.08, 41.74, -15.13],
[ 20.43, 43.13, -12.67],
[ 19.97, 43.07, -10.64],
[ 20.27, 45.77, -12.08],
[ 17.73, 44.27, -11.24],
[ 18.81, 44.85, -14.05],
[ 18.59, 42.16, -12.55],
[ 18.22, 41.54, -16.07],
[ 18.99, 46.38, -10.17],
[ 16.82, 45.99, -12.59],
[ 19.77, 42.93, -15.28],
[ 20.01, 41.36, -14.49],
[ 17. , 43.31, -14.46]]),
atom_names = ['C1', 'C2', 'O2', 'C3', 'O3', 'C4', 'O4', 'C5', 'C6', 'O6', 'OR', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'H2o', 'H3o', 'H6R', 'H6S', 'H4o'],
bb = [],
sc = [],
bonds = [(0, 1), (0, 10), (0, 11), (1, 2), (1, 3), (1, 12), (2, 17), (3, 4), (3, 5), (3, 13), (4, 18), (5, 6), (5, 7), (5, 14), (6, 21), (7, 8), (7, 10), (7, 15), (8, 9), (8, 19), (8, 20), (9, 16)],
offset = 22)
# One-letter sugar/residue code -> AA_info geometry template defined above.
templates_gl = {'B': BDP_info, 'N': NGA_info,
'A':AFL_info, 'G':NAG_info, 'M':MAG_info,
'D':B6D_info, 'C':A2G_info, 'F':BGC_info}
# One-letter sugar codes and their three-letter residue names, plus the
# inverse lookup derived from the forward map.
one_to_three_gl = {
    'B': 'BDP',
    'N': 'NGA',
    'A': 'AFL',
    'G': 'NAG',
    'M': 'MAG',
    'D': 'B6D',
    'C': 'A2G',
    'F': 'BGC',
}
three_to_one_gl = {three: one for one, three in one_to_three_gl.items()}
| 0 | 0 | 0 |
093a1b57300718525312eed9124b9ef5739f331a | 2,761 | py | Python | model.py | haizeigh/TransformerTest | 7e0f6b6590c90ac45d749077c4b329bb6b2dacca | [
"MIT"
] | 39 | 2020-05-02T14:45:00.000Z | 2022-03-30T08:32:19.000Z | model.py | haizeigh/TransformerTest | 7e0f6b6590c90ac45d749077c4b329bb6b2dacca | [
"MIT"
] | null | null | null | model.py | haizeigh/TransformerTest | 7e0f6b6590c90ac45d749077c4b329bb6b2dacca | [
"MIT"
] | 11 | 2020-05-17T06:51:38.000Z | 2022-01-23T04:17:43.000Z | import torch
from torch import nn
import math
| 37.821918 | 110 | 0.623687 | import torch
from torch import nn
import math
class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal position embeddings, then apply dropout.

    The lookup table has shape (max_len, 1, d_model) so it broadcasts over
    the batch dimension of a (seq, batch, d_model) input.
    """

    def __init__(self, d_model, dropout=0.1, max_len=100):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric progression of inverse wavelengths, one per even channel.
        inv_freq = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * inv_freq)  # even channels: sine
        table[:, 1::2] = torch.cos(positions * inv_freq)  # odd channels: cosine
        # Registered as a buffer: moves with .to(device) but is not trained.
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        """Return dropout(x + table rows for the first x.size(0) positions)."""
        return self.dropout(x + self.pe[:x.size(0), :])
class TransformerModel(nn.Module):
    """Seq2seq model: token embeddings + sinusoidal positions + nn.Transformer.

    Inputs/outputs use the (seq, batch) layout expected by nn.Transformer.
    Fix: the original final line had dataset-export columns fused onto
    ``return output``, which made the class a syntax error.
    """

    def __init__(self, intoken, outtoken, hidden, nlayers=3, dropout=0.1):
        super(TransformerModel, self).__init__()
        nhead = hidden // 64  # one attention head per 64 hidden units

        self.encoder = nn.Embedding(intoken, hidden)
        self.pos_encoder = PositionalEncoding(hidden, dropout)

        self.decoder = nn.Embedding(outtoken, hidden)
        self.pos_decoder = PositionalEncoding(hidden, dropout)

        # NOTE(review): inscale/outscale are computed but never applied to
        # the embeddings -- confirm whether scaling was intended.
        self.inscale = math.sqrt(intoken)
        self.outscale = math.sqrt(outtoken)

        self.transformer = nn.Transformer(d_model=hidden, nhead=nhead, num_encoder_layers=nlayers,
                                          num_decoder_layers=nlayers, dim_feedforward=hidden, dropout=dropout)
        self.fc_out = nn.Linear(hidden, outtoken)

        self.src_mask = None
        self.trg_mask = None
        self.memory_mask = None

    def generate_square_subsequent_mask(self, sz):
        """Causal mask: position i gets -inf for every position j > i."""
        mask = torch.triu(torch.ones(sz, sz), 1)
        mask = mask.masked_fill(mask == 1, float('-inf'))
        return mask

    def make_len_mask(self, inp):
        """Boolean padding mask, True where token id == 0; (seq, batch) -> (batch, seq)."""
        return (inp == 0).transpose(0, 1)

    def forward(self, src, trg):
        """Run encoder/decoder; src and trg are (seq, batch) integer tensors.

        Returns logits of shape (trg_seq, batch, outtoken).
        """
        # Cache the causal mask; rebuild only when the target length changes.
        if self.trg_mask is None or self.trg_mask.size(0) != len(trg):
            self.trg_mask = self.generate_square_subsequent_mask(len(trg)).to(trg.device)

        # NOTE(review): the padding masks are computed but not passed to the
        # transformer (the call that used them is commented out below).
        src_pad_mask = self.make_len_mask(src)
        trg_pad_mask = self.make_len_mask(trg)

        src = self.encoder(src)
        src = self.pos_encoder(src)
        trg = self.decoder(trg)
        trg = self.pos_decoder(trg)

        output = self.transformer(src, trg, tgt_mask=self.trg_mask)
        # output = self.transformer(src, trg, src_mask=self.src_mask, tgt_mask=self.trg_mask,
        #                           memory_mask=self.memory_mask,
        #                           src_key_padding_mask=src_pad_mask, tgt_key_padding_mask=trg_pad_mask,
        #                           memory_key_padding_mask=src_pad_mask)
        output = self.fc_out(output)
        return output
5fbca564b6fc867c711e32a5b0b9d483f86650a4 | 694 | py | Python | src/coalescenceml/metadata_store/__init__.py | CornellDataScience/CoalescenceML | 6dd849b272c77011719952b47d5b55684d90733a | [
"Apache-2.0"
] | 1 | 2022-03-22T17:48:55.000Z | 2022-03-22T17:48:55.000Z | src/coalescenceml/metadata_store/__init__.py | CornellDataScience/CoalescenceML | 6dd849b272c77011719952b47d5b55684d90733a | [
"Apache-2.0"
] | 2 | 2022-02-18T18:48:12.000Z | 2022-02-19T18:14:38.000Z | src/coalescenceml/metadata_store/__init__.py | CornellDataScience/CoalescenceML | 6dd849b272c77011719952b47d5b55684d90733a | [
"Apache-2.0"
] | 1 | 2022-02-10T02:52:22.000Z | 2022-02-10T02:52:22.000Z | """
## Metadata Store
The configuration of each pipeline, step, backend, and produced artifacts are
all tracked within the metadata store. The metadata store is an SQL database,
and can be `sqlite` or `mysql`.
Metadata are the pieces of information tracked about the pipelines, experiments
and configurations that you are running with CoML.
"""
from coalescenceml.metadata_store.base_metadata_store import BaseMetadataStore
from coalescenceml.metadata_store.mysql_metadata_store import MySQLMetadataStore
from coalescenceml.metadata_store.sqlite_metadata_store import (
SQLiteMetadataStore,
)
__all__ = [
"BaseMetadataStore",
"MySQLMetadataStore",
"SQLiteMetadataStore",
]
| 30.173913 | 80 | 0.802594 | """
## Metadata Store
The configuration of each pipeline, step, backend, and produced artifacts are
all tracked within the metadata store. The metadata store is an SQL database,
and can be `sqlite` or `mysql`.
Metadata are the pieces of information tracked about the pipelines, experiments
and configurations that you are running with CoML.
"""
from coalescenceml.metadata_store.base_metadata_store import BaseMetadataStore
from coalescenceml.metadata_store.mysql_metadata_store import MySQLMetadataStore
from coalescenceml.metadata_store.sqlite_metadata_store import (
SQLiteMetadataStore,
)
__all__ = [
"BaseMetadataStore",
"MySQLMetadataStore",
"SQLiteMetadataStore",
]
| 0 | 0 | 0 |
6da9028e02ef5241c0c4f065efb85de39f513eba | 2,497 | py | Python | features/tests/test_service.py | wkevina/feature-requests-app | 6580f5eced4a4f3322da1ce3ab2803e0c57938ac | [
"MIT"
] | null | null | null | features/tests/test_service.py | wkevina/feature-requests-app | 6580f5eced4a4f3322da1ce3ab2803e0c57938ac | [
"MIT"
] | null | null | null | features/tests/test_service.py | wkevina/feature-requests-app | 6580f5eced4a4f3322da1ce3ab2803e0c57938ac | [
"MIT"
] | null | null | null | from django.test import TestCase
from ..service import shift_client_priority
from ..models import Client, ProductArea, FeatureRequest
class TestShiftClientPriority(TestCase):
    """Test service.shift_client_priority"""
    # NOTE(review): this copy of the class references self.client / self.area
    # but defines no setUp() creating them, so every test would fail with
    # AttributeError. A setUp creating a Client and a ProductArea appears to
    # be missing (a complete copy of this class exists later in this file).
    def test_basic(self):
        """Should shift all priorities up by one"""
        # Add 10 features with ascending priority, store id
        added = [add_feature(self.client, self.area, client_priority=i).id
                 for i in range(1, 11)]
        query = FeatureRequest.objects.filter(client=self.client)
        # Shift priorities to free up 1
        shift_client_priority(1, query)
        # Actual list of [(id, client_priority) ... ]
        priorities = query.values_list('pk', 'client_priority')\
            .order_by('pk')
        # Expected [(id, client_priority) ... ]
        should_be = list(zip(added, range(2, 12)))
        self.assertEqual(list(priorities), should_be)
    def test_find_gap(self):
        """Should find gap in sequence, minimizing affected models"""
        # Make contiguous range
        for i in range(1, 11):
            add_feature(self.client, self.area, client_priority=i)
        # Add more, leaving a gap at 11
        for i in range(12, 15):
            add_feature(self.client, self.area, client_priority=i)
        query = FeatureRequest.objects.filter(client=self.client)
        shift_client_priority(1, query)
        priorities = query.values_list('client_priority', flat=True)\
            .order_by('client_priority')
        assert list(priorities) == list(range(2, 15))
    def test_returns_modified_models(self):
        """Should return list of all models that were modified"""
        # Set up data
        models = [add_feature(self.client, self.area, client_priority=i)
                  for i in range(1,10)]
        query = FeatureRequest.objects.filter(client=self.client)
        modified = shift_client_priority(1, query)
        self.assertCountEqual(models, modified)
| 32.428571 | 74 | 0.629555 | from django.test import TestCase
from ..service import shift_client_priority
from ..models import Client, ProductArea, FeatureRequest
def add_feature(client, product_area, **kwargs):
    """Create and return a FeatureRequest fixture.

    target_date defaults to '2016-1-1'; any keyword argument (e.g.
    client_priority=3) overrides the defaults.
    """
    params = {
        'client': client,
        'product_area': product_area,
        'target_date': '2016-1-1',
        **kwargs,  # caller-supplied fields win over the defaults
    }
    return FeatureRequest.objects.create(**params)
class TestShiftClientPriority(TestCase):
    """Test service.shift_client_priority"""
    # NOTE(review): self.client shadows django.test.TestCase's built-in HTTP
    # test-client attribute; harmless here since no view is exercised -- verify.
    def setUp(self):
        # Prepare a Client and ProductArea for our use
        self.client = Client.objects.create(name='test client')
        self.area = ProductArea.objects.create(name='test area')
    def test_basic(self):
        """Should shift all priorities up by one"""
        # Add 10 features with ascending priority, store id
        added = [add_feature(self.client, self.area, client_priority=i).id
                 for i in range(1, 11)]
        query = FeatureRequest.objects.filter(client=self.client)
        # Shift priorities to free up 1
        shift_client_priority(1, query)
        # Actual list of [(id, client_priority) ... ]
        priorities = query.values_list('pk', 'client_priority')\
            .order_by('pk')
        # Expected [(id, client_priority) ... ]
        should_be = list(zip(added, range(2, 12)))
        self.assertEqual(list(priorities), should_be)
    def test_find_gap(self):
        """Should find gap in sequence, minimizing affected models"""
        # Make contiguous range
        for i in range(1, 11):
            add_feature(self.client, self.area, client_priority=i)
        # Add more, leaving a gap at 11
        for i in range(12, 15):
            add_feature(self.client, self.area, client_priority=i)
        query = FeatureRequest.objects.filter(client=self.client)
        shift_client_priority(1, query)
        priorities = query.values_list('client_priority', flat=True)\
            .order_by('client_priority')
        assert list(priorities) == list(range(2, 15))
    def test_returns_modified_models(self):
        """Should return list of all models that were modified"""
        # Set up data
        models = [add_feature(self.client, self.area, client_priority=i)
                  for i in range(1,10)]
        query = FeatureRequest.objects.filter(client=self.client)
        modified = shift_client_priority(1, query)
        self.assertCountEqual(models, modified)
| 395 | 0 | 50 |
92db293498fd8f1ab38064f3db1404cc4729b205 | 4,861 | py | Python | PublicWebServicesAPI_AND_servercommandScripts/simpleTopUpBalance/simpleTopUpBalance.py | PaperCutSoftware/PaperCutExamples | 392f284939bb498d621600c68a997a8d3559bc77 | [
"MIT"
] | 59 | 2015-10-02T18:42:19.000Z | 2022-01-01T09:37:26.000Z | PublicWebServicesAPI_AND_servercommandScripts/simpleTopUpBalance/simpleTopUpBalance.py | PaperCutSoftware/PaperCutExamples | 392f284939bb498d621600c68a997a8d3559bc77 | [
"MIT"
] | 16 | 2016-06-16T06:42:49.000Z | 2021-08-31T22:49:59.000Z | PublicWebServicesAPI_AND_servercommandScripts/simpleTopUpBalance/simpleTopUpBalance.py | PaperCutSoftware/PaperCutExamples | 392f284939bb498d621600c68a997a8d3559bc77 | [
"MIT"
] | 20 | 2015-09-09T16:27:33.000Z | 2020-11-12T18:17:36.000Z | #!/usr/bin/env python
# Small web app to allow a user to top up their personal PaperCut balance
# Add a custom URL to the PaperCut user web page, which is used by end users
# when they want to add credit to their PaperCut personal account. The url
# should refer to this small web app When the user clicks on the URL link
# (in the PaperCut user web page) to the web app, the user identification details
# are passed as part of the URL. This is explained at:
# https://www.papercut.com/products/ng/manual/common/topics/customize-user-web-pages.html#customize-user-web-pages-nav-links
# The URL neeeds to something like http://localhost:8081/simpleTopUpBalance/?user=%user%&return_url=%return_url%
# Generally additional security should be provided. For example if the URL is http://localhost:8081/promptForPassword/?user=%user%&return_url=%return_url%
# then the user will need to enter their PaperCut password to access the payment system
# Handy Tip: By default the link will open in a separate winodow. You can edit the advanced config property user.web.custom-links and
# change "_body" to "_self". You should then use the %return_url% to return the user to the PaperCut MF/NG web interface
# This code is a basic example only. It should not be used for production
import xmlrpc.client
import sys
from json import load as loadjs
import logging
import traceback
# Bottle does not depend on any external libraries.
# You can just download bottle.py into your project directory and using
# $ wget http://bottlepy.org/bottle.py
from bottle import route, run, template, request, debug, response
# Prefer HTTPS connection
# If not localhost then this address will need to be whitelisted in PaperCut
host = "http://localhost:9191/rpc/api/xmlrpc"
auth = "token" # Value defined in advanced config property "auth.webservices.auth-token". Should be random
proxy = xmlrpc.client.ServerProxy(host)
# For more information on this user database file refer to the custom auth and sync demo
paperCutAccountInfoFile = 'c:\\Program Files\\PaperCut MF\\server\\custom\\config.json'
paperCutAccountData = {}
# The user is sent back to the Summary page as if they had just logged in,
# assuming their session has not timed out
# Therefore return url should be consistent
redirect_url = ''
@route('/')
@route('/promptForPassword/')
@route('/simpleTopUpBalance/', method='GET')
@route("/topUp/")
try:
with open(paperCutAccountInfoFile) as f:
paperCutAccountData = loadjs(f)
except OSError:
paperCutAccountData = None
run(host='localhost', port=8081, debug=True, reloader=True)
| 38.275591 | 154 | 0.727628 | #!/usr/bin/env python
# Small web app to allow a user to top up their personal PaperCut balance
# Add a custom URL to the PaperCut user web page, which is used by end users
# when they want to add credit to their PaperCut personal account. The url
# should refer to this small web app When the user clicks on the URL link
# (in the PaperCut user web page) to the web app, the user identification details
# are passed as part of the URL. This is explained at:
# https://www.papercut.com/products/ng/manual/common/topics/customize-user-web-pages.html#customize-user-web-pages-nav-links
# The URL neeeds to something like http://localhost:8081/simpleTopUpBalance/?user=%user%&return_url=%return_url%
# Generally additional security should be provided. For example if the URL is http://localhost:8081/promptForPassword/?user=%user%&return_url=%return_url%
# then the user will need to enter their PaperCut password to access the payment system
# Handy Tip: By default the link will open in a separate winodow. You can edit the advanced config property user.web.custom-links and
# change "_body" to "_self". You should then use the %return_url% to return the user to the PaperCut MF/NG web interface
# This code is a basic example only. It should not be used for production
import xmlrpc.client
import sys
from json import load as loadjs
import logging
import traceback
# Bottle does not depend on any external libraries.
# You can just download bottle.py into your project directory and using
# $ wget http://bottlepy.org/bottle.py
from bottle import route, run, template, request, debug, response
# Prefer HTTPS connection
# If not localhost then this address will need to be whitelisted in PaperCut
host = "http://localhost:9191/rpc/api/xmlrpc"
auth = "token" # Value defined in advanced config property "auth.webservices.auth-token". Should be random
proxy = xmlrpc.client.ServerProxy(host)
# For more information on this user database file refer to the custom auth and sync demo
paperCutAccountInfoFile = 'c:\\Program Files\\PaperCut MF\\server\\custom\\config.json'
paperCutAccountData = {}
# The user is sent back to the Summary page as if they had just logged in,
# assuming their session has not timed out
# Therefore return url should be consistent
redirect_url = ''
@route('/')
def wrongUrl():
    """Catch-all for the bare root URL: users must arrive via the PaperCut link."""
    return("Please log into PaperCut and set top up your account from there")
@route('/promptForPassword/')
def prompForPassword():
    """Show the password prompt for ?user=...&return_url=....

    Rejects the request early when the user is empty or unknown to PaperCut.
    NOTE(review): the function name is missing a 't' ("prompFor...");
    harmless, since it is only reached through the @route decorator.
    """
    user = request.query.user or ""
    try:
        if len(user) == 0 or not proxy.api.isUserExists(auth, user):
            return( "Can't find user {}".format(user))
    except Exception as e:
        # XML-RPC failure: log it and fall through to the prompt anyway
        # (best effort -- the password is validated on the next request).
        logging.error(traceback.format_exc())
    return_url = request.query.return_url or ""
    return template( 'promptForPassword', user=user, return_url=return_url)
@route('/simpleTopUpBalance/', method='GET')
def promptUser():
    """Validate the submitted password, then show the deposit form.

    When no local account file was loaded (paperCutAccountData is None)
    the password check is skipped entirely.
    """
    user = request.query.user or ""
    return_url = request.query.return_url or ""
    password = request.query.password or ""
    # NOTE(review): a user missing from the JSON file raises KeyError here.
    if paperCutAccountData is None or paperCutAccountData['userdata'][user]['password'] == password:
        return template('promptForDeposit',user=user, return_url=return_url)
    # Password validation failed
    return template( 'promptForPassword', user=user, error_text="Invalid password entered", return_url=return_url)
@route("/topUp/")
def topUp(method="GET"):
return_url = request.query.return_url or None
if request.query.cancel == "cancel":
if return_url is None:
return "Cancelled. Please close this tab/window and return to PaperCut"
else:
response.set_header("Refresh", "5; url={}".format(return_url))
return "Cancelled. You will be returned to PaperCut in 5s"
user = request.query.user
amount = float(request.query.amount)
if not amount > 0.0: # Example of data validation -- not used because our form already does this one
return template('promptForDeposit',user=user, return_url=return_url, error_text="Invalid amount \"{}\" entered".format(amount))
proxy.api.adjustUserAccountBalance(
auth, user, amount, "Money added by the Simple Top Up Page")
if len(return_url) == 0:
return "Updated balance is now {}<br><br>Please close this tab/window and return to PaperCut".format(
proxy.api.getUserAccountBalance(auth,user))
# Add refresh with 5s timeout back to PaperCut MF/NG
response.set_header("Refresh", "5; url={}".format(return_url))
return "Updated balance is now {}<br><br>You will be returned to PaperCcut in 5s".format(
proxy.api.getUserAccountBalance(auth,user))
# Load the optional local account database at startup; if the file is
# missing, paperCutAccountData becomes None and promptUser skips the
# password check entirely.
try:
    with open(paperCutAccountInfoFile) as f:
        paperCutAccountData = loadjs(f)
except OSError:
    paperCutAccountData = None
# reloader=True restarts the app when the source changes (development use).
run(host='localhost', port=8081, debug=True, reloader=True)
| 2,174 | 0 | 88 |
371d8878b031f08785e6952cc7b14ad180d14add | 2,226 | py | Python | generate.py | jseguillon/get-awx | 8c3c4be2f53d9bc1e1d1556117190c7619134acc | [
"MIT"
] | null | null | null | generate.py | jseguillon/get-awx | 8c3c4be2f53d9bc1e1d1556117190c7619134acc | [
"MIT"
] | null | null | null | generate.py | jseguillon/get-awx | 8c3c4be2f53d9bc1e1d1556117190c7619134acc | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os, yaml, json, shlex, datetime
import logging
logging.basicConfig(level=logging.INFO)
from jinja2 import Environment, FileSystemLoader
from datetime import datetime
from shutil import copyfile
# Mimic Ansible addional Jinja filter
# Prepare Jinja envs
env = Environment(loader = FileSystemLoader("/awx"), trim_blocks=True, lstrip_blocks=True)
env.filters['quote'] = quote # Register quote filer
# put local config to root Jinja
copyfile("/opt/local/config/values.yml", "/awx/config/values.yml")
logging.info("Generating values")
# Load default config values
default_config_src = yaml.safe_load(open("/awx/config/default.values.yml.j2"))
# Config file itself is a template => render it
template = env.get_template("config/default.values.yml.j2")
data = template.render(default_config_src)
logging.debug("default values render : ")
logging.debug(data)
default_config_src = yaml.safe_load(data)
# Load config values
config_src = yaml.safe_load(open("/awx/config/values.yml"))
# Config file itself may be a template => render it
template = env.get_template("config/values.yml")
data = template.render(config_src)
logging.debug("values render : ")
logging.debug(data)
config_src = yaml.safe_load(data)
# Merge config and defaults
config_src = {**default_config_src, **config_src}
logging.debug(config_src)
# list templates
source_template_dir='/awx/templates'
templates = os.listdir(source_template_dir)
logging.debug(templates)
# Create target dir then iterate
os.makedirs("/opt/local/.awx", exist_ok=True)
for templateName in templates:
logging.info("Generating from : %s", templateName)
template = env.get_template("templates/" + templateName)
data = template.render(config_src)
logging.debug(data)
# Render template
target=os.path.join("/opt/local/.awx",os.path.splitext(templateName)[0])
logging.info("Dumping to : %s ", target)
f = open(target, "w")
f.write(data)
f.close()
logging.info("Create secret key file")
# Dump static secret key
f = open(os.path.join("/opt/local/.awx", "SECRET_KEY"), "w")
f.write(config_src['secret_key'])
f.close()
| 28.909091 | 90 | 0.743935 | #!/usr/bin/env python
import os, yaml, json, shlex, datetime
import logging
logging.basicConfig(level=logging.INFO)
from jinja2 import Environment, FileSystemLoader
from datetime import datetime
from shutil import copyfile
# Mimic Ansible addional Jinja filter
def quote(input):
    """Jinja filter mirroring Ansible's `quote`: shell-escape the value."""
    text = str(input)
    logging.debug("Quoting : %s", text)
    return shlex.quote(text)
# Prepare Jinja envs
env = Environment(loader = FileSystemLoader("/awx"), trim_blocks=True, lstrip_blocks=True)
env.filters['quote'] = quote # Register quote filer
# put local config to root Jinja
copyfile("/opt/local/config/values.yml", "/awx/config/values.yml")
logging.info("Generating values")
# Load default config values
default_config_src = yaml.safe_load(open("/awx/config/default.values.yml.j2"))
# Config file itself is a template => render it
template = env.get_template("config/default.values.yml.j2")
data = template.render(default_config_src)
logging.debug("default values render : ")
logging.debug(data)
default_config_src = yaml.safe_load(data)
# Load config values
config_src = yaml.safe_load(open("/awx/config/values.yml"))
# Config file itself may be a template => render it
template = env.get_template("config/values.yml")
data = template.render(config_src)
logging.debug("values render : ")
logging.debug(data)
config_src = yaml.safe_load(data)
# Merge config and defaults
# (user-provided values take precedence over the rendered defaults)
config_src = {**default_config_src, **config_src}
logging.debug(config_src)
# list templates
source_template_dir='/awx/templates'
templates = os.listdir(source_template_dir)
logging.debug(templates)
# Create target dir then iterate
os.makedirs("/opt/local/.awx", exist_ok=True)
# Render every template into /opt/local/.awx, dropping the final extension
# (e.g. foo.conf.j2 -> foo.conf).
for templateName in templates:
    logging.info("Generating from : %s", templateName)
    template = env.get_template("templates/" + templateName)
    data = template.render(config_src)
    logging.debug(data)
    # Render template
    target=os.path.join("/opt/local/.awx",os.path.splitext(templateName)[0])
    logging.info("Dumping to : %s ", target)
    f = open(target, "w")
    f.write(data)
    f.close()
logging.info("Create secret key file")
# Dump static secret key
f = open(os.path.join("/opt/local/.awx", "SECRET_KEY"), "w")
f.write(config_src['secret_key'])
f.close()
| 72 | 0 | 22 |
4120a6254f740d446bdedbcf16a3b5f9b9dc4b49 | 6,777 | py | Python | scrape_weather.py | rbbastos/Weather_Processing_App | c77b61e95e53851634afa3a0b1500a1227dcb443 | [
"MIT"
] | null | null | null | scrape_weather.py | rbbastos/Weather_Processing_App | c77b61e95e53851634afa3a0b1500a1227dcb443 | [
"MIT"
] | null | null | null | scrape_weather.py | rbbastos/Weather_Processing_App | c77b61e95e53851634afa3a0b1500a1227dcb443 | [
"MIT"
] | null | null | null | """Module: Creates a WeatherScraper class to scrape data from website."""
import urllib.request
import datetime
import time
from time import strptime
from html.parser import HTMLParser
from html.entities import name2codepoint
class WeatherScraper(HTMLParser):
    """This class contains HTMLParser functions.

    Walks a monthly climate table (caption/tbody/tr/th/td): the row's date
    is taken from an <abbr title="Month DD, YYYY"> inside a <th>, and the
    first three <td> cells of that row are collected as max/min/mean
    temperatures into self.dictOuter keyed by 'YYYY/MM/DD'.
    """
    def __init__(self):
        """Create an instance of WeatherScraper."""
        HTMLParser.__init__(self)
        # Flags tracking which tags the parser is currently inside.
        self.inTr = self.inTh = self.inAabr = self.inA = self.inTd = \
            self.isDate = self.inCaption = self.inTbody = False
        self.inMean = self.inMin = self.inMax = self.inAvg = \
            self.inMyDate = False
        # True while the page caption matches url_year/url_month.
        self.EqualData = True
        # i counts <td> cells within the current row (1=max, 2=min, 3=mean).
        self.i = self.j = 0
        self.keys = ["max", "min", "mean"]
        self.dictInner = {}  # temperatures accumulated for the current day
        self.dictOuter = {}  # 'YYYY/MM/DD' -> {'max': .., 'min': .., 'mean': ..}
        self.myDate = ''
        self.myCaptionYear = ''
        self.myCaption = []
        # Expected year/month; set by the caller before feeding HTML.
        self.url_year = self.url_month = None
    def handle_starttag(self, tag, attrs):
        """Handle the starttag in a website."""
        if tag == 'caption':
            """Only one caption in html"""
            try:
                self.inCaption = True
            except Exception as e:
                print("Error:", e)
        if tag == 'tbody':
            """Only one <tbody> in html"""
            try:
                self.inTbody = True
            except Exception as e:
                print("Error:", e)
        if tag == 'tr':
            """There are an average of 35 tr"""
            try:
                self.inTr = True
            except Exception as e:
                print("Error:", e)
        if tag == 'td':
            """First td is under column MAX and DAY 1"""
            try:
                """i = 1 -> column MAX TEMP;
                i = 2 -> column MIN TEMP;
                i = 3 -> column MEAN TEMP"""
                self.i += 1
                self.inTd = True
            except Exception as e:
                print("Error:", e)
        if tag == 'th':
            """First th = DAY(0,0) in table.
            Date is inside a td>th>abbr(attr[1]). Total: 43 """
            try:
                self.inTh = True
            except Exception as e:
                print("Error:", e)
        if tag == 'abbr':
            """Date is located in here, attr[1]. Total: 62"""
            try:
                self.inAabr = True
            except Exception as e:
                print("Error:", e)
        if tag == 'a':
            """Set tag <a> to true"""
            try:
                self.inA = True
            except Exception as e:
                print("Error:", e)
        # A row date looks like <th><abbr title="March 1, 2020">...</abbr></th>;
        # 'Average'/'Extreme' are the table's summary rows and are skipped.
        for attr in attrs:
            """attrs are inside a tag. E.g id='test'."""
            self.inMean = True
            # print(self.inTbody)
            if self.inTbody and self.inTr and self.inTh and self.inAabr and \
                    attr[0] == 'title' and not attr[0] == 'href' \
                    and not attr[1] == 'Average' and not attr[1] == 'Extreme':
                # print(attr)
                self.inMyDate = True
                try:
                    # print(f" attr[1] {attr[1]}")
                    self.myDate = datetime.datetime \
                        .strptime(attr[1], '%B %d, %Y') \
                        .date() \
                        .strftime('%Y/%m/%d')
                    print(f" self.myDate {self.myDate}")
                    self.isDate = True
                except Exception as e:
                    print(f"Error: {attr[1]}", e)
    def handle_endtag(self, tag):
        """Handle the endtag in a website."""
        if tag == 'caption':
            try:
                self.inCaption = False
            except Exception as e:
                print("Error:", e)
        if tag == 'tbody':
            """Only one <tbody> in html"""
            try:
                self.inTbody = False
            except Exception as e:
                print("Error:", e)
        if tag == 'tr':
            try:
                # Reset the cell counter at the end of each row.
                self.i = 0
                self.inTr = False
            except Exception as e:
                print("Error:", e)
        if tag == 'td':
            try:
                self.inTd = False
            except Exception as e:
                print("Error:", e)
        if tag == 'th':
            try:
                self.inTh = False
            except Exception as e:
                print("Error:", e)
        if tag == 'abbr':
            try:
                self.inMean = False
                self.inAabr = False
            except Exception as e:
                print("Error:", e)
        if tag == 'a':
            """Set all tag <a>"""
            try:
                self.inA = False
            except Exception as e:
                print("Error:", e)
    def handle_data(self, data):
        """Handle the data inside a tag in a website and return dictionary."""
        # 'Sum' marks the start of the table's summary section: stop collecting.
        if data == 'Sum':
            self.inMyDate = False
        if self.inMyDate and self.inTd and self.i == 1:
            try:
                # print("Max Inner:", data)
                self.dictInner[self.keys[0]] = data
            except Exception as e:
                self.dictInner[self.keys[0]] = 0
                print("Error:", e)
        if self.inMyDate and self.inTd and self.i == 2:
            try:
                # print("Min Inner:", data)
                self.dictInner[self.keys[1]] = data
            except Exception as e:
                self.dictInner[self.keys[1]] = 0
                print("Error:", e)
        if self.inMyDate and self.inTd and self.i == 3:
            try:
                # print("Mean Inner:", data)
                self.dictInner[self.keys[2]] = data
            except Exception as e:
                self.dictInner[self.keys[2]] = 0
                print("Error:", e)
        if self.inMyDate:
            # Snapshot the current day's values (copy, so later rows don't alias).
            self.dictOuter[self.myDate] = dict(self.dictInner)
        if self.inCaption:
            # Caption text is split on whitespace; tokens 4 and 5 are expected
            # to be the month name and the year respectively.
            self.myCaption = data.split()
            self.myCaptionYear = self.myCaption[5]
            # print(f"inCaption - YEAR {self.myCaptionYear}")
            self.myCaptionMonth = self.myCaption[4]
            # print(f"inCaption - MONTH {self.myCaptionMonth}")
            m = {'jan': '01', 'feb': '02', 'mar': '03', 'apr': '04',
                 'may': '05', 'jun': '06', 'jul': '07', 'aug': '08',
                 'sep': '09', 'oct': '10', 'nov': '11', 'dec': '12'}
            s = self.myCaption[4].strip()[:3].lower()
            # EqualData records whether the served page is for the month/year
            # that was actually requested (sites may redirect to another month).
            if str(self.url_month).zfill(2) == m[s] and str(self.url_year) \
                    in self.myCaptionYear:
                # print("EQUALLLLLL")
                self.EqualData = True
            else:
                # print("NOT EQUAL")
                self.EqualData = False
| 33.549505 | 78 | 0.449461 | """Module: Creates a WeatherScraper class to scrape data from website."""
import urllib.request
import datetime
import time
from time import strptime
from html.parser import HTMLParser
from html.entities import name2codepoint
class WeatherScraper(HTMLParser):
"""This class contains HTMLParser functions."""
def __init__(self):
"""Create an instance of WeatherScraper."""
HTMLParser.__init__(self)
self.inTr = self.inTh = self.inAabr = self.inA = self.inTd = \
self.isDate = self.inCaption = self.inTbody = False
self.inMean = self.inMin = self.inMax = self.inAvg = \
self.inMyDate = False
self.EqualData = True
self.i = self.j = 0
self.keys = ["max", "min", "mean"]
self.dictInner = {}
self.dictOuter = {}
self.myDate = ''
self.myCaptionYear = ''
self.myCaption = []
self.url_year = self.url_month = None
def handle_starttag(self, tag, attrs):
"""Handle the starttag in a website."""
if tag == 'caption':
"""Only one caption in html"""
try:
self.inCaption = True
except Exception as e:
print("Error:", e)
if tag == 'tbody':
"""Only one <tbody> in html"""
try:
self.inTbody = True
except Exception as e:
print("Error:", e)
if tag == 'tr':
"""There are an average of 35 tr"""
try:
self.inTr = True
except Exception as e:
print("Error:", e)
if tag == 'td':
"""First td is under column MAX and DAY 1"""
try:
"""i = 1 -> column MAX TEMP;
i = 2 -> column MIN TEMP;
i = 3 -> column MEAN TEMP"""
self.i += 1
self.inTd = True
except Exception as e:
print("Error:", e)
if tag == 'th':
"""First th = DAY(0,0) in table.
Date is inside a td>th>abbr(attr[1]). Total: 43 """
try:
self.inTh = True
except Exception as e:
print("Error:", e)
if tag == 'abbr':
"""Date is located in here, attr[1]. Total: 62"""
try:
self.inAabr = True
except Exception as e:
print("Error:", e)
if tag == 'a':
"""Set tag <a> to true"""
try:
self.inA = True
except Exception as e:
print("Error:", e)
for attr in attrs:
"""attrs are inside a tag. E.g id='test'."""
self.inMean = True
# print(self.inTbody)
if self.inTbody and self.inTr and self.inTh and self.inAabr and \
attr[0] == 'title' and not attr[0] == 'href' \
and not attr[1] == 'Average' and not attr[1] == 'Extreme':
# print(attr)
self.inMyDate = True
try:
# print(f" attr[1] {attr[1]}")
self.myDate = datetime.datetime \
.strptime(attr[1], '%B %d, %Y') \
.date() \
.strftime('%Y/%m/%d')
print(f" self.myDate {self.myDate}")
self.isDate = True
except Exception as e:
print(f"Error: {attr[1]}", e)
def handle_endtag(self, tag):
"""Handle the endtag in a website."""
if tag == 'caption':
try:
self.inCaption = False
except Exception as e:
print("Error:", e)
if tag == 'tbody':
"""Only one <tbody> in html"""
try:
self.inTbody = False
except Exception as e:
print("Error:", e)
if tag == 'tr':
try:
self.i = 0
self.inTr = False
except Exception as e:
print("Error:", e)
if tag == 'td':
try:
self.inTd = False
except Exception as e:
print("Error:", e)
if tag == 'th':
try:
self.inTh = False
except Exception as e:
print("Error:", e)
if tag == 'abbr':
try:
self.inMean = False
self.inAabr = False
except Exception as e:
print("Error:", e)
if tag == 'a':
"""Set all tag <a>"""
try:
self.inA = False
except Exception as e:
print("Error:", e)
def handle_data(self, data):
"""Handle the data inside a tag in a website and return dictionary."""
if data == 'Sum':
self.inMyDate = False
if self.inMyDate and self.inTd and self.i == 1:
try:
# print("Max Inner:", data)
self.dictInner[self.keys[0]] = data
except Exception as e:
self.dictInner[self.keys[0]] = 0
print("Error:", e)
if self.inMyDate and self.inTd and self.i == 2:
try:
# print("Min Inner:", data)
self.dictInner[self.keys[1]] = data
except Exception as e:
self.dictInner[self.keys[1]] = 0
print("Error:", e)
if self.inMyDate and self.inTd and self.i == 3:
try:
# print("Mean Inner:", data)
self.dictInner[self.keys[2]] = data
except Exception as e:
self.dictInner[self.keys[2]] = 0
print("Error:", e)
if self.inMyDate:
self.dictOuter[self.myDate] = dict(self.dictInner)
if self.inCaption:
self.myCaption = data.split()
self.myCaptionYear = self.myCaption[5]
# print(f"inCaption - YEAR {self.myCaptionYear}")
self.myCaptionMonth = self.myCaption[4]
# print(f"inCaption - MONTH {self.myCaptionMonth}")
m = {'jan': '01', 'feb': '02', 'mar': '03', 'apr': '04',
'may': '05', 'jun': '06', 'jul': '07', 'aug': '08',
'sep': '09', 'oct': '10', 'nov': '11', 'dec': '12'}
s = self.myCaption[4].strip()[:3].lower()
if str(self.url_month).zfill(2) == m[s] and str(self.url_year) \
in self.myCaptionYear:
# print("EQUALLLLLL")
self.EqualData = True
else:
# print("NOT EQUAL")
self.EqualData = False
| 0 | 0 | 0 |
6e85a1b14c54d8fac368ba969c95b6dc1c2eb330 | 1,020 | py | Python | drone_control/record_video.py | maxhchen/eecs149-final-project | 4b3fd66c59d2f60f85006c8781fbb5eb0e50723a | [
"MIT"
] | 1 | 2022-01-15T20:26:47.000Z | 2022-01-15T20:26:47.000Z | drone_control/record_video.py | maxhchen/eecs149-final-project | 4b3fd66c59d2f60f85006c8781fbb5eb0e50723a | [
"MIT"
] | null | null | null | drone_control/record_video.py | maxhchen/eecs149-final-project | 4b3fd66c59d2f60f85006c8781fbb5eb0e50723a | [
"MIT"
] | null | null | null | import time, cv2
from threading import Thread
from djitellopy import Tello
# Target frame rate for the recorder thread / output video.
FPS = 30
# Connect to the Tello drone and report charge before streaming.
tello = Tello()
tello.connect()
print("Battery level:", tello.get_battery())
# Flag polled by the recorder thread; cleared by the main thread to stop it.
keepRecording = True
tello.streamon()
frame_read = tello.get_frame_read()
# we need to run the recorder in a seperate thread, otherwise blocking options
# would prevent frames from getting added to the video
# NOTE(review): `video_recorder` is not defined anywhere in this copy of the
# script, so this line raises NameError as written -- the full version of the
# script defines it above this point. TODO confirm against the original file.
recorder = Thread(target=video_recorder)
recorder.start()
# tello.takeoff()
# tello.move_up(100)
# tello.rotate_counter_clockwise(360)
# tello.land()
# Show the live feed for ~15 seconds, then stop recording and shut down.
start = time.time()
while time.time() - start < 15:
    cv2.imshow("Drone", frame_read.frame)
    cv2.waitKey(1)
keepRecording = False
recorder.join()
tello.end()
| 23.181818 | 95 | 0.709804 | import time, cv2
from threading import Thread
from djitellopy import Tello
# Frames per second used both to pace the recorder and for the output file.
FPS = 30
# Connect to the drone, report battery charge, and start the video stream.
tello = Tello()
tello.connect()
print("Battery level:", tello.get_battery())
# Flag polled by the recorder thread; cleared by the main thread to stop it.
keepRecording = True
tello.streamon()
frame_read = tello.get_frame_read()
def video_recorder():
    """Write frames from the shared `frame_read` stream to video.mp4 until
    the main thread clears the module-level `keepRecording` flag.

    Runs on a background thread; paces writes to roughly FPS frames/sec.
    """
    height, width, _ = frame_read.frame.shape
    video = cv2.VideoWriter('video.mp4', cv2.VideoWriter_fourcc(*'mp4v'), FPS, (width, height))
    while keepRecording:
        start = time.time()
        video.write(frame_read.frame)
        # Bug fix: when writing a frame took longer than one frame period the
        # original passed a negative value to time.sleep(), which raises
        # ValueError and silently kills the recorder thread. Clamp at zero.
        time.sleep(max(0.0, 1 / FPS - (time.time() - start)))
    video.release()
# we need to run the recorder in a seperate thread, otherwise blocking options
# would prevent frames from getting added to the video
recorder = Thread(target=video_recorder)
recorder.start()
# tello.takeoff()
# tello.move_up(100)
# tello.rotate_counter_clockwise(360)
# tello.land()
# Display the live feed for ~15 seconds while the recorder thread writes frames.
start = time.time()
while time.time() - start < 15:
    cv2.imshow("Drone", frame_read.frame)
    cv2.waitKey(1)
# Signal the recorder thread to finish, wait for it, then release resources.
keepRecording = False
recorder.join()
tello.end()
| 307 | 0 | 23 |
a47d322a9a5e9bda464a37de739f116470546692 | 1,901 | py | Python | bireme/biblioref/migrations/0004_auto_20150902_1229.py | rfdeoliveira/fi-admin | c2df084c7e79d587e2273dc222f106fa243b7f6e | [
"MIT",
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | bireme/biblioref/migrations/0004_auto_20150902_1229.py | rfdeoliveira/fi-admin | c2df084c7e79d587e2273dc222f106fa243b7f6e | [
"MIT",
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | bireme/biblioref/migrations/0004_auto_20150902_1229.py | rfdeoliveira/fi-admin | c2df084c7e79d587e2273dc222f106fa243b7f6e | [
"MIT",
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import utils.fields
| 43.204545 | 137 | 0.622304 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import utils.fields
class Migration(migrations.Migration):
    """Auto-generated Django migration (0004) for the `biblioref` app.

    Adds the ReferenceLocal model (per-center local data attached to a
    bibliographic Reference), updates verbose names on ReferenceComplement,
    and makes the analytic reference title field mandatory.
    """
    dependencies = [
        ('biblioref', '0003_auto_20150901_1025'),
    ]
    operations = [
        migrations.CreateModel(
            name='ReferenceLocal',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('call_number', utils.fields.JSONField(null=True, verbose_name='Call number', blank=True)),
                ('database', models.TextField(verbose_name='Database', blank=True)),
                ('inventory_number', models.TextField(verbose_name='Inventory number', blank=True)),
                ('internal_note', models.TextField(verbose_name='Internal note', blank=True)),
                ('local_descriptors', models.TextField(verbose_name='Local descriptors', blank=True)),
                ('cooperative_center_code', models.CharField(max_length=55, verbose_name='Cooperative center', blank=True)),
                ('source', models.ForeignKey(verbose_name='Source', to='biblioref.Reference')),
            ],
            options={
                'verbose_name': 'Bibliographic Reference Local',
                'verbose_name_plural': 'Bibliographic References Local',
            },
            bases=(models.Model,),
        ),
        migrations.AlterModelOptions(
            name='referencecomplement',
            options={'verbose_name': 'Bibliographic Reference Complement', 'verbose_name_plural': 'Bibliographic References Complement'},
        ),
        migrations.AlterField(
            model_name='referenceanalytic',
            name='title',
            field=utils.fields.JSONField(help_text='Field mandatory', null=True, verbose_name='Title'),
            preserve_default=True,
        ),
    ]
| 0 | 1,751 | 23 |
05b01b55fd1d0c2d100e825d487acf27fb75a047 | 748 | py | Python | tests/variable/test_use_local_in_initializer.py | sco1/pylox | b4820828306c20cee3f8533c2547fafb92c6c1bd | [
"MIT"
] | 2 | 2021-12-18T01:52:50.000Z | 2022-01-17T19:41:52.000Z | tests/variable/test_use_local_in_initializer.py | sco1/pylox | b4820828306c20cee3f8533c2547fafb92c6c1bd | [
"MIT"
] | 18 | 2021-11-30T04:05:53.000Z | 2022-02-01T03:30:04.000Z | tests/variable/test_use_local_in_initializer.py | sco1/pylox | b4820828306c20cee3f8533c2547fafb92c6c1bd | [
"MIT"
] | null | null | null | from textwrap import dedent
import pytest
from pylox.lox import Lox
# Base cases from https://github.com/munificent/craftinginterpreters/blob/master/test/variable/use_local_in_initializer.lox
TEST_SRC = dedent(
"""\
var a = "outer";
{
var a = a; // Error at 'a': Can't read local variable in its own initializer.
}
"""
)
EXPECTED_STDOUTS = ["3:11: LoxResolverError: Can't read local variable in its own initializer."]
| 25.793103 | 123 | 0.721925 | from textwrap import dedent
import pytest
from pylox.lox import Lox
# Base cases from https://github.com/munificent/craftinginterpreters/blob/master/test/variable/use_local_in_initializer.lox
TEST_SRC = dedent(
"""\
var a = "outer";
{
var a = a; // Error at 'a': Can't read local variable in its own initializer.
}
"""
)
EXPECTED_STDOUTS = ["3:11: LoxResolverError: Can't read local variable in its own initializer."]
def test_use_local_in_initializer(capsys: pytest.CaptureFixture) -> None:
    """Self-referential initializer is a resolver error, not a runtime error."""
    lox = Lox()
    lox.run(TEST_SRC)
    assert lox.had_error
    assert not lox.had_runtime_error
    captured_lines = capsys.readouterr().out.splitlines()
    assert captured_lines == EXPECTED_STDOUTS
| 276 | 0 | 23 |
b43a716afff8ac59493a9d2793b6c660350d23ad | 13,453 | py | Python | lfs/lfs_eeg.py | shiningsunnyday/chestXRay | 75712a34cf025df60261f195262cc671a9fa04dc | [
"Apache-2.0"
] | 9 | 2019-05-27T21:33:13.000Z | 2022-02-24T02:30:16.000Z | lfs/lfs_eeg.py | shiningsunnyday/chestXRay | 75712a34cf025df60261f195262cc671a9fa04dc | [
"Apache-2.0"
] | null | null | null | lfs/lfs_eeg.py | shiningsunnyday/chestXRay | 75712a34cf025df60261f195262cc671a9fa04dc | [
"Apache-2.0"
] | 5 | 2019-06-01T00:36:25.000Z | 2021-11-15T17:09:14.000Z | import re
import spacy
spacy_en = spacy.load('en_core_web_sm')
# Setting LF output values
ABSTAIN_VAL = 0
SEIZURE_VAL = 1
NO_SEIZURE_VAL = -1
######################################################################################################
##### HELPFUL REGEXES AND ONTOLOGIES
######################################################################################################
# Defining useful regular expressions.
SIMPLE_NORMAL_RE = re.compile('\snormal\s', re.IGNORECASE)
# Nouns indicating an EEG
EEGSYN = r'(EEG|study|record|electroencephalogram|ambulatory\s+EEG|video.EEG\sstudy)'
# Phrases indicating a normal study
NORMAL_STUDY_PHRASES = re.compile(rf'\snormal\s+{EEGSYN}'
rf'|\snormal\s+awake\s+and\s+asleep\s+{EEGSYN}'
rf'|\snormal\s+awake\s+{EEGSYN}'
rf'|\snormal\s+awake\s+and\s+drowsy\s+{EEGSYN}'
rf'|\snormal\s+asleep\s+{EEGSYN}'
rf'|\s{EEGSYN}\s+(is|was)\s+normal'
rf'|\srange\s+of\s+normal' # generous
rf'|\s(is|was)\s+normal\s+for\s+age'
#rf'|(EEG|study|record)\s+(is|was)\s+normal\s+for\s+age'
#rf'|(EEG|study|record)\s+(is|was)\s+normal\s+for\s+age'
rf'|{EEGSYN}\s+(is|was)\s+within\s+normal\s+'
rf'|{EEGSYN}\s+(is|was)\s+borderline\+snormal'
rf'|{EEGSYN}\s+(is|was)\s+at\s+the\s+borderline\s+of\s+being\s+normal'
rf'|{EEGSYN}\s+capturing\s+wakefulness\s+and\s+sleep\s+(is|was)\s+normal'
rf'|{EEGSYN}\s+capturing\s+wakefulness\s+(is|was)\s+normal',
re.IGNORECASE)
# Regex for abnormal
ABNORMAL_RE = re.compile(r'abnormal', re.IGNORECASE)
# Regex for seizure synonyms
SEIZURE_SYNONYMS = r'seizure|seizures|spasm|spasms|status\sepilepticus|epilepsia\spartialis\scontinua|drop\sattack'
SEIZURE_SYNONYMS_RE = re.compile(SEIZURE_SYNONYMS, re.IGNORECASE|re.UNICODE)
# Regex for negation
NEG_DET = ['no', 'not', 'without']
# Regex for no seizure in study
NEG_SEIZURE = r'no seizures|no epileptiform activity or seizures'.replace(' ','\s')
NEG_SEIZURE_RE = re.compile(NEG_SEIZURE, re.IGNORECASE)
# Alternate section keys for INTERPRATION section of report
candidate_interps = ['INTERPRETATION', 'Interpretation', 'Summary', 'impression', 'IMPRESSION', 'conclusion', 'conclusions']
CANDIDATE_INTERPS_LOWER = list({ss.lower() for ss in candidate_interps})
# Alternate regex for no seizures
NOSEIZURE_PHRASE_RE = re.compile(r'\bno seizures\b|\bno\sepileptiform\sactivity\sor\sseizures\b'
r'|\bno findings to indicate seizures\b'
r'|no findings to indicate'
r'|no new seizures'
r'|with no seizures'
r'|no evidence to support seizures'
r'|nonepileptic'
r'|non-epileptic'
,
re.IGNORECASE|re.UNICODE)
# Defining negexes
NEG_DET= r'(\bno\b|\bnot\b|\bwithout\sfurther\b|\bno\sfurther\b|without|neither)'
BASIC_NEGEX_RE = re.compile(NEG_DET + '.*('+ SEIZURE_SYNONYMS + ')', re.IGNORECASE|re.UNICODE)
REVERSED_NEGEX_RE = re.compile('('+ SEIZURE_SYNONYMS + ').*' + NEG_DET, re.IGNORECASE|re.UNICODE)
######################################################################################################
##### HELPER FUNCTIONS
######################################################################################################
def is_not_abnormal_interp(interp):
    """Return True when the interpretation text never mentions 'abnormal'."""
    return ABNORMAL_RE.search(interp) is None
def abnormal_interp_with_seizure(interp_text):
    """SEIZURE_VAL for an abnormal text that also contains a seizure synonym;
    NO_SEIZURE_VAL in every other case."""
    if ABNORMAL_RE.search(interp_text) and SEIZURE_SYNONYMS_RE.search(interp_text):
        return SEIZURE_VAL
    return NO_SEIZURE_VAL
def abnormal_interp_test(interp_text):
    """Return the 'abnormal' regex match (truthy) or None when absent."""
    found = ABNORMAL_RE.search(interp_text)
    return found
def eval_interp_with_negex(interp):
    """Sentence-level negation scan of an interpretation.

    A seizure-synonym mention only counts as affirmative if no negation
    pattern has been seen in this or any earlier sentence; a single
    affirmative mention yields SEIZURE_VAL, everything else NO_SEIZURE_VAL.
    """
    if is_not_abnormal_interp(interp):
        return NO_SEIZURE_VAL
    negation_seen = False
    seizure_affirmed = False
    for sentence in spacy_en(interp).sents:
        text = str(sentence)
        if BASIC_NEGEX_RE.search(text) or REVERSED_NEGEX_RE.search(text):
            negation_seen = True
        elif not negation_seen and SEIZURE_SYNONYMS_RE.search(text):
            seizure_affirmed = True
    # An affirmed, un-negated mention wins; everything else is "no seizure".
    return SEIZURE_VAL if seizure_affirmed else NO_SEIZURE_VAL
def get_section_with_name(section_names, doc):
    """Concatenate (whitespace-normalised) the text found for each name at the
    top level of doc.sections, under 'narrative', and under 'findings'.

    Missing keys are silently skipped, mirroring the original try/except
    structure; presumes the EEGNote doc layout.
    """
    pieces = []
    for name in section_names:
        for lookup in (
            lambda: doc.sections[name]['text'],
            lambda: doc.sections['narrative'][name],
            lambda: doc.sections['findings'][name],
        ):
            try:
                pieces.append(lookup())
            except:
                pass
    return ' '.join(' '.join(pieces).split())
######################################################################################################
##### LABELING FUNCTIONS (LFs)
######################################################################################################
def lf_normal_interp_not_seizure(report):
    """No-seizure vote when the first interpretation-like section present
    reads as a normal study; abstain otherwise."""
    for key in CANDIDATE_INTERPS_LOWER:
        if key not in report.sections.keys():
            continue
        text = report.sections[key]['text']
        # Only the first candidate section found is ever examined.
        if SIMPLE_NORMAL_RE.search(text) and NORMAL_STUDY_PHRASES.search(text):
            return NO_SEIZURE_VAL
        return ABSTAIN_VAL
    return ABSTAIN_VAL
def lf_abnormal_interp_with_seizure(report):
    """Vote via abnormal_interp_with_seizure() on the first available of:
    interpretation, summary, findings.{summary,impression},
    narrative.{summary,impression}."""
    sections = report.sections
    if 'interpretation' in sections.keys():
        return abnormal_interp_with_seizure(sections['interpretation']['text'])
    if 'summary' in sections:
        return abnormal_interp_with_seizure(sections['summary']['text'])
    for top_key in ('findings', 'narrative'):
        if top_key in sections:
            for sub_key in ('summary', 'impression'):
                if sub_key in sections[top_key]:
                    return abnormal_interp_with_seizure(sections[top_key][sub_key])
            return ABSTAIN_VAL
    return ABSTAIN_VAL
def lf_findall_interp_with_seizure(report):
    """Like lf_abnormal_interp_with_seizure, but falls back to gathering
    every candidate interpretation section before voting."""
    if 'interpretation' in report.sections.keys():
        text = report.sections['interpretation']['text']
    else:
        text = get_section_with_name(CANDIDATE_INTERPS_LOWER, report)
        if not text:
            return ABSTAIN_VAL
    return abnormal_interp_with_seizure(text)
def lf_findall_abnl_interp_without_seizure(report):
    """No-seizure vote when an abnormal interpretation explicitly rules
    seizures out (NOSEIZURE_PHRASE_RE); abstain otherwise."""
    if 'interpretation' in report.sections.keys():
        text = report.sections['interpretation']['text']
    else:
        text = get_section_with_name(CANDIDATE_INTERPS_LOWER, report)
        if not text:
            return ABSTAIN_VAL
    if abnormal_interp_test(text) and NOSEIZURE_PHRASE_RE.search(text):
        return NO_SEIZURE_VAL
    return ABSTAIN_VAL
def lf_abnl_interp_negexsp_seizure(report):
    """Negex-based vote on the first top-level candidate section found."""
    for key in CANDIDATE_INTERPS_LOWER:
        if key in report.sections.keys():
            return eval_interp_with_negex(report.sections[key]['text'])
    return ABSTAIN_VAL
def lf_findall_interp_negex_seizure(report):
    """Negex-based vote on the concatenation of all candidate sections."""
    text = get_section_with_name(CANDIDATE_INTERPS_LOWER, report)
    return eval_interp_with_negex(text) if text else ABSTAIN_VAL
def lf_seizure_section(report):
    """Inspect a 'seizure' subsection under findings, when one exists."""
    if 'findings' not in report.sections.keys():
        return ABSTAIN_VAL
    findings = report.sections['findings']
    seizure_keys = [k for k in findings.keys() if 'seizure' in k]
    if not seizure_keys:
        return ABSTAIN_VAL
    # Every branch below returns, so only the first seizure key is examined
    # (identical to the original loop, which returned on its first pass).
    text = findings[seizure_keys[0]]
    if 'None' in text:
        return NO_SEIZURE_VAL
    if 'Many' in text:
        return SEIZURE_VAL
    if len(text.split()) > 30:
        return SEIZURE_VAL
    return NO_SEIZURE_VAL
def lf_impression_section_negative(report):
    """No-seizure vote when the impression contains any benign/normal marker."""
    impression = get_section_with_name(['impression', 'interpretation', 'comments'], report)
    normal_markers = ['no epileptiform', 'absence of epileptiform', 'not epileptiform',
                      'normal EEG', 'normal aEEG', 'benign', 'non-specific', 'nonepileptic',
                      'idiopathic', 'no seizures', 'EEG is normal', 'normal study']
    for marker in normal_markers:
        if re.search(marker, impression, re.IGNORECASE):
            return NO_SEIZURE_VAL
    return ABSTAIN_VAL
def lf_impression_section_positive(report):
    """Seizure vote when the impression contains a strongly abnormal marker."""
    impression = get_section_with_name(['impression', 'interpretation', 'comments'], report)
    seizure_markers = ['status epilepticus', 'spasms', 'abnormal continuous',
                       'tonic', 'subclinical', 'spike-wave', 'markedly abnormal']
    for marker in seizure_markers:
        if re.search(marker, impression, re.IGNORECASE):
            return SEIZURE_VAL
    return ABSTAIN_VAL
def lf_spikes_in_impression(report):
    """Seizure vote when 'spike' appears anywhere in the impression."""
    impression = get_section_with_name(['impression', 'interpretation', 'comments'], report)
    return SEIZURE_VAL if re.search('spike', impression, re.IGNORECASE) else ABSTAIN_VAL
def lf_extreme_words_in_impression(report):
    """Seizure vote for words indicating extreme events in the impression."""
    impression = get_section_with_name(['impression', 'interpretation', 'comments'], report)
    for marker in ('excessive', 'frequent'):
        if re.search(marker, impression, re.IGNORECASE):
            return SEIZURE_VAL
    return ABSTAIN_VAL
| 37.895775 | 124 | 0.592953 | import re
import spacy
spacy_en = spacy.load('en_core_web_sm')
# Setting LF output values
ABSTAIN_VAL = 0
SEIZURE_VAL = 1
NO_SEIZURE_VAL = -1
######################################################################################################
##### HELPFUL REGEXES AND ONTOLOGIES
######################################################################################################
# Defining useful regular expressions.
SIMPLE_NORMAL_RE = re.compile('\snormal\s', re.IGNORECASE)
# Nouns indicating an EEG
EEGSYN = r'(EEG|study|record|electroencephalogram|ambulatory\s+EEG|video.EEG\sstudy)'
# Phrases indicating a normal study
NORMAL_STUDY_PHRASES = re.compile(rf'\snormal\s+{EEGSYN}'
rf'|\snormal\s+awake\s+and\s+asleep\s+{EEGSYN}'
rf'|\snormal\s+awake\s+{EEGSYN}'
rf'|\snormal\s+awake\s+and\s+drowsy\s+{EEGSYN}'
rf'|\snormal\s+asleep\s+{EEGSYN}'
rf'|\s{EEGSYN}\s+(is|was)\s+normal'
rf'|\srange\s+of\s+normal' # generous
rf'|\s(is|was)\s+normal\s+for\s+age'
#rf'|(EEG|study|record)\s+(is|was)\s+normal\s+for\s+age'
#rf'|(EEG|study|record)\s+(is|was)\s+normal\s+for\s+age'
rf'|{EEGSYN}\s+(is|was)\s+within\s+normal\s+'
rf'|{EEGSYN}\s+(is|was)\s+borderline\+snormal'
rf'|{EEGSYN}\s+(is|was)\s+at\s+the\s+borderline\s+of\s+being\s+normal'
rf'|{EEGSYN}\s+capturing\s+wakefulness\s+and\s+sleep\s+(is|was)\s+normal'
rf'|{EEGSYN}\s+capturing\s+wakefulness\s+(is|was)\s+normal',
re.IGNORECASE)
# Regex for abnormal
ABNORMAL_RE = re.compile(r'abnormal', re.IGNORECASE)
# Regex for seizure synonyms
SEIZURE_SYNONYMS = r'seizure|seizures|spasm|spasms|status\sepilepticus|epilepsia\spartialis\scontinua|drop\sattack'
SEIZURE_SYNONYMS_RE = re.compile(SEIZURE_SYNONYMS, re.IGNORECASE|re.UNICODE)
# Regex for negation
NEG_DET = ['no', 'not', 'without']
# Regex for no seizure in study
NEG_SEIZURE = r'no seizures|no epileptiform activity or seizures'.replace(' ','\s')
NEG_SEIZURE_RE = re.compile(NEG_SEIZURE, re.IGNORECASE)
# Alternate section keys for INTERPRATION section of report
candidate_interps = ['INTERPRETATION', 'Interpretation', 'Summary', 'impression', 'IMPRESSION', 'conclusion', 'conclusions']
CANDIDATE_INTERPS_LOWER = list({ss.lower() for ss in candidate_interps})
# Alternate regex for no seizures
NOSEIZURE_PHRASE_RE = re.compile(r'\bno seizures\b|\bno\sepileptiform\sactivity\sor\sseizures\b'
r'|\bno findings to indicate seizures\b'
r'|no findings to indicate'
r'|no new seizures'
r'|with no seizures'
r'|no evidence to support seizures'
r'|nonepileptic'
r'|non-epileptic'
,
re.IGNORECASE|re.UNICODE)
# Defining negexes
NEG_DET= r'(\bno\b|\bnot\b|\bwithout\sfurther\b|\bno\sfurther\b|without|neither)'
BASIC_NEGEX_RE = re.compile(NEG_DET + '.*('+ SEIZURE_SYNONYMS + ')', re.IGNORECASE|re.UNICODE)
REVERSED_NEGEX_RE = re.compile('('+ SEIZURE_SYNONYMS + ').*' + NEG_DET, re.IGNORECASE|re.UNICODE)
######################################################################################################
##### HELPER FUNCTIONS
######################################################################################################
def is_not_abnormal_interp(interp):
    """Return True when the interpretation text never mentions 'abnormal'."""
    return ABNORMAL_RE.search(interp) is None
def abnormal_interp_with_seizure(interp_text):
    """SEIZURE_VAL for an abnormal text that also contains a seizure synonym;
    NO_SEIZURE_VAL in every other case."""
    if ABNORMAL_RE.search(interp_text) and SEIZURE_SYNONYMS_RE.search(interp_text):
        return SEIZURE_VAL
    return NO_SEIZURE_VAL
def abnormal_interp_test(interp_text):
    """Return the 'abnormal' regex match (truthy) or None when absent."""
    found = ABNORMAL_RE.search(interp_text)
    return found
def eval_interp_with_negex(interp):
    """Sentence-level negation scan of an interpretation.

    A seizure-synonym mention only counts as affirmative if no negation
    pattern has been seen in this or any earlier sentence; a single
    affirmative mention yields SEIZURE_VAL, everything else NO_SEIZURE_VAL.
    """
    if is_not_abnormal_interp(interp):
        return NO_SEIZURE_VAL
    negation_seen = False
    seizure_affirmed = False
    for sentence in spacy_en(interp).sents:
        text = str(sentence)
        if BASIC_NEGEX_RE.search(text) or REVERSED_NEGEX_RE.search(text):
            negation_seen = True
        elif not negation_seen and SEIZURE_SYNONYMS_RE.search(text):
            seizure_affirmed = True
    # An affirmed, un-negated mention wins; everything else is "no seizure".
    return SEIZURE_VAL if seizure_affirmed else NO_SEIZURE_VAL
def get_section_with_name(section_names, doc):
    """Concatenate (whitespace-normalised) the text found for each name at the
    top level of doc.sections, under 'narrative', and under 'findings'.

    Missing keys are silently skipped, mirroring the original try/except
    structure; presumes the EEGNote doc layout.
    """
    pieces = []
    for name in section_names:
        for lookup in (
            lambda: doc.sections[name]['text'],
            lambda: doc.sections['narrative'][name],
            lambda: doc.sections['findings'][name],
        ):
            try:
                pieces.append(lookup())
            except:
                pass
    return ' '.join(' '.join(pieces).split())
######################################################################################################
##### LABELING FUNCTIONS (LFs)
######################################################################################################
def lf_normal_interp_not_seizure(report):
    """No-seizure vote when the first interpretation-like section present
    reads as a normal study; abstain otherwise."""
    for key in CANDIDATE_INTERPS_LOWER:
        if key not in report.sections.keys():
            continue
        text = report.sections[key]['text']
        # Only the first candidate section found is ever examined.
        if SIMPLE_NORMAL_RE.search(text) and NORMAL_STUDY_PHRASES.search(text):
            return NO_SEIZURE_VAL
        return ABSTAIN_VAL
    return ABSTAIN_VAL
def lf_abnormal_interp_with_seizure(report):
    """Vote via abnormal_interp_with_seizure() on the first available of:
    interpretation, summary, findings.{summary,impression},
    narrative.{summary,impression}."""
    sections = report.sections
    if 'interpretation' in sections.keys():
        return abnormal_interp_with_seizure(sections['interpretation']['text'])
    if 'summary' in sections:
        return abnormal_interp_with_seizure(sections['summary']['text'])
    for top_key in ('findings', 'narrative'):
        if top_key in sections:
            for sub_key in ('summary', 'impression'):
                if sub_key in sections[top_key]:
                    return abnormal_interp_with_seizure(sections[top_key][sub_key])
            return ABSTAIN_VAL
    return ABSTAIN_VAL
def lf_findall_interp_with_seizure(report):
    """Like lf_abnormal_interp_with_seizure, but falls back to gathering
    every candidate interpretation section before voting."""
    if 'interpretation' in report.sections.keys():
        text = report.sections['interpretation']['text']
    else:
        text = get_section_with_name(CANDIDATE_INTERPS_LOWER, report)
        if not text:
            return ABSTAIN_VAL
    return abnormal_interp_with_seizure(text)
def lf_findall_abnl_interp_without_seizure(report):
    """No-seizure vote when an abnormal interpretation explicitly rules
    seizures out (NOSEIZURE_PHRASE_RE); abstain otherwise."""
    if 'interpretation' in report.sections.keys():
        text = report.sections['interpretation']['text']
    else:
        text = get_section_with_name(CANDIDATE_INTERPS_LOWER, report)
        if not text:
            return ABSTAIN_VAL
    if abnormal_interp_test(text) and NOSEIZURE_PHRASE_RE.search(text):
        return NO_SEIZURE_VAL
    return ABSTAIN_VAL
def lf_abnl_interp_negexsp_seizure(report):
    """Negex-based vote on the first top-level candidate section found."""
    for key in CANDIDATE_INTERPS_LOWER:
        if key in report.sections.keys():
            return eval_interp_with_negex(report.sections[key]['text'])
    return ABSTAIN_VAL
def lf_findall_interp_negex_seizure(report):
    """Negex-based vote on the concatenation of all candidate sections."""
    text = get_section_with_name(CANDIDATE_INTERPS_LOWER, report)
    return eval_interp_with_negex(text) if text else ABSTAIN_VAL
def lf_seizure_section(report):
    """Inspect a 'seizure' subsection under findings, when one exists."""
    if 'findings' not in report.sections.keys():
        return ABSTAIN_VAL
    findings = report.sections['findings']
    seizure_keys = [k for k in findings.keys() if 'seizure' in k]
    if not seizure_keys:
        return ABSTAIN_VAL
    # Every branch below returns, so only the first seizure key is examined
    # (identical to the original loop, which returned on its first pass).
    text = findings[seizure_keys[0]]
    if 'None' in text:
        return NO_SEIZURE_VAL
    if 'Many' in text:
        return SEIZURE_VAL
    if len(text.split()) > 30:
        return SEIZURE_VAL
    return NO_SEIZURE_VAL
def lf_impression_section_negative(report):
    """No-seizure vote when the impression contains any benign/normal marker."""
    impression = get_section_with_name(['impression', 'interpretation', 'comments'], report)
    normal_markers = ['no epileptiform', 'absence of epileptiform', 'not epileptiform',
                      'normal EEG', 'normal aEEG', 'benign', 'non-specific', 'nonepileptic',
                      'idiopathic', 'no seizures', 'EEG is normal', 'normal study']
    for marker in normal_markers:
        if re.search(marker, impression, re.IGNORECASE):
            return NO_SEIZURE_VAL
    return ABSTAIN_VAL
def lf_impression_section_positive(report):
    """Seizure vote when the impression contains a strongly abnormal marker."""
    impression = get_section_with_name(['impression', 'interpretation', 'comments'], report)
    seizure_markers = ['status epilepticus', 'spasms', 'abnormal continuous',
                       'tonic', 'subclinical', 'spike-wave', 'markedly abnormal']
    for marker in seizure_markers:
        if re.search(marker, impression, re.IGNORECASE):
            return SEIZURE_VAL
    return ABSTAIN_VAL
def lf_spikes_in_impression(report):
    """Seizure vote when 'spike' appears anywhere in the impression."""
    impression = get_section_with_name(['impression', 'interpretation', 'comments'], report)
    return SEIZURE_VAL if re.search('spike', impression, re.IGNORECASE) else ABSTAIN_VAL
def lf_extreme_words_in_impression(report):
    """Seizure vote for words indicating extreme events in the impression."""
    impression = get_section_with_name(['impression', 'interpretation', 'comments'], report)
    for marker in ('excessive', 'frequent'):
        if re.search(marker, impression, re.IGNORECASE):
            return SEIZURE_VAL
    return ABSTAIN_VAL
| 0 | 0 | 0 |
69dbba474be55d7f5d144dd4498854100f8c4b6b | 313 | py | Python | find_missing.py | Kunalpod/codewars | 8dc1af2f3c70e209471045118fd88b3ea1e627e5 | [
"MIT"
] | null | null | null | find_missing.py | Kunalpod/codewars | 8dc1af2f3c70e209471045118fd88b3ea1e627e5 | [
"MIT"
] | null | null | null | find_missing.py | Kunalpod/codewars | 8dc1af2f3c70e209471045118fd88b3ea1e627e5 | [
"MIT"
] | null | null | null | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Find the missing term in an Arithmetic Progression
#Problem level: 5 kyu
| 31.3 | 94 | 0.58147 | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Find the missing term in an Arithmetic Progression
#Problem level: 5 kyu
def find_missing(s):
    """Return the missing term of the arithmetic progression `s`.

    Walks consecutive difference pairs; at the first mismatch the pair with
    the smaller absolute difference carries the true common step, which
    pins down the lost term on the other side.
    """
    for left, mid, right in zip(s, s[1:], s[2:]):
        d1, d2 = mid - left, right - mid
        if d1 != d2:
            # Gap lies after `mid` when |d1| is the true step, before it otherwise.
            return mid + d1 if abs(d1) < abs(d2) else mid - d2
| 166 | 0 | 23 |
466de17ce18f9b0f37cc8bbacbd2c4cb46780504 | 1,698 | py | Python | tests/dataverk/test_datapackage.py | navikt/dataverk | 7dd803236433048686dd7a58358bc1c09565b14b | [
"MIT"
] | 3 | 2019-09-29T20:48:46.000Z | 2021-03-31T10:16:07.000Z | tests/dataverk/test_datapackage.py | navikt/dataverk | 7dd803236433048686dd7a58358bc1c09565b14b | [
"MIT"
] | 148 | 2019-02-08T12:30:58.000Z | 2021-03-11T15:31:55.000Z | tests/dataverk/test_datapackage.py | navikt/dataverk | 7dd803236433048686dd7a58358bc1c09565b14b | [
"MIT"
] | 1 | 2020-11-18T14:10:05.000Z | 2020-11-18T14:10:05.000Z | import os
import unittest
from dataverk.datapackage import Datapackage
from dataverk.exceptions.dataverk_exceptions import EnvironmentVariableNotSet
from dataverk.utils import storage_paths
# Minimal metadata dict accepted by Datapackage(); shared fixture for the tests.
valid_metadata = {
    'title': 'title',
    'readme': "readme",
    'license': 'MIT',
    'accessRights': 'Open',
    'auth': 'unknown',
    'description': 'unknown',
    'source': 'unknown',
    'keywords': ['unknown'],
    'provenance': 'unknown',
    'publisher': 'unknown',
    'bucket': 'opendata',
    'store': 'local',
    'format': ['datapackage'],
    'pii': '',
    'purpose': 'open data',
    'master': 'secret'
}
| 28.3 | 77 | 0.657244 | import os
import unittest
from dataverk.datapackage import Datapackage
from dataverk.exceptions.dataverk_exceptions import EnvironmentVariableNotSet
from dataverk.utils import storage_paths
# Minimal metadata dict accepted by Datapackage(); shared fixture for the tests.
valid_metadata = {
    'title': 'title',
    'readme': "readme",
    'license': 'MIT',
    'accessRights': 'Open',
    'auth': 'unknown',
    'description': 'unknown',
    'source': 'unknown',
    'keywords': ['unknown'],
    'provenance': 'unknown',
    'publisher': 'unknown',
    'bucket': 'opendata',
    'store': 'local',
    'format': ['datapackage'],
    'pii': '',
    'purpose': 'open data',
    'master': 'secret'
}
class TestClassInstanciation(unittest.TestCase):
    """Datapackage construction: id derivation and required-field validation."""
    def tearDown(self):
        # Remove env vars a test may have set so cases stay independent.
        for env in ["DATAVERK_API_ENDPOINT", "DATAVERK_BUCKET_ENDPOINT"]:
            try:
                del os.environ[env]
            except KeyError:
                pass
    def test_instanciation_valid(self):
        # dp_id is expected to be a deterministic hash of the fixture metadata.
        expected_id = "2138c6203baa39c3c573afdec4404416"
        dp = Datapackage(valid_metadata)
        self.assertIsInstance(dp, Datapackage)
        self.assertEqual(expected_id, dp.dp_id)
    def test_instanciation_invalid_title_not_set(self):
        # Removing the mandatory 'title' key must make the constructor raise.
        invalid_metadata = valid_metadata.copy()
        del invalid_metadata['title']
        with self.assertRaises(AttributeError):
            dp = Datapackage(invalid_metadata)
class TestMethodReturnValues(unittest.TestCase):
    """Return values of path helpers used with a valid Datapackage."""
    def setUp(self):
        self.dp = Datapackage(valid_metadata)
    def test__nais_specific_paths_valid(self):
        # create_nav_paths() should return the (API path, store path) pair.
        dp_id = "id123"
        path, store_path = storage_paths.create_nav_paths(dp_id)
        self.assertEqual(path, f"/api/{dp_id}")
        self.assertEqual(store_path, f"/{dp_id}")
| 844 | 54 | 181 |
33f2919884e5b7e7f21877ccbd78c73ebaf908bc | 3,966 | py | Python | shoppingapp/views.py | KONAPAVANKUMAR/shopping-app-django | f0494a47e4b953cc63198c66f50bc59b94ddc860 | [
"MIT"
] | null | null | null | shoppingapp/views.py | KONAPAVANKUMAR/shopping-app-django | f0494a47e4b953cc63198c66f50bc59b94ddc860 | [
"MIT"
] | null | null | null | shoppingapp/views.py | KONAPAVANKUMAR/shopping-app-django | f0494a47e4b953cc63198c66f50bc59b94ddc860 | [
"MIT"
] | null | null | null | from django.shortcuts import render,redirect
from .models import *
from django.contrib import messages
from django.contrib.auth import login,logout,authenticate
# Create your views here.
from datetime import date
| 37.065421 | 114 | 0.703227 | from django.shortcuts import render,redirect
from .models import *
from django.contrib import messages
from django.contrib.auth import login,logout,authenticate
# Create your views here.
def logoutuser(request):
    """End the current session and send the visitor to the home page."""
    logout(request)  # clears the authenticated session, if any
    return redirect('homepage')
def loginview(request):
    """Render the login form page."""
    return render(request, "shoppingapp/login.html")
def signupview(request):
    """Render the signup form page."""
    return render(request, "shoppingapp/signup.html")
def loginuser(request):
    """Authenticate the submitted credentials and open a session.

    On success the visitor is sent to the home page; on failure an
    'invalid credentials' notice is flashed and the visitor is sent
    back to the page the form was submitted from.
    """
    user = authenticate(
        username=request.POST['username'],
        password=request.POST['password'],
    )
    if user is None:
        messages.add_message(request, messages.INFO, 'invalid credentials')
        return redirect(request.META["HTTP_REFERER"])
    login(request, user)
    return redirect("homepage")
def registeruser(request):
    """Create a new account from the signup form.

    Flashes a notice and bounces back to the form when the username is
    already taken or the two password fields disagree; otherwise creates
    the user and forwards to the login page.
    """
    username = request.POST["username"]
    password = request.POST["password"]
    repassword = request.POST["repassword"]
    # Guard clauses: username collision first, then password mismatch,
    # matching the original checking order.
    if User.objects.filter(username=username).exists():
        messages.add_message(request, messages.INFO, 'user already exists')
        return redirect(request.META["HTTP_REFERER"])
    if password != repassword:
        messages.add_message(request, messages.INFO, 'passwords didnt match')
        return redirect(request.META["HTTP_REFERER"])
    # create_user() persists the row; the trailing save() mirrors the original flow.
    User.objects.create_user(username=username, password=password).save()
    messages.add_message(request, messages.INFO, 'user succesfully created')
    return redirect("loginpage")
def homepageview(request):
    """Show every item and category to a logged-in visitor."""
    if not request.user.is_authenticated:
        return redirect('loginpage')
    return render(request, "shoppingapp/homepage.html", {
        'user': request.user,
        'items': ItemModel.objects.all(),
        'categories': CategoryModel.objects.all(),
    })
def categoryview(request, categoryid):
    """Show the items of a single category using the homepage layout."""
    selected = CategoryModel.objects.get(id=categoryid)
    context = {
        'items': selected.items.all(),
        'categories': CategoryModel.objects.all(),
    }
    return render(request, "shoppingapp/homepage.html", context)
def searchview(request):
    """Show the items whose name contains the submitted search string.

    NOTE(review): matching is done in Python over all rows; a
    name__contains queryset filter would push the work to the database —
    confirm desired case-sensitivity before changing it.
    """
    query = request.POST['search']
    matches = [item for item in ItemModel.objects.all() if query in item.name]
    context = {'items': matches, 'categories': CategoryModel.objects.all()}
    return render(request, "shoppingapp/homepage.html", context)
def addtocart(request, itemid):
    """Put one item into the current user's cart, then return to the referrer."""
    CartModel(
        user=request.user,
        item=ItemModel.objects.get(id=itemid),
    ).save()
    return redirect(request.META['HTTP_REFERER'])
def cartview(request):
    """Render the current user's cart contents."""
    entries = CartModel.objects.filter(user=request.user)
    context = {'cartitems': entries}
    return render(request, "shoppingapp/cart.html", context)
def placeorder(request):
    """Turn the user's cart into an order, applying an optional promo code.

    A snapshot of each cart line ([name, price]) is stored on the order so
    later price changes do not affect past orders; the cart is then emptied.
    """
    promo = request.POST['promo']
    discount = 0
    if promo != '':
        # Unknown codes and malformed discount values fall back to a zero
        # discount instead of aborting checkout. The previous bare
        # `except: pass` also hid genuine programming errors, so only the
        # expected failures are caught now.
        try:
            discount = int(PromoCodeModel.objects.get(promo=promo).discount)
        except (PromoCodeModel.DoesNotExist, ValueError, TypeError):
            discount = 0
    cart_entries = CartModel.objects.filter(user=request.user)
    items = [[entry.item.name, entry.item.price] for entry in cart_entries]
    OrderModel(user=request.user, items=items, discount=discount).save()
    messages.add_message(request, messages.INFO, 'order succesfully placed')
    # Per-object delete (not queryset .delete()) so model delete signals
    # keep firing, e.g. for django_cleanup.
    for entry in cart_entries:
        entry.delete()
    return redirect('homepage')
def deletefromcart(request, cartid):
    """Remove a single cart entry and return to the page the user came from."""
    entry = CartModel.objects.get(id=cartid)
    entry.delete()
    return redirect(request.META['HTTP_REFERER'])
from datetime import date
def ordersview(request):
    """List the current user's orders together with today's date.

    `today_date` is handed to the template — presumably for display or
    order-age checks; confirm against orders.html.  A leftover debug
    print() of the date was removed, and the manual append loop was
    replaced with a direct list() of the queryset.
    """
    today_date = date.today()
    orders = list(OrderModel.objects.filter(user=request.user))
    context = {'orders': orders, 'today_date': today_date}
    return render(request, "shoppingapp/orders.html", context)
def cancelorder(request, orderid):
    """Delete the given order and return to the page the user came from.

    Raises OrderModel.DoesNotExist for an unknown id (unchanged behaviour).
    The useless binding of delete()'s return value was removed.
    """
    OrderModel.objects.get(id=orderid).delete()
    return redirect(request.META['HTTP_REFERER'])
| 3,433 | 0 | 319 |
ed3fbe249d1378ec4305fb66ee15835b31c365fc | 9,439 | py | Python | src/inscriptis/model/table.py | rlskoeser/inscriptis | e23f79a4ad561f53943c3c6dd70a7d4981b0e0fb | [
"Apache-2.0"
] | 90 | 2016-01-29T15:09:21.000Z | 2022-03-08T15:08:57.000Z | src/inscriptis/model/table.py | rlskoeser/inscriptis | e23f79a4ad561f53943c3c6dd70a7d4981b0e0fb | [
"Apache-2.0"
] | 27 | 2016-01-14T10:30:10.000Z | 2022-03-24T08:00:31.000Z | src/inscriptis/model/table.py | rlskoeser/inscriptis | e23f79a4ad561f53943c3c6dd70a7d4981b0e0fb | [
"Apache-2.0"
] | 20 | 2016-01-14T12:50:55.000Z | 2022-03-04T07:26:30.000Z | #!/usr/bin/env python3
# encoding: utf-8
"""Classes used for representing Tables, TableRows and TableCells."""
from typing import List
from itertools import chain, accumulate
from inscriptis.html_properties import HorizontalAlignment, VerticalAlignment
from inscriptis.annotation import Annotation, horizontal_shift
from inscriptis.model.canvas import Canvas
class TableCell(Canvas):
"""A table cell.
Attributes:
line_width: the original line widths per line (required to adjust
annotations after a reformatting)
vertical_padding: vertical padding that has been introduced due to
vertical formatting rules.
"""
__slots__ = ('annotations', 'block_annotations', 'blocks', 'current_block',
'margin', 'annotation_counter', 'align', 'valign', '_width',
'line_width', 'vertical_padding')
def normalize_blocks(self) -> int:
"""Split multi-line blocks into multiple one-line blocks.
Returns:
The height of the normalized cell.
"""
self._flush_inline()
self.blocks = list(chain(*(line.split('\n') for line in self.blocks)))
if not self.blocks:
self.blocks = ['']
return len(self.blocks)
@property
def height(self):
"""Compute the table cell's height.
Returns:
The cell's current height.
"""
return max(1, len(self.blocks))
@property
def width(self):
"""Compute the table cell's width.
Returns:
The cell's current width.
"""
if self._width:
return self._width
return max((len(line) for line in chain(*(block.split('\n')
for block in self.blocks))))
@width.setter
def width(self, width):
"""Set the table's width and applies the cell's horizontal formatting.
Args:
The cell's expected width.
"""
# save the original line widths before reformatting
self.line_width = [len(block) for block in self.blocks]
# record new width and start reformatting
self._width = width
format_spec = '{{:{align}{width}}}'.format(align=self.align.value,
width=width)
self.blocks = [format_spec.format(b) for b in self.blocks]
@height.setter
def height(self, height: int):
"""Set the cell's height to the given value.
Notes:
Depending on the height and the cell's vertical formatting this
might require the introduction of empty lines.
"""
rows = len(self.blocks)
if rows < height:
empty_line = ['']
if self.valign == VerticalAlignment.bottom:
self.vertical_padding = (height - rows)
self.blocks = self.vertical_padding * empty_line + self.blocks
elif self.valign == VerticalAlignment.middle:
self.vertical_padding = (height - rows) // 2
self.blocks = self.vertical_padding * empty_line + \
self.blocks + ((height - rows + 1) // 2 * empty_line)
else:
self.blocks = self.blocks + ((height - rows) * empty_line)
def get_annotations(self, idx: int, row_width: int) -> List[Annotation]:
"""Return a list of all annotations within the TableCell.
Returns:
A list of annotations that have been adjusted to the cell's
position.
"""
self.current_block.idx = idx
if not self.annotations:
return []
# the easy case - the cell has only one line :)
if len(self.blocks) == 1:
annotations = horizontal_shift(self.annotations,
self.line_width[0],
self.width, self.align, idx)
self.line_width[0] = self.width
return annotations
# the more challenging one - multiple cell lines
line_break_pos = list(accumulate(self.line_width))
annotation_lines = [[] for _ in self.blocks]
# assign annotations to the corresponding line
for a in self.annotations:
for no, line_break in enumerate(line_break_pos):
if a.start <= (line_break + no): # consider newline
annotation_lines[no + self.vertical_padding].append(a)
break
# compute the annotation index based on its line and delta :)
result = []
idx += self.vertical_padding # newlines introduced by the padding
for line_annotations, line_len in zip(annotation_lines,
self.line_width):
result.extend(horizontal_shift(line_annotations, line_len,
self.width, self.align, idx))
idx += row_width - line_len
self.line_width = [self.width for _ in self.line_width]
return result
class TableRow:
"""A single row within a table."""
__slots__ = ('columns', 'cell_separator')
def get_text(self) -> str:
"""Return a text representation of the TableRow."""
row_lines = [self.cell_separator.join(line)
for line in zip(*[column.blocks
for column in self.columns])]
return '\n'.join(row_lines)
@property
def width(self):
"""Compute and return the width of the current row."""
if not self.columns:
return 0
return sum((cell.width for cell in self.columns)) + len(
self.cell_separator) * (len(self.columns) - 1)
class Table:
"""An HTML table.
Attributes:
rows: the table's rows.
left_margin_len: length of the left margin before the table.
"""
__slots__ = ('rows', 'left_margin_len')
def add_row(self):
"""Add an empty :class:`TableRow` to the table."""
self.rows.append(TableRow())
def add_cell(self, table_cell: TableCell):
"""Add a new :class:`TableCell` to the table's last row.
.. note::
If no row exists yet, a new row is created.
"""
if not self.rows:
self.add_row()
self.rows[-1].columns.append(table_cell)
def _set_row_height(self):
"""Set the cell height for all :class:`TableCell`s in the table."""
for row in self.rows:
max_row_height = max((cell.normalize_blocks()
for cell in row.columns)) \
if row.columns else 0
for cell in row.columns:
cell.height = max_row_height
def _set_column_width(self):
"""Set the column width for all :class:`TableCell`s in the table."""
# determine maximum number of columns
max_columns = max((len(row.columns) for row in self.rows))
for cur_column_idx in range(max_columns):
# determine the required column width for the current column
max_column_width = max((row.columns[cur_column_idx].width
for row in self.rows
if len(row) > cur_column_idx))
# set column width for all TableCells in the current column
for row in self.rows:
if len(row) > cur_column_idx:
row.columns[cur_column_idx].width = max_column_width
def get_text(self):
"""Return and render the text of the given table."""
if not self.rows:
return '\n'
self._set_row_height()
self._set_column_width()
return '\n'.join((row.get_text() for row in self.rows)) + '\n'
def get_annotations(self, idx: int,
left_margin_len: int) -> List[Annotation]:
r"""Return all annotations in the given table.
Args:
idx: the table's start index.
left_margin_len: len of the left margin (required for adapting
the position of annotations).
Returns:
A list of all :class:`~inscriptis.annotation.Annotation`\s present
in the table.
"""
if not self.rows:
return []
annotations = []
idx += left_margin_len
for row in self.rows:
if not row.columns:
continue
row_width = row.width + left_margin_len
cell_idx = idx
for cell in row.columns:
annotations += cell.get_annotations(cell_idx, row_width)
cell_idx += cell.width + len(row.cell_separator)
idx += (row_width + 1) * cell.height # linebreak
return annotations
| 35.220149 | 79 | 0.569658 | #!/usr/bin/env python3
# encoding: utf-8
"""Classes used for representing Tables, TableRows and TableCells."""
from typing import List
from itertools import chain, accumulate
from inscriptis.html_properties import HorizontalAlignment, VerticalAlignment
from inscriptis.annotation import Annotation, horizontal_shift
from inscriptis.model.canvas import Canvas
class TableCell(Canvas):
    """A table cell.
    Attributes:
        line_width: the original line widths per line (required to adjust
            annotations after a reformatting)
        vertical_padding: vertical padding that has been introduced due to
            vertical formatting rules.
    """
    __slots__ = ('annotations', 'block_annotations', 'blocks', 'current_block',
                 'margin', 'annotation_counter', 'align', 'valign', '_width',
                 'line_width', 'vertical_padding')
    def __init__(self, align: HorizontalAlignment, valign: VerticalAlignment):
        super().__init__()
        self.align = align
        self.valign = valign
        # _width stays None until the table layout assigns a column width.
        self._width = None
        # line_width is recorded by the width setter before reformatting.
        self.line_width = None
        self.vertical_padding = 0
    def normalize_blocks(self) -> int:
        """Split multi-line blocks into multiple one-line blocks.
        Returns:
            The height of the normalized cell.
        """
        self._flush_inline()
        self.blocks = list(chain(*(line.split('\n') for line in self.blocks)))
        # an empty cell still occupies one (empty) line
        if not self.blocks:
            self.blocks = ['']
        return len(self.blocks)
    @property
    def height(self):
        """Compute the table cell's height.
        Returns:
            The cell's current height.
        """
        return max(1, len(self.blocks))
    @property
    def width(self):
        """Compute the table cell's width.
        Returns:
            The cell's current width.
        """
        # truthiness check: both None and 0 mean "no width assigned yet",
        # in which case the longest content line determines the width
        if self._width:
            return self._width
        return max((len(line) for line in chain(*(block.split('\n')
                                                  for block in self.blocks))))
    @width.setter
    def width(self, width):
        """Set the table's width and applies the cell's horizontal formatting.
        Args:
            The cell's expected width.
        """
        # save the original line widths before reformatting
        self.line_width = [len(block) for block in self.blocks]
        # record new width and start reformatting
        self._width = width
        # e.g. '{:<12}' — pad/align every line to the column width
        format_spec = '{{:{align}{width}}}'.format(align=self.align.value,
                                                   width=width)
        self.blocks = [format_spec.format(b) for b in self.blocks]
    @height.setter
    def height(self, height: int):
        """Set the cell's height to the given value.
        Notes:
            Depending on the height and the cell's vertical formatting this
            might require the introduction of empty lines.
        """
        rows = len(self.blocks)
        if rows < height:
            empty_line = ['']
            if self.valign == VerticalAlignment.bottom:
                # pad above the content so the text sits at the bottom
                self.vertical_padding = (height - rows)
                self.blocks = self.vertical_padding * empty_line + self.blocks
            elif self.valign == VerticalAlignment.middle:
                # split the padding; the extra line of an odd delta goes below
                self.vertical_padding = (height - rows) // 2
                self.blocks = self.vertical_padding * empty_line + \
                    self.blocks + ((height - rows + 1) // 2 * empty_line)
            else:
                # default (top) alignment: pad below the content
                self.blocks = self.blocks + ((height - rows) * empty_line)
    def get_annotations(self, idx: int, row_width: int) -> List[Annotation]:
        """Return a list of all annotations within the TableCell.
        Returns:
            A list of annotations that have been adjusted to the cell's
            position.
        """
        self.current_block.idx = idx
        if not self.annotations:
            return []
        # the easy case - the cell has only one line :)
        if len(self.blocks) == 1:
            annotations = horizontal_shift(self.annotations,
                                           self.line_width[0],
                                           self.width, self.align, idx)
            self.line_width[0] = self.width
            return annotations
        # the more challenging one - multiple cell lines
        # cumulative pre-format line lengths mark where each source line ends
        line_break_pos = list(accumulate(self.line_width))
        annotation_lines = [[] for _ in self.blocks]
        # assign annotations to the corresponding line
        for a in self.annotations:
            for no, line_break in enumerate(line_break_pos):
                if a.start <= (line_break + no):  # consider newline
                    annotation_lines[no + self.vertical_padding].append(a)
                    break
        # compute the annotation index based on its line and delta :)
        result = []
        idx += self.vertical_padding  # newlines introduced by the padding
        for line_annotations, line_len in zip(annotation_lines,
                                              self.line_width):
            result.extend(horizontal_shift(line_annotations, line_len,
                                           self.width, self.align, idx))
            # advance by the full row width minus what this line consumed
            idx += row_width - line_len
        # after reformatting, every line occupies the padded cell width
        self.line_width = [self.width for _ in self.line_width]
        return result
class TableRow:
    """A single row within a table."""
    __slots__ = ('columns', 'cell_separator')

    def __init__(self, cell_separator: str = ' '):
        self.columns: List[TableCell] = []
        self.cell_separator = cell_separator

    def __len__(self):
        return len(self.columns)

    def get_text(self) -> str:
        """Render the row by joining the cells' lines with the separator."""
        cell_blocks = [column.blocks for column in self.columns]
        joined_lines = (self.cell_separator.join(line)
                        for line in zip(*cell_blocks))
        return '\n'.join(joined_lines)

    @property
    def width(self):
        """Width of the row: all cell widths plus the separators between them."""
        if not self.columns:
            return 0
        cells_width = sum(cell.width for cell in self.columns)
        separators_width = len(self.cell_separator) * (len(self.columns) - 1)
        return cells_width + separators_width
class Table:
    """An HTML table.
    Attributes:
        rows: the table's rows.
        left_margin_len: length of the left margin before the table.
    """
    __slots__ = ('rows', 'left_margin_len')
    def __init__(self, left_margin_len: int):
        self.rows = []
        self.left_margin_len = left_margin_len
    def add_row(self):
        """Add an empty :class:`TableRow` to the table."""
        self.rows.append(TableRow())
    def add_cell(self, table_cell: TableCell):
        """Add a new :class:`TableCell` to the table's last row.
        .. note::
            If no row exists yet, a new row is created.
        """
        if not self.rows:
            self.add_row()
        self.rows[-1].columns.append(table_cell)
    def _set_row_height(self):
        """Set the cell height for all :class:`TableCell`s in the table."""
        # all cells of a row are stretched to the tallest cell's height
        for row in self.rows:
            max_row_height = max((cell.normalize_blocks()
                                  for cell in row.columns)) \
                if row.columns else 0
            for cell in row.columns:
                cell.height = max_row_height
    def _set_column_width(self):
        """Set the column width for all :class:`TableCell`s in the table."""
        # determine maximum number of columns
        max_columns = max((len(row.columns) for row in self.rows))
        for cur_column_idx in range(max_columns):
            # determine the required column width for the current column
            max_column_width = max((row.columns[cur_column_idx].width
                                    for row in self.rows
                                    if len(row) > cur_column_idx))
            # set column width for all TableCells in the current column
            for row in self.rows:
                if len(row) > cur_column_idx:
                    row.columns[cur_column_idx].width = max_column_width
    def get_text(self):
        """Return and render the text of the given table."""
        if not self.rows:
            return '\n'
        # normalize heights first, then widths, so padding is consistent
        self._set_row_height()
        self._set_column_width()
        return '\n'.join((row.get_text() for row in self.rows)) + '\n'
    def get_annotations(self, idx: int,
                        left_margin_len: int) -> List[Annotation]:
        r"""Return all annotations in the given table.
        Args:
            idx: the table's start index.
            left_margin_len: len of the left margin (required for adapting
                the position of annotations).
        Returns:
            A list of all :class:`~inscriptis.annotation.Annotation`\s present
            in the table.
        """
        if not self.rows:
            return []
        annotations = []
        idx += left_margin_len
        for row in self.rows:
            if not row.columns:
                continue
            row_width = row.width + left_margin_len
            cell_idx = idx
            for cell in row.columns:
                annotations += cell.get_annotations(cell_idx, row_width)
                cell_idx += cell.width + len(row.cell_separator)
            # uses the last cell's height; after _set_row_height every cell
            # in the row shares it — assumes get_text() ran first (TODO confirm)
            idx += (row_width + 1) * cell.height  # linebreak
        return annotations
| 462 | 0 | 108 |
52855729e5cdbb244154d3e3f059f2436b795ff9 | 261 | py | Python | flask_api/__init__.py | kingofsandvich/Program-for-Stylizing-Musical-Compositions-Based-on-Machine-Learning | bd730427a82bdc379177c5f7d33274ed8c9afe72 | [
"MIT"
] | null | null | null | flask_api/__init__.py | kingofsandvich/Program-for-Stylizing-Musical-Compositions-Based-on-Machine-Learning | bd730427a82bdc379177c5f7d33274ed8c9afe72 | [
"MIT"
] | null | null | null | flask_api/__init__.py | kingofsandvich/Program-for-Stylizing-Musical-Compositions-Based-on-Machine-Learning | bd730427a82bdc379177c5f7d33274ed8c9afe72 | [
"MIT"
] | null | null | null | from config import Config
from flask import Flask
| 17.4 | 61 | 0.735632 | from config import Config
from flask import Flask
def create_app(config=Config):
    """Application factory: build and configure the Flask app.

    Args:
        config: configuration object loaded via ``from_object``.

    Returns:
        The configured Flask instance with the models blueprint
        mounted under ``/style_model``.
    """
    application = Flask(__name__)
    application.config.from_object(config)
    # imported inside the factory to avoid a circular import at module load
    from .models_api import models
    application.register_blueprint(models, url_prefix='/style_model')
    return application
| 186 | 0 | 23 |
a03f6959328a0b40991c5eae5c7f38e786add882 | 2,238 | py | Python | step_by_step_code_blocks/split_train_validate.py | AlphaVantageSupport/time-series-forecasting-pytorch | 9d68a0d7ded73592e0a21c08d7652ee8188760bb | [
"Apache-2.0"
] | 43 | 2021-04-29T18:36:48.000Z | 2022-03-23T04:20:42.000Z | step_by_step_code_blocks/split_train_validate.py | djshem/time-series-forecasting-pytorch | defc88f5995c4356c337c14212b684abc7ca1cb7 | [
"Apache-2.0"
] | 9 | 2021-05-21T14:13:07.000Z | 2022-03-17T11:04:49.000Z | step_by_step_code_blocks/split_train_validate.py | djshem/time-series-forecasting-pytorch | defc88f5995c4356c337c14212b684abc7ca1cb7 | [
"Apache-2.0"
] | 26 | 2021-04-29T22:34:30.000Z | 2022-03-31T09:31:13.000Z |
data_x, data_x_unseen = prepare_data_x(normalized_data_close_price, window_size=config["data"]["window_size"])
data_y = prepare_data_y(normalized_data_close_price, window_size=config["data"]["window_size"])
# split dataset
split_index = int(data_y.shape[0]*config["data"]["train_split_size"])
data_x_train = data_x[:split_index]
data_x_val = data_x[split_index:]
data_y_train = data_y[:split_index]
data_y_val = data_y[split_index:]
# prepare data for plotting
to_plot_data_y_train = np.zeros(num_data_points)
to_plot_data_y_val = np.zeros(num_data_points)
to_plot_data_y_train[config["data"]["window_size"]:split_index+config["data"]["window_size"]] = scaler.inverse_transform(data_y_train)
to_plot_data_y_val[split_index+config["data"]["window_size"]:] = scaler.inverse_transform(data_y_val)
to_plot_data_y_train = np.where(to_plot_data_y_train == 0, None, to_plot_data_y_train)
to_plot_data_y_val = np.where(to_plot_data_y_val == 0, None, to_plot_data_y_val)
## plots
fig = figure(figsize=(25, 5), dpi=80)
fig.patch.set_facecolor((1.0, 1.0, 1.0))
plt.plot(data_date, to_plot_data_y_train, label="Prices (train)", color=config["plots"]["color_train"])
plt.plot(data_date, to_plot_data_y_val, label="Prices (validation)", color=config["plots"]["color_val"])
xticks = [data_date[i] if ((i%config["plots"]["xticks_interval"]==0 and (num_data_points-i) > config["plots"]["xticks_interval"]) or i==num_data_points-1) else None for i in range(num_data_points)] # make x ticks nice
x = np.arange(0,len(xticks))
plt.xticks(x, xticks, rotation='vertical')
plt.title("Daily close prices for " + config["alpha_vantage"]["symbol"] + " - showing training and validation data")
plt.grid(b=None, which='major', axis='y', linestyle='--')
plt.legend()
plt.show()
def prepare_data_x(x, window_size):
    """Slice a 1-D series into overlapping windows of ``window_size``.

    Returns:
        A tuple ``(windows, last_window)``: ``windows`` holds every window
        except the final one (the model inputs) and ``last_window`` is the
        most recent window (used for the next, unseen prediction).
    """
    num_windows = x.shape[0] - window_size + 1
    step = x.strides[0]
    # zero-copy sliding window view: each row advances by one element
    windowed = np.lib.stride_tricks.as_strided(
        x, shape=(num_windows, window_size), strides=(step, step))
    return windowed[:-1], windowed[-1]
def prepare_data_y(x, window_size):
    """Build the label series: for each window, the value right after it.

    (A simple-moving-average target was sketched in the original and is
    intentionally not used; the raw next-day value is the label.)
    """
    # dropping the first ``window_size`` points aligns each label with the
    # window that precedes it
    return x[window_size:]
# Window the normalized close-price series: data_x are the model inputs,
# data_x_unseen is the most recent window (kept for the next, unseen
# prediction), and data_y holds the matching next-day labels.
data_x, data_x_unseen = prepare_data_x(normalized_data_close_price, window_size=config["data"]["window_size"])
data_y = prepare_data_y(normalized_data_close_price, window_size=config["data"]["window_size"])
# split dataset
split_index = int(data_y.shape[0]*config["data"]["train_split_size"])
data_x_train = data_x[:split_index]
data_x_val = data_x[split_index:]
data_y_train = data_y[:split_index]
data_y_val = data_y[split_index:]
# prepare data for plotting
# Both plot arrays span the whole series; positions outside each split stay 0
# and are replaced by None below so matplotlib draws gaps instead of zeros.
to_plot_data_y_train = np.zeros(num_data_points)
to_plot_data_y_val = np.zeros(num_data_points)
# inverse_transform maps normalized values back to real prices — assumes
# `scaler` is the normalizer fitted earlier in the script (TODO confirm).
to_plot_data_y_train[config["data"]["window_size"]:split_index+config["data"]["window_size"]] = scaler.inverse_transform(data_y_train)
to_plot_data_y_val[split_index+config["data"]["window_size"]:] = scaler.inverse_transform(data_y_val)
to_plot_data_y_train = np.where(to_plot_data_y_train == 0, None, to_plot_data_y_train)
to_plot_data_y_val = np.where(to_plot_data_y_val == 0, None, to_plot_data_y_val)
## plots
fig = figure(figsize=(25, 5), dpi=80)
fig.patch.set_facecolor((1.0, 1.0, 1.0))
plt.plot(data_date, to_plot_data_y_train, label="Prices (train)", color=config["plots"]["color_train"])
plt.plot(data_date, to_plot_data_y_val, label="Prices (validation)", color=config["plots"]["color_val"])
# Keep only every xticks_interval-th date label (plus the final date) so the
# x axis stays readable; the rest become None.
xticks = [data_date[i] if ((i%config["plots"]["xticks_interval"]==0 and (num_data_points-i) > config["plots"]["xticks_interval"]) or i==num_data_points-1) else None for i in range(num_data_points)] # make x ticks nice
x = np.arange(0,len(xticks))
plt.xticks(x, xticks, rotation='vertical')
plt.title("Daily close prices for " + config["alpha_vantage"]["symbol"] + " - showing training and validation data")
plt.grid(b=None, which='major', axis='y', linestyle='--')
plt.legend()
plt.show()
| 435 | 0 | 45 |
f5c2ac374420189e47c9161aa9686ce368683db5 | 5,367 | py | Python | magfetch/settings/common.py | NumanIbnMazid/magfetch | fb6297fd3b2277bf48289ff95d1ed4f071b9aded | [
"MIT"
] | 1 | 2020-03-13T07:09:34.000Z | 2020-03-13T07:09:34.000Z | magfetch/settings/common.py | NumanIbnMazid/magfetch | fb6297fd3b2277bf48289ff95d1ed4f071b9aded | [
"MIT"
] | 8 | 2020-02-11T23:52:58.000Z | 2022-03-11T23:42:09.000Z | magfetch/settings/common.py | NumanIbnMazid/magfetch | fb6297fd3b2277bf48289ff95d1ed4f071b9aded | [
"MIT"
] | 1 | 2020-03-13T07:09:35.000Z | 2020-03-13T07:09:35.000Z |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third Party Apps
'widget_tweaks',
'django_cleanup',
'django_cool_paginator',
'django.contrib.sites',
'allauth',
'allauth.account',
# 'corsheaders',
# 'rest_framework',
# Local Apps
'accounts',
'system_data',
'suspicious',
'utils',
'contribution',
]
SITE_ID = 1
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
# 'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
# CorsHeader Middleware before CommonMiddleware
# 'corsheaders.middleware.CorsMiddleware',
# CorsHeader Middleware
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'magfetch.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
# 'loaders': [
# ('django.template.loaders.cached.Loader', [
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# ]),
# ],
},
},
]
WSGI_APPLICATION = 'magfetch.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Dhaka'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'UTC'
# USE_I18N = True
# USE_L10N = True
# USE_TZ = True
# Allauth
LOGIN_URL = '/account/login/'
LOGOUT_URL = '/'
LOGIN_REDIRECT_URL = '/'
SITE_NAME = 'magFetch'
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 7
ACCOUNT_LOGIN_ATTEMPTS_LIMIT = 5
ACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300
ACCOUNT_USERNAME_MIN_LENGTH = 1
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = True
ACCOUNT_SIGNUP_PASSWORD_VERIFICATION = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_LOGOUT_ON_PASSWORD_CHANGE = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = True
ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/'
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_VERIFICATION = 'optional'
ACCOUNT_EMAIL_SUBJECT_PREFIX = 'magFetch'
ACCOUNT_USERNAME_BLACKLIST =['robot', 'hacker', 'virus', 'spam']
ACCOUNT_ADAPTER = 'magfetch.adapter.UsernameMaxAdapter'
# Cool Paginator
COOL_PAGINATOR_NEXT_NAME = "next"
COOL_PAGINATOR_PREVIOUS_NAME = "previous"
COOL_PAGINATOR_SIZE = "SMALL"
COOL_PAGINATOR_ELASTIC = "300px"
# File Validation Staffs
ALLOWED_FILE_TYPES = ['.doc', '.docx', '.jpg', '.jpeg', '.png',
'.svg', '.DOC', '.DOCX', '.JPG', '.JPEG', '.PNG', '.SVG']
FILE_TYPES = ['.doc', '.docx', '.jpg', '.jpeg', '.png', '.svg']
IMAGE_TYPES = ['.jpg', '.jpeg', '.png', '.svg']
DOCUMENT_TYPES = ['.doc', '.docx']
# 1.5MB - 1621440
# 2.5MB - 2621440
# 5MB - 5242880
# 10MB - 10485760
# 20MB - 20971520
# 50MB - 5242880
# 100MB 104857600
# 250MB - 214958080
# 500MB - 429916160
MAX_UPLOAD_SIZE = 2621440
# # Static Files
# STATIC_URL = '/static/'
# MEDIA_URL = '/media/'
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, 'static_proj'),
# ]
# STATIC_ROOT = os.path.join('static_cdn', 'static_root')
# MEDIA_ROOT = os.path.join('static_cdn', 'media_root')
# STATICFILES_FINDERS = [
# 'django.contrib.staticfiles.finders.FileSystemFinder',
# 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# ]
# Needed for CorsHeader (accept connections from everywhere)
# CORS_ORIGIN_ALLOW_ALL = True
# CORS_ALLOW_HEADERS = (
# 'x-requested-with',
# 'content-type',
# 'accept',
# 'origin',
# 'authorization',
# 'x-csrftoken',
# 'token',
# 'x-device-id',
# 'x-device-type',
# 'x-push-id',
# 'dataserviceversion',
# 'maxdataserviceversion'
# )
# CORS_ALLOW_METHODS = (
# 'GET',
# 'POST',
# 'PUT',
# 'PATCH',
# 'DELETE',
# 'OPTIONS'
# )
| 26.969849 | 91 | 0.662754 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Third Party Apps
'widget_tweaks',
'django_cleanup',
'django_cool_paginator',
'django.contrib.sites',
'allauth',
'allauth.account',
# 'corsheaders',
# 'rest_framework',
# Local Apps
'accounts',
'system_data',
'suspicious',
'utils',
'contribution',
]
SITE_ID = 1
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
# 'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
# CorsHeader Middleware before CommonMiddleware
# 'corsheaders.middleware.CorsMiddleware',
# CorsHeader Middleware
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'magfetch.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
# 'loaders': [
# ('django.template.loaders.cached.Loader', [
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# ]),
# ],
},
},
]
WSGI_APPLICATION = 'magfetch.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Dhaka'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'UTC'
# USE_I18N = True
# USE_L10N = True
# USE_TZ = True
# Allauth
LOGIN_URL = '/account/login/'
LOGOUT_URL = '/'
LOGIN_REDIRECT_URL = '/'
SITE_NAME = 'magFetch'
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 7
ACCOUNT_LOGIN_ATTEMPTS_LIMIT = 5
ACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300
ACCOUNT_USERNAME_MIN_LENGTH = 1
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = True
ACCOUNT_SIGNUP_PASSWORD_VERIFICATION = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_LOGOUT_ON_PASSWORD_CHANGE = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = True
ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/'
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_VERIFICATION = 'optional'
ACCOUNT_EMAIL_SUBJECT_PREFIX = 'magFetch'
ACCOUNT_USERNAME_BLACKLIST =['robot', 'hacker', 'virus', 'spam']
ACCOUNT_ADAPTER = 'magfetch.adapter.UsernameMaxAdapter'
# Cool Paginator
COOL_PAGINATOR_NEXT_NAME = "next"
COOL_PAGINATOR_PREVIOUS_NAME = "previous"
COOL_PAGINATOR_SIZE = "SMALL"
COOL_PAGINATOR_ELASTIC = "300px"
# File Validation Staffs
ALLOWED_FILE_TYPES = ['.doc', '.docx', '.jpg', '.jpeg', '.png',
'.svg', '.DOC', '.DOCX', '.JPG', '.JPEG', '.PNG', '.SVG']
FILE_TYPES = ['.doc', '.docx', '.jpg', '.jpeg', '.png', '.svg']
IMAGE_TYPES = ['.jpg', '.jpeg', '.png', '.svg']
DOCUMENT_TYPES = ['.doc', '.docx']
# 1.5MB - 1621440
# 2.5MB - 2621440
# 5MB - 5242880
# 10MB - 10485760
# 20MB - 20971520
# 50MB - 5242880
# 100MB 104857600
# 250MB - 214958080
# 500MB - 429916160
MAX_UPLOAD_SIZE = 2621440
# # Static Files
# STATIC_URL = '/static/'
# MEDIA_URL = '/media/'
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, 'static_proj'),
# ]
# STATIC_ROOT = os.path.join('static_cdn', 'static_root')
# MEDIA_ROOT = os.path.join('static_cdn', 'media_root')
# STATICFILES_FINDERS = [
# 'django.contrib.staticfiles.finders.FileSystemFinder',
# 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# ]
# Neededf for CorsHeader (accept connections from everywhere)
# CORS_ORIGIN_ALLOW_ALL = True
# CORS_ALLOW_HEADERS = (
# 'x-requested-with',
# 'content-type',
# 'accept',
# 'origin',
# 'authorization',
# 'x-csrftoken',
# 'token',
# 'x-device-id',
# 'x-device-type',
# 'x-push-id',
# 'dataserviceversion',
# 'maxdataserviceversion'
# )
# CORS_ALLOW_METHODS = (
# 'GET',
# 'POST',
# 'PUT',
# 'PATCH',
# 'DELETE',
# 'OPTIONS'
# )
| 0 | 0 | 0 |
1f500aed9bfce93cd45309a81d07439da902bcae | 4,513 | py | Python | main.py | NortySpock/TESS | 639c7c157bf17b8eb0527ec135dd1fefa4e2038c | [
"MIT"
] | null | null | null | main.py | NortySpock/TESS | 639c7c157bf17b8eb0527ec135dd1fefa4e2038c | [
"MIT"
] | null | null | null | main.py | NortySpock/TESS | 639c7c157bf17b8eb0527ec135dd1fefa4e2038c | [
"MIT"
] | null | null | null | # TESS - ToastmastErs Simple Scheduler
# This simple script will create a shuffled list of suggested meeting roles for multiple meetings.
from random import shuffle
from os import linesep
import csv
import sys
if __name__ == "__main__":
main(sys.argv[1:]) | 33.932331 | 130 | 0.669621 | # TESS - ToastmastErs Simple Scheduler
# This simple script will create a shuffled list of suggested meeting roles for multiple meetings.
from random import shuffle
from os import linesep
import csv
import sys
def main(argv):
if(len(sys.argv) == 2):
try:
number_of_meetings = int(sys.argv[1])
except TypeError:
print "Please provide a number for 'number of meetings'. "
else:
number_of_meetings = 14
# full list of people and roles,
list_of_people = []
list_of_roles = []
# working list of people
available_people = []
meeting_people = []
# a list of completed meetings
multi_meeting_list = []
#cheap text holding list
write_lines = []
#keeps track of width of people so we can pad apppropriately
max_role_name_length = 0
max_person_name_length = 0
#files we're reading and writing
tm_file_name = "toastmasters.txt"
role_file_name = "roles.txt"
suggested_roles_text_file_name = "suggested_roles.txt"
suggested_roles_csv_file_name = "suggested_roles.csv"
#read TM file
tm_file = open(tm_file_name, 'r')
for line in tm_file:
tmp_line = line.strip()
if(tmp_line != ''):
list_of_people.append(tmp_line)
if len(tmp_line) > max_person_name_length:
max_person_name_length = len(tmp_line)
tm_file.close()
#read roles file
role_file = open(role_file_name,'r')
for line in role_file:
tmp_line = line.strip()
if(tmp_line != ''):
list_of_roles.append(line)
if len(tmp_line) > max_role_name_length:
max_role_name_length = len(tmp_line)
role_file.close()
# if we have enough people per meeting we can prevent overbooking
prevent_overbooking = False
if(len(list_of_roles) < len(list_of_people)):
prevent_overbooking = True
#Main segment where each meeting is populated
for meeting in range(1,number_of_meetings+1):
meeting_people = []
# Each meeting has a number of roles. (Toastmaster, Speaker 1, Evaluator 1, etc)
# We will shuffle the person list, select an available person and, if they have not been added to the meeting yet, add them.
for role in list_of_roles:
#if we're out of people for roles, refill the buffer
if(len(available_people)==0):
shuffle(list_of_people)
available_people.extend(list_of_people)
#select a person
proposed_member = available_people.pop()
#If we've already assigned a member to a previous role and we can prevent double-booking
while(proposed_member in meeting_people and prevent_overbooking):
# If we run into a case where a person was booked for two roles during a meeting
# dump the list of people on the back of the list (to prevent infinite loops of people)
# and bump them to the back of the newly padded list so they get recycled.
# It's not really pretty, but it gets the job done.
shuffle(list_of_people)
available_people.extend(list_of_people)
available_people.insert(0,proposed_member) #move member to back of list
proposed_member = available_people.pop()
# assuming we have a non-duplicate member (if we prevent overbooking), so append them to the meeting list
meeting_people.append(proposed_member)
# Now that we have a list of people, match people with roles
suggested_meeting_roles = zip(list_of_roles,meeting_people)
multi_meeting_list.append(suggested_meeting_roles)
write_lines = []
for i, mtg in enumerate(multi_meeting_list):
# do all the ugly text formatting and add it to the outgoing text
write_lines.append("Meeting #"+str(i+1)+linesep)
for role in mtg:
write_lines.append(role[0].strip()+(" "*(max_role_name_length+2-len(role[0])))+role[1].strip()+linesep)
write_lines.append(linesep)
with open(suggested_roles_text_file_name, 'w') as f:
for line in write_lines:
f.write(line)
csv_rows = []
for i, mtg in enumerate(multi_meeting_list):
csv_rows.append(("Meeting #"+str(i+1),))
for role in mtg:
csv_rows.append((role[0].strip(),role[1].strip()))
csv_rows.append("")
with open(suggested_roles_csv_file_name, 'wb') as f:
writer = csv.writer(f,dialect='excel')
writer.writerows(csv_rows)
if __name__ == "__main__":
main(sys.argv[1:]) | 4,229 | 0 | 23 |
e26da87d1a7e5fed466d77acba027ac3c6b8e4a9 | 2,671 | py | Python | src/core/src/tortuga/os_utility/tortugaSubprocess.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 33 | 2018-03-02T17:07:39.000Z | 2021-05-21T18:02:51.000Z | src/core/src/tortuga/os_utility/tortugaSubprocess.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 201 | 2018-03-05T14:28:24.000Z | 2020-11-23T19:58:27.000Z | src/core/src/tortuga/os_utility/tortugaSubprocess.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 23 | 2018-03-02T17:21:59.000Z | 2020-11-18T14:52:38.000Z | # Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from tortuga.exceptions.commandFailed import CommandFailed
# Convenience function for executing command.
def executeCommand(command):
""" Create subprocess and run it, return subprocess object. """
p = TortugaSubprocess(command)
p.run()
return p
# Convenience function for executing command that may fail, and we do not
# care about the failure.
def executeCommandAndIgnoreFailure(command):
"""
Create subprocess, run it, ignore any failures, and return
subprocess object.
"""
p = TortugaSubprocess(command)
try:
p.run()
except CommandFailed:
pass
return p
| 28.72043 | 74 | 0.67091 | # Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from tortuga.exceptions.commandFailed import CommandFailed
class TortugaSubprocess(subprocess.Popen):
    """subprocess.Popen with defaults suited to Tortuga usage.

    Differences from plain Popen: output is captured on pipes, the
    command runs through the shell, and run() raises CommandFailed on a
    nonzero exit status unless useExceptions is False.
    """

    # pylint: disable=too-many-locals
    def __init__(self, args, bufsize=0, executable=None, stdin=None,
                 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                 preexec_fn=None, close_fds=False, shell=True, cwd=None,
                 env=None, universal_newlines=False, startupinfo=None,
                 creationflags=0, useExceptions=True):
        """Construct the child process with Tortuga-friendly defaults."""
        subprocess.Popen.__init__(
            self, args, bufsize, executable, stdin, stdout, stderr,
            preexec_fn, close_fds, shell, cwd, env, universal_newlines,
            startupinfo, creationflags)
        # Captured output is filled in by run().
        self._stdout = None
        self._stderr = None
        self._args = args
        self._useExceptions = useExceptions

    def run(self, input_=None):
        """Run the subprocess to completion; return (stdout, stderr)."""
        out, err = subprocess.Popen.communicate(self, input_)
        self._stdout = out
        self._stderr = err
        if self._useExceptions and self.returncode != 0:
            raise CommandFailed(str(err.decode().rstrip()))
        return out, err

    def getArgs(self):
        """Return the command line originally passed in."""
        return self._args

    def getStdOut(self):
        """Return captured stdout (None before run())."""
        return self._stdout

    def getStdErr(self):
        """Return captured stderr (None before run())."""
        return self._stderr

    def getExitStatus(self):
        """Return the child's exit code."""
        return self.returncode


# Convenience function for executing command.
def executeCommand(command):
    """ Create subprocess and run it, return subprocess object. """
    proc = TortugaSubprocess(command)
    proc.run()
    return proc


# Convenience function for executing command that may fail, and we do not
# care about the failure.
def executeCommandAndIgnoreFailure(command):
    """
    Create subprocess, run it, ignore any failures, and return
    subprocess object.
    """
    proc = TortugaSubprocess(command)
    try:
        proc.run()
    except CommandFailed:
        pass
    return proc
| 111 | 1,300 | 23 |
7929cb67f574a0d18be4ad7d88be3c60517bdce5 | 1,289 | py | Python | cloudsimd/launchers/launch_utils/traffic_shaping.py | osrf/cloudsim-legacy | 01ea7dd2708ed9797a860ac839028ec62fd96a23 | [
"Apache-2.0"
] | null | null | null | cloudsimd/launchers/launch_utils/traffic_shaping.py | osrf/cloudsim-legacy | 01ea7dd2708ed9797a860ac839028ec62fd96a23 | [
"Apache-2.0"
] | null | null | null | cloudsimd/launchers/launch_utils/traffic_shaping.py | osrf/cloudsim-legacy | 01ea7dd2708ed9797a860ac839028ec62fd96a23 | [
"Apache-2.0"
] | 1 | 2021-03-16T15:00:51.000Z | 2021-03-16T15:00:51.000Z | import sshclient
from launch_db import get_constellation_data
from launch_db import log_msg
| 34.837838 | 73 | 0.671839 | import sshclient
from launch_db import get_constellation_data
from launch_db import log_msg
def log(msg, channel=__name__, severity="info"):
    # Thin wrapper over launch_db.log_msg; ``channel`` defaults to this
    # module's import path (the default is evaluated once at definition time).
    log_msg(msg, channel, severity)
def run_tc_command(constellation_name, machine_name_key,
                   keyPairName,
                   ip_address_key,
                   target_latency,
                   uplink_cap, downlink_cap):
    """Push traffic-shaping settings to a constellation machine over SSH.

    Sets the target outbound latency and the uplink/downlink byte limits
    via redis-cli on the remote host. ``uplink_cap``/``downlink_cap`` are
    given in bits (as in the task description) and converted to bytes,
    the unit used by the VRC network monitoring tools.

    machine_name_key is currently unused but kept for interface
    compatibility with callers.
    """
    constellation = get_constellation_data(constellation_name)
    key_directory = constellation['constellation_directory']
    ip = constellation[ip_address_key]
    # One SSH session serves all three redis-cli invocations (the original
    # code opened a fresh connection per command).
    ssh = sshclient.SshClient(key_directory, keyPairName, 'ubuntu', ip)
    commands = [
        'redis-cli set vrc_target_outbound_latency %s' % target_latency,
        # bits -> bytes conversion for the monitoring tools
        'redis-cli set vrc/bytes/limit/uplink %s' % (int(uplink_cap) / 8),
        'redis-cli set vrc/bytes/limit/downlink %s' % (int(downlink_cap) / 8),
    ]
    for cmd in commands:
        r = ssh.cmd(cmd)
        # Log every command result (the original skipped the uplink one).
        log("ssh %s = %s" % (cmd, r))
dee87b225dc9554e3dce793df7d81a7dd581f3cd | 2,698 | py | Python | parser/code_blocks/AK.py | ZaldivardeA/dstv_matcher | 2899c433c42f0a265cf0b377bd257c01c53e1569 | [
"MIT"
] | 1 | 2020-12-03T16:17:56.000Z | 2020-12-03T16:17:56.000Z | parser/code_blocks/AK.py | ZaldivardeA/dstv_matcher | 2899c433c42f0a265cf0b377bd257c01c53e1569 | [
"MIT"
] | null | null | null | parser/code_blocks/AK.py | ZaldivardeA/dstv_matcher | 2899c433c42f0a265cf0b377bd257c01c53e1569 | [
"MIT"
] | null | null | null | from .general import check_for_block_tag, check_for_comment, split_info_lines, read_face_column, dimension_reference
from typing import List, Union, Any
FORMAT: List[int] = [2, 1, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10]
# 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10, 11,12, 13,14, 15
| 32.506024 | 135 | 0.576353 | from .general import check_for_block_tag, check_for_comment, split_info_lines, read_face_column, dimension_reference
from typing import List, Union, Any
FORMAT: List[int] = [2, 1, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10, 1, 10]
# 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10, 11,12, 13,14, 15
def get_type_notch(char: str) -> Union[str, None]:
    """Map a DSTV notch-type character to its symbolic name.

    ' ' and 't' denote a tangential notch, 'w' a hole; any other
    character is unknown and yields None.
    """
    return {
        " ": "tangential",
        "t": "tangential",
        "w": "hole",
    }.get(char)


def read_values(line_info, face, reference, contour: List[Any]) -> bool:
    """Parse one AK contour line and append its point dict to *contour*.

    Returns False on success and True on any parse failure (malformed
    floats, short line, unknown notch type) so the caller can abort.
    """
    try:
        x_value = float(line_info[3])
        q_value = float(line_info[5])
        type_notch = get_type_notch(line_info[6])
        if type_notch is None:
            return True
        r_value = float(line_info[7])
        weld_v1 = float(line_info[9])
        weld_v2 = float(line_info[11])
        weld_v3 = float(line_info[13])
        weld_v4 = float(line_info[15])
    except (ValueError, TypeError, IndexError):
        # Narrowed from a bare ``except``: only expected parsing errors
        # should signal a malformed line; programming errors now propagate.
        return True
    contour.append({
        "face": face,
        "reference": reference,
        "x": x_value,
        "q": q_value,
        "r": r_value,
        "type_notch": type_notch,
        "weld_v1": weld_v1,
        "weld_v2": weld_v2,
        "weld_v3": weld_v3,
        "weld_v4": weld_v4,
    })
    return False
def read_line_info(line: str, prevFace: Union[str,None], prevReference: Union[str,None], contour: List[Any]) -> List[Union[str,None]]:
    """Parse one AK line, appending its point to *contour*.

    Returns [face, reference] to carry into the next line, or
    [None, None] on any parse failure.
    """
    line_info: List[str] = split_info_lines(line, FORMAT)
    face = read_face_column(line_info[1])
    if (face == "previous"):
        # a "previous" marker inherits the face of the preceding line
        face = prevFace
    if (face == None):
        return [None, None]
    reference = dimension_reference(line_info[4])
    if (reference == "previous"):
        reference = prevReference
    if (reference == None):
        return [None, None]
    # read_values returns True on failure
    if (read_values(line_info, face, reference, contour)):
        return [None, None]
    return [face, reference]
def ak_handle(index: int, lines: List[str], len_list: int, obj) -> int:
    """Consume AK (external contour) lines starting at *index*.

    Appends the parsed contour to ``obj.external_contours`` and returns
    the index of the next unconsumed line, or -1 on a malformed line.
    """
    prevReference = None
    prevFace = None
    # new contour
    new_contour = []
    while (index < len_list):
        line: str = lines[index]
        if (check_for_comment(line)):
            index += 1
            continue
        if (check_for_block_tag(line)):
            # a new block tag ends this contour; the caller re-reads this line
            obj.external_contours.append(new_contour)
            return index
        prevFace, prevReference = read_line_info(line, prevFace, prevReference, new_contour)
        if (prevFace == None or prevReference == None):
            return -1
        index += 1
    # end of input also closes the contour
    obj.external_contours.append(new_contour)
    return index
| 2,299 | 0 | 92 |
79f80e484561d0b01952d366471c57e5bc9c0587 | 6,804 | py | Python | vessel/common/models.py | RaphaelPrevost/Back2Shops | 5f2d369e82fe2a7b9b3a6c55782319b23d142dfd | [
"CECILL-B"
] | null | null | null | vessel/common/models.py | RaphaelPrevost/Back2Shops | 5f2d369e82fe2a7b9b3a6c55782319b23d142dfd | [
"CECILL-B"
] | 6 | 2021-03-31T19:21:50.000Z | 2022-01-13T01:46:09.000Z | vessel/common/models.py | RaphaelPrevost/Back2Shops | 5f2d369e82fe2a7b9b3a6c55782319b23d142dfd | [
"CECILL-B"
] | null | null | null | # -*- coding: utf-8 -*-
#############################################################################
#
# Copyright © Dragon Dollar Limited
# contact: contact@dragondollar.com
#
# This software is a collection of webservices designed to provide a secure
# and scalable framework to build e-commerce websites.
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-B
# license as circulated by CEA, CNRS and INRIA at the following URL
# " http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
#
#############################################################################
import copy
from B2SUtils import db_utils
| 41.487805 | 79 | 0.591858 | # -*- coding: utf-8 -*-
#############################################################################
#
# Copyright © Dragon Dollar Limited
# contact: contact@dragondollar.com
#
# This software is a collection of webservices designed to provide a secure
# and scalable framework to build e-commerce websites.
#
# This software is governed by the CeCILL-B license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL-B
# license as circulated by CEA, CNRS and INRIA at the following URL
# " http://www.cecill.info".
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL-B license and that you accept its terms.
#
#############################################################################
import copy
from B2SUtils import db_utils
class BaseObj(object):
    """Base class whose instances can serialize themselves to plain dicts."""

    def toDict(self):
        """Return a deep copy of the attributes as a dict, converting any
        nested BaseObj value (or homogeneous list of them) recursively."""
        snapshot = copy.deepcopy(self.__dict__)
        for key, value in snapshot.items():
            if isinstance(value, BaseObj):
                snapshot[key] = value.toDict()
            # only convert a list when its first element is a BaseObj
            if isinstance(value, list) and value and isinstance(value[0], BaseObj):
                snapshot[key] = [item.toDict() for item in value]
        return snapshot
class VesselInfo(BaseObj):
    """Basic vessel identity data (name, IMO/MMSI, call sign, type, flag)."""

    def __init__(self, **kwargs):
        # string fields all default to ''
        for attr in ('name', 'imo', 'mmsi', 'cs', 'type',
                     'country_isocode', 'country_name'):
            setattr(self, attr, kwargs.get(attr, ''))
        # photo URLs default to an empty list
        self.photos = kwargs.get('photos', [])
class VesselPos(BaseObj):
    """One reported vessel position sample (location, course, speed, time)."""

    def __init__(self, **kwargs):
        for attr in ('location', 'longitude', 'latitude',
                     'heading', 'speed', 'time', 'status'):
            setattr(self, attr, kwargs.get(attr, ''))
class VesselDetailInfo(VesselInfo):
    """VesselInfo plus voyage data: departure/arrival ports and positions."""

    def __init__(self, **kwargs):
        super(VesselDetailInfo, self).__init__(**kwargs)
        self.departure_portname = kwargs.get('departure_portname', '')
        self.departure_locode = kwargs.get('departure_locode', '')
        self.departure_time = kwargs.get('departure_time', '')
        self.arrival_portname = kwargs.get('arrival_portname', '')
        self.arrival_locode = kwargs.get('arrival_locode', '')
        self.arrival_time = kwargs.get('arrival_time', '')
        # each raw position dict becomes a VesselPos
        self.positions = [VesselPos(**pos)
                          for pos in kwargs.get('positions', [])]

    def update_portnames(self, conn):
        """Sync port names with the ``port`` table.

        For each locode we have: if we also have a name, upsert it into
        the table; otherwise fill our missing name from the table.
        """
        if self.departure_locode:
            if self.departure_portname:
                self._update_portname(conn,
                                      self.departure_portname,
                                      self.departure_locode)
            else:
                name = self._query_portname(conn, self.departure_locode) or ''
                self.departure_portname = name
        if self.arrival_locode:
            if self.arrival_portname:
                self._update_portname(conn,
                                      self.arrival_portname,
                                      self.arrival_locode)
            else:
                name = self._query_portname(conn, self.arrival_locode) or ''
                self.arrival_portname = name

    def _query_portname(self, conn, locode):
        """Return the stored name for *locode*, or None if not present."""
        results = db_utils.select(conn, "port",
                                  columns=("name", ),
                                  where={'locode': locode},
                                  limit=1)
        if len(results) > 0:
            return results[0][0]
        else:
            return None

    def _update_portname(self, conn, port_name, locode):
        """Insert or update the (locode, name) row; no-op on empty name."""
        if not port_name: return
        existing_name = self._query_portname(conn, locode)
        values = {'locode': locode, 'name': port_name}
        if existing_name is None:
            db_utils.insert(conn, "port", values=values)
        else:
            # only write when the name actually changed
            if existing_name != port_name:
                db_utils.update(conn, "port", values=values,
                                where={'locode': locode})
class PortInfo(BaseObj):
    """Port identity record (name, location code, country)."""

    def __init__(self, **kwargs):
        for attr in ('name', 'locode', 'country_isocode', 'country_name'):
            setattr(self, attr, kwargs.get(attr, ''))
class ContainerStatus(BaseObj):
    """One event in a container's shipment history."""

    def __init__(self, **kwargs):
        get = kwargs.get
        self.status = get('status', '')
        self.location = get('location', '')
        self.time = get('time', '')
        self.mode = get('mode', '')
        self.vessel_name = get('vessel_name', '')
        self.from_port = get('from_port', '')
        self.to_port = get('to_port', '')
class Ports(BaseObj):
    """Collection of route port fields for a shipment."""

    def __init__(self, **kwargs):
        self.first_pol = kwargs.get('first_pol', '')
        self.last_pod = kwargs.get('last_pod', '')
        # transshipment ports default to an empty list, not ''
        self.ts_port = kwargs.get('ts_port', [])
        self.por = kwargs.get('por', '')
        self.fnd = kwargs.get('fnd', '')
class ContainerInfo(BaseObj):
    """Container tracking record: id, shipment event lists, and ports."""

    def __init__(self, **kwargs):
        self.container = kwargs.get('container', '')
        # current and previous shipment cycles, each a list of events
        self.shipment_cycle = [
            ContainerStatus(**entry)
            for entry in kwargs.get('shipment_cycle', [])
        ]
        self.prv_shipment_cycle = [
            ContainerStatus(**entry)
            for entry in kwargs.get('prv_shipment_cycle', [])
        ]
        self.ports = Ports(**kwargs.get('ports', {}))
| 4,410 | 45 | 473 |
6dc89da0e6369c98d276198668c58b74f82f54d8 | 2,504 | py | Python | tests/test_smplify.py | JasonBoy1/mmhuman3d | 79b2665191115f3ed905e6afdf09990a8d484362 | [
"Apache-2.0"
] | 1 | 2021-12-03T04:17:52.000Z | 2021-12-03T04:17:52.000Z | tests/test_smplify.py | wmj142326/mmhuman3d | f107203714f9627a9308d4515d35ab8fbd0074a4 | [
"Apache-2.0"
] | null | null | null | tests/test_smplify.py | wmj142326/mmhuman3d | f107203714f9627a9308d4515d35ab8fbd0074a4 | [
"Apache-2.0"
] | null | null | null | import mmcv
import numpy as np
import torch
from mmhuman3d.core.parametric_model.builder import build_registrant
from mmhuman3d.models.builder import build_body_model
body_model_load_dir = 'data/body_models'
batch_size = 2
| 30.536585 | 79 | 0.676518 | import mmcv
import numpy as np
import torch
from mmhuman3d.core.parametric_model.builder import build_registrant
from mmhuman3d.models.builder import build_body_model
body_model_load_dir = 'data/body_models'
batch_size = 2
def test_smpl():
    """Smoke-test SMPLify: fit SMPL to joints produced by the model itself
    and assert the output contains no NaNs."""
    smplify_config = dict(mmcv.Config.fromfile('configs/smplify/smplify.py'))
    # prefer GPU when available, otherwise fall back to CPU
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    body_model_config = dict(
        type='SMPL',
        gender='neutral',
        num_betas=10,
        keypoint_src='smpl_45',
        keypoint_dst='smpl_45',
        model_path='data/body_models/smpl',
        batch_size=batch_size)
    smplify_config['body_model'] = body_model_config
    # keep the test fast: one optimisation epoch is enough for a smoke test
    smplify_config['num_epochs'] = 1
    smplify_config['use_one_betas_per_video'] = True
    smplify = build_registrant(smplify_config)
    # Generate keypoints from a default-posed SMPL body
    smpl = build_body_model(body_model_config)
    keypoints3d = smpl()['joints'].detach().to(device=device)
    keypoints3d_conf = torch.ones(*keypoints3d.shape[:2], device=device)
    # Run SMPLify
    smplify_output = smplify(
        keypoints3d=keypoints3d, keypoints3d_conf=keypoints3d_conf)
    # every tensor in the result must be NaN-free
    for k, v in smplify_output.items():
        if isinstance(v, torch.Tensor):
            assert not np.any(np.isnan(
                v.detach().cpu().numpy())), f'{k} fails.'
def test_smplx():
    """Smoke-test SMPLify-X: fit SMPL-X to joints produced by the model
    itself and assert the output contains no NaNs."""
    smplifyx_config = dict(mmcv.Config.fromfile('configs/smplify/smplifyx.py'))
    # prefer GPU when available, otherwise fall back to CPU
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    body_model_config = dict(
        type='SMPLX',
        gender='neutral',
        num_betas=10,
        use_face_contour=True,
        keypoint_src='smplx',
        keypoint_dst='smplx',
        model_path='data/body_models/smplx',
        batch_size=batch_size)
    smplifyx_config['body_model'] = body_model_config
    # keep the test fast: one optimisation epoch is enough for a smoke test
    smplifyx_config['num_epochs'] = 1
    smplifyx_config['use_one_betas_per_video'] = True
    smplifyx = build_registrant(smplifyx_config)
    smplx = build_body_model(body_model_config)
    keypoints3d = smplx()['joints'].detach().to(device=device)
    keypoints3d_conf = torch.ones(*keypoints3d.shape[:2], device=device)
    # Run SMPLify-X
    smplifyx_output = smplifyx(
        keypoints3d=keypoints3d, keypoints3d_conf=keypoints3d_conf)
    # every tensor in the result must be NaN-free
    for k, v in smplifyx_output.items():
        if isinstance(v, torch.Tensor):
            assert not np.any(np.isnan(
                v.detach().cpu().numpy())), f'{k} fails.'
| 2,231 | 0 | 46 |
0aaf3087ab8b452b83ab332928e5e2e21a1d6465 | 372 | py | Python | code/03-skimage-images/Open.py | rahulisaac/image-processing | 595e702e337729844625cd6d5d8252fcc9b63a6a | [
"CC-BY-4.0"
] | 2 | 2021-01-10T17:24:07.000Z | 2021-01-11T09:34:42.000Z | code/03-skimage-images/Open.py | rahulisaac/image-processing | 595e702e337729844625cd6d5d8252fcc9b63a6a | [
"CC-BY-4.0"
] | null | null | null | code/03-skimage-images/Open.py | rahulisaac/image-processing | 595e702e337729844625cd6d5d8252fcc9b63a6a | [
"CC-BY-4.0"
] | null | null | null | """
* Python program to open, display, and save an image.
*
"""
import skimage.io
import skimage.viewer
# read image
image = skimage.io.imread(fname="chair.jpg")
# display image and wait for keypress, using a resizable window
# NOTE(review): skimage.viewer was removed in scikit-image >= 0.20; this
# script assumes an older scikit-image release -- confirm the pinned version.
viewer = skimage.viewer.ImageViewer(image)
viewer.show()
# save a new version in .tif format
skimage.io.imsave(fname="chair.tif", arr=image)
| 21.882353 | 63 | 0.736559 | """
* Python program to open, display, and save an image.
*
"""
import skimage.io
import skimage.viewer
# read image
image = skimage.io.imread(fname="chair.jpg")
# display image and wait for keypress, using a resizable window
viewer = skimage.viewer.ImageViewer(image)
viewer.show()
# save a new version in .tif format
skimage.io.imsave(fname="chair.tif", arr=image)
| 0 | 0 | 0 |
715920b78cf3a81962fdc56f128b96fd50298cad | 1,478 | py | Python | src/stance/preprocessing/make_vocab.py | emilyallaway/connotation-embedding | 7f24f4a2ad0945189dd8961f8a85b0a2d4a19f6d | [
"MIT"
] | null | null | null | src/stance/preprocessing/make_vocab.py | emilyallaway/connotation-embedding | 7f24f4a2ad0945189dd8961f8a85b0a2d4a19f6d | [
"MIT"
] | null | null | null | src/stance/preprocessing/make_vocab.py | emilyallaway/connotation-embedding | 7f24f4a2ad0945189dd8961f8a85b0a2d4a19f6d | [
"MIT"
] | null | null | null | import pandas as pd
import json
from sklearn.feature_extraction.text import CountVectorizer
import argparse
from functools import reduce
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mode', help='What to do', required=True)
parser.add_argument('-d', '--data', help='Data file', required=False)
parser.add_argument('-o', '--out_file', help='Output file name prefix', required=False)
parser.add_argument('-c', '--col', help='Name of data column to use', required=False)
parser.add_argument('-k', '--k', help='Number of words to keep in vocabulary', required=False)
args = vars(parser.parse_args())
if args['mode'] == '1':
k = int(args['k'])
outname = '{}_top{}.txt'.format(args['out_file'], k)
prune_vocab_topk(args['data'], args['col'], outname, k=k)
else:
print("ERROR: doing nothing")
| 28.423077 | 98 | 0.635995 | import pandas as pd
import json
from sklearn.feature_extraction.text import CountVectorizer
import argparse
from functools import reduce
def load_original(inname, col, k=None):
    """Fit a CountVectorizer over column *col* of CSV file *inname*.

    Each cell of *col* holds a JSON-encoded list of token lists; the
    sublists are concatenated and space-joined into one document per row.
    Returns the fitted vocabulary mapping (word -> index). When *k* is
    given, the vocabulary is capped at the k most frequent words.
    """
    df = pd.read_csv(inname)
    str_data = []
    for i in df.index:
        # flatten the nested token lists for this row into a single list
        words = reduce(lambda x, y: x + y, json.loads(df.iloc[i][col]))
        str_data.append(' '.join(words))
    if k is None:
        CV = CountVectorizer()
    else:
        CV = CountVectorizer(max_features=k)
    CV.fit(str_data)
    return CV.vocabulary_
def prune_vocab_topk(inname, col, outname, k=10000):
    """Write the top-*k* vocabulary words of column *col* in *inname*
    to *outname*, one word per line."""
    vocab = load_original(inname, col, k)
    # BUG FIX: use a context manager so the output file is flushed and
    # closed even on error (the original left the handle open).
    with open(outname, 'w') as out_file:
        for w in vocab:
            out_file.write('{}\n'.format(w))
if __name__ == '__main__':
    # CLI entry point: mode '1' prunes the vocabulary of a CSV column to
    # its top-k most frequent words; any other mode is a no-op.
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--mode', help='What to do', required=True)
    parser.add_argument('-d', '--data', help='Data file', required=False)
    parser.add_argument('-o', '--out_file', help='Output file name prefix', required=False)
    parser.add_argument('-c', '--col', help='Name of data column to use', required=False)
    parser.add_argument('-k', '--k', help='Number of words to keep in vocabulary', required=False)
    args = vars(parser.parse_args())
    if args['mode'] == '1':
        # NOTE(review): mode 1 assumes -d/-o/-c/-k were all supplied even
        # though argparse marks them optional.
        k = int(args['k'])
        outname = '{}_top{}.txt'.format(args['out_file'], k)
        prune_vocab_topk(args['data'], args['col'], outname, k=k)
    else:
        print("ERROR: doing nothing")
| 524 | 0 | 46 |
8fa74f5878fd6fd1f76933cccee2f6c3baac37ae | 1,268 | py | Python | toolbox/nn/DistMult.py | LinXueyuanStdio/KGE-toolbox | 916842835e61ba99dde1409592977a2ec55f8aae | [
"Apache-2.0"
] | 2 | 2021-10-17T17:50:24.000Z | 2021-12-13T05:22:46.000Z | toolbox/nn/DistMult.py | LinXueyuanStdio/KGE-toolbox | 916842835e61ba99dde1409592977a2ec55f8aae | [
"Apache-2.0"
] | null | null | null | toolbox/nn/DistMult.py | LinXueyuanStdio/KGE-toolbox | 916842835e61ba99dde1409592977a2ec55f8aae | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
| 28.818182 | 91 | 0.623028 | import torch
import torch.nn as nn
import torch.nn.functional as F
class CoreDistMult(nn.Module):
def __init__(self, input_dropout_rate=0.2):
super(CoreDistMult, self).__init__()
self.dropout = nn.Dropout(input_dropout_rate)
def forward(self, h, r):
h = self.dropout(h)
r = self.dropout(r)
x = h * r
x = F.relu(x)
return x
class DistMult(nn.Module):
def __init__(self, num_entities, num_relations, embedding_dim, input_dropout_rate=0.2):
super(DistMult, self).__init__()
self.E = nn.Embedding(num_entities, embedding_dim, padding_idx=0)
self.R = nn.Embedding(num_relations, embedding_dim, padding_idx=0)
self.core = CoreDistMult(input_dropout_rate)
self.loss = nn.BCELoss()
self.b = nn.Parameter(torch.zeros(num_entities))
def init(self):
nn.init.xavier_normal_(self.E.weight.data)
nn.init.xavier_normal_(self.R.weight.data)
def forward(self, h_idx, r_idx):
h = self.E(h_idx)
r = self.R(r_idx)
t = self.core(h, r)
t = t.view(-1, self.embedding_dim)
x = torch.mm(t, self.E.weight.transpose(1, 0))
x = x + self.b.expand_as(x)
x = torch.sigmoid(x)
return x
| 1,006 | 14 | 179 |
6df03bd7229b468f7bdab1cd1fe73c6951d497a6 | 17,523 | py | Python | bot.py | dathbezumniy/kmd-sync-bot | bdc922b5b47ded2e851f16441de818a298c1394a | [
"MIT"
] | null | null | null | bot.py | dathbezumniy/kmd-sync-bot | bdc922b5b47ded2e851f16441de818a298c1394a | [
"MIT"
] | 2 | 2020-04-28T19:35:06.000Z | 2020-05-12T04:39:07.000Z | bot.py | dathbezumniy/kmd-sync-bot | bdc922b5b47ded2e851f16441de818a298c1394a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import time
import logging
import requests
from emoji import emojize
from functools import wraps
from pssh.clients import SSHClient
from pssh.exceptions import AuthenticationException
from requests.exceptions import RequestException
from telegram import ReplyKeyboardMarkup, ChatAction, ParseMode
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters,
ConversationHandler, DictPersistence)
# Conversation states (see ConversationHandler usage elsewhere in this file).
CONFIGURE, CHOOSE_SERVER, API_CALL, TYPING_REPLY, TYPING_CHOICE, TYPING_CONFIRMATION = range(6)
# TODO: proper logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
# Static reply keyboards shown in each conversation state.
configure_keyboard = [['Done']]
configure_markup = ReplyKeyboardMarkup(configure_keyboard, one_time_keyboard=True)
confirmation_keyboard = [['Yes', 'No']]
confirmation_markup = ReplyKeyboardMarkup(confirmation_keyboard, one_time_keyboard=True)
# TODO: dynamic keyboard so that user instead of typing server name can simply tap on the button
choose_server_keyboard = [['Pick a server']]
choose_server_markup = ReplyKeyboardMarkup(choose_server_keyboard, one_time_keyboard=True)
# Main API-call menu shown once a server has been picked.
api_calls_keyboard = [['Start all ACs', 'Stop all ACs', 'Get status'],
                      ['Start KMD', 'Stop KMD', 'Available tickers'],
                      ['Change server', 'Server info', 'Launch params']]
api_calls_markup = ReplyKeyboardMarkup(api_calls_keyboard, one_time_keyboard=True)
#typing action utility func
def send_typing_action(func):
"""Sends typing action while processing func command."""
@wraps(func)
return command_func
@send_typing_action
@send_typing_action
#TYPING_REPLY
# TODO: end-to-end test to check if the daemon is able to start.
# TODO: check if we are able to parse output and provide user with feedback on what is going on during the installation process
@send_typing_action
#TYPING_CHOICE
@send_typing_action
@send_typing_action
#### API CALLS
@send_typing_action
@send_typing_action
@send_typing_action
# STATUS
@send_typing_action
#START/STOP
@send_typing_action
@send_typing_action
@send_typing_action
@send_typing_action
@send_typing_action
@send_typing_action
#CLEANUP
@send_typing_action
@send_typing_action
@send_typing_action
@send_typing_action
def help(update, context):
"""Send a message when the command /help is issued."""
help_msg = 'This bot has 3 main states:\n'
help_msg += '-----> CONFIGURATION_STATE\n'
help_msg += '-----> PICK_SERVER_STATE\n'
help_msg += '-----> API_CALL_STATE\n'
help_msg += ' \n'
help_msg += 'Commands that are accessible throughout all states:\n'
help_msg += '/start - sets up a new server.\n'
help_msg += '/help - prints this message.\n'
help_msg += ' \n'
help_msg += 'CONFIGURE_STATE:\n'
help_msg += 'You can trigger that state with /start\n'
help_msg += 'After you have provided data in the following format (server_name,ip,rootpass), simply tap Done and bot will start installation on a new server.\n'
help_msg += 'It usually takes around 2-3 minutes for bot to install/download all dependencies and start API.\n'
help_msg += 'If you\'ve provided server ip with already running API, bot will skip installation and forward you to PICK_SERVER_STATE.\n'
help_msg += ' \n'
help_msg += 'PICK_SERVER_STATE:\n'
help_msg += 'In this state you can only pick a server that you\'ve previously added with /start command. Bot will forward you to API_CALL_STATE after you have successfully picked a server.\n'
help_msg += ' \n'
help_msg += 'API_CALL_STATE:\n'
help_msg += 'You will be able to see all available commands on the keyboard. Other than the keyboard commands there are few others:\n'
help_msg += '/start_sync AXO BET PANGEA - start tickers individually.\n'
help_msg += ' /stop_sync AXO BET PANGEA - stop tickers individually with optional cleanup.\n'
update.message.reply_text(help_msg)
def error(update, context):
"""Log Errors caused by Updates."""
logger.warning('Update "{}" caused error "{}"\n\n'.format(update, context.error))
if __name__ == '__main__':
main()
| 41.721429 | 270 | 0.672259 | #!/usr/bin/env python3
import os
import time
import logging
import requests
from emoji import emojize
from functools import wraps
from pssh.clients import SSHClient
from pssh.exceptions import AuthenticationException
from requests.exceptions import RequestException
from telegram import ReplyKeyboardMarkup, ChatAction, ParseMode
from telegram.ext import (Updater, CommandHandler, MessageHandler, Filters,
ConversationHandler, DictPersistence)
#STATES
# Conversation-state identifiers consumed by the ConversationHandler in main().
CONFIGURE, CHOOSE_SERVER, API_CALL, TYPING_REPLY, TYPING_CHOICE, TYPING_CONFIRMATION = range(6)
# TODO: proper logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
#keyboards
# One-time reply keyboards shown to the user in each conversation state.
configure_keyboard = [['Done']]
configure_markup = ReplyKeyboardMarkup(configure_keyboard, one_time_keyboard=True)
confirmation_keyboard = [['Yes', 'No']]
confirmation_markup = ReplyKeyboardMarkup(confirmation_keyboard, one_time_keyboard=True)
# TODO: dynamic keyboard so that user instead of typing server name can simply tap on the button
choose_server_keyboard = [['Pick a server']]
choose_server_markup = ReplyKeyboardMarkup(choose_server_keyboard, one_time_keyboard=True)
# Main API-state keyboard: each button text matches a regex handler in main().
api_calls_keyboard = [['Start all ACs', 'Stop all ACs', 'Get status'],
                      ['Start KMD', 'Stop KMD', 'Available tickers'],
                      ['Change server', 'Server info', 'Launch params']]
api_calls_markup = ReplyKeyboardMarkup(api_calls_keyboard, one_time_keyboard=True)
def main():
    """Build the bot updater, wire up the conversation state machine and poll.

    Reads the bot token from the SYNC_BOT_TOKEN environment variable
    (raises KeyError if unset).
    """
    # DictPersistence keeps user_data in memory only — registered servers are
    # lost on restart and must be re-added with /start.
    bot_persistence = DictPersistence()
    updater = Updater(os.environ['SYNC_BOT_TOKEN'], persistence=bot_persistence, use_context=True)
    dp = updater.dispatcher
    conv_handler = ConversationHandler(
        entry_points=[CommandHandler('start', start)],
        states={
            # CONFIGURE: user tapped Done after typing server credentials.
            CONFIGURE: [MessageHandler(Filters.regex('^(Done)$'), configure)],
            # TYPING_REPLY: free-text "name,ip,rootpass" line from the user.
            TYPING_REPLY: [MessageHandler(Filters.text, received_config_information)],
            CHOOSE_SERVER: [MessageHandler(Filters.regex('^(Pick a server)$'), make_a_choice)],
            # TYPING_CHOICE: free-text server name from the user.
            TYPING_CHOICE: [MessageHandler(Filters.text, received_server_choice)],
            # API_CALL: one regex handler per keyboard button plus slash commands.
            API_CALL: [MessageHandler(Filters.regex('^(Server info)$'), show_current_server),
                       MessageHandler(Filters.regex('^(Start all ACs)$'), start_sync_all),
                       MessageHandler(Filters.regex('^(Start KMD)$'), start_kmd),
                       MessageHandler(Filters.regex('^(Stop KMD)$'), stop_kmd),
                       MessageHandler(Filters.regex('^(Available tickers)$'), get_available_tickers),
                       MessageHandler(Filters.regex('^(Launch params)$'), dummy_func),
                       MessageHandler(Filters.regex('^(Restart API)$'), dummy_func),
                       MessageHandler(Filters.regex('^(Stop all ACs)$'), stop_sync_all),
                       MessageHandler(Filters.regex('^(Get status)$'), get_current_sync_status),
                       MessageHandler(Filters.regex('^(Change server)$'), make_a_choice),
                       MessageHandler(Filters.document.mime_type("text/x-python"), dummy_func),
                       CommandHandler('setup_binary', setup_binary),
                       CommandHandler('start_sync', start_sync),
                       CommandHandler('stop_sync', stop_sync)],
            # TYPING_CONFIRMATION: Yes/No answer to a cleanup prompt.
            TYPING_CONFIRMATION: [MessageHandler(Filters.regex('^(Yes)$'), cleanup),
                                  MessageHandler(Filters.regex('^(No)$'), no_cleanup)],
        },
        fallbacks=[CommandHandler('help', help),
                   CommandHandler('start', start)]
    )
    dp.add_handler(conv_handler)
    dp.add_error_handler(error)
    updater.start_polling()
    # Block until interrupted (SIGINT/SIGTERM).
    updater.idle()
#typing action utility func
def send_typing_action(func):
    """Sends typing action while processing func command."""
    @wraps(func)
    def typing_wrapper(update, context, *args, **kwargs):
        # Show the "typing…" indicator in the chat before running the handler.
        chat_id = update.effective_message.chat_id
        context.bot.send_chat_action(chat_id=chat_id, action=ChatAction.TYPING)
        return func(update, context, *args, **kwargs)
    return typing_wrapper
@send_typing_action
def dummy_func(update, context):
    """Placeholder handler for features that are not implemented yet.

    Always replies with an in-development notice and stays in API_CALL.
    """
    # Bug fix: this was defined as dummy_func() with no parameters, yet it is
    # registered as a dispatcher handler (and wrapped by send_typing_action,
    # which calls func(update, context, ...)) — invoking it raised TypeError,
    # and its body referenced an unbound name 'update'.
    update.message.reply_text('this function is in development', reply_markup=api_calls_markup)
    return API_CALL
@send_typing_action
def start(update, context):
    """/start entry point: begin registering a new komodo sync server.

    Prompts for "server_name,ip,rootpass" and moves to TYPING_REPLY where
    received_config_information parses the answer.
    """
    update.message.reply_text('Hi! Lets configure a new komodo sync server! Please provide data in the following format: server_name,ip,rootpass')
    # Lazily create the per-user server list on the first /start; subsequent
    # calls keep the already registered servers.
    try:
        if context.user_data['servers']:
            pass
    except KeyError:
        context.user_data['servers'] = []
    context.user_data['new_server'] = {}
    context.user_data['KMD'] = 0 #not ready for cleanup
    return TYPING_REPLY
#TYPING_REPLY
def received_config_information(update, context):
    """Parse the "server_name,ip,rootpass" reply and stash it for configure().

    On malformed input the user is re-prompted and the conversation stays in
    TYPING_REPLY instead of crashing.
    """
    try:
        # Robustness fix: the bare 3-way unpack raised an uncaught ValueError
        # whenever the user's message did not contain exactly two commas.
        # Also tolerate whitespace around the fields ("name, ip, pass").
        name, ip, rootpass = (part.strip() for part in update.message.text.split(","))
    except ValueError:
        update.message.reply_text('Please provide data in the following format: server_name,ip,rootpass')
        return TYPING_REPLY
    context.user_data['new_server'] = {'name' : name, 'ip' : ip, 'pass' : rootpass}
    update.message.reply_text("Neat! Now press Done to start the setup.", reply_markup=configure_markup)
    return CONFIGURE
# TODO: end-to-end test to check if the daemon is able to start.
# TODO: check if we are able to parse output and provide user with feedback on what is going on during the installation process
@send_typing_action
def configure(update, context):
    """Install the sync API on the server the user just described.

    Three phases: (1) probe the ip for an already-running API and skip the
    install if found; (2) verify root SSH credentials; (3) run the setup
    script over SSH and re-probe the API after a fixed wait.
    Returns CHOOSE_SERVER on success, CONFIGURE on any failure.
    """
    new_server = context.user_data['new_server']
    ip = new_server['ip']
    rootpass = new_server['pass']
    #check if there's already API running on the server
    try:
        r = requests.get('http://{}'.format(ip)).json()
        # The API's root endpoint greets with a message containing "Hi".
        if "Hi" in r['message']:
            update.message.reply_text("Seems like setup is already done on this server. Now you should pick a server.", reply_markup=choose_server_markup)
            context.user_data['servers'].append(new_server)
            return CHOOSE_SERVER
    except RequestException:
        # No API yet — fall through to a fresh install.
        pass
    #check if auth is correct
    try:
        client = SSHClient(ip, user='root', password=rootpass)
        client.run_command('whoami', sudo=True)
        update.message.reply_text("Auth credentials ok.")
    except AuthenticationException:
        update.message.reply_text("Auth credentials fail. Start-over with /start")
        return CONFIGURE
    update.message.reply_text("Starting fresh server setup, it will take a few minutes...")
    command = "wget https://raw.githubusercontent.com/dathbezumniy/kmd-sync-api/master/sync_api_setup.sh " \
              "&& chmod u+x sync_api_setup.sh && ./sync_api_setup.sh"
    output = client.run_command(command, sudo=True)
    #wait until all dependencies downloaded/installed then check if API is up
    # NOTE(review): fixed 200 s wait — presumably sized to the setup script's
    # typical runtime; the install is not actually polled for completion.
    time.sleep(200)
    try:
        r = requests.get('http://{}'.format(ip)).json()
        if "Hi" in r['message']:
            update.message.reply_text("Seems like setup is done and API is up. Now you should pick a server.", reply_markup=choose_server_markup)
            context.user_data['servers'].append(new_server)
            return CHOOSE_SERVER
    except RequestException:
        update.message.reply_text("Something went wrong. API didn't start, you can try to start over the configuration with /start")
        return CONFIGURE
    # Reached when the API answered but without the expected greeting.
    update.message.reply_text("Something went wrong. API didn't start, you can try to start over the configuration with /start")
    return CONFIGURE
#TYPING_CHOICE
@send_typing_action
def received_server_choice(update, context):
    """Match the typed name against registered servers and enter the API state."""
    chosen_name = update.message.text
    for candidate in context.user_data['servers']:
        if candidate['name'] != chosen_name:
            continue
        context.user_data['current_server'] = candidate
        update.message.reply_text('Now you are in the api state, here you should setup a binary first. \nUse: /setup_binary [link_to_a_downloadable_binaries_in.zip]', reply_markup=api_calls_markup)
        return API_CALL
    # No registered server carries that name — ask again.
    update.message.reply_text('Something might be wrong, are you sure you typed the name of the server correctly? try again', reply_markup=choose_server_markup)
    return CHOOSE_SERVER
@send_typing_action
def make_a_choice(update, context):
    """Let the user pick one of the registered servers.

    Auto-selects when exactly one server exists (-> API_CALL); lists the
    options when several exist (-> TYPING_CHOICE); sends the user back to
    configuration when none are registered (-> CONFIGURE).
    """
    available_servers = context.user_data['servers']
    number_of_servers = len(available_servers)
    if number_of_servers == 1:
        update.message.reply_text('Currently you have registered only one server. I\'m gonna pick it for you. Now you are in the API state, here you should setup a binary first.\nUse /setup_binary [link-to-a-downloadable-binaries-in.zip]', reply_markup=api_calls_markup)
        context.user_data['current_server'] = available_servers[0]
        return API_CALL
    elif number_of_servers > 1:
        update.message.reply_text('To pick a server just reply with a name. Currently you registered {} servers. Here they are:'.format(number_of_servers))
        msg = ''
        for server in available_servers:
            msg += '{} --> {}\n'.format(server['name'], server['ip'])
        update.message.reply_text(msg)
        return TYPING_CHOICE
    # Zero servers: configuration never completed.
    update.message.reply_text('Something probably went wrong on the configuration stage, you have no registered servers. try to start over with /start')
    return CONFIGURE
#### API CALLS
@send_typing_action
def setup_binary(update, context):
    """/setup_binary <url>: tell the sync API to download a daemon binary zip."""
    server_ip = context.user_data['current_server']['ip']
    payload = {'link' : context.args[0]}
    response = requests.post('http://{}/upload_binary'.format(server_ip), data=payload).json()
    update.message.reply_text(response, reply_markup=api_calls_markup)
    return API_CALL
@send_typing_action
def get_available_tickers(update, context):
    """Ask the sync API which assetchain tickers it can launch and relay them."""
    server_ip = context.user_data['current_server']['ip']
    tickers = requests.get('http://{}/tickers_list'.format(server_ip)).json()
    update.message.reply_text(tickers, reply_markup=api_calls_markup)
    return API_CALL
@send_typing_action
def get_launch_params(update, context):
    """Fetch the launch parameters of the available tickers and relay them."""
    server_ip = context.user_data['current_server']['ip']
    params = requests.get('http://{}/tickers_params'.format(server_ip)).json()
    update.message.reply_text(params, reply_markup=api_calls_markup)
    return API_CALL
# STATUS
@send_typing_action
def get_current_sync_status(update, context):
    """Fetch sync progress for all assetchains and render an HTML <pre> table.

    Expects the API's /sync_stats_all payload: {'amount': N, 'stats': {key:
    {'coin', 'synced', 'blocks', 'longestchain'}, ...}}.
    """
    msg = requests.get('http://{}/sync_stats_all'.format(context.user_data['current_server']['ip'])).json()
    amount = int(msg['amount'])
    stats = msg['stats']
    reply = '<pre>Currently {} assetchains are syncing:\n'.format(amount)
    reply += 'TICKER |SYNC| GOT | TOTAL | %\n'
    if amount:
        # Refactor: the two branches previously duplicated the whole 5-line
        # row expression, differing only in the emoji; the dict keys were
        # iterated but never used.
        for ticker_stats in stats.values():
            reply += _format_status_row(ticker_stats)
    reply += "</pre>"
    update.message.reply_text(reply, reply_markup=api_calls_markup, parse_mode=ParseMode.HTML)
    return API_CALL
def _format_status_row(v):
    """One fixed-width table row: coin, sync icon, block counts and percent."""
    icon_alias = ":white_check_mark:" if v['synced'] else ":no_entry:"
    icon = emojize(icon_alias, use_aliases=True)
    blocks = str(v['blocks'])
    longest = str(v['longestchain'])
    percent = "{:.0%}".format(zero_division_fix(v['blocks'], v['longestchain']))
    # Column widths match the original hand-padded layout (negative pad -> "").
    return (v['coin'] + " " * (10 - len(v['coin']))
            + icon + " " * (9 - len(icon))
            + blocks + " " * (9 - len(blocks))
            + longest + " " * (9 - len(longest))
            + percent + "\n")
def zero_division_fix(blocks, longestchain):
    """Return blocks/longestchain, or 0 when longestchain is zero/falsy."""
    if not longestchain:
        return 0
    return blocks / longestchain
#START/STOP
@send_typing_action
def start_sync(update, context):
    """/start_sync TICKER [TICKER ...]: start each requested assetchain."""
    server_ip = context.user_data['current_server']['ip']
    for ticker in context.args:
        # One API call and one feedback message per requested ticker.
        response = requests.get('http://{}/sync_start/{}'.format(server_ip, ticker)).json()
        update.message.reply_text(response, reply_markup=api_calls_markup)
    return API_CALL
@send_typing_action
def stop_sync(update, context):
    """/stop_sync TICKER [TICKER ...]: stop each requested assetchain.

    After stopping, asks Yes/No and moves to TYPING_CONFIRMATION, where
    cleanup()/no_cleanup() handle the answer.
    """
    for ticker in context.args:
        msg = requests.get('http://{}/sync_stop/{}'.format(context.user_data['current_server']['ip'], ticker)).json()
        update.message.reply_text(msg, reply_markup=confirmation_markup)
    # NOTE(review): if the user answers Yes, cleanup() wipes ALL assetchain
    # folders (the KMD flag is 0 here), not just the tickers stopped above —
    # confirm that is the intended behavior.
    return TYPING_CONFIRMATION
@send_typing_action
def start_kmd(update, context):
    """Start the KMD daemon on the current server and clear the cleanup flag."""
    msg = requests.get('http://{}/sync_start/{}'.format(context.user_data['current_server']['ip'], 'KMD')).json()
    update.message.reply_text(msg)
    update.message.reply_text('In case of reindexing it might take a few minutes for KMD to appear in Get status.', reply_markup=api_calls_markup)
    # A running daemon must not be cleaned up; cleanup() checks this flag.
    context.user_data['KMD'] = 0 #not ready for cleanup
    return API_CALL
@send_typing_action
def stop_kmd(update, context):
    """Stop the KMD daemon, mark it cleanable and offer a cleanup prompt."""
    msg = requests.get('http://{}/sync_stop/{}'.format(context.user_data['current_server']['ip'], 'KMD')).json()
    update.message.reply_text(msg)
    # Flag consumed by cleanup(): 1 means "clean the KMD folder only".
    context.user_data['KMD'] = 1 #ready for cleanup
    # Fixed 2+8 s pause — presumably enough for the daemon to shut down
    # before its data folder may be deleted (TODO confirm, not polled).
    time.sleep(2)
    update.message.reply_text('Lets wait few more seconds for the daemon to stop, before the cleanup.')
    time.sleep(8)
    update.message.reply_text('Would you like to cleanup KMD sync progress?', reply_markup=confirmation_markup)
    return TYPING_CONFIRMATION
@send_typing_action
def start_sync_all(update, context):
    """Start syncing every available assetchain on the current server."""
    server_ip = context.user_data['current_server']['ip']
    response = requests.get('http://{}/sync_start_all'.format(server_ip)).json()
    update.message.reply_text(response, reply_markup=api_calls_markup)
    return API_CALL
@send_typing_action
def stop_sync_all(update, context):
    """Stop all assetchains, wait for shutdown and offer a full cleanup."""
    msg = requests.get('http://{}/sync_stop_all'.format(context.user_data['current_server']['ip'])).json()
    update.message.reply_text(msg)
    update.message.reply_text('Waiting 30 secs for all tickers to stop with a following clean up of assetchains folders')
    # Fixed 30 s grace period before folders may be deleted (not polled).
    time.sleep(30)
    update.message.reply_text('All tickers have stopped. Are you sure you want to proceed(Yes/No) and delete all assetchain folders? All sync progress of subchains will be lost.', reply_markup=confirmation_markup)
    return TYPING_CONFIRMATION
#CLEANUP
@send_typing_action
def cleanup(update, context):
    """Handle a "Yes" cleanup confirmation.

    If the KMD flag is set (set by stop_kmd), wipe only the KMD folder;
    otherwise wipe all assetchain folders. Returns to API_CALL either way.
    """
    if context.user_data['KMD']:
        msg = requests.get('http://{}/clean_folder/{}'.format(context.user_data['current_server']['ip'], 'KMD')).json()
        update.message.reply_text(msg)
        time.sleep(2)
        update.message.reply_text("Finished clean up of KMD. Fresh start, sir.", reply_markup=api_calls_markup)
        # Reset so a later "Yes" does not target KMD again.
        context.user_data['KMD'] = 0 #not ready for cleanup
        return API_CALL
    msg = requests.get('http://{}/clean_assetchain_folders'.format(context.user_data['current_server']['ip'])).json()
    update.message.reply_text(msg)
    time.sleep(2)
    update.message.reply_text("Finished clean up. Fresh start, sir.", reply_markup=api_calls_markup)
    return API_CALL
@send_typing_action
def no_cleanup(update, context):
    """User declined the cleanup: keep sync data, return to the API keyboard."""
    reply = "Very well, sir. No cleanup for you."
    update.message.reply_text(reply, reply_markup=api_calls_markup)
    return API_CALL
@send_typing_action
def show_current_server(update, context):
    """Tell the user which registered server is currently selected."""
    server = context.user_data['current_server']
    summary = 'Currently you are on {}, --> {}'.format(server['name'], server['ip'])
    update.message.reply_text(summary, reply_markup=api_calls_markup)
    return API_CALL
@send_typing_action
def help(update, context):
    """Send a message when the command /help is issued."""
    # Assemble the fixed help text once via join instead of repeated +=.
    sections = [
        'This bot has 3 main states:\n',
        '-----> CONFIGURATION_STATE\n',
        '-----> PICK_SERVER_STATE\n',
        '-----> API_CALL_STATE\n',
        ' \n',
        'Commands that are accessible throughout all states:\n',
        '/start - sets up a new server.\n',
        '/help - prints this message.\n',
        ' \n',
        'CONFIGURE_STATE:\n',
        'You can trigger that state with /start\n',
        'After you have provided data in the following format (server_name,ip,rootpass), simply tap Done and bot will start installation on a new server.\n',
        'It usually takes around 2-3 minutes for bot to install/download all dependencies and start API.\n',
        'If you\'ve provided server ip with already running API, bot will skip installation and forward you to PICK_SERVER_STATE.\n',
        ' \n',
        'PICK_SERVER_STATE:\n',
        'In this state you can only pick a server that you\'ve previously added with /start command. Bot will forward you to API_CALL_STATE after you have successfully picked a server.\n',
        ' \n',
        'API_CALL_STATE:\n',
        'You will be able to see all available commands on the keyboard. Other than the keyboard commands there are few others:\n',
        '/start_sync AXO BET PANGEA - start tickers individually.\n',
        ' /stop_sync AXO BET PANGEA - stop tickers individually with optional cleanup.\n',
    ]
    update.message.reply_text(''.join(sections))
def error(update, context):
    """Log Errors caused by Updates."""
    details = 'Update "{}" caused error "{}"\n\n'.format(update, context.error)
    logger.warning(details)
# Run the bot only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| 12,727 | 0 | 490 |