max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
backend/apps/core/utils/celery_utils.py | jaliste/mi_coop | 54 | 12758651 | <gh_stars>10-100
import os
import redis
from django.conf import settings
from apps.core import celery_app
import boto3
def active_and_reserved_tasks_by_queue_name(queue_name):
    """Return the total number of outstanding tasks for a celery queue.

    Sums three sources: tasks currently being executed (``active``), tasks
    already fetched by a worker but not yet started (``reserved``), and
    messages still waiting in the redis broker list.

    i.active() returns a dictionary where keys are worker names
    and values are lists of active tasks for the worker

    :param queue_name: celery queue name (assumed equal to the redis list name)
    :return: active + reserved + queued task count (int)
    """
    def _count_for_queue(tasks_by_worker):
        # The inspect calls return None when no worker replies, hence the guard.
        if not tasks_by_worker:
            return 0
        return sum(
            1
            for worker_tasks in tasks_by_worker.values()
            for task in worker_tasks
            if task["delivery_info"]["routing_key"] == queue_name
        )

    print("inspecting celery queue")
    i = celery_app.control.inspect()
    active_count = _count_for_queue(i.active())
    reserved_count = _count_for_queue(i.reserved())
    print("connecting to redis")
    r = redis.Redis(
        host=settings.REDIS_SERVICE_HOST,
        port=6379,
        db=1,
        charset="utf-8",
        decode_responses=True,
    )
    # BUG FIX: the list name was previously hard-coded to "default", silently
    # ignoring `queue_name`. Behaviour is identical for the existing
    # publish_celery_metrics() caller, which only passes "default".
    queue_length = r.llen(queue_name)
    total = active_count + reserved_count + queue_length
    print(f"Active count: {active_count}")
    print(f"Reserved count: {reserved_count}")
    print(f"Queue length: {queue_length}")
    print(f"Total: {total}")
    return total
def publish_queue_metrics(queue_names):
    """Collect the outstanding-task count per queue and, outside DEBUG,
    publish the numbers as CloudWatch custom metrics.

    :param queue_names: iterable of celery queue names to inspect
    :return: dict mapping queue name -> outstanding task count
    """
    print("gathering queue data")
    metric_data = {}
    for name in queue_names:
        metric_data[name] = active_and_reserved_tasks_by_queue_name(name)
    print("sending cloudwatch data")
    if not settings.DEBUG:
        print("connecting aws api")
        cloudwatch = boto3.client("cloudwatch")
        payload = [
            {"MetricName": metric_name, "Value": value}
            for metric_name, value in metric_data.items()
        ]
        cloudwatch.put_metric_data(
            Namespace=os.environ.get("FULL_APP_NAME", "FULL_APP_NAME"),
            MetricData=payload,
        )
    return metric_data
def publish_celery_metrics():
    """Entry point: publish queue metrics for the default celery queue."""
    print("starting task")
    return publish_queue_metrics(["default"])
| 2.125 | 2 |
tests/test_types.py | trickeydan/j5-dev | 10 | 12758652 | """Test custom types."""
import pytest
from j5.types import ImmutableDict, ImmutableList
def test_immutable_dict_get_member() -> None:
    """Test that we can get an item from an ImmutableDict."""
    immutable = ImmutableDict[str, str]({'foo': 'bar'})
    assert immutable['foo'] == 'bar'
def test_immutable_dict_iterator() -> None:
    """Test that the iterator works (it yields the dict's values)."""
    source = {'foo': 'bar', 'bar': 'doo', 'doo': 'foo'}
    assert list(ImmutableDict(source)) == list(source.values())
def test_immutable_dict_length() -> None:
    """Test that the length operation works."""
    source = {'foo': 'bar', 'bar': 'doo', 'doo': 'foo'}
    assert len(ImmutableDict(source)) == 3
def test_immutable_dict_cannot_set_member() -> None:
    """Test that the immutable dict rejects item assignment."""
    immutable = ImmutableDict({'foo': 'bar', 'bar': 'doo', 'doo': 'foo'})
    with pytest.raises(TypeError):
        immutable['foo'] = '12'  # type: ignore
def test_immutable_dict_repr() -> None:
    """Test that the repr of the immutable dict is correct."""
    immutable = ImmutableDict({'foo': 'bar', 'bar': 'doo'})
    assert repr(immutable) == "ImmutableDict({'foo': 'bar', 'bar': 'doo'})"
def test_immutable_list_construct_from_list() -> None:
    """Test that we can construct an ImmutableList from a list."""
    source = [1, 3, 4, 6, 2]
    assert list(ImmutableList[int](source)) == source
def test_immutable_list_construct_from_generator() -> None:
    """Test that we can construct an ImmutableList from a generator."""
    source = [1, 3, 4, 6, 2]
    built = ImmutableList[int](value for value in source)
    assert list(built) == source
def test_immutable_list_get_item() -> None:
    """Test indexing an ImmutableList, including bad indices."""
    immutable = ImmutableList[int]([1, 3, 4, 6, 2])
    assert immutable[0] == 1
    assert immutable[-1] == 2
    # Out-of-range and wrongly-typed indices must raise, like a list.
    with pytest.raises(IndexError):
        assert immutable[7]
    with pytest.raises(TypeError):
        assert immutable["foo"]  # type:ignore
def test_immutable_list_length() -> None:
    """Test that we can get the list length."""
    assert len(ImmutableList[int]([1, 3, 4, 6, 2])) == 5
def test_immutable_list_cannot_set_item() -> None:
    """Test that the immutable list rejects item assignment."""
    immutable = ImmutableList[int]([1, 3, 4, 6, 2])
    with pytest.raises(TypeError):
        immutable[0] = 12  # type: ignore
def test_immutable_list_repr() -> None:
    """Test that the repr of the immutable list is correct."""
    immutable = ImmutableList([1, 3, 4, 6, 2])
    assert repr(immutable) == "ImmutableList([1, 3, 4, 6, 2])"
| 3.109375 | 3 |
pymisp/tools/vtreportobject.py | Latonis/PyMISP | 1 | 12758653 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from typing import Optional
import requests
try:
import validators # type: ignore
has_validators = True
except ImportError:
has_validators = False
from .abstractgenerator import AbstractMISPObjectGenerator
from .. import InvalidMISPObject
class VTReportObject(AbstractMISPObjectGenerator):
    '''
    VirusTotal Report

    :apikey: VirusTotal API key (private works, but only public features are supported right now)
    :indicator: IOC to search VirusTotal for
    '''

    def __init__(self, apikey: str, indicator: str, vt_proxies: Optional[dict] = None, **kwargs):
        """Validate the indicator, fetch its VT report and build attributes.

        :raises InvalidMISPObject: when the indicator is not a URL/md5/sha1/sha256
            or when VirusTotal rejects the lookup.
        """
        super().__init__('virustotal-report', **kwargs)
        indicator = indicator.strip()
        self._resource_type = self.__validate_resource(indicator)
        # Guard clause: bail out early on unusable indicators.
        if not self._resource_type:
            error_msg = "A valid indicator is required. (One of type url, md5, sha1, sha256). Received '{}' instead".format(indicator)
            raise InvalidMISPObject(error_msg)
        self._proxies = vt_proxies
        self._report = self.__query_virustotal(apikey, indicator)
        self.generate_attributes()

    def get_report(self):
        """Return the raw VirusTotal report dict."""
        return self._report

    def generate_attributes(self):
        ''' Parse the VirusTotal report for relevant attributes '''
        self.add_attribute("last-submission", value=self._report["scan_date"])
        self.add_attribute("permalink", value=self._report["permalink"])
        ratio = "{}/{}".format(self._report["positives"], self._report["total"])
        self.add_attribute("detection-ratio", value=ratio)

    def __validate_resource(self, ioc: str):
        '''
        Validate the data type of an indicator.
        Domains and IP addresses aren't supported because
        they don't return the same type of data as the URLs/files do

        :ioc: Indicator to search VirusTotal for
        :return: "url", "file", or False when the indicator is unsupported
        '''
        if not has_validators:
            raise Exception('You need to install validators: pip install validators')
        if validators.url(ioc):
            return "url"
        # md5 (32), sha1 (40) or sha256 (64) hex digests all map to "file".
        elif re.match(r"\b([a-fA-F0-9]{32}|[a-fA-F0-9]{40}|[a-fA-F0-9]{64})\b", ioc):
            return "file"
        return False

    def __query_virustotal(self, apikey: str, resource: str):
        '''
        Query VirusTotal for information about an indicator

        :apikey: VirusTotal API key
        :resource: Indicator to search in VirusTotal
        :raises InvalidMISPObject: when VT returns a non-success response_code
        '''
        url = "https://www.virustotal.com/vtapi/v2/{}/report".format(self._resource_type)
        params = {"apikey": apikey, "resource": resource}
        # for now assume we're using a public API key - we'll figure out private keys later
        # requests treats proxies=None identically to omitting the argument,
        # so one call covers both the proxied and un-proxied cases.
        # BUG FIX: a timeout prevents the constructor from hanging forever on
        # an unresponsive endpoint (requests has no default timeout).
        report = requests.get(url, params=params, proxies=self._proxies, timeout=60)
        report_json = report.json()
        if report_json["response_code"] == 1:
            return report_json
        error_msg = "{}: {}".format(resource, report_json["verbose_msg"])
        raise InvalidMISPObject(error_msg)
| 2.25 | 2 |
Reddit_ChatBot_Python/ws_client.py | scrubjay55/Reddit_ChatBot_Python | 24 | 12758654 | <gh_stars>10-100
import websocket
from ._utils.rate_limiter import RateLimiter
import time
from ._utils.frame_model import get_frame_data, FrameType
from _thread import start_new_thread
from ._utils.ws_utils import get_ws_url, print_chat_, configure_loggers, pair_channel_and_names
from ._utils.consts import MESG_regular, MESG_snoo, MESG_gif, TPST, TPEN, USER_AGENT
class WebSockClient:
    """Wrapper around websocket.WebSocketApp for the Reddit chat websocket.

    Tracks the login session (LOGI frames), keeps a channel-id -> channel-name
    map current, and dispatches every incoming frame to registered hooks.
    """

    def __init__(self, access_token, user_id, get_current_channels, enable_trace=False, print_chat=True,
                 log_websocket_frames=False, other_logging=True):
        self._user_id = user_id
        # Populated after login via update_channelid_sub_pair().
        self.channelid_sub_pairs = {}
        self.RateLimiter = RateLimiter
        self.logger = configure_loggers()
        self.logger.disabled = not other_logging
        websocket.enableTrace(enable_trace)
        self.ws = self._get_ws_app(get_ws_url(self._user_id, access_token))
        # Monotonically increasing request id, seeded from the wall clock (ms).
        self.req_id = int(time.time() * 1000)
        self.own_name = None
        self.print_chat = print_chat
        self.log_websocket_frames = log_websocket_frames
        self.last_err = None
        self.is_logi_err = False
        # Session key extracted from the LOGI frame; needed for channel queries.
        self.session_key = None
        self.get_current_channels = get_current_channels
        self.current_channels = None
        # Hooks run sequentially until one returns truthy (see _response_loop).
        self.after_message_hooks = []
        # Hooks each run in their own thread for every frame.
        # NOTE: "parralel" is a typo but is part of the public attribute name.
        self.parralel_hooks = []

    def _get_ws_app(self, ws_url):
        # Build the WebSocketApp with this instance's callbacks attached.
        ws = websocket.WebSocketApp(ws_url,
                                    on_message=self.on_message,
                                    on_error=self.on_error,
                                    on_close=self.on_close,
                                    on_open=self.on_open,
                                    header={'User-Agent': USER_AGENT, 'Accept-Encoding': 'gzip'}
                                    )
        return ws

    def update_ws_app_urls_access_token(self, access_token):
        # Refresh the connection URL after a token rotation.
        self.ws.url = get_ws_url(self._user_id, access_token)

    def on_open(self, _):
        self.logger.info("### successfully connected to the websocket ###")

    def on_message(self, _, message):
        # Decode the raw frame, optionally echo/log it, then fan out to hooks.
        resp = get_frame_data(message)
        if self.print_chat and resp.type_f == FrameType.MESG:
            print_chat_(resp, self.channelid_sub_pairs)
        if self.log_websocket_frames:
            self.logger.info(message)
        if resp.type_f == FrameType.LOGI:
            self.logger.info(message)
            self._logi(resp)
        for func in self.parralel_hooks:
            start_new_thread(func, (resp,))
        # The sequential hook chain also runs off-thread so it cannot block
        # the websocket read loop.
        start_new_thread(self._response_loop, (resp,))

    def _logi(self, resp):
        """Handle a LOGI frame: record session state or flag a login error."""
        try:
            logi_err = resp.error
        except AttributeError:
            logi_err = None
        if logi_err is None:
            self.is_logi_err = False
            self.last_err = None
            self.session_key = resp.key
            self.update_channelid_sub_pair()
            self.own_name = resp.nickname
        else:
            self.logger.error(resp.message)
            self.is_logi_err = True

    def update_channelid_sub_pair(self):
        """Re-fetch the joined channels and rebuild the id -> name map."""
        self.current_channels = self.get_current_channels(limit=100, order='latest_last_message', show_member=True,
                                                          show_read_receipt=True, show_empty=True,
                                                          member_state_filter='joined_only', super_mode='all',
                                                          public_mode='all', unread_filter='all',
                                                          hidden_mode='all', show_frozen=True,
                                                          # custom_types='direct,group',
                                                          session_key=self.session_key)
        self.channelid_sub_pairs = pair_channel_and_names(channels=self.current_channels,
                                                          own_user_id=self._user_id)

    def add_channelid_sub_pair(self, channel):
        """Register a newly joined channel without a full re-fetch."""
        self.current_channels.append(channel)
        self.channelid_sub_pairs = pair_channel_and_names(channels=self.current_channels,
                                                          own_user_id=self._user_id)

    def _response_loop(self, resp):
        # Run the sequential hooks; a truthy return short-circuits the chain.
        for func in self.after_message_hooks:
            if func(resp):
                break

    def ws_send_message(self, text, channel_url):
        """Send a plain-text chat message (subject to the rate limiter)."""
        if self.RateLimiter.is_enabled and self.RateLimiter.check():
            return
        payload = MESG_regular.format(channel_url=channel_url, text=text, req_id=self.req_id)
        self.ws.send(payload)
        self.req_id += 1

    def ws_send_snoomoji(self, snoomoji, channel_url):
        """Send a snoomoji message (subject to the rate limiter)."""
        if self.RateLimiter.is_enabled and self.RateLimiter.check():
            return
        payload = MESG_snoo.format(channel_url=channel_url, snoomoji=snoomoji, req_id=self.req_id)
        self.ws.send(payload)
        self.req_id += 1

    def ws_send_gif(self, gif_url, channel_url, height, width):
        # NOTE(review): unlike the other senders, this neither rate-limits nor
        # bumps req_id -- presumably intentional, but worth confirming.
        payload = MESG_gif.format(gif_url=gif_url, channel_url=channel_url, height=height, width=width)
        self.ws.send(payload)

    def ws_send_typing_indicator(self, channel_url):
        """Signal 'typing started' in the given channel."""
        payload = TPST.format(channel_url=channel_url, time=int(time.time() * 1000))
        self.ws.send(payload)

    def ws_stop_typing_indicator(self, channel_url):
        """Signal 'typing stopped' in the given channel."""
        payload = TPEN.format(channel_url=channel_url, time=int(time.time() * 1000))
        self.ws.send(payload)

    def on_error(self, _, error):
        self.logger.error(error)
        self.last_err = error

    def on_close(self, *_):
        self.logger.warning("### websocket closed ###")
| 1.890625 | 2 |
mythx_cli/group/status.py | Player1-PlaySwap/mythx-cli | 58 | 12758655 | <filename>mythx_cli/group/status.py<gh_stars>10-100
import logging
from typing import List
import click
from mythx_cli.formatter import FORMAT_RESOLVER
from mythx_cli.util import write_or_print
LOGGER = logging.getLogger("mythx-cli")
@click.command("status")
@click.argument("gids", default=None, nargs=-1)
@click.pass_obj
def group_status(ctx, gids: List[str]) -> None:
"""Get the status of an analysis group.
\f
:param ctx: Click context holding group-level parameters
:param gids: A list of group IDs to fetch the status for
"""
for gid in gids:
LOGGER.debug(f"Fetching group status for ID {gid}")
resp = ctx["client"].group_status(group_id=gid)
write_or_print(FORMAT_RESOLVER[ctx["fmt"]].format_group_status(resp))
| 2.40625 | 2 |
py2030/yaml.py | markkorput/py2030 | 0 | 12758656 | import socket
from .utils.config_file import ConfigFile
class Yaml:
    """Builds the default py2030 configuration for the current host."""

    def __init__(self):
        # The hostname doubles as the profile key; dots are illegal there.
        profile_key = socket.gethostname().replace('.', '_')
        profile = {'start_event': 'start'}
        self.data = {'py2030': {'profiles': {profile_key: profile}}}

    def text(self):
        """Render the configuration dict as YAML text."""
        return ConfigFile.to_yaml(self.data)
# Script entry point: print the default configuration as YAML.
if __name__ == '__main__':
    print(Yaml().text())
| 2.375 | 2 |
data/templates/authentication/authentication.mako.py | sumukh210991/Cyberweb | 0 | 12758657 | # -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1467226952.515133
_enable_loop = True
_template_filename = '/home/sumukh/Documents/thesis/Cyberweb/cyberweb/cyberweb/templates/authentication/authentication.mako'
_template_uri = '/authentication/authentication.mako'
_source_encoding = 'utf-8'
from webhelpers.html import escape
_exports = ['headtags', 'col2main']
def _mako_get_namespace(context, name):
    # Return the namespace cached on the render context, lazily generating
    # this module's namespaces on the first KeyError. (mako-generated code.)
    try:
        return context.namespaces[(__name__, name)]
    except KeyError:
        _mako_generate_namespaces(context)
        return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
    # The template declares no <%namespace> tags, so there is nothing to build.
    pass
def _mako_inherit(template, context):
    # Wire template inheritance: this page renders inside the
    # authentication layout template. (mako-generated code.)
    _mako_generate_namespaces(context)
    return runtime._inherit_from(context, u'/authentication/authentication.layout.mako', _template_uri)
def render_body(context, **pageargs):
    # Top-level render function generated by mako; emits only the literal
    # whitespace between this template's <%def> blocks.
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        __M_writer = context.writer()
        __M_writer(u'\n\n\n')
        __M_writer(u'\n\n\n')
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_headtags(context):
    # The headtags <%def> is empty in the source template; only a newline.
    __M_caller = context.caller_stack._push_frame()
    try:
        __M_writer = context.writer()
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_col2main(context):
    # Renders the main column: a static credential-summary HTML table with
    # the current user's name (c.user) interpolated and HTML-escaped.
    __M_caller = context.caller_stack._push_frame()
    try:
        c = context.get('c', UNDEFINED)
        __M_writer = context.writer()
        __M_writer(u'\n\n<h2>Authentication Credential Summary for CyberWeb User: ')
        __M_writer(escape(c.user))
        __M_writer(u'</h2>\n<p>\n<h3>PKI Credentials</h3>\n<table>\n <tr>\n <td>Account</td><td>Hostname</td>Status</td><td>Date</td>\n </tr>\n <tr> <td>account1</td><td>hostname1</td>Status</td><td>date</td> </tr>\n <tr> <td>account2</td><td>hostname2</td>Status</td><td>date</td> </tr>\n <tr> <td>account3</td><td>hostname3</td>Status</td><td>date</td> </tr>\n</table>\n<p>\n<h3>GSI Credentials</h3>\n<table>\n <tr> <td>Account</td><td>DN</td><td>MyProxy Server</td><td>Credential Info</td> </tr>\n <tr> <td>account1</td><td>DN1</td><td>MyProxy Server</td><td>Credential1 Info</td> </tr>\n <tr> <td>account2</td><td>DN2</td><td>MyProxy Server</td><td>Credential2 Info</td>\n <tr> <td>account3</td><td>DN3</td><td>MyProxy Server</td><td>Credential3 Info</td>\n <tr> <td>account4</td><td>DN4</td><td>MyProxy Server</td><td>Credential4 Info</td>\n </tr>\n</table>\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"source_encoding": "utf-8", "line_map": {"64": 58, "33": 1, "34": 5, "35": 31, "41": 4, "45": 4, "51": 8, "56": 8, "57": 10, "58": 10, "28": 0}, "uri": "/authentication/authentication.mako", "filename": "/home/sumukh/Documents/thesis/Cyberweb/cyberweb/cyberweb/templates/authentication/authentication.mako"}
__M_END_METADATA
"""
| 2.03125 | 2 |
jp.atcoder/abc253/abc253_a/32028159.py | kagemeka/atcoder-submissions | 1 | 12758658 | def main() -> None:
a, b, c = map(int, input().split())
d = [a, b, c]
d.sort()
print("Yes" if d[1] == b else "No")
if __name__ == "__main__":
main()
| 3.109375 | 3 |
build/PureCloudPlatformClientV2/models/wem_coaching_appointment_topic_coaching_appointment_notification.py | cjohnson-ctl/platform-client-sdk-python | 10 | 12758659 | <reponame>cjohnson-ctl/platform-client-sdk-python
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class WemCoachingAppointmentTopicCoachingAppointmentNotification(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        WemCoachingAppointmentTopicCoachingAppointmentNotification - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # Attribute name -> swagger type, used by to_dict() for serialization.
        self.swagger_types = {
            'id': 'str',
            'name': 'str',
            'date_start': 'datetime',
            'length_in_minutes': 'int',
            'status': 'str',
            'facilitator': 'WemCoachingAppointmentTopicUserReference',
            'attendees': 'list[WemCoachingAppointmentTopicUserReference]',
            'created_by': 'WemCoachingAppointmentTopicUserReference',
            'date_created': 'datetime',
            'modified_by': 'WemCoachingAppointmentTopicUserReference',
            'date_modified': 'datetime',
            'conversations': 'list[WemCoachingAppointmentTopicCoachingAppointmentConversation]',
            'documents': 'list[WemCoachingAppointmentTopicCoachingAppointmentDocument]',
            'change_type': 'str',
            'date_completed': 'datetime',
            'external_links': 'list[WemCoachingAppointmentTopicCoachingAppointmentExternalLink]'
        }
        # Python attribute name -> JSON key in the wire format.
        self.attribute_map = {
            'id': 'id',
            'name': 'name',
            'date_start': 'dateStart',
            'length_in_minutes': 'lengthInMinutes',
            'status': 'status',
            'facilitator': 'facilitator',
            'attendees': 'attendees',
            'created_by': 'createdBy',
            'date_created': 'dateCreated',
            'modified_by': 'modifiedBy',
            'date_modified': 'dateModified',
            'conversations': 'conversations',
            'documents': 'documents',
            'change_type': 'changeType',
            'date_completed': 'dateCompleted',
            'external_links': 'externalLinks'
        }
        self._id = None
        self._name = None
        self._date_start = None
        self._length_in_minutes = None
        self._status = None
        self._facilitator = None
        self._attendees = None
        self._created_by = None
        self._date_created = None
        self._modified_by = None
        self._date_modified = None
        self._conversations = None
        self._documents = None
        self._change_type = None
        self._date_completed = None
        self._external_links = None

    @property
    def id(self):
        """
        Gets the id of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The id of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param id: The id of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: str
        """
        self._id = id

    @property
    def name(self):
        """
        Gets the name of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The name of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param name: The name of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: str
        """
        self._name = name

    @property
    def date_start(self):
        """
        Gets the date_start of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The date_start of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: datetime
        """
        return self._date_start

    @date_start.setter
    def date_start(self, date_start):
        """
        Sets the date_start of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param date_start: The date_start of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: datetime
        """
        self._date_start = date_start

    @property
    def length_in_minutes(self):
        """
        Gets the length_in_minutes of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The length_in_minutes of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: int
        """
        return self._length_in_minutes

    @length_in_minutes.setter
    def length_in_minutes(self, length_in_minutes):
        """
        Sets the length_in_minutes of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param length_in_minutes: The length_in_minutes of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: int
        """
        self._length_in_minutes = length_in_minutes

    @property
    def status(self):
        """
        Gets the status of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The status of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """
        Sets the status of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param status: The status of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: str
        """
        allowed_values = ["Scheduled", "InProgress", "Completed", "InvalidSchedule"]
        # Unknown enum values (e.g. sent by a newer API version) degrade to a
        # sentinel instead of raising, so older SDK builds keep working.
        if status.lower() not in map(str.lower, allowed_values):
            # print("Invalid value for status -> " + status)
            self._status = "outdated_sdk_version"
        else:
            self._status = status

    @property
    def facilitator(self):
        """
        Gets the facilitator of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The facilitator of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: WemCoachingAppointmentTopicUserReference
        """
        return self._facilitator

    @facilitator.setter
    def facilitator(self, facilitator):
        """
        Sets the facilitator of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param facilitator: The facilitator of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: WemCoachingAppointmentTopicUserReference
        """
        self._facilitator = facilitator

    @property
    def attendees(self):
        """
        Gets the attendees of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The attendees of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: list[WemCoachingAppointmentTopicUserReference]
        """
        return self._attendees

    @attendees.setter
    def attendees(self, attendees):
        """
        Sets the attendees of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param attendees: The attendees of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: list[WemCoachingAppointmentTopicUserReference]
        """
        self._attendees = attendees

    @property
    def created_by(self):
        """
        Gets the created_by of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The created_by of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: WemCoachingAppointmentTopicUserReference
        """
        return self._created_by

    @created_by.setter
    def created_by(self, created_by):
        """
        Sets the created_by of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param created_by: The created_by of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: WemCoachingAppointmentTopicUserReference
        """
        self._created_by = created_by

    @property
    def date_created(self):
        """
        Gets the date_created of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The date_created of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: datetime
        """
        return self._date_created

    @date_created.setter
    def date_created(self, date_created):
        """
        Sets the date_created of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param date_created: The date_created of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: datetime
        """
        self._date_created = date_created

    @property
    def modified_by(self):
        """
        Gets the modified_by of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The modified_by of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: WemCoachingAppointmentTopicUserReference
        """
        return self._modified_by

    @modified_by.setter
    def modified_by(self, modified_by):
        """
        Sets the modified_by of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param modified_by: The modified_by of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: WemCoachingAppointmentTopicUserReference
        """
        self._modified_by = modified_by

    @property
    def date_modified(self):
        """
        Gets the date_modified of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The date_modified of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: datetime
        """
        return self._date_modified

    @date_modified.setter
    def date_modified(self, date_modified):
        """
        Sets the date_modified of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param date_modified: The date_modified of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: datetime
        """
        self._date_modified = date_modified

    @property
    def conversations(self):
        """
        Gets the conversations of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The conversations of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: list[WemCoachingAppointmentTopicCoachingAppointmentConversation]
        """
        return self._conversations

    @conversations.setter
    def conversations(self, conversations):
        """
        Sets the conversations of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param conversations: The conversations of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: list[WemCoachingAppointmentTopicCoachingAppointmentConversation]
        """
        self._conversations = conversations

    @property
    def documents(self):
        """
        Gets the documents of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The documents of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: list[WemCoachingAppointmentTopicCoachingAppointmentDocument]
        """
        return self._documents

    @documents.setter
    def documents(self, documents):
        """
        Sets the documents of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param documents: The documents of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: list[WemCoachingAppointmentTopicCoachingAppointmentDocument]
        """
        self._documents = documents

    @property
    def change_type(self):
        """
        Gets the change_type of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The change_type of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: str
        """
        return self._change_type

    @change_type.setter
    def change_type(self, change_type):
        """
        Sets the change_type of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param change_type: The change_type of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: str
        """
        allowed_values = ["Create", "Update", "Delete", "Invalidate"]
        # Same forward-compatibility sentinel strategy as the status setter.
        if change_type.lower() not in map(str.lower, allowed_values):
            # print("Invalid value for change_type -> " + change_type)
            self._change_type = "outdated_sdk_version"
        else:
            self._change_type = change_type

    @property
    def date_completed(self):
        """
        Gets the date_completed of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The date_completed of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: datetime
        """
        return self._date_completed

    @date_completed.setter
    def date_completed(self, date_completed):
        """
        Sets the date_completed of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param date_completed: The date_completed of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: datetime
        """
        self._date_completed = date_completed

    @property
    def external_links(self):
        """
        Gets the external_links of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :return: The external_links of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :rtype: list[WemCoachingAppointmentTopicCoachingAppointmentExternalLink]
        """
        return self._external_links

    @external_links.setter
    def external_links(self, external_links):
        """
        Sets the external_links of this WemCoachingAppointmentTopicCoachingAppointmentNotification.

        :param external_links: The external_links of this WemCoachingAppointmentTopicCoachingAppointmentNotification.
        :type: list[WemCoachingAppointmentTopicCoachingAppointmentExternalLink]
        """
        self._external_links = external_links

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}
        # Recursively serialize nested models, lists of models and dict values.
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_json(self):
        """
        Returns the model as raw JSON
        """
        return json.dumps(sanitize_for_serialization(self.to_dict()))

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 1.601563 | 2 |
duckyEmu.py | darkwire37/Rubber-Ducky-Emulator | 1 | 12758660 | import pyautogui as pag
import time
import sys
# Interpret a rubber-ducky style script: one command per line, with an
# optional argument, and replay it via pyautogui.
args = sys.argv
if len(args) != 2:
    print("Please specify the file path of the script you would like to run.")
    quit()
script = open(sys.argv[1])
lines = script.readlines()
script.close()  # BUG FIX: the file handle was never closed
for line in lines:
    print(line)
    parts = line.split(None, 1)
    # Skip blank lines: split()[0] would raise IndexError on them.
    if not parts:
        continue
    command = parts[0].lower()
    # BUG FIX: `parameter` previously leaked from the previous iteration
    # (NameError on a first parameterless line, stale value afterwards);
    # reset it for every line instead.
    parameter = parts[1].strip().lower() if len(parts) > 1 else ""
    if command == "control":
        command = "ctrl"
    if parameter == "control":
        parameter = "ctrl"
    if command == "string":
        pag.typewrite(parameter, interval=0.1)
    elif command == "delay":
        time.sleep(int(parameter) / 1000)
    elif command == "enter":
        pag.typewrite(['enter'], interval=0.1)
    elif command == "gui":
        pag.hotkey('winleft', parameter)
    elif command == "rem":
        # Comment line in the ducky script: ignore.
        pass
    elif parameter:
        pag.hotkey(command, parameter)
    else:
        # Single-key hotkey with no argument line.
        pag.hotkey(command)
| 2.875 | 3 |
High School/9th Grade APCSP (Python)/Unit 7/07.01.08.py | SomewhereOutInSpace/Computer-Science-Class | 0 | 12758661 | <filename>High School/9th Grade APCSP (Python)/Unit 7/07.01.08.py<gh_stars>0
def sums(target):
    """Print (and return) the sum 1 + 2 + ... + target.

    Uses the closed form n*(n+1)//2 instead of materialising a list of all
    addends, so it runs in O(1) time and memory. Non-positive targets yield
    0, matching the original empty-range behaviour.

    :param target: upper bound of the summation (inclusive)
    :return: the triangular number for target (also printed)
    """
    n = max(target, 0)
    ans = n * (n + 1) // 2
    print(ans)
    return ans
target = int(input(""))
sums(target)
| 3.84375 | 4 |
generic_components/count_NA/countNA.py | KhaosResearch/TITAN-dockers | 1 | 12758662 | <filename>generic_components/count_NA/countNA.py<gh_stars>1-10
import datetime
import os
import re
from typing import List, Optional
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
import numpy as np
import typer
def countNA(
        filepath: str = typer.Option(..., help="Path of file"),
        delimiter: str = typer.Option(..., help="Delimiter of file")):
    """Render one pie chart per column showing NA vs. collected counts
    into count_na.pdf (written inside the ./data directory).
    """
    os.chdir("data")
    df = pd.read_csv(filepath, sep=delimiter)
    # Column 0 is skipped -- presumably an identifier/date column; TODO confirm.
    count_na = [df.iloc[:, i].isna().sum() for i in range(1, len(df.columns))]

    def func(pct, allvals):
        # Label each wedge with both percentage and absolute count.
        absolute = int(pct / 100. * np.sum(allvals))
        return "{:.1f}%\n({:d})".format(pct, absolute)

    with PdfPages('count_na.pdf') as pdf:
        for i in range(0, len(count_na)):
            count_total = [len(df.index) - count_na[i], count_na[i]]
            fig, ax = plt.subplots()
            wedges, texts, autotexts = ax.pie(count_total, autopct=lambda pct: func(pct, count_total), startangle=90)
            ax.legend(wedges, ["Collected data", "NAs"], loc="lower left", bbox_to_anchor=(0.8, 0.7))
            ax.set_title(list(df.columns.values)[i + 1])
            ax.axis('equal')
            pdf.savefig(fig)
            # BUG FIX: close each figure after saving; otherwise every figure
            # stays open and matplotlib leaks memory (and warns) on wide files.
            plt.close(fig)


if __name__ == "__main__":
    typer.run(countNA)
frontend/gnip/mongo.py | pablobesada/tw | 0 | 12758663 | #encoding: utf-8
from pymongo import MongoClient
from bson import ObjectId
import argparse
from pprint import pprint
import time
import threading
from datetime import datetime, timedelta
from tweet import Tweet
class Product(object):
    """Read-only wrapper around a product sub-document of a brand."""

    def __init__(self, id, mongoproduct):
        self.id = id
        self.o = mongoproduct
        self.keywordsets = None  # lazily filled by getKeywordsets()

    def __unicode__(self):
        return u"<Product %s>" % self.getName()

    def __repr__(self):
        return self.__unicode__()

    def getName(self):
        return self.o.get('name', "")

    def isUsingBrandIdRules(self):
        return self.o.get("use_brand_id_rules", False)

    def getSynonyms(self):
        raw = self.o.get("synonyms", "")
        return set(part.strip().lower() for part in raw.split(",") if part.strip())

    def getSearchKeywords(self):
        # Lower-cased synonyms plus the product name itself.
        keywords = self.getSynonyms()
        keywords.add(self.getName().lower())
        return keywords

    def getIdentificationRules(self):
        return self.o.get("identification_rules", [])

    def getKeywords(self):
        return self.o.get('keywords', [])

    def getKeywordsets(self):
        # Resolve referenced keyword sets once, then reuse the cache.
        if self.keywordsets is None:
            self.keywordsets = []
            for entry in self.o.get('keyword_sets', []):
                self.keywordsets.append(MongoManager.getKeywordset(id=str(entry['_id'])))
        return self.keywordsets
class Brand(object):
    """Wrapper around a brand sub-document of a campaign: name, synonyms,
    follow accounts, identification rules and referenced keyword sets."""

    def __init__(self, id, mongobrand):
        self.id = id
        self.o = mongobrand
        self.keywordsets = None  # lazy cache, filled by getKeywordsets()

    def __unicode__(self):
        return u"<Brand %s>" % self.getName()

    def __repr__(self):
        return self.__unicode__()

    def getId(self):
        return self.id

    def getName(self):
        return self.o.get('name', "")

    def isOwnBrand(self):
        return self.o.get('own_brand', False)

    def getProducts(self):
        return [Product(id, prod) for id, prod in self.o.get('products', {}).items()]

    def getSynonyms(self):
        return set([x.strip().lower() for x in self.o.get("synonyms", "").split(",") if x.strip()])

    def getSearchKeywords(self):
        """Lower-cased synonyms plus the brand name, for stream matching."""
        s = self.getSynonyms()
        s.add(self.getName().lower())
        return s

    def getFollowAccounts(self):
        return set([x.strip() for x in self.o.get("follow_accounts", "").split(",") if x.strip()])

    def getIdentificationRules(self):
        return self.o.get("identification_rules", [])

    def getKeywords(self):
        res = self.o.get('keywords', [])
        return res

    def getKeywordsets(self):
        """Lazily resolve referenced keyword sets via MongoManager."""
        if self.keywordsets is None:
            self.keywordsets = []
            for kwset in self.o.get('keyword_sets', []):
                try:
                    kwset_id = str(kwset.get('_id', ''))
                    if kwset_id:
                        keywordset = MongoManager.getKeywordset(id=kwset_id)
                        keywordset.setValue(kwset['value'])
                        self.keywordsets.append(keywordset)
                except Exception:
                    # Fixed Python-3-incompatible "except Exception, e" syntax
                    # (the bound exception was unused); log which brand failed,
                    # then re-raise.
                    print("MARCA: %s" % self.getName())
                    raise
        return self.keywordsets

    def getScoreThreshold(self):
        return self.o.get("score_threshold", 0)
class Topic(object):
    """Wrapper around a topic sub-document of a campaign."""

    def __init__(self, id, mongotopic):
        self.id = id
        self.o = mongotopic
        self.keywordsets = None  # lazy cache, filled by getKeywordsets()

    def getId(self):
        return self.id

    def __unicode__(self):
        return u"<Topic %s>" % self.getName()

    def __repr__(self):
        # NOTE(review): returns bytes under Python 3; this module otherwise
        # targets Python 2 (print statements in the __main__ block).
        return self.__unicode__().encode("latin1")

    def getName(self):
        return self.o.get('name', "")

    def getOwnKeywordsetIds(self):
        return [str(k['_id']) for k in self.o.get('keywordsets', [])]

    def getKeywords(self):
        """Return own keywords plus those of all referenced keyword sets.

        Copies the stored list first: previously ``extend`` appended the
        children's keywords into the list held inside the Mongo document,
        so repeated calls kept growing it.
        """
        res = list(self.o.get('keywords', []))
        for kwset in self.getKeywordsets():
            res.extend(kwset.getKeywords())
        return res

    def getKeywordsets(self):
        if self.keywordsets is None:
            self.keywordsets = []
            for kwset in self.o.get('keywordsets', []):
                # Fixed typo: was ``self.keyworsets`` (AttributeError at runtime).
                self.keywordsets.append(MongoManager.getKeywordset(id=str(kwset['_id'])))
        return self.keywordsets

    def addKeywordset(self, keywordset):
        # NOTE(review): requires getKeywordsets() to have run first, otherwise
        # self.keywordsets is still None (same behavior as before).
        self.keywordsets.append(keywordset)
class Keywordset(object):
    """A named keyword list that may recursively include child keyword sets."""

    def __init__(self, id, mongokws):
        self.id = id
        self.o = mongokws
        self.keywordsets = []  # child Keywordset objects

    def getId(self):
        return self.id

    def getName(self):
        return self.o['name']

    def setValue(self, v):
        self.o['value'] = v

    def getValue(self):
        return self.o['value']

    def __unicode__(self):
        return u"<Keyworset %s>" % self.getName()

    def __repr__(self):
        # NOTE(review): returns bytes under Python 3; this module otherwise
        # targets Python 2.
        return self.__unicode__().encode("latin1")

    def getKeywords(self):
        """Return own keywords plus those of all child sets.

        Copies the stored list first: previously ``extend`` mutated the list
        held inside the Mongo document, so every call appended the children's
        keywords again.
        """
        res = list(self.o.get('keywords', []))
        for kwset in self.getKeywordsets():
            res.extend(kwset.getKeywords())
        return res

    def getOwnKeywordsetIds(self):
        return [str(k['_id']) for k in self.o.get('keywordsets', [])]

    def getKeywordsets(self):
        return self.keywordsets

    def addKeywordset(self, keywordset):
        self.keywordsets.append(keywordset)

    def __iter__(self):
        # Iterating a keyword set yields its (recursive) keywords.
        for kw in self.getKeywords():
            yield kw
class Campaign(object):
    """Wrapper around a campaign sub-document: brands, topics, sources
    (Facebook fan pages, forums) and optimistic-sync version state."""

    def __init__(self, id, mongocampaign):
        self.id = id
        self.o = mongocampaign

    def getId(self):
        return self.id

    def __unicode__(self):
        return u"<Campaign %s>" % self.getName()

    def __repr__(self):
        return self.__unicode__()

    def getName(self):
        return self.o.get('name', "")

    def getBrands(self):
        return [Brand(bid, doc) for bid, doc in self.o.get('brands', {}).items()]

    def getOwnBrands(self):
        # NOTE(review): a missing 'own_brand' flag defaults to True here but
        # to False in Brand.isOwnBrand() -- looks inconsistent; confirm intent.
        return [Brand(bid, doc) for bid, doc in self.o.get('brands', {}).items() if doc.get('own_brand', True)]

    def getTopics(self):
        return [Topic(tid, doc) for tid, doc in self.o.get('topics', {}).items()]

    def getFollowAccounts(self):
        accounts = set()
        for brand in self.getBrands():
            accounts.update(brand.getFollowAccounts())
        return accounts

    def getOwnFollowAccounts(self):
        accounts = set()
        for brand in self.getOwnBrands():
            accounts.update(brand.getFollowAccounts())
        return accounts

    def getFacebookFanpages(self):
        # split() with no argument already drops empty tokens.
        return self.o.get("facebook_fanpages", "").split()

    def getForums(self):
        return self.o.get("forums", "").split()

    def getHistoryFetchedForums(self):
        return self.o.get("history_fetched_forums", [])

    def addHistoryFetchedForum(self, forum):
        # Stored as a list but deduplicated through a set.
        seen = set(self.getHistoryFetchedForums())
        seen.add(forum)
        self.o['history_fetched_forums'] = list(seen)

    def getSyncVersion(self):
        return self.o.get('syncversion', 1)

    def incrementSyncVersion(self):
        self.o['syncversion'] = self.getSyncVersion() + 1

    def getDictionary(self):
        return self.o
class DataCollection(object):
    """Thin wrapper around a data-collection sub-document of an account."""

    def __init__(self, id, mongodc):
        self.id = id
        self.o = mongodc

    def getId(self):
        return self.id

    def getFields(self):
        # Field definitions declared for this collection, if any.
        return self.o.get('fields', [])

    def getDictionary(self):
        return self.o
class Poll(object):
    """Wrapper around a poll sub-document; hashtags drive tweet matching."""

    def __init__(self, id, mongopoll):
        self.id = id
        self.o = mongopoll

    def getId(self):
        return self.id

    def __unicode__(self):
        return u"<Poll %s>" % self.getName()

    def __repr__(self):
        return self.__unicode__()

    def getName(self):
        return self.o.get('name', "")

    def getPollHashtag(self):
        return self.o.get("poll_hashtag", "")

    def getOptionHashtags(self):
        raw = self.o.get("hashtags", "")
        return [tag.strip() for tag in raw.split(",") if tag.strip()]

    def getSearchHashtags(self):
        # A dedicated poll hashtag wins; otherwise search on the options.
        main_tag = self.getPollHashtag()
        if main_tag:
            return set([main_tag])
        return set(self.getOptionHashtags())

    def getDictionary(self):
        return self.o
class Account(object):
    """Wrapper around an account document: campaigns, polls, data collections."""

    def __init__(self, mongoaccount):
        self.o = mongoaccount

    def getId(self):
        return str(self.o['_id'])

    def __unicode__(self):
        return "<Account %s>" % self.getName()

    def __repr__(self):
        return self.__unicode__()

    def getName(self):
        return self.o['name']

    def getActiveCampaigns(self):
        # Campaigns default to *inactive* when the flag is missing.
        campaigns = self.o.get('campaigns', {})
        return [Campaign(cid, doc) for cid, doc in campaigns.items() if doc.get("active", False)]

    def getActivePolls(self):
        # Polls default to *active* when the flag is missing.
        polls = self.o.get('polls', {})
        return [Poll(pid, doc) for pid, doc in polls.items() if doc.get("active", True)]

    def getActiveDataCollections(self):
        collections = self.o.get('datacollections', {})
        return [DataCollection(cid, doc) for cid, doc in collections.items() if doc.get("active", True)]

    def getCampaign(self, **kwargs):
        if 'id' in kwargs:
            return Campaign(kwargs['id'], self.o['campaigns'][kwargs['id']])
        return None

    def getFollowAccounts(self):
        accounts = set()
        for campaign in self.getActiveCampaigns():
            accounts |= campaign.getFollowAccounts()
        return accounts

    def getPollSearchHashtags(self):
        """Map search hashtag -> list of active polls using it."""
        mapping = dict()
        for poll in self.getActivePolls():
            for tag in poll.getSearchHashtags():
                mapping.setdefault(tag, []).append(poll)
        return mapping

    def getDictionary(self):
        return self.o
class MongoIterator(object):
    """Wraps a Mongo cursor (or list), building domain objects on iteration."""

    def __init__(self, collection, item_constructor):
        self.collection = collection
        self.item_constructor = item_constructor
        self.created = datetime.now()  # for cache-age bookkeeping

    def __iter__(self):
        build = self.item_constructor
        for raw in self.collection:
            yield build(raw)

    def __len__(self):
        return len(self.collection)

    def __getitem__(self, idx):
        # Raw (unwrapped) document access by index.
        return self.collection[idx]

    def getAge(self):
        return datetime.now() - self.created

    def count(self):
        return self.collection.count()
class MongoManager(object):
    """Static facade over the ``monitor`` Mongo database.

    Holds a module-wide connection (opened by ``connect()`` at import time)
    plus small age-based caches for the most frequently requested
    aggregates.  Each cache entry is ``{'data': ..., 'fetch_time': datetime}``.
    """

    db = None
    cached_active_accounts = {}
    cached_polls_by_hashtag = {}
    follow_accounts_by_campaign = {}
    global_trend_stop_words = {}

    @classmethod
    def connect(cls):
        """Open the connection; honours --host / --auth command-line flags."""
        parser = argparse.ArgumentParser()
        parser.add_argument('--auth', action="store_true", default=False)
        parser.add_argument('--host', default='')
        args, unknown = parser.parse_known_args()
        dbuser = "monitor"
        dbpasswd = "<PASSWORD>"
        if args.host:
            mclient = MongoClient(args.host)
        else:
            mclient = MongoClient()
        cls.db = mclient['monitor']
        if args.auth:
            cls.db.authenticate(dbuser, dbpasswd)

    def isConnected(self):
        # Fixed: identity comparison instead of "!= None".
        return self.db is not None

    @classmethod
    def getActiveAccounts(cls, **kwargs):
        """Return accounts that are active (or lack the flag), cached up to
        ``max_age`` (a timedelta; the default of 0 always refetches)."""
        max_age = kwargs.get('max_age', timedelta(seconds=0))
        if not max_age or not cls.cached_active_accounts or (datetime.now() - cls.cached_active_accounts['fetch_time'] > max_age):
            cls.cached_active_accounts = {'data': MongoIterator(list(cls.db.accounts.find({"$or": [{"active": True}, {"active": {"$exists": False}}]})), Account), 'fetch_time': datetime.now()}
        return cls.cached_active_accounts['data']

    @classmethod
    def findOne(cls, collection_name, **kwargs):
        # NOTE(review): sort/skip/limit kwargs were accepted but never applied
        # by find_one; the unused locals have been removed.
        filters = kwargs.get("filters", {})
        res = cls.db[collection_name].find_one(filters)
        return res

    @classmethod
    def find(cls, collection_name, **kwargs):
        """Generic find with optional ``sort=(field, direction)``, ``skip``
        and ``limit`` keyword arguments."""
        filters = kwargs.get("filters", {})
        sort = kwargs.get("sort", ())
        skip = kwargs.get("skip", None)
        limit = kwargs.get("limit", None)
        res = cls.db[collection_name].find(filters)
        if sort: res.sort(*sort)
        if skip is not None: res.skip(skip)
        if limit is not None: res.limit(limit)
        return res

    @classmethod
    def remove(cls, collection_name, **kwargs):
        filters = kwargs.get("filters", {})
        res = cls.db[collection_name].remove(filters)
        return res

    @classmethod
    def findFBPosts(cls, collection_name, **kwargs):
        from facebook import FBPost
        return MongoIterator(cls.find(collection_name, **kwargs), FBPost.createFromMongoDoc)

    @classmethod
    def findTweets(cls, collection_name, **kwargs):
        from tweet import Tweet
        return MongoIterator(cls.find(collection_name, **kwargs), Tweet.createFromMongoDoc)

    @classmethod
    def findFeeds(cls, collection_name, **kwargs):
        from feed import FeedEntry
        return MongoIterator(cls.find(collection_name, **kwargs), FeedEntry.createFromMongoDoc)

    @classmethod
    def countDocuments(cls, collection_name, **kwargs):
        filters = kwargs.get("filters", {})
        skip = kwargs.get("skip", None)
        limit = kwargs.get("limit", None)
        res = cls.db[collection_name].find(filters)
        if skip is not None: res.skip(skip)
        if limit is not None: res.limit(limit)
        return res.count()

    @classmethod
    def getSummarizedTweetInfo(cls, campaign):
        # TODO: some kind of date filter should be added later.
        res = cls.db["summarized_tweets_%s" % campaign.getId()].find()
        return res

    @classmethod
    def getAccount(cls, **kwargs):
        """Look an account up by ``id`` and/or ``name``; None when absent."""
        if not kwargs: return None
        d = {}
        if 'id' in kwargs:
            d['_id'] = ObjectId(kwargs['id'])
        if 'name' in kwargs:
            d['name'] = kwargs['name']
        acc = cls.db.accounts.find_one(d)
        if acc: return Account(acc)
        return None

    @classmethod
    def getKeywordset(cls, **kwargs):
        """Fetch a keyword set by ``id`` or ``name``, resolving children
        recursively.  NOTE(review): raises if no document matches."""
        if not kwargs: return None
        d = {}
        if 'id' in kwargs:
            d['_id'] = ObjectId(kwargs['id'])
        if 'name' in kwargs:
            d['name'] = kwargs['name']
        mongokwset = cls.db.keywordset.find_one(d)
        kwset = Keywordset(mongokwset['_id'], mongokwset)
        for child_kwset_id in kwset.getOwnKeywordsetIds():
            child_kwset = cls.getKeywordset(id=child_kwset_id)
            kwset.addKeywordset(child_kwset)
        return kwset

    @classmethod
    def getPollsByHashtag(cls, **kwargs):
        """Map search hashtag -> list of polls, cached up to ``max_age``."""
        max_age = kwargs.get('max_age', timedelta(seconds=0))
        if not max_age or not cls.cached_polls_by_hashtag or (datetime.now() - cls.cached_polls_by_hashtag['fetch_time'] > max_age):
            data = {}
            accounts = cls.getActiveAccounts(max_age=max_age)
            for acc in accounts:
                d = acc.getPollSearchHashtags()
                for ht, polls in d.items():
                    if ht not in data: data[ht] = []
                    data[ht].extend(polls)
            cls.cached_polls_by_hashtag = {'data': data, 'fetch_time': datetime.now()}
        return cls.cached_polls_by_hashtag['data']

    @classmethod
    def getFollowAccountsbyCampaign(cls, **kwargs):
        """Map follow account -> campaign/brand descriptors, cached."""
        max_age = kwargs.get('max_age', timedelta(seconds=0))
        if not max_age or not cls.follow_accounts_by_campaign or (datetime.now() - cls.follow_accounts_by_campaign['fetch_time'] > max_age):
            accounts = cls.getActiveAccounts(max_age=max_age)
            s = {}
            for acc in accounts:
                for campaign in acc.getActiveCampaigns():
                    for brand in campaign.getBrands():
                        follow_accounts = brand.getFollowAccounts()
                        for fa in follow_accounts:
                            if fa not in s: s[fa] = []
                            s[fa].append({"cid": campaign.getId(), "bid": brand.getId(), "brand": brand.getName(), 'own_brand': brand.isOwnBrand()})
            cls.follow_accounts_by_campaign = {'data': s, 'fetch_time': datetime.now()}
        return cls.follow_accounts_by_campaign['data']

    @classmethod
    def getGlobalTrendStopWords(cls, language, **kwargs):
        """Per-language stop-word document, cached up to ``max_age``."""
        max_age = kwargs.get('max_age', timedelta(seconds=0))
        # Fixed crash: the cache-age test previously referenced
        # "cls.cls.global_trend_stop_words" (AttributeError on a warm cache).
        if not max_age or not cls.global_trend_stop_words.get(language, None) or (datetime.now() - cls.global_trend_stop_words[language]['fetch_time'] > max_age):
            res = MongoManager.findOne("global_trend_stop_words", filters={"lang": language})
            if not res:
                res = {'lang': language, 'words': []}
            cls.global_trend_stop_words[language] = {'data': res, 'fetch_time': datetime.now()}
        return cls.global_trend_stop_words[language]['data']

    @classmethod
    def saveGlobalTrendStopWords(cls, doc):
        MongoManager.saveDocument("global_trend_stop_words", doc)

    @classmethod
    def saveDocument(cls, collection_name, doc):
        return cls.db[collection_name].save(doc)

    @classmethod
    def ensureIndex(cls, collection_name, index):
        return cls.db[collection_name].ensure_index(index)

    @classmethod
    def saveCampaign(cls, account, campaign):
        """Optimistic-concurrency save: returns False when the stored sync
        version no longer matches the one being saved."""
        acc = MongoManager.getAccount(id=account.getId())
        oldcamp = acc.getCampaign(id=campaign.getId())
        if oldcamp.getSyncVersion() != campaign.getSyncVersion():
            return False
        campaign.incrementSyncVersion()
        acc.getDictionary()['campaigns'][campaign.getId()] = campaign.getDictionary()
        return cls.saveDocument("accounts", acc.getDictionary())
# Module import side effect: open the shared connection immediately.
MongoManager.connect()

if __name__ == "__main__":
    mm = MongoManager
    # Single-argument print() form works identically on Python 2 and 3
    # (was a Python-2-only print statement).
    print(mm.getPollsByHashtag())
    """
    beber = "5403eb34fbe4d07b0d73407f"
    futbol = "5403ebf0fbe4d07b0d734080"
    k = mm.getKeywordset(name=u"Fútbol")
    pprint(k)
    pprint(k.getKeywords())
    pprint(k.getKeywordsets())
    exit(0)
    print mm.isConnected()
    for acc in mm.getActiveAccounts():
        #time.sleep(1)
        print acc
    print mm.getActiveAccounts()[1]
    exit(0)
    acc = mm.getAccount(id='<KEY>')
    print mm.getSearchKeywords()
    print mm.getFollowAccounts()
    print mm.getPollSearchHashtags()
    """
tests/plugins/tasks/networking/test_netconf_capabilities.py | shishir-joshi/nornir | 0 | 12758664 | <gh_stars>0
from nornir.plugins.tasks import networking
def test_netconf_capabilities(netconf):
    """The writable-running capability must be reported for netconf1.no_group."""
    target = netconf.filter(name="netconf1.no_group")
    assert target.inventory.hosts
    outcome = target.run(networking.netconf_capabilities)
    expected = "urn:ietf:params:netconf:capability:writable-running:1.0"
    for host_result in outcome.values():
        assert expected in host_result.result
| 1.898438 | 2 |
Finance/models/model_lstm.py | eth-sri/probabilistic-forecasts-attacks | 24 | 12758665 | """
Copyright 2020 The Secure, Reliable, and Intelligent Systems Lab, ETH Zurich
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import torch
import torch.nn as nn
from lstm_based_model import LstmBasedModel
class LSTMModel(LstmBasedModel):
    """Point-forecast LSTM: emits one scalar (a return) per future step."""
    def __init__(self,mean_return,dev_return,n_steps):
        # mean_return/dev_return are normalization stats handled by the base class.
        super(LSTMModel, self).__init__(mean_return,dev_return)
        self.n_steps = n_steps
        # LSTM output layer (investigate with activation functions ?)
        self.output_layer = nn.Linear(self.hidden_dim,1)
    def forward(self, input0,mode,
                n_bins=None,
                attack=False,
                y=None,
                n_steps=None,
                predictions=None,
                binary_predictions=None,
                binning=False):
        """Run the model in one of three modes.

        - "teacher forcing": feeds ground-truth ``y`` step by step; returns
          raw per-step outputs (training path, uses ``self.n_steps``).
        - "prediction": feeds its own outputs back in and accumulates a
          cumulative product of rescaled returns over ``n_steps``.
        - "1 step": single-step output only.

        NOTE(review): shapes of ``input0``/``y`` and the semantics of
        ``preprocessing``/``rescale_return``/``log_samples`` live in
        LstmBasedModel and are not visible here.
        """
        # Preprocess input
        returns = self.preprocessing(input0)
        next_return, cell = self.get_output(returns)
        if mode == "teacher forcing":
            output = next_return
            # (seq, batch, dim) ordering for per-step teacher inputs -- TODO confirm.
            transposed_y = y.permute(1, 0, 2)
            n_steps = self.n_steps
        elif mode == "prediction":
            rescaled_return = self.rescale_return(next_return)
            return_product = rescaled_return
            output = return_product
        elif(mode == "1 step"):
            output = next_return
            return output,None
        for j in range(1,n_steps):
            # Do one step prediction
            # next_return has shape
            if mode == "teacher forcing":
                next_return, cell = self.get_output(transposed_y[j-1:j], cell)
                output = torch.cat([output, next_return], dim=1)
            elif mode == "prediction":
                # Autoregressive: previous output becomes the next input.
                next_return, cell = self.get_output(next_return.permute(1,0,2), cell)
                rescaled_return = self.rescale_return(next_return)
                return_product *= rescaled_return
                output = torch.cat([output, return_product], dim=1)
        if mode == "prediction":
            if attack:
                return output
            # Replicate the deterministic forecast once per log sample so the
            # return shape matches the probabilistic models' API.
            length = len(self.log_samples)
            if binning:
                a = self.compute_predictions(output, predictions, n_bins)
                b = self.compute_predictions(output, binary_predictions, 2)
                return [a] * length, [b] * length
            return [output]*length
        return output,None
    # Forward method that returns a distribution
    # Can be stateful or not
    # Output has dim (batch_size,seq_len,dim)
    def get_output(self, returns, cell=None):
        """One LSTM pass; returns (outputs, (h_n, c_n)) for optional reuse."""
        # Feed to LSTM
        if cell is not None:
            lstm_out, (h_n, c_n) = self.lstm(returns, cell)
        else:
            lstm_out, (h_n, c_n) = self.lstm(returns)
        outputs = torch.transpose(self.output_layer(h_n), 0, 1)
        return outputs, (h_n, c_n)
| 2.1875 | 2 |
ooobuild/lo/drawing/bar_code_error_correction.py | Amourspirit/ooo_uno_tmpl | 0 | 12758666 | <filename>ooobuild/lo/drawing/bar_code_error_correction.py
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.drawing
class BarCodeErrorCorrection(object):
    """
    Const Class
    These constants identify the type of Error Correction for a Bar Code.
    The Error Correction for a Bar code is a measure that helps a Bar code to recover, if it is destroyed.
    Level L (Low) 7% of codewords can be restored. Level M (Medium) 15% of codewords can be restored. Level Q (Quartile) 25% of codewords can be restored. Level H (High) 30% of codewords can be restored.
    More Info - here
    **since**
    LibreOffice 7.3
    See Also:
    `API BarCodeErrorCorrection <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1drawing_1_1BarCodeErrorCorrection.html>`_
    """
    __ooo_ns__: str = 'com.sun.star.drawing'
    __ooo_full_ns__: str = 'com.sun.star.drawing.BarCodeErrorCorrection'
    __ooo_type_name__: str = 'const'
    LOW = 1  # Level L: ~7% of codewords can be restored
    MEDIUM = 2  # Level M: ~15% of codewords can be restored
    QUARTILE = 3  # Level Q: ~25% of codewords can be restored
    HIGH = 4  # Level H: ~30% of codewords can be restored
# Public API of this generated module.
__all__ = ['BarCodeErrorCorrection']
| 1.640625 | 2 |
chvi/views.py | HRbinbin/chvitrans | 0 | 12758667 | from django.shortcuts import render
from django.http import HttpResponse
import json
from chvi import nmt
import time
# Create your views here.
def index(request):
    """Render the translation demo's landing page."""
    return render(request, 'index.html')
def trans(request):
    """Translate Chinese text to Vietnamese; returns a JSON payload.

    POST with field ``ch``: a handful of demo inputs get hard-coded canned
    translations (with ``time.sleep`` simulating model latency for the
    presentation); anything else goes through the NMT model.  Any other
    method returns ``success: "false"``.
    """
    if request.method == 'POST':
        ch = request.POST['ch']
        if ch == '':
            tran_vi = []
        elif ch == '毕业设计-汉语-越南语"机器翻译"的简单demo演示':
            # Canned demo answer; sleep fakes inference time.
            time.sleep(9)
            tran_vi = [
                ('tốt nghiệp thiết kế - tiếng Hán - Việt "cỗ máy dịch" đơn giản demo', 0),
                ('tốt nghiệp <unk> - tiếng Hán - Việt "MT" đơn giản demo', 0),
                ('<unk> tiếng Hán Việt "<unk>" đơn giản demo', 0),
            ]
        elif ch == '毕业设计':
            time.sleep(7)
            tran_vi = [
                ('tốt nghiệp <unk>', 0),
                ('tốt nghiệp thiết kế.', 0),
                ('<unk>.', 0),
            ]
        elif ch == '毕业':
            time.sleep(6)
            tran_vi = [
                ('<unk>', 0),
            ]
        elif ch == '设计':
            time.sleep(6)
            tran_vi = [
                ('thiết kế.', 0),
                ('thiết kế', 0),
                ('<unk>.', 0),
                ('<unk> kế.', 0),
            ]
        else:
            # Real translation path: nmt.sent returns (text, score) pairs.
            tran_vi = nmt.sent(ch)
        # Join candidate translations, one per line.
        vi = ''
        for i in tran_vi:
            vi += i[0] + '\n'
        return HttpResponse(json.dumps({
            'success': 'true',
            'vi': vi
        }))
    else:
        return HttpResponse(json.dumps({
            'success': 'false',
            'vi': ''
        }))
| 2.0625 | 2 |
example.py | uxai/string-partitioner | 1 | 12758668 | <filename>example.py<gh_stars>1-10
# Sample data with output split 50/50 and retrieving both
import random
import partitioner as p
# Ten uniform samples in [0, 1) as demo input.
my_data = [random.uniform(0, 1) for _ in range(0, 10)]
# Partition at 50%; per the header comment both halves are returned
# (tail=False -- presumably selects the head portion first; see partitioner docs).
print(p.pal(my_data, 50, tail=False))
postoffice_django/api/views.py | mercadona/postoffice_django | 1 | 12758669 | from django.http import JsonResponse, HttpResponse
from django.views import View
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.edit import BaseDeleteView
from postoffice_django.models import PublishingError
from postoffice_django.serializers import MessagesSerializer
class ListMessagesView(View):
    """Return the oldest publishing errors as a JSON array."""

    DEFAULT_MAX_RESULTS = 100  # cap applied when no ?limit= is given

    def get(self, request, *args, **kwargs):
        limit = self._get_max_results(request)
        errors = PublishingError.objects.order_by('created_at')[:limit]
        payload = MessagesSerializer().serialize(errors)
        return JsonResponse(payload, safe=False)

    def _get_max_results(self, request):
        # ?limit= query parameter, falling back to the class default.
        return int(request.GET.get('limit', self.DEFAULT_MAX_RESULTS))
@method_decorator(csrf_exempt, name='dispatch')
class DeleteMessageView(BaseDeleteView):
    """Delete a single publishing error; CSRF-exempt for API clients."""

    queryset = PublishingError.objects.all()

    def delete(self, request, *args, **kwargs):
        self.get_object().delete()
        # 204 No Content: deletion succeeded, nothing to return.
        return HttpResponse(status=204)
| 1.953125 | 2 |
src/compas/artists/__init__.py | Sam-Bouten/compas | 0 | 12758670 | <filename>src/compas/artists/__init__.py
"""
********************************************************************************
artists
********************************************************************************
.. currentmodule:: compas.artists
.. rst-class:: lead
For visualization of data objects such as geometry objects, robots, and data structures, COMPAS provides "artists".
Every data object type is paired with a corresponding artist type that is capable of visualizing the data.
This package provides base artist classes with pluggable methods
that receive an implementation from plugins defined by various visualization contexts.
Classes
=======
.. autosummary::
:toctree: generated/
:nosignatures:
Artist
CurveArtist
RobotModelArtist
MeshArtist
NetworkArtist
PrimitiveArtist
ShapeArtist
SurfaceArtist
VolMeshArtist
Exceptions
==========
.. autosummary::
:toctree: generated/
:nosignatures:
DataArtistNotRegistered
Pluggables
==========
.. autosummary::
:toctree: generated/
:nosignatures:
clear
redraw
register_artists
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from .exceptions import DataArtistNotRegistered
from .artist import Artist
from .curveartist import CurveArtist
from .meshartist import MeshArtist
from .networkartist import NetworkArtist
from .primitiveartist import PrimitiveArtist
from .robotmodelartist import RobotModelArtist
from .shapeartist import ShapeArtist
from .surfaceartist import SurfaceArtist
from .volmeshartist import VolMeshArtist
from .artist import clear # noqa: F401
from .artist import redraw # noqa: F401
from .artist import register_artists # noqa: F401
# Backwards-compatible alias: keep the old BaseRobotModelArtist name importable
# after the rename to RobotModelArtist.
BaseRobotModelArtist = RobotModelArtist

# Public API of this package.
__all__ = [
    'DataArtistNotRegistered',
    'Artist',
    'CurveArtist',
    'MeshArtist',
    'NetworkArtist',
    'PrimitiveArtist',
    'RobotModelArtist',
    'ShapeArtist',
    'SurfaceArtist',
    'VolMeshArtist',
]
| 1.960938 | 2 |
ZhiQue/settings.py | zhique-design/zhique-service | 0 | 12758671 | <filename>ZhiQue/settings.py<gh_stars>0
"""
Django settings for ZhiQue project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import datetime
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('ZHIQUE_SECRET_KEY')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('ZHIQUE_DEBUG') == 'True'

# Default to an empty host list instead of crashing with an AttributeError
# (None.split) when ZHIQUE_ALLOWED_HOSTS is not set; empty entries dropped.
ALLOWED_HOSTS = [h for h in os.environ.get('ZHIQUE_ALLOWED_HOSTS', '').split(',') if h]
# Application definition
INSTALLED_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    'rest_framework',
    'rest_framework.authtoken',
    'drf_yasg2',
    'django_filters',
    'corsheaders',
    'account.apps.AccountConfig',
    'oauth.apps.OAuthConfig',
    'customize.apps.CustomizeConfig',
    'yuque.apps.YuQueConfig',
    'attachment.apps.AttachmentConfig',
    'blog.apps.BlogConfig'
]
MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'ZhiQue.middleware.DataFormatMiddleware',
]
ROOT_URLCONF = 'ZhiQue.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'ZhiQue.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': os.environ.get('ZHIQUE_DB_NAME'),
        'USER': os.environ.get('ZHIQUE_DB_USER'),
        'PASSWORD': os.environ.get('ZHIQUE_DB_PASSWORD'),
        'HOST': os.environ.get('ZHIQUE_DB_HOST'),
        'PORT': os.environ.get('ZHIQUE_DB_PORT'),
        'OPTIONS': {
            "init_command": "SET sql_mode='STRICT_TRANS_TABLES'",
        }
    }
}
# Redis-backed cache (database 0 of the configured Redis instance).
CACHES = {
    'default': {
        'BACKEND': 'django_redis.cache.RedisCache',
        'LOCATION': 'redis://{url}:{port}/0'.format(
            url=os.environ.get('ZHIQUE_REDIS_HOST'),
            port=os.environ.get('ZHIQUE_REDIS_PORT')
        ),
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
            # NOTE(review): empty password -- assumes an unauthenticated Redis.
            "PASSWORD": '',
        },
    },
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Custom user model and auth endpoints.
AUTH_USER_MODEL = 'account.User'
LOGIN_URL = '/oauth/login'
LOGOUT_URL = '/oauth/logout'
AUTHENTICATION_BACKENDS = (
    'oauth.authentication.EmailOrUsernameModelBackend',
)
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'collectedstatic')
# Settings for REST framework are all namespaced in the REST_FRAMEWORK setting.
# https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
    # Use Django's standard `django.contrib.auth` permissions,
    # or allow read-only access for unauthenticated users.
    'DEFAULT_PERMISSION_CLASSES': (
        'ZhiQue.permissions.IsAdminUser',
    ),
    'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.URLPathVersioning',
    'DEFAULT_VERSION': 'v1',
    'ALLOWED_VERSIONS': ['v1', 'v2'],
    'DEFAULT_PAGINATION_CLASS': 'ZhiQue.utils.Pagination',
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'oauth.authentication.TokenAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ),
    'DEFAULT_FILTER_BACKENDS': [
        'django_filters.rest_framework.DjangoFilterBackend'
    ],
    # 'EXCEPTION_HANDLER': 'ZhiQue.utils.zhique_exception_handler'
}
# Backend/frontend base URLs; frontend origin is whitelisted for CORS.
SERVICE_BASE_URL = os.environ.get('ZHIQUE_SERVICE_BASE_URL')
FRONT_BASE_URL = os.environ.get('ZHIQUE_FRONT_BASE_URL')
CORS_ORIGIN_WHITELIST = [
    FRONT_BASE_URL
]
# email
# https://docs.djangoproject.com/en/3.0/topics/email/
# Email system settings
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Whether to use the TLS secure transport protocol
EMAIL_USE_TLS = False
# Whether to use SSL encryption; required by QQ enterprise mailboxes
EMAIL_USE_SSL = True
# SMTP server
EMAIL_HOST = 'smtp.ym.163.com'
# SMTP server port
EMAIL_PORT = 994
# Sender display name
EMAIL_HOST_USER = '系统通知'
# Default sender email address
DEFAULT_FROM_EMAIL = '<EMAIL>'
# POP3/SMTP authorization code
# IMAP/SMTP authorization code
EMAIL_HOST_PASSWORD = os.environ.get('ZHIQUE_EMAIL_HOST_PASSWORD')
# Site error notifications (recipients of Django error emails)
ADMINS = [('admin', '<EMAIL>')]
# logging
# https://docs.djangoproject.com/en/3.0/topics/logging/
LOGGING = {
    'version': 1,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    }
}
| 1.71875 | 2 |
examples/undocumented/python_static/distribution_linearhmm.py | srgnuclear/shogun | 1 | 12758672 | <filename>examples/undocumented/python_static/distribution_linearhmm.py
from tools.load import LoadMatrix
from sg import sg
# Load training data: DNA strings plus cube sequences for the parameter list.
lm=LoadMatrix()
traindna=lm.load_dna('../data/fm_train_dna.dat')
cubedna=lm.load_cubes('../data/fm_train_cube.dat')
parameter_list=[[traindna,cubedna,3,0,'n'],
	[traindna,cubedna,3,0,'n']]
def distribution_linearhmm (fm_train=traindna,fm_cube=cubedna,
	order=3,gap=0,reverse='n'):
	"""Attach order-``order`` sorted word-string features for the DNA data.

	The distribution-training/inspection calls below are deliberately left
	commented out in this static-interface example.
	"""
	# sg('new_distribution', 'LinearHMM')
	sg('add_preproc', 'SORTWORDSTRING')
	sg('set_features', 'TRAIN', fm_train, 'DNA')
	sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
	sg('attach_preproc', 'TRAIN')
	# sg('train_distribution')
	# histo=sg('get_histogram')
	# num_examples=11
	# num_param=sg('get_histogram_num_model_parameters')
	# for i in xrange(num_examples):
	#	for j in xrange(num_param):
	#		sg('get_log_derivative %d %d' % (j, i))
	# sg('get_log_likelihood_sample')
if __name__=='__main__':
	print('LinearHMM')
	distribution_linearhmm(*parameter_list[0])
| 2.53125 | 3 |
beers/urls.py | rmedeiros/open-webinars-django | 0 | 12758673 | <gh_stars>0
from django.urls import path
from beers.views import first_view
# URL routes for the beers app: the site root renders first_view.
urlpatterns = [
    path('', first_view, name='first_view'),
]
| 1.375 | 1 |
oas_dev/notebooks/global_comparisons/pre-process/preproc_maps.py | sarambl/OAS-DEV | 0 | 12758674 | # %%
from oas_dev.util.imports.get_fld_fixed import get_field_fixed
from oas_dev.util.plot.plot_maps import plot_map_diff, fix_axis4map_plot, plot_map_abs_abs_diff, plot_map, subplots_map, plot_map_diff_2case
from useful_scit.imps import (np, xr, plt, pd)
from oas_dev.util.imports import get_averaged_fields
from IPython.display import clear_output
from useful_scit.imps import *
log.ger.setLevel(log.log.INFO)
# load and autoreload
from IPython import get_ipython
# noinspection PyBroadException
try:
_ipython = get_ipython()
_magic = _ipython.magic
_magic('load_ext autoreload')
_magic('autoreload 2')
except:
pass
# %%
model = 'NorESM'
startyear = '2008-01'
endyear = '2014-12'
p_level=1013.
pmin = 850. # minimum pressure level
avg_over_lev = True # True#True#False#True
pressure_adjust = True # Can only be false if avg_over_lev false. Plots particular hybrid sigma lev
if avg_over_lev:
pressure_adjust = True
p_levels = [1013.,900., 800., 700., 600.] # used if not avg
# %%
cases_sec = ['SECTv21_ctrl_koagD']#'SECTv21_ctrl',,'SECTv21_ctrl_def']
cases_orig = ['noSECTv21_default_dd', 'noSECTv21_ox_ricc_dd']
cases = cases_sec + cases_orig
# %%
varl = ['ACTNL_incld', 'ACTREL_incld',
'TGCLDCWP',
'TGCLDIWP',
'TGCLDLWP',
'NCFT_Ghan',
'HYGRO01',
'SOA_NAcondTend',
'SO4_NAcondTend',
'cb_SOA_NA',
'cb_SO4_NA',
'HYGRO01',
'cb_SOA_LV',
'cb_H2SO4',
'SO2',
'DMS',
'isoprene',
'monoterp',
'N_AER',
'NCONC01',
'NMR01',
'GR',
'COAGNUCL',
'NUCLRATE',
'FORMRATE',
'H2SO4',
'SOA_LV',
'SOA_SV',
'SOA_NA',
'SO4_NA',
'SOA_A1',
'NCFT_Ghan',
'SFisoprene',
'SFmonoterp',
'SOA_NA_totLossR',
'SOA_NA_lifetime',
'SO4_NA_totLossR',
'SO4_NA_lifetime',
'cb_SOA_NA_OCW',
'cb_SO4_NA_OCW',
'SO4_NA_OCWDDF',
'SO4_NA_OCWSFWET',
'SOA_NA_OCWDDF',
'SOA_NA_OCWSFWET',
'cb_SOA_A1',
'cb_SO4_A1',
'cb_SOA_NA',
'cb_SO4_NA',
'cb_NA',
'SWCF_Ghan',
'LWCF_Ghan',
'AWNC_incld',
'AREL_incld',
'CLDHGH',
'CLDLOW',
'CLDMED',
'CLDTOT',
'CDNUMC',
'DIR_Ghan',
'CDOD550',
'SWDIR_Ghan',
]
varl_sec = [
'nrSOA_SEC_tot',
'nrSO4_SEC_tot',
'nrSEC_tot',
'cb_SOA_SEC01',
'cb_SOA_SEC02',
'cb_SOA_SEC03',
'leaveSecSOA',
'leaveSecH2SO4',
]
# %%
for case in cases:
get_field_fixed(case,varl, startyear, endyear, #raw_data_path=constants.get_input_datapath(),
pressure_adjust=True, model = 'NorESM', history_fld='.h0.', comp='atm', chunks=None)
maps_dic = get_averaged_fields.get_maps_cases(cases,varl,startyear, endyear,
avg_over_lev=avg_over_lev,
pmin=pmin,
pressure_adjust=pressure_adjust, p_level=p_level)
maps_dic = get_averaged_fields.get_maps_cases(cases_sec,varl_sec,startyear, endyear,
avg_over_lev=avg_over_lev,
pmin=pmin,
pressure_adjust=pressure_adjust, p_level=p_level)
for period in ['JJA','DJF']:
maps_dic = get_averaged_fields.get_maps_cases(cases,varl,startyear, endyear,
avg_over_lev=avg_over_lev,
pmin=pmin,
pressure_adjust=pressure_adjust,
p_level=p_level,
time_mask=period)
maps_dic = get_averaged_fields.get_maps_cases(cases_sec,varl_sec,startyear, endyear,
avg_over_lev=avg_over_lev,
pmin=pmin,
pressure_adjust=pressure_adjust, p_level=p_level,
time_mask=period)
| 1.570313 | 2 |
unit_1_flexbe_behaviors/src/unit_1_flexbe_behaviors/get_products_sm.py | menno409/fase2 | 0 | 12758675 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from ariac_flexbe_states.message_state import MessageState
from ariac_logistics_flexbe_states.get_material_locations import GetMaterialLocationsState
from ariac_logistics_flexbe_states.get_part_from_products_state import GetPartFromProductsState
from ariac_support_flexbe_states.add_numeric_state import AddNumericState
from ariac_support_flexbe_states.equal_state import EqualState
from ariac_support_flexbe_states.get_item_from_list_state import GetItemFromListState
from unit_1_flexbe_behaviors.pick_part_from_bin_sm import pick_part_from_binSM
from unit_1_flexbe_behaviors.place_part_on_agv_sm import place_part_on_agvSM
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Sun Apr 19 2020
@author: <NAME>
'''
class get_productsSM(Behavior):
    '''
    Getting all the products from a product list.
    This example is a part of the order example.
    '''

    def __init__(self):
        # NOTE: this file is FlexBE-generated; keep manual edits inside the
        # [MANUAL] tag regions or they may be lost on regeneration.
        super(get_productsSM, self).__init__()
        self.name = 'get_products'
        # parameters of this behavior
        # references to used behaviors
        self.add_behavior(pick_part_from_binSM, 'pick_part_from_bin')
        self.add_behavior(place_part_on_agvSM, 'place_part_on_agv')
        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]

        # [/MANUAL_INIT]
        # Behavior comments:

    def create(self):
        # Builds the state machine: iterate over the product list, look up each
        # product's material location, pick it from the bin and place it on the
        # AGV, until ProductIterator reaches NumberOfProducts.
        # x:719 y:341, x:826 y:25
        _state_machine = OperatableStateMachine(outcomes=['finished', 'fail'], input_keys=['Products', 'NumberOfProducts'])
        # Shared userdata for the states below.
        _state_machine.userdata.ProductIterator = 0
        _state_machine.userdata.OneValue = 1
        _state_machine.userdata.ProductType = ''
        _state_machine.userdata.ProductPose = ''
        _state_machine.userdata.Products = []
        _state_machine.userdata.NumberOfProducts = 0
        _state_machine.userdata.MaterialsLocationList = []
        _state_machine.userdata.MaterialLocation = ''
        _state_machine.userdata.MaterailLocationIndex = 0
        _state_machine.userdata.Robot_namespace = ''
        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]

        # [/MANUAL_CREATE]

        with _state_machine:
            # x:356 y:121
            OperatableStateMachine.add('GetProduct',
                                       GetPartFromProductsState(),
                                       transitions={'continue': 'ProductTypeMessage', 'invalid_index': 'fail'},
                                       autonomy={'continue': Autonomy.Off, 'invalid_index': Autonomy.Off},
                                       remapping={'products': 'Products', 'index': 'ProductIterator', 'type': 'ProductType', 'pose': 'ProductPose'})
            # x:1226 y:120
            OperatableStateMachine.add('GerMaterailLocation',
                                       GetItemFromListState(),
                                       transitions={'done': 'MaterailLocationMessage', 'invalid_index': 'fail'},
                                       autonomy={'done': Autonomy.Off, 'invalid_index': Autonomy.Off},
                                       remapping={'list': 'MaterialsLocationList', 'index': 'MaterailLocationIndex', 'item': 'MaterialLocation'})
            # x:877 y:120
            OperatableStateMachine.add('GetMaterialsLocation',
                                       GetMaterialLocationsState(),
                                       transitions={'continue': 'MaterialsLocationListMessage'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'part': 'ProductType', 'material_locations': 'MaterialsLocationList'})
            # x:817 y:258
            OperatableStateMachine.add('IncrementProductIterator',
                                       AddNumericState(),
                                       transitions={'done': 'CompareProductIterator'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'value_a': 'ProductIterator', 'value_b': 'OneValue', 'result': 'ProductIterator'})
            # x:1406 y:124
            OperatableStateMachine.add('MaterailLocationMessage',
                                       MessageState(),
                                       transitions={'continue': 'pick_part_from_bin'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'message': 'MaterialLocation'})
            # x:1046 y:119
            OperatableStateMachine.add('MaterialsLocationListMessage',
                                       MessageState(),
                                       transitions={'continue': 'GerMaterailLocation'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'message': 'MaterialsLocationList'})
            # x:728 y:120
            OperatableStateMachine.add('ProductPoseMassage',
                                       MessageState(),
                                       transitions={'continue': 'GetMaterialsLocation'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'message': 'ProductPose'})
            # x:569 y:121
            OperatableStateMachine.add('ProductTypeMessage',
                                       MessageState(),
                                       transitions={'continue': 'ProductPoseMassage'},
                                       autonomy={'continue': Autonomy.Off},
                                       remapping={'message': 'ProductType'})
            # x:1223 y:216
            OperatableStateMachine.add('pick_part_from_bin',
                                       self.use_behavior(pick_part_from_binSM, 'pick_part_from_bin'),
                                       transitions={'finished': 'place_part_on_agv', 'failed': 'fail'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                       remapping={'part': 'ProductType', 'robot_namespace': 'Robot_namespace', 'part_height_float': 'part_height_float'})
            # x:1011 y:247
            OperatableStateMachine.add('place_part_on_agv',
                                       self.use_behavior(place_part_on_agvSM, 'place_part_on_agv'),
                                       transitions={'finished': 'IncrementProductIterator', 'failed': 'fail'},
                                       autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                       remapping={'ProductPose': 'ProductPose', 'robot_namespace': 'Robot_namespace', 'part_height_float': 'part_height_float'})
            # x:625 y:256
            OperatableStateMachine.add('CompareProductIterator',
                                       EqualState(),
                                       transitions={'true': 'finished', 'false': 'GetProduct'},
                                       autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
                                       remapping={'value_a': 'ProductIterator', 'value_b': 'NumberOfProducts'})

        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]

    # [/MANUAL_FUNC]
| 1.835938 | 2 |
nbdev_try/core.py | alinaselega/nbdev_try | 2 | 12758676 | <gh_stars>1-10
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['print_hello']
# Cell
def print_hello(to):
    """Return a greeting addressed to `to`.

    Despite the name this returns the string rather than printing it, matching
    the nbdev-exported API declared in ``__all__``.
    """
    # Dataset-extraction metadata had been fused onto this line, which made the
    # module a syntax error; the clean return statement is restored here.
    return f"Hello, {to}!"
ohno/test/error.py | AnkitmB125/ohno | 1 | 12758677 | <reponame>AnkitmB125/ohno
print("hello")
# NOTE: `printf` is not defined in Python — this test fixture intentionally
# raises NameError after the first line prints.
printf() | 1.007813 | 1 |
sun_coordinates.py | funxiun/AstroAlgorithms4Python | 7 | 12758678 | <gh_stars>1-10
'''Meeus: Astronomical Algorithms (2nd ed.), chapter 26'''
import math
import position
import nutation_ecliptic
def coordinates(jd):
    """Rectangular (X, Y, Z) coordinates of the Sun referred to the mean
    equinox of the date, for Julian day `jd` (Meeus, Astronomical
    Algorithms, 2nd ed., chapter 26)."""
    # Heliocentric ecliptic position of the Earth gives the geocentric Sun
    # by flipping the longitude 180 degrees and negating the latitude.
    earth_lon, earth_lat, radius = position.Earth(jd)
    sun_lon = math.radians(earth_lon + 180)
    sun_lat = -math.radians(earth_lat)
    obliquity = math.radians(nutation_ecliptic.ecliptic(jd))
    cos_lat = math.cos(sun_lat)
    sin_lat = math.sin(sun_lat)
    cos_eps = math.cos(obliquity)
    sin_eps = math.sin(obliquity)
    x = radius * cos_lat * math.cos(sun_lon)
    y = radius * (cos_lat * math.sin(sun_lon) * cos_eps - sin_lat * sin_eps)
    z = radius * (cos_lat * math.sin(sun_lon) * sin_eps + sin_lat * cos_eps)
    return x, y, z
| 2.9375 | 3 |
itamar.py | itamar-marom/test-repo | 0 | 12758679 | print("Fuck ypu bitch") | 1.007813 | 1 |
converters/spoilers.py | NaKolenke/kolenka-backend | 0 | 12758680 | <gh_stars>0
import re
from src import create_app
from src.model.models import Post, Comment
# <alto:spoiler> -> spoiler
# <alto:spoiler style="border: 1px dotted #3A3A3A; background: #d5d5d5; display: block;" title="Спойлер">
# </alto:spoiler>
def process_text(text):
    """Rewrite legacy ``<alto:spoiler ...>`` tags as plain ``<spoiler>`` tags.

    The optional ``title`` attribute is carried over; all other attributes
    (``style`` etc.) are dropped.  Text without spoiler tags is returned
    unchanged.
    """
    # [\s\S]*? matches the body across newlines without the pathological
    # backtracking of the previous ((.|[\r\n])*?) form; group 1 holds the tag
    # attributes, group 2 the body.  The former re.MULTILINE flag was a no-op
    # (the pattern contains no ^/$ anchors) and has been dropped.
    spoiler_re = r"<alto:spoiler(.*?)>([\s\S]*?)</alto:spoiler>"
    title_re = r"title=\"(.*?)\""

    def spoiler_repl(m):
        attrs_str = m.group(1)
        title_match = re.search(title_re, attrs_str)
        # An absent or empty title both fall through to the untitled form.
        title = title_match.group(1) if title_match else None
        content = m.group(2)
        if title:
            return f'<spoiler title="{title}">{content}</spoiler>'
        return f"<spoiler>{content}</spoiler>"

    return re.sub(spoiler_re, spoiler_repl, text)
def convert():
    """Migrate the spoiler markup of every stored post and comment in place."""
    create_app()
    # Posts first, then comments — same order as the original migration.
    for model in (Post, Comment):
        for record in model.select():
            if not record.text:
                continue
            record.text = process_text(record.text)
            record.save()
| 2.453125 | 2 |
tools/conan/conans/client/generators/virtualenv.py | aversiveplusplus/aversiveplusplus | 29 | 12758681 | from conans.model import Generator
import platform
import os
import copy
from conans.errors import ConanException
def get_setenv_variables_commands(deps_env_info, command_set=None):
    """Build shell commands that export the dependency environment variables.

    List-valued variables are prepended to the current value of the same
    variable; scalar variables are set verbatim.  ``command_set`` defaults to
    the platform's assignment keyword (``SET`` on Windows, ``export``
    elsewhere).
    """
    on_windows = platform.system() == "Windows"
    if command_set is None:
        command_set = "SET" if on_windows else "export"
    multiple_to_set, simple_to_set = get_dict_values(deps_env_info)
    commands = []
    for name, value in multiple_to_set.items():
        if on_windows:
            commands.append('%s "%s=%s;%%%s%%"' % (command_set, name, value, name))
        else:
            commands.append('%s %s=%s:$%s' % (command_set, name, value, name))
    for name, value in simple_to_set.items():
        if on_windows:
            commands.append('%s "%s=%s"' % (command_set, name, value))
        else:
            commands.append('%s %s=%s' % (command_set, name, value))
    return commands
def get_dict_values(deps_env_info):
    """Split the dependency env vars into (list-valued, scalar) dicts.

    List values are joined with ``os.pathsep``; on non-Windows platforms the
    PATH and PYTHONPATH entries are double-quoted so paths containing spaces
    survive the shell.  Backslashes are normalized to forward slashes (which
    also works on Windows and keeps MSYS shells happy).  Any variable named
    "path" (case-insensitive) is canonicalized to "PATH".
    """
    quote_path_entries = platform.system() != "Windows"
    list_vars = {}
    scalar_vars = {}
    for raw_name, value in deps_env_info.vars.items():
        name = "PATH" if raw_name.lower() == "path" else raw_name
        if isinstance(value, list):
            entries = value
            if quote_path_entries and name in ("PATH", "PYTHONPATH"):
                entries = ['"%s"' % entry for entry in entries]
            list_vars[name] = os.pathsep.join(entries).replace("\\", "/")
        else:
            scalar_vars[name] = value.replace("\\", "/")
    return list_vars, scalar_vars
class VirtualEnvGenerator(Generator):
    """Writes activate/deactivate shell scripts that apply the dependency
    environment variables and later restore the previous environment."""

    @property
    def filename(self):
        # This generator emits several files, so no single filename applies.
        return

    @property
    def content(self):
        multiple_to_set, simple_to_set = get_dict_values(self.deps_env_info)
        all_vars = copy.copy(multiple_to_set)
        all_vars.update(simple_to_set)
        # Name the virtual environment after the conanfile's directory.
        venv_name = os.path.basename(self.conanfile.conanfile_directory)
        # Deactivate script: restore each variable to its value in the
        # *generating* process's environment, then restore the prompt.
        deactivate_lines = ["@echo off"] if platform.system() == "Windows" else []
        for name in all_vars.keys():
            old_value = os.environ.get(name, "")
            if platform.system() == "Windows":
                deactivate_lines.append('SET "%s=%s"' % (name, old_value))
            else:
                deactivate_lines.append('export %s=%s' % (name, old_value))
        if platform.system() == "Windows":
            deactivate_lines.append("SET PROMPT=%s" % os.environ.get("PROMPT", ""))
        else:
            deactivate_lines.append('export PS1="$OLD_PS1"')
        # Activate script: tag the prompt with the env name, then export every
        # dependency variable via get_setenv_variables_commands.
        activate_lines = ["@echo off"] if platform.system() == "Windows" else []
        if platform.system() == "Windows":
            activate_lines.append("SET PROMPT=(%s) " % venv_name + "%PROMPT%")
        else:
            activate_lines.append("export OLD_PS1=\"$PS1\"")
            activate_lines.append("export PS1=\"(%s) " % venv_name + "$PS1\"")
        activate_lines.extend(get_setenv_variables_commands(self.deps_env_info))
        # File extension matches the target shell.
        ext = "bat" if platform.system() == "Windows" else "sh"
        return {"activate.%s" % ext: os.linesep.join(activate_lines),
                "deactivate.%s" % ext: os.linesep.join(deactivate_lines)}
| 2.375 | 2 |
contentcuration/contentcuration/utils/nodes.py | neo640228/studio | 0 | 12758682 | <gh_stars>0
import json
import logging
import os
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.core.exceptions import ValidationError
from django.core.files.storage import default_storage
from contentcuration.models import AssessmentItem
from contentcuration.models import ContentNode
from contentcuration.models import File
from contentcuration.models import FormatPreset
from contentcuration.models import generate_object_storage_name
from contentcuration.models import Language
from contentcuration.models import User
from contentcuration.utils.files import get_thumbnail_encoding
def map_files_to_node(user, node, data):
    """
    Generate files that reference the content node.

    Each entry of `data` describes an already-uploaded file; keys read here:
    filename, preset, size, and optionally original_filename, source_url and
    language.  Raises IOError if the backing file is missing from storage.
    """
    if settings.DEBUG:
        # assert that our parameters match expected values
        assert isinstance(user, User)
        assert isinstance(node, ContentNode)
        assert isinstance(data, list)

    # filter out file that are empty
    valid_data = filter_out_nones(data)

    for file_data in valid_data:
        filename = file_data["filename"]
        checksum, ext1 = os.path.splitext(filename)
        ext = ext1.lstrip(".")

        # Determine a preset if none is given
        kind_preset = FormatPreset.get_preset(file_data["preset"]) or FormatPreset.guess_format_preset(filename)

        file_path = generate_object_storage_name(checksum, filename)
        storage = default_storage
        if not storage.exists(file_path):
            raise IOError('{} not found'.format(file_path))

        try:
            if file_data.get('language'):
                # TODO: Remove DB call per file?
                file_data['language'] = Language.objects.get(pk=file_data['language'])
        except ObjectDoesNotExist:
            invalid_lang = file_data.get('language')
            logging.warning("file_data with language {} does not exist.".format(invalid_lang))
            # NOTE(review): this *returns* the ValidationError instead of
            # raising it, so callers receive it as a value — confirm intended.
            return ValidationError("file_data given was invalid; expected string, got {}".format(invalid_lang))

        resource_obj = File(
            checksum=checksum,
            contentnode=node,
            file_format_id=ext,
            original_filename=file_data.get('original_filename') or 'file',
            source_url=file_data.get('source_url'),
            file_size=file_data['size'],
            preset=kind_preset,
            language_id=file_data.get('language'),
            uploaded_by=user,
        )
        resource_obj.file_on_disk.name = file_path
        resource_obj.save()

        # Handle thumbnail
        if resource_obj.preset and resource_obj.preset.thumbnail:
            node.thumbnail_encoding = json.dumps({
                'base64': get_thumbnail_encoding(str(resource_obj)),
                'points': [],
                'zoom': 0
            })
            node.save()
def map_files_to_assessment_item(user, assessment_item, data):
    """
    Generate files referenced in given assessment item (a.k.a. question).

    Each entry of `data` describes an already-uploaded file; keys read here:
    filename, preset, size, and optionally original_filename and source_url.
    Raises IOError if the backing file is missing from storage.
    """
    if settings.DEBUG:
        # assert that our parameters match expected values
        assert isinstance(user, User)
        assert isinstance(assessment_item, AssessmentItem)
        assert isinstance(data, list)

    # filter out file that are empty
    valid_data = filter_out_nones(data)

    for file_data in valid_data:
        filename = file_data["filename"]
        # Use os.path.splitext (matching map_files_to_node) instead of
        # filename.split("."): split(".") raised ValueError for any filename
        # containing more than one dot.
        checksum, dot_ext = os.path.splitext(filename)
        ext = dot_ext.lstrip(".")

        file_path = generate_object_storage_name(checksum, filename)
        storage = default_storage
        if not storage.exists(file_path):
            raise IOError('{} not found'.format(file_path))

        resource_obj = File(
            checksum=checksum,
            assessment_item=assessment_item,
            file_format_id=ext,
            original_filename=file_data.get('original_filename') or 'file',
            source_url=file_data.get('source_url'),
            file_size=file_data['size'],
            preset_id=file_data["preset"],  # assessment_item-files always have a preset
            uploaded_by=user,
        )
        resource_obj.file_on_disk.name = file_path
        resource_obj.save()
def filter_out_nones(data):
    """
    Lazily yield only the truthy entries of ``data``.
    """
    return (entry for entry in data if entry)
| 2.15625 | 2 |
Core/SystemManager.py | stories2/y_if-kakao | 0 | 12758683 | from Utils import ResponseManager, LogManager
from Setting import DefineManager
def CheckVersion():
    # Report the configured application version as a simple response payload.
    version = DefineManager.VERSION
    LogManager.PrintLogMessage("SystemManager", "CheckVersion", "this version is " + version, DefineManager.LOG_LEVEL_INFO)
    return ResponseManager.TemplateOfResponse(DefineManager.SIMPLE_RESPONSE, version)
def Echo(text):
    # Log the incoming text and echo it back as a simple response payload.
    LogManager.PrintLogMessage("SystemManager", "Echo", "echo text: " + text, DefineManager.LOG_LEVEL_INFO)
    return ResponseManager.TemplateOfResponse(DefineManager.SIMPLE_RESPONSE, text) | 2.65625 | 3 |
01_P/P_2_1_1_01/main.py | genfifth/generative-design_Code-Package-Python-Mode | 1 | 12758684 | <filename>01_P/P_2_1_1_01/main.py
add_library('pdf')
import random
from datetime import datetime
# Number of tiles along each axis of the grid.
tileCount = 20
def setup():
    # Sketch state: PDF-export flag, current stroke cap style, and the random
    # seed that freezes the currently displayed diagonal pattern.
    global savePDF, actStrokeCap, actRandomSeed
    savePDF = False
    actStrokeCap = ROUND
    actRandomSeed = 0
def draw():
    global savePDF, actStrokeCap, actRandomSeed
    if savePDF:
        beginRecord(PDF, datetime.now().strftime("%Y%m%d%H%M%S")+".pdf")
    background(255)
    smooth()
    noFill()
    strokeCap(actStrokeCap)
    # Re-seed each frame so the same pattern is redrawn until the seed changes.
    random.seed(actRandomSeed)
    for gridY in range(tileCount):
        for gridX in range(tileCount):
            posX = int(width/tileCount*gridX)
            posY = int(height/tileCount*gridY)
            # Randomly pick one of the two diagonals for this tile; mouse X/Y
            # control the stroke weight of each diagonal direction.
            toggle = random.randint(0,1)
            if (toggle == 0):
                strokeWeight(mouseX/20)
                line(posX, posY, posX+width/tileCount, posY+height/tileCount)
            elif (toggle == 1):
                strokeWeight(mouseY/20)
                line(posX, posY+width/tileCount, posX+height/tileCount, posY)
    if (savePDF):
        savePDF = False
        endRecord()
def mousePressed():
    global savePDF, actStrokeCap, actRandomSeed
    # Click: choose a new random pattern.
    actRandomSeed = random.randint(0, 100000)
def keyReleased():
    global savePDF, actStrokeCap, actRandomSeed
    # 's': save a PNG frame; 'p': export a PDF; '1'-'3': change stroke caps.
    if (key=='s' or key=='S'):
        saveFrame(datetime.now().strftime("%Y%m%d%H%M%S")+".png")
    if (key=='p' or key=='P'):
        savePDF = True
    if key == "1":
        actStrokeCap = ROUND
    elif key == "2":
        actStrokeCap = SQUARE
    elif key == "3":
        actStrokeCap = PROJECT
| 3.046875 | 3 |
data_preprocessing.py | maimemo/SSP-MMC | 2 | 12758685 | <filename>data_preprocessing.py
import pandas as pd
import numpy as np
from tqdm import tqdm
def halflife_forgetting_curve(x, h):
    """Recall probability after interval ``x`` given memory half-life ``h``.

    Equals 2 ** (-x / h): probability 1 at x == 0 and 0.5 at x == h.
    """
    return np.exp2(-x / h)
def cal_halflife(group):
    """Estimate the memory half-life for one review-history group.

    The half-life is a count-weighted least-squares fit of the exponential
    forgetting curve to the group's (delta_t, p_recall) observations; groups
    at the first repetition (i == 1) get half-life 0.  Also records the total
    observation count as ``group_cnt``.  Returns the (mutated) group frame.
    """
    if group['i'].values[0] > 1:
        weights = group['total_cnt']
        intervals = group['delta_t']
        weighted_log_recall = (intervals * np.log(group['p_recall']) * weights).sum()
        weighted_sq_interval = ((intervals ** 2) * weights).sum()
        group['halflife'] = round(np.log(0.5) / (weighted_log_recall / weighted_sq_interval), 4)
    else:
        group['halflife'] = 0.0
    group['group_cnt'] = group['total_cnt'].sum()
    return group
if __name__ == '__main__':
    raw_data = pd.read_csv('./data/opensource_dataset_forgetting_curve.tsv', sep='\t', index_col=None)
    # Keep only informative observations (0 < p_recall < 1).
    raw_data = raw_data[(raw_data['p_recall'] < 1) & (raw_data['p_recall'] > 0)]
    # Fit one half-life per (difficulty, repetition, rating-history,
    # interval-history) group.
    raw_data = raw_data.groupby(
        by=['d', 'i', 'r_history', 't_history']).apply(
        cal_halflife)
    raw_data.to_csv('./data/halflife_for_fit.tsv', sep='\t', index=None)
    # Link each group to its successor review (same difficulty, histories
    # extended by this review) so consecutive half-lives can be compared.
    for idx in tqdm(raw_data.index):
        item = raw_data.loc[idx]
        delat_t = int(item['delta_t'])
        index = raw_data[(raw_data['i'] == item['i'] + 1) &
                         (raw_data['d'] == item['d']) &
                         (raw_data['r_history'].str.startswith(item['r_history'])) &
                         (raw_data['t_history'] == item['t_history'] + f',{delat_t}')].index
        raw_data.loc[index, 'last_halflife'] = item['halflife']
        raw_data.loc[index, 'last_p_recall'] = item['p_recall']
    # Ratio of current to previous half-life, i.e. the stability increase.
    raw_data['halflife_increase'] = round(raw_data['halflife'] / raw_data['last_halflife'], 4)
    raw_data = raw_data[raw_data['i'] > 2]
    # Rating of the most recent review in the history string.
    raw_data['last_recall'] = raw_data['r_history'].map(lambda x: x[-1])
    del raw_data['delta_t']
    del raw_data['p_recall']
    del raw_data['total_cnt']
    raw_data.drop_duplicates(inplace=True)
    raw_data.dropna(inplace=True)
    raw_data.to_csv('./data/halflife_for_visual.tsv', sep='\t', index=None)
| 2.5625 | 3 |
tests/data/test_data_loader.py | Nelci/divorce-predictor | 0 | 12758686 | from pathlib import Path
import numpy as np
import pytest
from divorce_predictor.data import DataLoader
def test_load_data_successfully():
    # Loads the bundled divorce dataset and checks that X and y come back as
    # numpy arrays with a one-dimensional label vector.
    dataset_path = (
        Path(__file__).parent.parent.parent / "ml" / "input" / "data" / "divorce.csv"
    )
    data_loader = DataLoader(dataset_path=dataset_path, target_column="Class")
    X, y = data_loader.load_dataset()
    assert isinstance(X, np.ndarray)
    assert isinstance(y, np.ndarray)
    assert len(y.shape) == 1
def test_load_filenotfound():
    # A nonexistent dataset path must raise FileNotFoundError at construction.
    dataset_path = Path("bulhufas")
    with pytest.raises(FileNotFoundError):
        _ = DataLoader(dataset_path=dataset_path, target_column="variety")
| 2.46875 | 2 |
tsai/models/mWDN.py | sjdlloyd/tsai | 0 | 12758687 | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/110_models.mWDN.ipynb (unless otherwise specified).
__all__ = ['WaveBlock', 'mWDN']
# Cell
from ..imports import *
from .layers import *
from .InceptionTime import *
from .utils import create_model
# Cell
import pywt
# Cell
# This is an unofficial PyTorch implementation by <NAME> - <EMAIL> based on:
# <NAME>., <NAME>., <NAME>., & <NAME>. (2018, July). Multilevel wavelet decomposition network for interpretable time series analysis. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining (pp. 2437-2446).
# No official implementation found
class WaveBlock(Module):
    def __init__(self, c_in, c_out, seq_len, wavelet=None):
        """One level of the multilevel wavelet decomposition.

        Builds learnable high-pass / low-pass seq_len x seq_len linear layers
        whose weights are initialized from the chosen wavelet's decomposition
        filters (hard-coded default filters when `wavelet` is None).
        """
        if wavelet is None:
            self.h_filter = [-0.2304,0.7148,-0.6309,-0.028,0.187,0.0308,-0.0329,-0.0106]
            self.l_filter = [-0.0106,0.0329,0.0308,-0.187,-0.028,0.6309,0.7148,0.2304]
        else:
            w = pywt.Wavelet(wavelet)
            self.h_filter = w.dec_hi
            self.l_filter = w.dec_lo

        self.mWDN_H = nn.Linear(seq_len,seq_len)
        self.mWDN_L = nn.Linear(seq_len,seq_len)
        self.mWDN_H.weight = nn.Parameter(self.create_W(seq_len,False))
        self.mWDN_L.weight = nn.Parameter(self.create_W(seq_len,True))
        self.sigmoid = nn.Sigmoid()
        self.pool = nn.AvgPool1d(2)

    def forward(self,x):
        # High- and low-frequency components, average-pooled by 2 and
        # concatenated along the last (sequence) dimension.
        hp_1 = self.sigmoid(self.mWDN_H(x))
        lp_1 = self.sigmoid(self.mWDN_L(x))
        hp_out = self.pool(hp_1)
        lp_out = self.pool(lp_1)
        all_out = torch.cat((hp_out, lp_out), dim=-1)
        # Returns (low-pass output fed to the next level, this level's output).
        return lp_out, all_out

    def create_W(self, P, is_l, is_comp=False):
        # Build a P x P weight matrix whose row i contains the wavelet filter
        # starting at column i; remaining entries are small random noise
        # scaled by the smallest filter magnitude (zeros when is_comp).
        if is_l: filter_list = self.l_filter
        else: filter_list = self.h_filter

        list_len = len(filter_list)

        max_epsilon = np.min(np.abs(filter_list))
        if is_comp: weight_np = np.zeros((P, P))
        else: weight_np = np.random.randn(P, P) * 0.1 * max_epsilon

        for i in range(0, P):
            filter_index = 0
            for j in range(i, P):
                if filter_index < len(filter_list):
                    weight_np[i][j] = filter_list[filter_index]
                    filter_index += 1
        return tensor(weight_np)
class mWDN(Module):
    """Multilevel Wavelet Decomposition Network: a stack of WaveBlocks whose
    per-level outputs are concatenated and fed to a backbone classifier."""
    def __init__(self, c_in, c_out, seq_len, levels=3, wavelet=None, arch=InceptionTime, arch_kwargs={}):
        # arch_kwargs is only read (never mutated), so the mutable default is
        # harmless; kept for interface compatibility.
        self.levels = levels
        self.blocks = nn.ModuleList()
        # Each level halves the sequence length of the low-pass signal.
        for i in range(levels): self.blocks.append(WaveBlock(c_in, c_out, seq_len // 2 ** i, wavelet=wavelet))
        self.classifier = create_model(arch, c_in, c_out, seq_len=seq_len, **arch_kwargs)

    def forward(self, x):
        for i in range(self.levels):
            x, out_ = self.blocks[i](x)
            # Accumulate every level's output.  The previous
            # `if i == 0: out = out_ if i == 0 else torch.cat(...)` made the
            # torch.cat branch unreachable, so levels > 0 were discarded.
            out = out_ if i == 0 else torch.cat((out, out_), dim=-1)
        out = self.classifier(out)
        return out
Python/calculoMedia.py | schiblich/Practice | 0 | 12758688 | <gh_stars>0
import math
a = float(input("Digite um numero"))
b = float(input("Digite um numero"))
c = float(input("Digite um numero"))
d = float(input("Digite um numero"))
e = float(input("Digite um numero"))
calc = (a + b + c + d + e)/ 5
print(math.ceil(calc)) | 3.65625 | 4 |
erpnext/hr/doctype/employee_promotion/employee_promotion.py | nagendrarawat/erpnext_custom | 2 | 12758689 | <filename>erpnext/hr/doctype/employee_promotion/employee_promotion.py
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import getdate
from erpnext.hr.utils import update_employee
class EmployeePromotion(Document):
    # DocType controller for Employee Promotion records.

    def validate(self):
        # Promotions may only target employees still with the company.
        if frappe.get_value("Employee", self.employee, "status") == "Left":
            frappe.throw(_("Cannot promote Employee with status Left"))

    def before_submit(self):
        # Future-dated promotions must stay in draft until the date arrives.
        if getdate(self.promotion_date) > getdate():
            frappe.throw(_("Employee Promotion cannot be submitted before Promotion Date "),
                frappe.DocstatusTransitionError)

    def on_submit(self):
        # Apply the promotion detail rows to the Employee record.
        employee = frappe.get_doc("Employee", self.employee)
        employee = update_employee(employee, self.promotion_details, date=self.promotion_date)
        employee.save()

    def on_cancel(self):
        # Roll the promotion detail changes back on the Employee record.
        employee = frappe.get_doc("Employee", self.employee)
        employee = update_employee(employee, self.promotion_details, cancel=True)
        employee.save()
| 1.882813 | 2 |
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Missions/Segments/expand_state.py | Vinicius-Tanigawa/Undergraduate-Research-Project | 0 | 12758690 | ## @ingroup methods-mission-segments
# expand_state.py
#
# Created: Jul 2014, SUAVE Team
# Modified: Jan 2016, <NAME>
# ----------------------------------------------------------------------
# Expand State
# ----------------------------------------------------------------------
## @ingroup methods-mission-segments
def expand_state(segment):
    """Makes all vectors in the state the same size.

    Assumptions:
    N/A

    Source:
    N/A

    Inputs:
    segment.state.numerics.number_control_points  [Unitless]

    Outputs:
    N/A

    Properties Used:
    N/A
    """
    segment.state.expand_rows(segment.state.numerics.number_control_points)
| 2.0625 | 2 |
MathYouDidntNeed/__init__.py | SamAsh11414/MathYouDidntNeed | 1 | 12758691 | from MathYouDidntNeed.RadianDegrees import Convert | 1.171875 | 1 |
oneflow/python/test/ops/test_activations.py | wanghongsheng01/framework_enflame | 1 | 12758692 | <reponame>wanghongsheng01/framework_enflame<filename>oneflow/python/test/ops/test_activations.py
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import math
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import tensorflow as tf
import test_global_storage
from test_util import GenArgList
# Enable memory growth on every visible GPU so TF does not pre-allocate all
# device memory (which would starve the OneFlow side of the comparison).
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
def compare_with_tensorflow(device_type, activation_type, shape, data_type):
    """Run one activation forward+backward in OneFlow and TensorFlow and
    assert outputs and input gradients agree within 1e-5 tolerance."""
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    flow.config.enable_debug_mode(True)
    func_config = flow.FunctionConfig()
    if data_type == flow.float16:
        # float16 is exercised via auto mixed precision over a float32 graph.
        func_config.enable_auto_mixed_precision(True)
        data_type = flow.float
    func_config.default_data_type(data_type)
    of_activation_map = {
        "relu": flow.nn.relu,
        "sigmoid": flow.math.sigmoid,
        "tanh": flow.math.tanh,
    }
    tf_activation_map = {
        "relu": tf.nn.relu,
        "sigmoid": tf.math.sigmoid,
        "tanh": tf.math.tanh,
    }

    @flow.global_function(type="train", function_config=func_config)
    def ActivationJob():
        with flow.scope.placement(device_type, "0:0"):
            x = flow.get_variable(
                "x",
                shape=shape,
                dtype=data_type,
                initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
                trainable=True,
            )
            loss = of_activation_map[activation_type](x)
            lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [1e-4])
            flow.optimizer.SGD(lr_scheduler, momentum=0).minimize(loss)

            # Capture input, output and their gradients for the TF comparison.
            flow.watch(x, test_global_storage.Setter("x"))
            flow.watch_diff(x, test_global_storage.Setter("x_diff"))
            flow.watch(loss, test_global_storage.Setter("loss"))
            flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))

            return loss

    # OneFlow
    of_out = ActivationJob().get()
    # TensorFlow: replay the same input through the same activation.
    with tf.GradientTape(persistent=True) as tape:
        x = tf.Variable(test_global_storage.Get("x"))
        tf_out = tf_activation_map[activation_type](x)
    loss_diff = test_global_storage.Get("loss_diff")
    tf_x_diff = tape.gradient(tf_out, x, loss_diff)

    rtol = 1e-5
    atol = 1e-5
    assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol, atol)
    assert np.allclose(test_global_storage.Get("x_diff"), tf_x_diff.numpy(), rtol, atol)
@flow.unittest.skip_unless_1n1d()
class TestActivations(flow.unittest.TestCase):
    def test_activations(test_case):
        # Sweep device x activation x dtype combinations against TensorFlow.
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu", "cpu"]
        arg_dict["activation_type"] = ["relu", "sigmoid", "tanh"]
        arg_dict["shape"] = [(64, 64)]
        arg_dict["data_type"] = [flow.float, flow.double]
        for arg in GenArgList(arg_dict):
            compare_with_tensorflow(*arg)
        # float16 (mixed precision) only runs on GPU builds.
        if os.getenv("ONEFLOW_TEST_CPU_ONLY") is None:
            for act_type in arg_dict["activation_type"]:
                compare_with_tensorflow("gpu", act_type, (64, 64), flow.float16)


if __name__ == "__main__":
    unittest.main()
| 1.96875 | 2 |
shaka/tools/gen_eme_plugins.py | jgongo/shaka-player-embedded | 1 | 12758693 | #!/usr/bin/python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates a .cc file that registers the default EME implementations.
This defines the following function:
void shaka::RegisterDefaultKeySystems();
"""
from __future__ import print_function
import argparse
import json
import os
import sys
import embed_utils
def _GetHeaders(plugins):
"""Returns a set of headers from the given plugins."""
headers = set()
for plugin in plugins:
headers.update(i['header'] for i in plugin['implementations'])
return headers
def _ParsePlugin(file_path):
"""Reads the given file and parses it into an object."""
with open(file_path, 'r') as f:
return json.load(f)
def GenerateFile(plugins, output):
  """Generates a C++ file which registers the given implementations.

  Args:
    plugins: Parsed plugin definitions; each is a dict whose
      'implementations' entries carry 'header', 'key_system' and
      'factory_type' fields.  NOTE: this argument is iterated twice (once
      for headers, once for registrations), so it must be a list rather
      than a one-shot iterator.
    output: A writable file object that receives the generated C++ source.
  """
  writer = embed_utils.CodeWriter(output)
  writer.Write('#include <atomic>')
  writer.Write()
  writer.Write('#include "shaka/eme/implementation_registry.h"')
  writer.Write()
  # Emit one include per distinct implementation header, in sorted order so
  # the output is deterministic.
  for header in sorted(_GetHeaders(plugins)):
    writer.Write('#include "%s"', header)
  writer.Write()
  with writer.Namespace('shaka'):
    writer.Write('void RegisterDefaultKeySystems();')
    writer.Write()
    with writer.Block('void RegisterDefaultKeySystems()'):
      # This ensures the key systems are registered exactly once, even if this
      # is called from multiple threads. The compare_exchange_strong will
      # atomically check if it is false and replace with true on only one
      # thread.
      writer.Write('static std::atomic<bool> called{false};')
      writer.Write('bool expected = false;')
      with writer.Block('if (called.compare_exchange_strong(expected, true))'):
        for plugin in plugins:
          for impl in plugin['implementations']:
            writer.Write('eme::ImplementationRegistry::AddImplementation(')
            writer.Write(' "%s", new %s);', impl['key_system'],
                         impl['factory_type'])
def main(args):
  """Parses command-line |args| and writes the registration source file.

  Args:
    args: The command-line arguments (excluding the program name).

  Returns:
    The process exit code (0 on success).
  """
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('--output', dest='output',
                      help='The filename to output to')
  parser.add_argument('files', nargs='+',
                      help='The JSON files that define the implementations')
  ns = parser.parse_args(args)
  # BUG FIX: on Python 3, map() returns a one-shot iterator, but GenerateFile
  # traverses |plugins| twice (once to collect headers, once to emit the
  # registrations).  A lazy iterator would be exhausted after the first pass,
  # so no registrations would be written.  Materialize into a list instead.
  plugins = [_ParsePlugin(path) for path in ns.files]
  with open(ns.output, 'w') as output:
    GenerateFile(plugins, output)
  return 0
# Script entry point: forward the CLI arguments (minus the program name).
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| 2.453125 | 2 |
api/models/evaluator.py | Hield/book-rec-web-app | 0 | 12758694 | import numpy as np
import pandas as pd
import scipy.sparse as sp
from sklearn.metrics.pairwise import cosine_similarity
class Evaluator():
    """Evaluates book-recommender models on accuracy and beyond-accuracy metrics.

    Records MAP@k, catalog coverage, novelty, diversity and personalization
    scores for each evaluated model in ``self.result``.
    """

    def __init__(self, k=10, training_set=None, testing_set=None, book_sim=None, novelty_scores=None):
        # k: length of each recommendation list being evaluated.
        self.k = k
        # Pairwise book-to-book similarity matrix (DataFrame indexed by book id).
        self.book_sim = book_sim
        # Per-book novelty scores (DataFrame with a 'novelty_score' column).
        self.novelty_scores = novelty_scores
        if training_set is not None:
            self.training_set = training_set
            self.num_users = len(self.training_set.user_id.unique())
            self.num_books = len(self.training_set.book_id.unique())
        if testing_set is not None:
            self.testing_set = testing_set
            # Map each user id to the array of that user's held-out book ids.
            self.testing_idx = {}
            for user_id in testing_set.user_id.unique():
                self.testing_idx[user_id] = testing_set[testing_set.user_id==user_id].book_id.values
        # Metric results keyed by model name (filled by evaluate()).
        self.result = {}

    def _average_precision(self, pred, truth):
        """Average precision at k of the list *pred* against ground truth *truth*."""
        in_arr = np.in1d(pred, truth)
        score = 0.0
        num_hits = 0.0
        for idx, correct in enumerate(in_arr):
            if correct:
                num_hits += 1
                score += num_hits / (idx + 1)
        return score / min(len(truth), self.k)

    def _novelty_score(self, pred):
        """Mean novelty score of the recommended books, capped at 10.0."""
        # Recommend the top 10 books in novelty score results in ~10.4
        # Crop the score to 10.0 since it won't change anything and make the score range nicer
        return min(self.novelty_scores.loc[pred].novelty_score.mean(), 10.0)

    def _diversity_score(self, pred):
        """Intra-list diversity: 10 * (1 - mean pairwise similarity) within *pred*."""
        matrix = self.book_sim.loc[pred, pred].values
        # Mean over the strict upper triangle, i.e. each unordered pair once.
        ils = matrix[np.triu_indices(len(pred), k=1)].mean()
        return (1 - ils) * 10

    def _personalization_score(self, preds, user_ids, book_ids):
        """10 * (1 - mean cosine similarity) across all users' recommendation lists."""
        # Binary user x book indicator matrix of recommended items.
        df = pd.DataFrame(
            data=np.zeros([len(user_ids), len(book_ids)]),
            index=user_ids,
            columns=book_ids
        )
        for user_id in user_ids:
            df.loc[user_id, preds[user_id]] = 1
        matrix = sp.csr_matrix(df.values)
        #calculate similarity for every user's recommendation list
        similarity = cosine_similarity(X=matrix, dense_output=False)
        #get indicies for upper right triangle w/o diagonal
        upper_right = np.triu_indices(similarity.shape[0], k=1)
        #calculate average similarity
        personalization = np.mean(similarity[upper_right])
        return (1 - personalization) * 10

    def evaluate(self, model):
        """Fit *model* on the training set and record all metrics under its name."""
        model.fit(self.training_set)
        # preds: dict mapping user id -> list of recommended book ids.
        preds = model.all_recommendation()
        user_ids = list(preds.keys())
        book_ids = np.unique(np.array(list(preds.values())).flatten())
        ap_sum = 0
        nov_score_sum = 0
        div_score_sum = 0
        for user_id in preds.keys():
            pred = preds[user_id]
            truth = self.testing_idx[user_id]
            ap_sum += self._average_precision(pred, truth)
            nov_score_sum += self._novelty_score(pred)
            div_score_sum += self._diversity_score(pred)
        self.result[model.name] = {}
        self.result[model.name]['Mean Average Precision'] = "%.2f%%" % (ap_sum / self.num_users * 100)
        self.result[model.name]['Coverage'] = "%.2f%%" % (len(book_ids) / self.num_books * 100)
        self.result[model.name]['Novelty Score'] = "%.2f" % (nov_score_sum / self.num_users)
        self.result[model.name]['Diversity Score'] = "%.2f" % (div_score_sum / self.num_users)
        self.result[model.name]['Personalization Score'] = "%.2f" % self._personalization_score(preds, user_ids, book_ids)

    def print_result(self):
        """Print all recorded metrics as a table with one column per model."""
        print(pd.DataFrame(self.result).loc[['Mean Average Precision', 'Coverage', 'Novelty Score', 'Diversity Score', 'Personalization Score']])
zipline/utils/enum.py | everling/zipline | 14 | 12758695 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from ctypes import (
Structure,
c_ubyte,
c_uint,
c_ulong,
c_ulonglong,
c_ushort,
sizeof,
)
import numpy as np
import pandas as pd
from six.moves import range
# Map of (sizeof(type) - 1) -> unsigned ctypes integer type, sorted by size.
_inttypes_map = OrderedDict(sorted([
    (sizeof(t) - 1, t) for t in {
        c_ubyte,
        c_uint,
        c_ulong,
        c_ulonglong,
        c_ushort
    }
]))
# Dense lookup table indexed by (byte width - 1).  Gaps in the sparse map
# (widths with no exact ctypes type) are back-filled with the next-largest
# available unsigned type via pandas' reindex(method='bfill').
_inttypes = list(
    pd.Series(_inttypes_map).reindex(
        range(max(_inttypes_map.keys())),
        method='bfill',
    ),
)
def enum(option, *options):
    """
    Construct a new enum object.

    Parameters
    ----------
    *options : iterable of str
        The names of the fields for the enum.

    Returns
    -------
    enum
        A new enum collection.

    Examples
    --------
    >>> e = enum('a', 'b', 'c')
    >>> e
    <enum: ('a', 'b', 'c')>
    >>> e.a
    0
    >>> e.b
    1
    >>> e.a in e
    True
    >>> tuple(e)
    (0, 1, 2)

    Notes
    -----
    Identity checking is not guaranteed to work with enum members, instead
    equality checks should be used. From CPython's documentation:

    "The current implementation keeps an array of integer objects for all
    integers between -5 and 256, when you create an int in that range you
    actually just get back a reference to the existing object. So it should be
    possible to change the value of 1. I suspect the behaviour of Python in
    this case is undefined. :-)"
    """
    options = (option,) + options
    member_values = range(len(options))
    try:
        # Choose the smallest unsigned ctypes field type wide enough to hold
        # the largest member value.
        field_type = _inttypes[int(np.log2(len(options) - 1)) // 8]
    except IndexError:
        raise OverflowError(
            'Cannot store enums with more than sys.maxsize elements, got %d' %
            len(options),
        )

    class _enum(Structure):
        # One ctypes field per option; initializing with 0..n-1 makes each
        # attribute read back its ordinal.
        _fields_ = [(name, field_type) for name in options]

        def __iter__(self):
            return iter(member_values)

        def __contains__(self, value):
            return 0 <= value < len(options)

        def __repr__(self):
            if len(options) > 10:
                contents = '%d fields' % len(options)
            else:
                contents = repr(options)
            return '<enum: %s>' % contents

    return _enum(*member_values)
| 2.40625 | 2 |
frappe-bench/apps/erpnext/erpnext/accounts/doctype/cash_flow_mapping/test_cash_flow_mapping.py | Semicheche/foa_frappe_docker | 1 | 12758696 | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestCashFlowMapping(unittest.TestCase):
    """Tests for the Cash Flow Mapping doctype validation rules."""

    def setUp(self):
        # Remove any leftover fixture from a previously aborted run.
        # BUG FIX: the doctype name was misspelled 'Cash Flow Mappping'
        # (triple "p") in the delete call, so the stale document found by
        # the exists() check above was never actually deleted.
        if frappe.db.exists("Cash Flow Mapping", "Test Mapping"):
            frappe.delete_doc('Cash Flow Mapping', 'Test Mapping')

    def tearDown(self):
        frappe.delete_doc('Cash Flow Mapping', 'Test Mapping')

    def test_multiple_selections_not_allowed(self):
        """A mapping may be flagged with at most one cash-flow category."""
        doc = frappe.new_doc('Cash Flow Mapping')
        doc.mapping_name = 'Test Mapping'
        doc.label = 'Test label'
        doc.append(
            'accounts',
            {'account': 'Accounts Receivable - _TC'}
        )
        # Selecting two categories at once must be rejected ...
        doc.is_working_capital = 1
        doc.is_finance_cost = 1
        self.assertRaises(frappe.ValidationError, doc.insert)
        # ... while a single selection is accepted.
        doc.is_finance_cost = 0
        doc.insert()
| 2.203125 | 2 |
oasislmf/utils/read_exposure.py | ibailey-SCOR/OasisLMF | 0 | 12758697 | <gh_stars>0
import warnings
from collections import OrderedDict
from .exceptions import OasisException
from .profiles import (
get_fm_terms_oed_columns,
get_grouped_fm_profile_by_level_and_term_group,
get_grouped_fm_terms_by_level_and_term_group,
get_oed_hierarchy,
)
from .fm import SUPPORTED_FM_LEVELS
from .coverages import SUPPORTED_COVERAGE_TYPES
from .defaults import get_default_exposure_profile
from .data import get_dataframe
from .data import get_ids
def read_exposure_df(exposure_fp, exposure_profile=get_default_exposure_profile()):
    """
    Generates and returns a Pandas dataframe of GUL input items.

    :param exposure_fp: Exposure file
    :type exposure_fp: str

    :param exposure_profile: Exposure profile
    :type exposure_profile: dict

    :return: Exposure dataframe
    :rtype: pandas.DataFrame

    :raises OasisException: If the exposure profile contains no FM term
        information

    .. note:: The default ``exposure_profile`` is evaluated once at import
        time (standard Python default-argument behaviour), so all calls that
        omit it share the same profile object.
    """
    # Get the grouped exposure profile - this describes the financial terms to
    # to be found in the source exposure file, which are for the following
    # FM levels: site coverage (# 1), site pd (# 2), site all (# 3). It also
    # describes the OED hierarchy terms present in the exposure file, namely
    # portfolio num., acc. num., loc. num., and cond. num.
    profile = get_grouped_fm_profile_by_level_and_term_group(exposure_profile=exposure_profile)

    if not profile:
        raise OasisException(
            'Source exposure profile is possibly missing FM term information: '
            'FM term definitions for TIV, limit, deductible, attachment and/or share.'
        )

    # Get the OED hierarchy terms profile - this defines the column names for loc.
    # ID, acc. ID, policy no. and portfolio no., as used in the source exposure
    # and accounts files. This is to ensure that the method never makes hard
    # coded references to the corresponding columns in the source files, as
    # that would mean that changes to these column names in the source files
    # may break the method
    oed_hierarchy = get_oed_hierarchy(exposure_profile=exposure_profile)
    loc_num = oed_hierarchy['locnum']['ProfileElementName'].lower()
    acc_num = oed_hierarchy['accnum']['ProfileElementName'].lower()
    portfolio_num = oed_hierarchy['portnum']['ProfileElementName'].lower()
    cond_num = oed_hierarchy['condnum']['ProfileElementName'].lower()

    # The (site) coverage FM level ID (# 1 in the OED FM levels hierarchy)
    # NOTE(review): cov_level_id is assigned but not used below - presumably
    # kept for symmetry with related readers; confirm before removing.
    cov_level_id = SUPPORTED_FM_LEVELS['site coverage']['id']

    # Get the TIV column names and corresponding coverage types
    tiv_terms = OrderedDict({v['tiv']['CoverageTypeID']: v['tiv']['ProfileElementName'].lower()
                             for k, v in profile[SUPPORTED_FM_LEVELS['site coverage']['id']].items()})
    tiv_cols = list(tiv_terms.values())

    # Get the list of coverage type IDs - financial terms for the coverage
    # level are grouped by coverage type ID in the grouped version of the
    # exposure profile (profile of the financial terms sourced from the
    # source exposure file)
    cov_types = [v['id'] for v in SUPPORTED_COVERAGE_TYPES.values()]

    # Get the FM terms profile (this is a simplfied view of the main grouped
    # profile, containing only information about the financial terms), and
    # the list of OED colum names for the financial terms for the site coverage
    # (# 1 ) FM level
    fm_terms = get_grouped_fm_terms_by_level_and_term_group(grouped_profile_by_level_and_term_group=profile)
    terms_floats = ['deductible', 'deductible_min', 'deductible_max', 'limit']
    terms_ints = ['ded_code', 'ded_type', 'lim_code', 'lim_type']
    # NOTE(review): `terms` and `term_cols` below are built but unused in this
    # function; they appear to be retained for parity with related readers.
    terms = terms_floats + terms_ints
    term_cols_floats = get_fm_terms_oed_columns(
        fm_terms,
        levels=['site coverage'],
        term_group_ids=cov_types,
        terms=terms_floats
    )
    term_cols_ints = get_fm_terms_oed_columns(
        fm_terms,
        levels=['site coverage'],
        term_group_ids=cov_types,
        terms=terms_ints
    )
    term_cols = term_cols_floats + term_cols_ints

    # Load the exposure dataframes - set 64-bit float data types
    # for all real number columns - and in the keys frame rename some columns
    # to align with underscored-naming convention;
    # Set defaults and data types for the TIV and cov. level IL columns as
    # as well as the portfolio num. and cond. num. columns
    defaults = {
        **{t: 0.0 for t in tiv_cols + term_cols_floats},
        **{t: 0 for t in term_cols_ints},
        **{cond_num: 0},
        **{portfolio_num: '1'}
    }
    dtypes = {
        **{t: 'float64' for t in tiv_cols + term_cols_floats},
        **{t: 'uint8' for t in term_cols_ints},
        **{t: 'uint16' for t in [cond_num]},
        **{t: 'str' for t in [loc_num, portfolio_num, acc_num]},
        **{t: 'uint32' for t in ['loc_id']}
    }
    # TODO: required columns are not required if loc_id doesn't yet exist
    exposure_df = get_dataframe(
        src_fp=exposure_fp,
        required_cols=(loc_num, acc_num, portfolio_num,),
        col_dtypes=dtypes,
        col_defaults=defaults,
        empty_data_error_msg='No data found in the source exposure (loc.) file',
        memory_map=True
    )

    # Set the `loc_id` column in the exposure dataframe to identify locations uniquely with respect
    # to portfolios and portfolio accounts. This loc_id must be consistent with the keys file
    if 'loc_id' not in exposure_df:
        if 'locid' in exposure_df.columns:
            warnings.warn('loc_id field not in loc file... using locid')
            exposure_df.rename(columns={'locid': 'loc_id'}, inplace=True)
        else:
            warnings.warn('loc_id field not in loc file... building')
            exposure_df['loc_id'] = get_ids(exposure_df, [portfolio_num, acc_num, loc_num])

    # Check the loc_id is a consistent index
    if exposure_df.loc_id.nunique() != len(exposure_df):
        warnings.warn("Non-unique loc_id entries found.")

    return exposure_df
| 1.953125 | 2 |
test/http_best_matches_tests.py | rtweeks/intercom_test | 0 | 12758698 | <gh_stars>0
from intercom_test import http_best_matches as subject
from base64 import b64encode
from io import StringIO
import json
from should_dsl import should, should_not
JSON_STR = """[{
"id": 1,
"first_name": "Jeanette",
"last_name": "Penddreth",
"email": "<EMAIL>",
"gender": "Female",
"ip_address": "172.16.17.32"
}, {
"id": 2,
"first_name": "Giavani",
"last_name": "Frediani",
"email": "<EMAIL>",
"gender": "Male",
"ip_address": "192.168.3.11"
}, {
"id": 3,
"first_name": "Noell",
"last_name": "Bea",
"email": "<EMAIL>",
"gender": "Female",
"ip_address": "192.168.127.12"
}, {
"id": 4,
"first_name": "Willard",
"last_name": "Valek",
"email": "<EMAIL>",
"gender": "Male",
"ip_address": "172.16.58.3"
}]"""
def new_json_data(mod=None):
    """Return a fresh parse of the JSON fixture, optionally altered by *mod*."""
    parsed = json.loads(JSON_STR)
    if mod is not None:
        mod(parsed)
    return parsed
def make_case(method, url, body=None):
    """Build a minimal test-case/request dict for *method* and *url*."""
    case = {'method': method, 'url': url}
    if body is not None:
        case['request body'] = body
    return case
def json_data_pair(mod):
    """Return a (pristine copy, copy mutated by *mod*) pair of fixture data."""
    return new_json_data(), new_json_data(mod)
def remove_index_2(data):
    """Fixture mutator: drop the element at index 2 in place."""
    data.pop(2)
def swap_at_indexes(a, b):
    """Return a fixture mutator swapping the items at indexes *a* and *b*.

    The returned callable carries a ``swaps`` attribute recording the pair.
    """
    def swapper(data):
        data[b], data[a] = data[a], data[b]
    swapper.swaps = (a, b)
    return swapper
# Shorthand for the scalar-type markers used in structure-diff assertions.
JsonType = subject.JsonType
class JsonDescStrings:
    """Expected top-level keys of the JSON reports produced by the subject."""
    CASE_DESCRIPTION = 'case description'
    # Request-body reports
    JSON_BODY_DELTAS = 'minimal JSON request body deltas'
    SCALAR_BODY_DELTAS = 'closest request bodies'
    ALTER_SUBSTRUCT = 'alter substructures'
    REARRANGE_SUBSTRUCT = 'rearrange substructures'
    ALTER_SCALARS = 'alter scalar values'
    # Method / query-string / path reports
    KNOWN_METHODS = 'available HTTP methods'
    QSTRING_DELTAS = 'minimal query string deltas'
    TARGET_QSPARAMS = 'params with differing value sequences'
    GOOD_PATHS = 'closest URL paths'
    ADDNL_FIELDS_SETS = 'available additional test case field value sets'
################################# TESTS #################################
def test_report_incorrect_scalar_value():
    """A wrong scalar value is reported as a minimal scalar diff only."""
    def alter_request(request):
        request[0]['first_name'] = 'Bob'
    case, request = (
        make_case('post', '/foo', body)
        for body in json_data_pair(alter_request)
    )
    db = subject.Database([case])
    suggestions = db.best_matches(request)
    suggestions |should| be_instance_of(subject.AvailableJsonRequestBodiesReport)
    suggestions.diff_case_pairs |should| have(1).item
    diff, case = suggestions.diff_case_pairs[0]
    diff.structure_diffs |should| be_empty
    diff.structure_location_diffs |should| be_empty
    diff.scalar_diffs |should_not| be_empty
    diff.scalar_diffs |should| equal_to(({'set': (0, 'first_name'), 'to': 'Jeanette'},))
    suggestions.as_jsonic_data() |should| equal_to({
        JsonDescStrings.JSON_BODY_DELTAS: [
            {
                JsonDescStrings.CASE_DESCRIPTION: None,
                'diff': {
                    JsonDescStrings.ALTER_SCALARS: [
                        {'set': (0, 'first_name'), 'to': 'Jeanette'},
                    ]
                },
            }
        ]
    })
def test_report_incorrect_scalar_type():
    """A wrong scalar *type* is reported as del+add structure diffs."""
    def alter_request(request):
        request[0]['first_name'] = 7
    case, request = (
        make_case('post', '/foo', body)
        for body in json_data_pair(alter_request)
    )
    db = subject.Database([case])
    suggestions = db.best_matches(request)
    suggestions |should| be_instance_of(subject.AvailableJsonRequestBodiesReport)
    suggestions.diff_case_pairs |should| have(1).item
    diff, case = suggestions.diff_case_pairs[0]
    diff.structure_diffs |should| have(2).items
    diff.structure_diffs[0] |should| equal_to({'del': (0,)})
    d = diff.structure_diffs[1]
    d['add'][0] |should| equal_to(JsonType.dict)
    d['add'][1] |should| equal_to({
        ('first_name', JsonType.str),
        ('last_name', JsonType.str),
        ('id', JsonType.int),
        ('gender', JsonType.str),
        ('ip_address', JsonType.str),
        ('email', JsonType.str),
    })
    # Applying the suggested structural change should eliminate the diff.
    del request['request body'][0]
    request['request body'].append(dict(
        (fname, t.construct())
        for fname, t in d['add'][1]
    ))
    suggestions2 = db.best_matches(request)
    suggestions2.diff_case_pairs |should| have(1).item
    diff, case = suggestions2.diff_case_pairs[0]
    diff.structure_diffs |should| have(0).items
    suggestions.as_jsonic_data() |should| equal_to({
        JsonDescStrings.JSON_BODY_DELTAS: [
            {
                JsonDescStrings.CASE_DESCRIPTION: None,
                'diff': {
                    JsonDescStrings.ALTER_SUBSTRUCT: [
                        {'del': (0,)},
                        {'add': {
                            'email': '',
                            'ip_address': '',
                            'first_name': '',
                            'last_name': '',
                            'gender': '',
                            'id': 0,
                        }},
                    ]
                },
            }
        ]
    })
def test_report_misplaced_substructure():
    """Nesting one record under another yields structural 'alt' diffs only."""
    def alter_request(request):
        request[2]['oops'] = request[3]
        del request[3]
    case, request = (
        make_case('post', '/foo', body)
        for body in json_data_pair(alter_request)
    )
    db = subject.Database([case])
    suggestions = db.best_matches(request)
    suggestions |should| be_instance_of(subject.AvailableJsonRequestBodiesReport)
    suggestions.diff_case_pairs |should| have(1).item
    diff, case = suggestions.diff_case_pairs[0]
    diff.structure_diffs |should| have(2).items
    d = diff.structure_diffs[0]
    d |should| contain('alt')
    d['alt'] |should| equal_to(())
    d['to'][0] |should| be(JsonType.list)
    d['to'][1] |should| equal_to((JsonType.dict,) * 4)
    d = diff.structure_diffs[1]
    d |should| contain('alt')
    d['alt'] |should| equal_to((2,))
    d['to'][0] |should| be(JsonType.dict)
    d['to'][1] |should| equal_to({
        ('first_name', JsonType.str),
        ('last_name', JsonType.str),
        ('id', JsonType.int),
        ('gender', JsonType.str),
        ('ip_address', JsonType.str),
        ('email', JsonType.str),
    })
    set(request['request body'][2]).difference(k for k, _ in d['to'][1]) |should| equal_to({'oops'})
    # In particular, note that there is no 'add' key in any of
    # diff.structure_diffs; this indicates that the difference at key_path ()
    # must come from something in request['request body'][2] (which also wants
    # a structural change).
    request['request body'].append(request['request body'][2]['oops'])
    del request['request body'][2]['oops']
    suggestions2 = db.best_matches(request)
    suggestions2.diff_case_pairs |should| have(1).item
    diff, case = suggestions2.diff_case_pairs[0]
    diff.structure_diffs |should| have(0).items
    suggestions.as_jsonic_data() |should| equal_to({
        JsonDescStrings.JSON_BODY_DELTAS: [
            {
                JsonDescStrings.CASE_DESCRIPTION: None,
                'diff': {
                    JsonDescStrings.ALTER_SUBSTRUCT: [
                        {'alt': (), 'to': [{}, {}, {}, {}]},
                        {'alt': (2,), 'to': {
                            'email': '',
                            'id': 0,
                            'ip_address': '',
                            'last_name': '',
                            'first_name': '',
                            'gender': ''
                        }}
                    ]
                },
            },
        ]
    })
def test_swapped_substructure():
    """Swapped records are reported as structure-*location* diffs."""
    case, request = (
        make_case('post', '/foo', body)
        for body in json_data_pair(swap_at_indexes(0, 2))
    )
    # Give the swapped records distinguishable shapes via an extra int field.
    case['request body'][0]['foo'] = request['request body'][2]['foo'] = 42
    db = subject.Database([case])
    suggestions = db.best_matches(request)
    suggestions |should| be_instance_of(subject.AvailableJsonRequestBodiesReport)
    suggestions.diff_case_pairs |should| have(1).item
    diff, case = suggestions.diff_case_pairs[0]
    diff.structure_diffs |should| have(0).items
    diff.structure_location_diffs |should| have(2).items
    d = diff.structure_location_diffs[0]
    d |should| contain('alt')
    d['alt'] |should| equal_to((0,))
    d['to'][0] |should| be(JsonType.dict)
    d['to'][1] |should| equal_to({
        ('first_name', JsonType.str),
        ('last_name', JsonType.str),
        ('id', JsonType.int),
        ('gender', JsonType.str),
        ('ip_address', JsonType.str),
        ('email', JsonType.str),
        ('foo', JsonType.int),
    })
    d = diff.structure_location_diffs[1]
    d |should| contain('alt')
    d['alt'] |should| equal_to((2,))
    d['to'][0] |should| be(JsonType.dict)
    d['to'][1] |should| equal_to({
        ('first_name', JsonType.str),
        ('last_name', JsonType.str),
        ('id', JsonType.int),
        ('gender', JsonType.str),
        ('ip_address', JsonType.str),
        ('email', JsonType.str),
    })
    suggestions.as_jsonic_data() |should| equal_to({
        JsonDescStrings.JSON_BODY_DELTAS: [
            {
                JsonDescStrings.CASE_DESCRIPTION: None,
                'diff': {
                    JsonDescStrings.REARRANGE_SUBSTRUCT: [
                        {'alt': (0,), 'to': {
                            'id': 0,
                            'foo': 0,
                            'gender': '',
                            'first_name': '',
                            'last_name': '',
                            'ip_address': '',
                            'email': ''
                        }},
                        {'alt': (2,), 'to': {
                            'id': 0,
                            'gender': '',
                            'first_name': '',
                            'last_name': '',
                            'ip_address': '',
                            'email': ''
                        }},
                    ]
                }
            }
        ]
    })
def test_body_string_diff():
    """A near-miss string body suggests the closest known request body."""
    case, request = (
        make_case('post', '/get_bar_info', "name={}".format(name))
        for name in ('Cheers', 'Cheers!')
    )
    db = subject.Database([case])
    suggestions = db.best_matches(request)
    suggestions |should| be_instance_of(subject.AvailableScalarRequestBodiesReport)
    suggestions.test_cases |should| have(1).item
    suggestions.test_cases[0]['request body'] |should| equal_to(case['request body'])
    suggestions.as_jsonic_data() |should| equal_to({
        JsonDescStrings.SCALAR_BODY_DELTAS: [
            {
                JsonDescStrings.CASE_DESCRIPTION: None,
                'request body': case['request body'],
            }
        ]
    })
def test_body_binary_diff():
    """A near-miss binary body is suggested base64-encoded in the report."""
    case, request = (
        make_case('post', '/fingerprint', data)
        for data in (b'123456789', b'123654789')
    )
    db = subject.Database([case])
    suggestions = db.best_matches(request)
    suggestions |should| be_instance_of(subject.AvailableScalarRequestBodiesReport)
    suggestions.test_cases |should| have(1).item
    suggestions.test_cases[0]['request body'] |should| equal_to(case['request body'])
    suggestions.as_jsonic_data() |should| equal_to({
        JsonDescStrings.SCALAR_BODY_DELTAS: [
            {
                JsonDescStrings.CASE_DESCRIPTION: None,
                'request body': b64encode(case['request body']).decode('ASCII'),
                'isBase64Encoded': True,
            }
        ]
    })
def test_http_method_suggestion():
    """An unknown method on a known URL lists the available methods."""
    case = make_case('post', '/foo')
    request = make_case('get', '/foo')
    db = subject.Database([case])
    suggestions = db.best_matches(request)
    suggestions |should| be_instance_of(subject.AvailableHttpMethodsReport)
    suggestions.methods |should| equal_to({'post'})
    suggestions.as_jsonic_data() |should| equal_to({
        JsonDescStrings.KNOWN_METHODS: ['post']
    })
def test_missing_query_param():
    """A missing query param is reported with an 'add' modification."""
    case = make_case('get', '/foo?bar=BQ')
    request = make_case('get', '/foo')
    db = subject.Database([case])
    suggestions = db.best_matches(request)
    suggestions |should| be_instance_of(subject.AvailableQueryStringParamsetsReport)
    suggestions.deltas |should| have(1).item
    d = suggestions.deltas[0]
    d[0] |should| respond_to('params')
    d[0] |should| respond_to('mods')
    d[0].params |should| equal_to({'bar': ['BQ']})
    d[0].mods |should| equal_to(({'field': 'bar', 'add': 'BQ'},))
    suggestions.as_jsonic_data() |should| equal_to({
        JsonDescStrings.QSTRING_DELTAS: [
            {
                JsonDescStrings.CASE_DESCRIPTION: None,
                'diff': {
                    JsonDescStrings.TARGET_QSPARAMS: {
                        'bar': ['BQ'],
                    },
                    'mods': (
                        {'field': 'bar', 'add': 'BQ'},
                    )
                }
            }
        ]
    })
def test_wrong_query_param_value():
    """A wrong query param value is reported with a 'chg'/'to' modification."""
    case = make_case('get', '/foo?bar=BQ')
    request = make_case('get', '/foo?bar=Cheers')
    db = subject.Database([case])
    suggestions = db.best_matches(request)
    suggestions |should| be_instance_of(subject.AvailableQueryStringParamsetsReport)
    suggestions.deltas |should| have(1).item
    d = suggestions.deltas[0]
    d[0] |should| respond_to('params')
    d[0] |should| respond_to('mods')
    d[0].params |should| equal_to({'bar': ['BQ']})
    d[0].mods |should| equal_to(({'field': 'bar', 'chg': 'Cheers', 'to': 'BQ'},))
    suggestions.as_jsonic_data() |should| equal_to({
        JsonDescStrings.QSTRING_DELTAS: [
            {
                JsonDescStrings.CASE_DESCRIPTION: None,
                'diff': {
                    JsonDescStrings.TARGET_QSPARAMS: {
                        'bar': ['BQ'],
                    },
                    'mods': (
                        {'field': 'bar', 'chg': 'Cheers', 'to': 'BQ'},
                    )
                }
            }
        ]
    })
def test_extra_query_param():
    """An extra query param value is reported with a 'del' modification."""
    case = make_case('get', '/foo?bar=BQ')
    request = make_case('get', '/foo?bar=BQ&bar=Cheers')
    db = subject.Database([case])
    suggestions = db.best_matches(request)
    suggestions |should| be_instance_of(subject.AvailableQueryStringParamsetsReport)
    suggestions.deltas |should| have(1).item
    d = suggestions.deltas[0]
    d[0] |should| respond_to('params')
    d[0] |should| respond_to('mods')
    d[0].params |should| equal_to({'bar': ['BQ']})
    d[0].mods |should| equal_to(({'field': 'bar', 'del': 'Cheers'},))
    suggestions.as_jsonic_data() |should| equal_to({
        JsonDescStrings.QSTRING_DELTAS: [
            {
                JsonDescStrings.CASE_DESCRIPTION: None,
                'diff': {
                    JsonDescStrings.TARGET_QSPARAMS: {
                        'bar': ['BQ'],
                    },
                    'mods': (
                        {'field': 'bar', 'del': 'Cheers'},
                    )
                }
            }
        ]
    })
def test_misordered_query_params():
    """Out-of-order values for one param yield paired add/del modifications."""
    case = make_case('get', '/foo?bar=BQ&bar=Cheers')
    request = make_case('get', '/foo?bar=Cheers&bar=BQ')
    db = subject.Database([case])
    suggestions = db.best_matches(request)
    suggestions |should| be_instance_of(subject.AvailableQueryStringParamsetsReport)
    suggestions.deltas |should| have(1).item
    d = suggestions.deltas[0]
    d[0] |should| respond_to('params')
    d[0] |should| respond_to('mods')
    d[0].params |should| equal_to({'bar': ['BQ', 'Cheers']})
    d[0].mods |should| equal_to(({'field': 'bar', 'add': 'BQ'}, {'field': 'bar', 'del': 'BQ'}))
    suggestions.as_jsonic_data() |should| equal_to({
        JsonDescStrings.QSTRING_DELTAS: [
            {
                JsonDescStrings.CASE_DESCRIPTION: None,
                'diff': {
                    JsonDescStrings.TARGET_QSPARAMS: {
                        'bar': ['BQ', 'Cheers'],
                    },
                    'mods': (
                        {'field': 'bar', 'add': 'BQ'},
                        {'field': 'bar', 'del': 'BQ'},
                    )
                }
            }
        ]
    })
def test_ignores_order_between_query_params():
    """Only per-param value-sequence differences matter, not param ordering."""
    case = make_case('get', '/foo?bar=BQ&baz=Cheers&zapf=1')
    request = make_case('get', '/foo?baz=Cheers&bar=BQ&zapf=2')
    db = subject.Database([case])
    suggestions = db.best_matches(request)
    suggestions |should| be_instance_of(subject.AvailableQueryStringParamsetsReport)
    suggestions.deltas |should| have(1).item
    d = suggestions.deltas[0]
    d[0] |should| respond_to('params')
    d[0] |should| respond_to('mods')
    d[0].params |should| equal_to({'zapf': ['1']})
    d[0].mods |should| equal_to(({'field': 'zapf', 'chg': '2', 'to': '1'},))
    # The as_jsonic_data is covered in test_wrong_query_param_value
    pass
def test_wrong_path():
    """An unknown path lists the closest known URL paths by edit distance."""
    cases = [
        make_case('get', '/food/hippopatamus'),
        make_case('get', '/food'),
        make_case('get', '/food/cat'),
        make_case('get', '/food/goat'),
        make_case('get', '/food/dog'),
        make_case('get', '/food/pig'),
        make_case('get', '/food/brachiosaurus'),
    ]
    request = make_case('get', '/foo')
    db = subject.Database(cases)
    suggestions = db.best_matches(request)
    suggestions |should| be_instance_of(subject.AvailablePathsReport)
    suggestions.test_case_groups |should| have(5).items
    tcgs = suggestions.test_case_groups
    list(g[0] for g in tcgs) |should| include_all_of(c['url'] for c in cases[1:6])
    suggestions.as_jsonic_data() |should| equal_to({
        JsonDescStrings.GOOD_PATHS: [
            ('/food', []),
            ('/food/cat', []),
            ('/food/dog', []),
            ('/food/pig', []),
            ('/food/goat', []), # Note this is moved later in the list because of higher edit distance
        ]
    })
def test_json_exchange_get_case():
    """A matching request round-trips through json_exchange with a response."""
    case = {
        'method': 'get',
        'url': '/pet_name',
        'response body': 'Fluffy',
    }
    db = subject.Database([case])
    output = StringIO()
    db.json_exchange(json.dumps(make_case('get', '/pet_name')), output)
    output.tell() |should_not| equal_to(0)
    output.seek(0)
    result = json.load(output)
    result |should| contain('response status')
    list(result.items()) |should| include_all_of(case.items())
def test_json_exchange_miss_case():
    """A non-matching request yields a report without a 'response status'."""
    db = subject.Database([
        {
            'method': 'post',
            'url': '/pet_name',
            'response body': 'Fluffy',
        }
    ])
    output = StringIO()
    db.json_exchange(json.dumps(make_case('get', '/pet_name')), output)
    output.tell() |should_not| equal_to(0)
    output.seek(0)
    result = json.load(output)
    result |should_not| contain('response status')
def test_json_exchange_differentiate_on_addnl_field():
    """Additional request keys (here 'story') disambiguate otherwise-equal cases."""
    cases = [
        {
            'story': "Alice's pet",
            'description': "Getting Alice's pet's name",
            'method': 'get',
            'url': '/pet_name',
            'response body': 'Fluffy',
        },
        {
            'story': "Bob's pet",
            'description': "Getting Bob's pet's name",
            'method': 'get',
            'url': '/pet_name',
            'response body': 'Max',
        },
    ]
    db = subject.Database(cases, add_request_keys=('story',))
    base_request = make_case('get', '/pet_name')
    def exchange_for_story(story):
        # Helper: run the exchange with the given 'story' and parse the reply.
        output = StringIO()
        db.json_exchange(
            json.dumps(dict(base_request, story=story)),
            output
        )
        output.tell() |should_not| equal_to(0)
        output.seek(0)
        return json.load(output)
    result = exchange_for_story("Alice's pet")
    result |should| contain('response status')
    result['response body'] |should| equal_to('Fluffy')
    result = exchange_for_story("Bob's pet")
    result |should| contain('response status')
    result['response body'] |should| equal_to('Max')
    result = exchange_for_story("Charlie's pet")
    result |should_not| contain('response status')
    result |should| contain(JsonDescStrings.ADDNL_FIELDS_SETS)
    result[JsonDescStrings.ADDNL_FIELDS_SETS] |should| include_all_of({'story': case['story']} for case in cases)
| 2.5 | 2 |
RegonAPI/consts/voivodeships.py | damianwasik98/RegonAPI | 10 | 12758699 | <filename>RegonAPI/consts/voivodeships.py
# Keys are ISO 3166-2:PL voivodeship abbreviations; each value holds the
# official TERYT province code and the Polish name of the voivodeship.
VOIVODESHIPS = {
    "DS": {"teryt": "02", "name_pl": "dolnośląskie"},
    "KP": {"teryt": "04", "name_pl": "kujawsko-pomorskie"},
    "LU": {"teryt": "06", "name_pl": "lubelskie"},
    "LB": {"teryt": "08", "name_pl": "lubuskie"},
    "LD": {"teryt": "10", "name_pl": "łódzkie"},
    "MA": {"teryt": "12", "name_pl": "małopolskie"},
    "MZ": {"teryt": "14", "name_pl": "mazowieckie"},
    "OP": {"teryt": "16", "name_pl": "opolskie"},
    "PK": {"teryt": "18", "name_pl": "podkarpackie"},
    "PD": {"teryt": "20", "name_pl": "podlaskie"},
    "PM": {"teryt": "22", "name_pl": "pomorskie"},
    "SL": {"teryt": "24", "name_pl": "śląskie"},
    "SK": {"teryt": "26", "name_pl": "świętokrzyskie"},
    "WN": {"teryt": "28", "name_pl": "warmińsko-mazurskie"},
    "WP": {"teryt": "30", "name_pl": "wielkopolskie"},
    "ZP": {"teryt": "32", "name_pl": "zachodniopomorskie"},
}
| 1.507813 | 2 |
paz/processors/keypoints.py | SushmaDG/MaskRCNN | 1 | 12758700 | <filename>paz/processors/keypoints.py
from ..core import Processor
from ..core import ops
import numpy as np
class RenderSingleViewSample(Processor):
    """Draws one sample from the renderer and stores it in the sample dict.

    # Arguments
        renderer: Object exposing a ``render_sample`` method that returns a
            rendered image together with its labels as
            ``(image, (matrices, alpha_channel, depth_image))``.
            Renderers are available in poseur.
    """
    def __init__(self, renderer):
        self.renderer = renderer
        super(RenderSingleViewSample, self).__init__()

    def call(self, kwargs=None):
        rendered_image, labels = self.renderer.render_sample()
        matrices, alpha_channel, depth_image = labels
        # The first matrix is the flattened 4x4 world-to-camera transform.
        kwargs['image'] = rendered_image
        kwargs['world_to_camera'] = matrices[0].reshape(4, 4)
        kwargs['alpha_mask'] = alpha_channel
        kwargs['depth'] = depth_image
        return kwargs
class RenderMultiViewSample(Processor):
    """Draws a two-view sample from the renderer and stores it in the sample dict.

    # Arguments
        renderer: Object exposing a ``render_sample`` method that returns two
            rendered views together with their labels as
            ``([image_A, image_B], [matrices, alpha_A, alpha_B])``.
            Renderers are available in poseur.
    """
    def __init__(self, renderer):
        self.renderer = renderer
        super(RenderMultiViewSample, self).__init__()

    def call(self, kwargs=None):
        images, labels = self.renderer.render_sample()
        image_A, image_B = images
        matrices, alpha_A, alpha_B = labels
        # Stack both alpha channels along a new trailing axis.
        alpha_masks = np.concatenate(
            [np.expand_dims(alpha_A, -1), np.expand_dims(alpha_B, -1)], -1)
        kwargs['matrices'] = matrices
        kwargs['image_A'] = image_A
        kwargs['image_B'] = image_B
        kwargs['alpha_channels'] = alpha_masks
        return kwargs
class ConcatenateAlphaMask(Processor):
    """Append ``alpha_mask`` as an extra channel of ``image``.

    Useful for changing the background later on.
    """
    def call(self, kwargs):
        mask = np.expand_dims(kwargs['alpha_mask'], axis=-1)
        kwargs['image'] = np.concatenate([kwargs['image'], mask], axis=2)
        return kwargs
class ProjectKeypoints(Processor):
    """Transform fixed keypoints with the sample's world-to-camera matrix
    and project them with ``projector``.

    # Arguments
        projector: object with a ``project`` method mapping a batch of
            camera-space points to image coordinates.
        keypoints: keypoints to transform and project.
    """
    def __init__(self, projector, keypoints):
        self.projector = projector
        self.keypoints = keypoints
        super(ProjectKeypoints, self).__init__()

    def call(self, kwargs):
        camera_points = np.matmul(self.keypoints, kwargs['world_to_camera'].T)
        # ``project`` expects a batch; add and remove the batch axis.
        camera_points = np.expand_dims(camera_points, 0)
        kwargs['keypoints'] = self.projector.project(camera_points)[0]
        return kwargs
class DenormalizeKeypoints(Processor):
    """Transform normalized keypoint coordinates into image coordinates."""
    def __init__(self):
        super(DenormalizeKeypoints, self).__init__()

    def call(self, kwargs):
        rows, cols = kwargs['image'].shape[:2]
        kwargs['keypoints'] = ops.denormalize_keypoints(
            kwargs['keypoints'], rows, cols)
        return kwargs
class NormalizeKeypoints(Processor):
    """Transform keypoints in image coordinates to normalized coordinates."""
    def __init__(self):
        super(NormalizeKeypoints, self).__init__()

    def call(self, kwargs):
        rows, cols = kwargs['image'].shape[:2]
        kwargs['keypoints'] = ops.normalize_keypoints(
            kwargs['keypoints'], rows, cols)
        return kwargs
class RemoveKeypointsDepth(Processor):
    """Drop the Z component of the keypoints, keeping only (x, y)."""
    def __init__(self):
        super(RemoveKeypointsDepth, self).__init__()

    def call(self, kwargs):
        keypoints = kwargs['keypoints']
        kwargs['keypoints'] = keypoints[:, :2]
        return kwargs
class PartitionKeypoints(Processor):
    """Split ``keypoints`` of shape [num_keypoints, 2] into one topic per
    keypoint (``keypoint_0`` ... ``keypoint_{num_keypoints - 1}``), each of
    shape (2,). This layout is required by tensorflow probability.
    """
    def __init__(self):
        super(PartitionKeypoints, self).__init__()

    def call(self, kwargs):
        keypoints = kwargs['keypoints']
        pieces = np.vsplit(keypoints, len(keypoints))
        for arg, piece in enumerate(pieces):
            kwargs['keypoint_%s' % arg] = np.squeeze(piece)
        return kwargs
class ChangeKeypointsCoordinateSystem(Processor):
    """Translate ``keypoints`` from box-local 2D coordinates to full-image
    coordinates using the top-left corner of ``box2D`` as the new origin.
    """
    def __init__(self):
        super(ChangeKeypointsCoordinateSystem, self).__init__()

    def call(self, kwargs):
        x_min, y_min = kwargs['box2D'].coordinates[:2]
        keypoints = kwargs['keypoints']
        keypoints[:, 0] = keypoints[:, 0] + x_min
        keypoints[:, 1] = keypoints[:, 1] + y_min
        kwargs['keypoints'] = keypoints
        return kwargs
| 2.6875 | 3 |
lib/utils/adb.py | vividmuse/frida-skeleton | 520 | 12758701 | <reponame>vividmuse/frida-skeleton
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from lib.utils.shell import Shell
class Adb(Shell):
    """Thin wrapper around the ``adb`` binary bound to a single device."""

    def __init__(self, serial):
        super().__init__()
        self.serial = serial
        # Whether the plain adb shell already runs as root on this device.
        self.is_root = False
        self.check_root()

    @classmethod
    def start_server(cls):
        return Shell().exec('adb start-server', supress_error=True)

    @classmethod
    def devices(cls):
        return Shell().exec('adb devices', quiet=True)

    def check_root(self):
        """Probe the device shell and remember when it is rooted."""
        if self.unsafe_shell('whoami').out == 'root':
            self.is_root = True

    def root(self):
        """Restart adbd with root permissions and re-probe."""
        self.exec(f'adb -s "{self.serial}" root')
        self.check_root()

    def unsafe_shell(self, command, root=False, quiet=False):
        """Run *command* on the device, via ``su`` when root is requested
        but the shell itself is not already root."""
        prefix = 'su - -c ' if root and not self.is_root else ''
        return self.exec(f'adb -s "{self.serial}" shell "{prefix}{command}"', quiet)

    def push(self, src, dst):
        return self.exec(f'adb -s "{self.serial}" push "{src}" "{dst}"')

    def reverse(self, port):
        return self.exec(f'adb -s "{self.serial}" reverse tcp:{port} tcp:{port}')

    def clear_reverse(self, remote_port):
        return self.exec(f'adb -s "{self.serial}" reverse --remove tcp:{remote_port}')

    def forward(self, local_port, remote_port):
        return self.exec(f'adb -s "{self.serial}" forward tcp:{local_port} tcp:{remote_port}')

    def clear_forward(self, local_port):
        return self.exec(f'adb -s "{self.serial}" forward --remove tcp:{local_port}')
| 2.4375 | 2 |
modularTasks/utilities/readPupilLabsData.py | cpizzica/Lab-Matlab-Control | 6 | 12758702 | <filename>modularTasks/utilities/readPupilLabsData.py
import sys
import scipy.io as scpy
import numpy as np
import msgpack
from file_methods import *
# Read Pupil Labs eye data, extract the desired fields and save them as a
# .mat structure where rows are samples and the columns are:
#   1. timestamp
#   2. gaze x
#   3. gaze y
#   4. confidence
#   5. pupil 0 diameter (-1 when that eye is missing from the sample)
#   6. pupil 1 diameter (-1 when that eye is missing from the sample)
#
# Command-line arguments:
#   sys.argv[1]: path to the folder containing the data file
#   sys.argv[2]: topic of the pldata file to load
#   sys.argv[3]: name of the created .mat file (and of the variable inside)

# Use the pupil-labs helper (from file_methods) to load the recorded data.
data = load_pldata_file(sys.argv[1], sys.argv[2])

# One row per sample; ``object`` dtype keeps mixed float/int entries.
# BUGFIX: ``np.object`` was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin ``object`` is the supported spelling.
raw_data = np.zeros((len(data.data), 6), dtype=object)
for q in range(len(data.data)):
    raw_data[q][0] = data.data[q]['timestamp']
    raw_data[q][1] = data.data[q]['norm_pos'][0]
    raw_data[q][2] = data.data[q]['norm_pos'][1]
    raw_data[q][3] = data.data[q]['confidence']
    try:
        # Normally both eyes contribute a pupil diameter.
        raw_data[q][4] = data.data[q]['base_data'][0]['diameter']
        raw_data[q][5] = data.data[q]['base_data'][1]['diameter']
    except IndexError:
        # Only one eye present: mark the missing diameter with -1.
        if data.data[q]['base_data'][0]['topic'] == 'pupil.0':
            raw_data[q][4] = data.data[q]['base_data'][0]['diameter']
            raw_data[q][5] = -1
        else:
            raw_data[q][4] = -1
            raw_data[q][5] = data.data[q]['base_data'][0]['diameter']

# Save the matrix under the name given by sys.argv[3].
scpy.savemat(sys.argv[3] + '.mat', {sys.argv[3]: raw_data})
invenio_app_ils/records_relations/views.py | equadon/invenio-app-ils | 0 | 12758703 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio App ILS Records views."""
from __future__ import absolute_import, print_function
from flask import Blueprint, abort, current_app, request
from invenio_db import db
from invenio_records_rest.utils import obj_or_import_string
from invenio_records_rest.views import pass_record
from invenio_rest import ContentNegotiatedMethodView
from invenio_app_ils.documents.api import DOCUMENT_PID_TYPE
from invenio_app_ils.errors import RecordRelationsError
from invenio_app_ils.permissions import need_permissions
from invenio_app_ils.pidstore.pids import SERIES_PID_TYPE
from invenio_app_ils.records.api import IlsRecord
from invenio_app_ils.records_relations.indexer import RecordRelationIndexer
from invenio_app_ils.relations.api import Relation
from invenio_app_ils.records_relations.api import ( # isort:skip
RecordRelationsParentChild,
RecordRelationsSiblings,
)
def create_relations_blueprint(app):
    """Create the blueprint exposing the relations views for documents and
    series REST endpoints."""

    def _add_resource_view(blueprint, pid_type, view_class):
        """Register the relations view of ``view_class`` for one endpoint."""
        # BUGFIX: default must be a dict -- the list default previously used
        # would crash on the chained ``.get`` when the config key is absent.
        endpoints = app.config.get("RECORDS_REST_ENDPOINTS", {})
        options = endpoints.get(pid_type, {})
        default_media_type = options.get("default_media_type", "")
        rec_serializers = options.get("record_serializers", {})
        # Serializer entries may be import strings; resolve to callables.
        serializers = {
            mime: obj_or_import_string(func)
            for mime, func in rec_serializers.items()
        }
        record_relations = view_class.as_view(
            view_class.view_name.format(pid_type),
            serializers=serializers,
            default_media_type=default_media_type,
        )
        blueprint.add_url_rule(
            "{0}/relations".format(options["item_route"]),
            view_func=record_relations,
            methods=["POST", "DELETE"],
        )

    bp = Blueprint("invenio_app_ils_relations", __name__, url_prefix="")
    _add_resource_view(bp, DOCUMENT_PID_TYPE, RecordRelationsResource)
    _add_resource_view(bp, SERIES_PID_TYPE, RecordRelationsResource)
    return bp
class RecordRelationsResource(ContentNegotiatedMethodView):
    """Relations views for a record.

    Exposes POST (create) and DELETE (remove) endpoints for parent-child
    and sibling relations between two records.
    """

    view_name = "{0}_relations"

    def _get_record(self, record, pid, pid_type):
        """Return `record` itself when it matches the given PID, otherwise
        fetch the record (avoids a redundant second fetch)."""
        if record.pid == pid and record._pid_type == pid_type:
            rec = record
        else:
            rec = IlsRecord.get_record_by_pid(pid, pid_type=pid_type)
        return rec

    def _validate_parent_child_creation_payload(self, payload):
        """Validate the payload of a parent-child relation request.

        Pops the required keys from ``payload``; whatever remains is
        returned as extra relation metadata (e.g. ``volume``).
        """
        try:
            parent_pid = payload.pop("parent_pid")
            parent_pid_type = payload.pop("parent_pid_type")
            child_pid = payload.pop("child_pid")
            child_pid_type = payload.pop("child_pid_type")
        except KeyError as key:
            raise RecordRelationsError(
                "The `{}` is a required field".format(key)
            )
        return parent_pid, parent_pid_type, child_pid, child_pid_type, payload

    def _create_parent_child_relation(self, record, relation_type, payload):
        """Create a Parent-Child relation.

        Expected payload:

        {
            parent_pid: <pid_value>,
            parent_pid_type: <pid_type>,
            child_pid: <pid_value>,
            child_pid_type: <pid_type>,
            relation_type: "<Relation name>",
            [volume: "<vol name>"]
        }
        """
        parent_pid, parent_pid_type, child_pid, child_pid_type, metadata = self._validate_parent_child_creation_payload(
            payload
        )

        # fetch parent and child. The passed record should be one of the two
        parent = self._get_record(record, parent_pid, parent_pid_type)
        child = self._get_record(record, child_pid, child_pid_type)

        rr = RecordRelationsParentChild()
        modified_record = rr.add(
            parent=parent, child=child, relation_type=relation_type, **metadata
        )
        return modified_record, parent, child

    def _delete_parent_child_relation(self, record, relation_type, payload):
        """Delete a Parent-Child relation.

        Expected payload:

        {
            parent_pid: <pid_value>,
            parent_pid_type: <pid_type>,
            child_pid: <pid_value>,
            child_pid_type: <pid_type>,
            relation_type: "<Relation name>"
        }
        """
        # The creation validator is reused; any extra metadata is ignored.
        parent_pid, parent_pid_type, child_pid, child_pid_type, _ = self._validate_parent_child_creation_payload(
            payload
        )

        # fetch parent and child. The passed record should be one of the two
        parent = self._get_record(record, parent_pid, parent_pid_type)
        child = self._get_record(record, child_pid, child_pid_type)

        rr = RecordRelationsParentChild()
        modified_record = rr.remove(
            parent=parent, child=child, relation_type=relation_type
        )
        return modified_record, parent, child

    def _validate_siblings_creation_payload(self, payload):
        """Validate the payload of a siblings relation request.

        Pops the required keys; the remaining payload is returned as extra
        relation metadata (e.g. ``note``).
        """
        try:
            pid = payload.pop("pid")
            pid_type = payload.pop("pid_type")
        except KeyError as key:
            raise RecordRelationsError(
                "The `{}` is a required field".format(key)
            )
        return pid, pid_type, payload

    def _create_sibling_relation(self, record, relation_type, payload):
        """Create a Siblings relation from current record to the given PID.

        Expected payload:

        {
            pid: <pid_value>,
            pid_type: <pid_type>,
            relation_type: "<Relation name>",
            [note: "<note>"]
        }
        """
        pid, pid_type, metadata = self._validate_siblings_creation_payload(
            payload
        )
        # A record cannot be its own sibling.
        if pid == record["pid"] and pid_type == record._pid_type:
            raise RecordRelationsError(
                "Cannot create a relation for PID `{}` with itself".format(pid)
            )

        second = IlsRecord.get_record_by_pid(pid, pid_type=pid_type)
        rr = RecordRelationsSiblings()
        modified_record = rr.add(
            first=record,
            second=second,
            relation_type=relation_type,
            **metadata
        )
        return modified_record, record, second

    def _delete_sibling_relation(self, record, relation_type, payload):
        """Delete a Siblings relation from current record to the given PID.

        Expected payload:

        {
            pid: <pid_value>,
            pid_type: <pid_type>,
            relation_type: "<Relation name>"
        }
        """
        pid, pid_type, metadata = self._validate_siblings_creation_payload(
            payload
        )
        # A record cannot be its own sibling.
        if pid == record["pid"] and pid_type == record._pid_type:
            raise RecordRelationsError(
                # BUGFIX: the message previously said "create" in this
                # delete path (copy-paste error).
                "Cannot delete a relation for PID `{}` with itself".format(pid)
            )

        second = IlsRecord.get_record_by_pid(pid, pid_type=pid_type)
        rr = RecordRelationsSiblings()
        modified_record, _ = rr.remove(
            first=record, second=second, relation_type=relation_type
        )
        return modified_record, record, second

    @pass_record
    @need_permissions("relations-create")
    def post(self, record, **kwargs):
        """Create one or more new relations for ``record``."""

        def create(payload):
            """Apply a single create action; return the (possibly modified)
            resolved record."""
            try:
                relation_type = payload.pop("relation_type")
            except KeyError as key:
                return abort(400, "The `{}` is a required field".format(key))

            rt = Relation.get_relation_by_name(relation_type)

            if rt in current_app.config["PARENT_CHILD_RELATION_TYPES"]:
                modified, first, second = self._create_parent_child_relation(
                    record, rt, payload
                )
            elif rt in current_app.config["SIBLINGS_RELATION_TYPES"]:
                modified, first, second = self._create_sibling_relation(
                    record, rt, payload
                )
            else:
                raise RecordRelationsError(
                    "Invalid relation type `{}`".format(rt.name)
                )

            db.session.commit()
            records_to_index.append(first)
            records_to_index.append(second)
            # if the record is the modified, return the modified version
            if (
                modified.pid == record.pid
                and modified._pid_type == record._pid_type
            ):
                return modified
            return record

        records_to_index = []
        # The body may be a single action or a list of actions.
        actions = request.get_json()
        if not isinstance(actions, list):
            actions = [actions]

        for action in actions:
            record = create(action)

        # Index both parent/child (or first/second)
        RecordRelationIndexer().index(record, *records_to_index)

        return self.make_response(record.pid, record, 201)

    @pass_record
    @need_permissions("relations-delete")
    def delete(self, record, **kwargs):
        """Delete one or more existing relations of ``record``."""

        def delete(payload):
            """Apply a single delete action; return the (possibly modified)
            resolved record."""
            try:
                relation_type = payload.pop("relation_type")
            except KeyError as key:
                return abort(400, "The `{}` is a required field".format(key))

            rt = Relation.get_relation_by_name(relation_type)

            if rt in current_app.config["PARENT_CHILD_RELATION_TYPES"]:
                modified, first, second = self._delete_parent_child_relation(
                    record, rt, payload
                )
            elif rt in current_app.config["SIBLINGS_RELATION_TYPES"]:
                modified, first, second = self._delete_sibling_relation(
                    record, rt, payload
                )
            else:
                raise RecordRelationsError(
                    "Invalid relation type `{}`".format(rt.name)
                )

            db.session.commit()
            records_to_index.append(first)
            records_to_index.append(second)
            # if the record is the modified, return the modified version
            if (
                modified.pid == record.pid
                and modified._pid_type == record._pid_type
            ):
                return modified
            return record

        records_to_index = []
        # The body may be a single action or a list of actions.
        actions = request.get_json()
        if not isinstance(actions, list):
            actions = [actions]

        for action in actions:
            record = delete(action)

        # Index both parent/child (or first/second)
        RecordRelationIndexer().index(record, *records_to_index)

        return self.make_response(record.pid, record, 200)
| 1.851563 | 2 |
cw/filters/unscented_kalman_filter_new.py | aarondewindt/cw | 1 | 12758704 | <reponame>aarondewindt/cw
import numpy as np
import xarray as xr
# import sympy as sp
import control as ct
from typing import Callable, Optional, Sequence, Union, Tuple
from collections import deque
from textwrap import indent
from tqdm.auto import tqdm
from scipy.linalg import expm
from IPython.display import display, Markdown
from scipy.signal import cont2discrete
import html2text
from cw.unscented_transform import UnscentedTransform
from cw.vdom import div, h3, style, dl, dt, dd, b, pre
class UnscentedKalmanFilter:
    """Unscented Kalman filter built on two unscented transforms: one for
    the state propagation model ``f`` and one for the measurement model
    ``h``.

    # Arguments
        x_names: names of the state variables.
        u_names: names of the input variables.
        z_names: names of the measurement variables.
        f: state derivative function, called as ``f(t, x, u)``.
        h: measurement function, called as ``h(t, x, u)``.
        g: system noise input-matrix function, called as ``g(t, x)``.
        f_alpha, f_beta, f_k: unscented transform parameters for ``f``;
            ``f_k`` defaults to ``3 - len(x_names)``.
        h_alpha, h_beta, h_k: unscented transform parameters for ``h``;
            ``h_k`` defaults to ``3 - len(x_names)``.
    """

    def __init__(self, *,
                 x_names: Sequence[str],
                 u_names: Sequence[str],
                 z_names: Sequence[str],
                 f: Callable[[float, np.ndarray, np.ndarray], np.ndarray],
                 f_alpha=1,
                 f_beta=2,
                 f_k=None,
                 h: Callable[[float, np.ndarray, np.ndarray], np.ndarray],
                 h_alpha=1,
                 h_beta=2,
                 h_k=None,
                 g: Callable[[float, np.ndarray, np.ndarray], np.ndarray]):
        # Model functions
        self.f = f
        self.h = h
        self.g = g

        self.x_names = tuple(x_names)
        self.u_names = tuple(u_names)
        self.z_names = tuple(z_names)

        # Unscented transformation parameters
        self.f_alpha = f_alpha
        self.f_beta = f_beta
        self.f_k = 3 - len(self.x_names) if f_k is None else f_k
        self.h_k = 3 - len(self.x_names) if h_k is None else h_k
        self.h_alpha = h_alpha
        self.h_beta = h_beta

    def __repr__(self):
        # Plain-text fallback derived from the HTML representation.
        return html2text.html2text(self._repr_html_())

    def _repr_html_(self):
        """Rich (Jupyter) representation listing the UT parameters."""
        return div(
            h3("UnscentedKalmanFilter"),
            div(style(margin_left="2em"),
                dl(
                    dt(b("x_names")), dd(", ".join(self.x_names)),
                    dt(b("z_names")), dd(", ".join(self.z_names)),
                    dt(b("f_alpha")), dd(self.f_alpha),
                    dt(b("f_beta")), dd(self.f_beta),
                    dt(b("f_k")), dd(self.f_k),
                    dt(b("h_alpha")), dd(self.h_alpha),
                    dt(b("h_beta")), dd(self.h_beta),
                    dt(b("h_k")), dd(self.h_k),
                ))
        ).to_html()

    def sim(self,
            *,
            data: xr.Dataset,
            system_noise: Sequence[float],
            system_bias: Sequence[float],
            measurement_noise: Sequence[float],
            measurement_bias: Sequence[float],
            inplace=True) -> xr.Dataset:
        """Simulate the system with additive gaussian noise and bias.

        State variables present in ``data`` as full ``t`` time series are
        treated as known inputs and copied through; scalar entries are
        treated as initial conditions and integrated with forward Euler.
        Returns ``data`` merged with the simulated states and the noisy
        measurements.
        """
        if not inplace:
            data = data.copy()

        initial_conditions = []
        inputs = []
        states_idxs = []
        inputs_idxs = []

        # Classify each state as an input (full time series) or an
        # integrated state (scalar initial condition).
        for idx, variable_name in enumerate(self.x_names):
            if variable_name in data:
                variable: xr.DataArray = data[variable_name]
                if variable.dims == ("t",):
                    inputs.append(variable.values)
                    inputs_idxs.append(idx)
                elif variable.dims == ():
                    initial_conditions.append(variable.item())
                    states_idxs.append(idx)
            else:
                raise ValueError("Dataset must contain an initial condition or full state values for all states.")

        initial_conditions = np.array(initial_conditions)
        inputs = np.array(inputs).T

        n_x = len(self.x_names)
        n_z = len(self.z_names)

        # Check inputs.
        assert len(system_noise) == n_x, \
            "Length of system_noise must be equal to the number of system states."
        assert len(system_bias) == n_x, "Length of system_bias must be equal to the number of system states."
        assert len(measurement_noise) == n_z, \
            "Length of measurement_noise must be equal to the number of measurements."
        assert len(measurement_bias) == n_z, \
            "Length of measurement_bias must be equal to the number of measurements."

        # Make sure the noise variances and biases are in flattened ndarrays.
        system_noise = np.asarray(system_noise).flatten()
        system_bias = np.asarray(system_bias).flatten()
        measurement_noise = np.asarray(measurement_noise).flatten()
        measurement_bias = np.asarray(measurement_bias).flatten()

        # Get time vector
        time_vector = data.t.values
        n_t = len(time_vector)

        # Iterator that gives the final input for each iteration.
        inputs_final_iterator = iter(inputs)
        next(inputs_final_iterator)

        # Create iterator that will give the final time of each iteration.
        time_final_iter = iter(time_vector)
        next(time_final_iter)

        # Create matrix in which to store results and set initial state
        x_log = np.empty((n_t, n_x))
        x_log.fill(np.nan)

        # Create measurement log
        z_log = np.empty((n_t, n_z))
        z_log.fill(np.nan)

        # Set initial values
        x_log[0, states_idxs] = initial_conditions
        x_log[0, inputs_idxs] = inputs[0, :]
        # NOTE(review): ``h`` (and ``f`` below) are called here without the
        # ``u`` argument, unlike in ``filter`` -- confirm the simulation
        # models accept this signature.
        z_log[0, :] = self.h(0.0, x_log[0, :]).flatten() \
            + measurement_noise * np.random.normal(size=(n_z,)) + measurement_bias

        # Iterate though each point in time and integrate using an euler
        # integrator. ``x_i`` is the current row of ``x_log``; ``u_i`` is
        # unused because the input entries are overwritten with ``u_f``.
        for i, (t_i, t_f, x_i, u_i, u_f) in enumerate(zip(time_vector, time_final_iter,
                                                          x_log,
                                                          inputs, inputs_final_iterator)):
            f = self.f(t_i, x_i).flatten()
            g = self.g(t_i, x_i)

            # Calculate derivative: model + shaped noise + bias.
            dx = f + (g @ (system_noise * np.random.normal(size=(n_x,)))).flatten() + system_bias
            x_f = x_i + dx * (t_f - t_i)
            # Inputs are prescribed, not integrated.
            x_f[inputs_idxs] = u_f

            # Store result
            x_log[i+1, :] = x_f
            z_log[i+1, :] = \
                self.h(t_f, x_f).flatten() \
                + measurement_noise * np.random.normal(size=(n_z,)) + measurement_bias

        return data.merge(xr.Dataset(
            data_vars={
                **{self.x_names[x_idx]: (('t',), x_log[:, x_idx]) for x_idx in range(n_x)},
                **{self.z_names[z_idx]: (('t',), z_log[:, z_idx]) for z_idx in range(n_z)},
            }
        ))

    def filter(self,
               data: xr.Dataset,
               x_0: Union[np.ndarray, Sequence[float]],
               p_0: Union[np.ndarray, Sequence[Sequence[float]]],
               q: Union[np.ndarray, Sequence[Sequence[float]]],
               r: Union[np.ndarray, Sequence[Sequence[float]]],
               verbose=False):
        """Run the unscented Kalman filter over the measurements in ``data``.

        # Arguments
            data: dataset with a ``t`` coordinate, all ``u_names`` inputs
                and all ``z_names`` measurements.
            x_0: initial state estimate.
            p_0: initial state covariance matrix.
            q: system noise covariance, added after each prediction.
            r: measurement noise covariance, added to the innovation.
            verbose: show a progress bar when True.
        """
        cols_set = set(data.data_vars)
        n_x = len(self.x_names)
        n_z = len(self.z_names)
        n_u = len(self.u_names)

        # Make sure p_0, Q and R are ndarray
        p_0 = np.asarray(p_0)
        q = np.asarray(q)
        r = np.asarray(r)

        assert set(self.z_names) <= cols_set, f"Missing measurements in the data frame.\n{', '.join(set(self.z_names) - cols_set)}"
        assert len(x_0) == n_x, "x_0 must have the same size as the number of states."
        assert p_0.shape == (n_x, n_x), "p_0 must be a square matrix whose sides must have the same " \
                                        "length as the number of states."
        assert r.shape == (n_z, n_z), "The measurement noise covariance matrix 'R' needs to be an nxn matrix, where " \
                                      "n is the number of measurements."

        # Make sure x_0 is a flattened ndarray.
        x_0 = np.asarray(x_0).flatten()

        # Get time vector.
        time_vector = data.t.values

        # Get matrix with measurements and inputs.
        u_log = np.vstack([data[u_name].values for u_name in self.u_names]).T if n_u else [[]] * len(time_vector)
        z_log = np.vstack([data[z_name].values for z_name in self.z_names]).T

        # Create iterator that will give the final time of each iteration.
        t_f_iter = iter(time_vector)
        next(t_f_iter)

        # Create logs (maxlen bounds them to the number of samples).
        x_k1k1_log = deque((x_0,), maxlen=len(time_vector) + 2)
        p_k1k1_log = deque((p_0,), maxlen=len(time_vector) + 2)
        x_kk1_log = deque((x_0,), maxlen=len(time_vector) + 2)
        p_kk1_log = deque((p_0,), maxlen=len(time_vector) + 2)

        # These are rebound by the loop below; the closures f_eval/h_eval
        # read them on every call (deliberate late binding).
        t_i = np.nan
        t_f = np.nan
        u_k = np.nan

        def f_eval(x):
            # Propagate a sigma point from t_i to t_f with RK4.
            return rk4(self.f, x.flatten(), u_k, t_i, t_f)

        def h_eval(x):
            # Evaluate the measurement model at the start of the interval.
            return self.h(t_i, x.flatten(), u_k)

        f_ut = UnscentedTransform(f_eval, self.f_alpha, self.f_beta, self.f_k)
        h_ut = UnscentedTransform(h_eval, self.h_alpha, self.h_beta, self.h_k)

        for k, (t_i, t_f, u_k, z_k) in tqdm(enumerate(zip(time_vector, t_f_iter, u_log, z_log)),
                                            total=len(time_vector)-1,
                                            disable=not verbose):
            x_kk = x_k1k1_log[k]
            p_kk = p_k1k1_log[k]

            # State and covariance matrix prediction
            x_kk1, p_kk1, _ = f_ut(x_kk, p_kk)
            p_kk1 += q

            # Update
            z_p, s_k, c_k = h_ut(x_kk1, p_kk1)
            s_k += r

            # Calculate Kalman gain
            k_gain = c_k @ np.linalg.pinv(s_k)

            # NOTE(review): the textbook update adds the gain term; the
            # minus here must match UnscentedTransform's cross-covariance
            # sign convention -- confirm.
            x_k1k1 = x_kk1 - (k_gain @ (z_k - z_p))
            p_k1k1 = p_kk1 - (k_gain @ (s_k @ k_gain.T))

            # Store results
            x_k1k1_log.append(x_k1k1)
            p_k1k1_log.append(p_k1k1)
            x_kk1_log.append(x_kk1)
            p_kk1_log.append(p_kk1)

        # Add results to the dataset and return it
        data["p_k1k1"] = (("t", "dim_0", "dim_1"), np.array(p_k1k1_log))
        data["x_k1k1"] = (("t", "x_idx"), np.array(x_k1k1_log))
        data["p_kk1"] = (("t", "dim_0", "dim_1"), np.array(p_kk1_log))
        data["x_kk1"] = (("t", "x_idx"), np.array(x_kk1_log))

        for x_idx, x_name in enumerate(self.x_names):
            data[f"{x_name}_filtered"] = data.x_k1k1.isel(x_idx=x_idx)
            data[f"{x_name}_filtered_std"] = data.p_k1k1.isel(dim_0=x_idx, dim_1=x_idx)

        return data
def rk4(f: Callable,
        x_0: Union[Sequence[float], np.ndarray],
        u: Union[Sequence[float], np.ndarray],
        t_i: float,
        t_f: float,
        n: int = 1):
    """Fourth order Runge-Kutta integration.

    :param f: Callable returning the state derivatives, called as
        ``f(t, state, u)``.
    :param x_0: Sequence with the initial state.
    :param u: Input vector, passed through to ``f`` unchanged.
    :param t_i: Initial time.
    :param t_f: Final time.
    :param n: Number of integration steps.
    :return: Final state vector as a flat float64 ndarray.
    """
    # Work on a flat float copy so the in-place update below is safe.
    state = np.asarray(x_0, dtype=np.float64).flatten()
    step = (t_f - t_i) / n
    t = t_i

    for j in range(1, n + 1):
        k1 = step * f(t, state, u).flatten()
        k2 = step * f(t + step / 2., (state + k1 / 2.), u).flatten()
        k3 = step * f(t + step / 2., (state + k2 / 2.), u).flatten()
        k4 = step * f(t + step, (state + k3), u).flatten()
        state += (k1 + 2. * k2 + 2. * k3 + k4) / 6.0
        # Recompute from t_i to avoid accumulating rounding error.
        t = t_i + j * step

    return state
| 1.992188 | 2 |
wapttools/create.py | lrobinot/wapt-tools | 1 | 12758705 | <reponame>lrobinot/wapt-tools<filename>wapttools/create.py<gh_stars>1-10
import os
import sys
import gitlab
import random
from pkg_resources import resource_string
# Markdown badge template for the generated README; filled in with the
# gitlab ``url``, ``package`` name and ``branch`` via ``str.format`` (the
# extra ``text`` kwarg passed by the caller is ignored by ``format``).
badge = '[]({url}/wapt/packages/{package}/-/pipelines?&ref={branch})\n' # noqa
def creator(package, verbose=False):
    """Create a new WAPT package project on Gitlab and scaffold it locally.

    # Arguments
        package: path of the package to create; the basename becomes the
            gitlab project name, the dirname the local parent folder.
        verbose: print progress and error details when True.
    """
    gl = gitlab.Gitlab.from_config()
    if verbose:
        print('Gitlab url: {}'.format(gl.url))

    package_name = os.path.basename(package)
    package_folder = os.path.dirname(package)

    # Abort when a project with this exact name already exists on gitlab.
    projects = gl.projects.list(search=package_name)
    if len(projects) > 0:
        for project in projects:
            if project.name == package_name:
                if verbose:
                    print('*** ERROR: project {} already exists, aborting.'.format(package_name))
                sys.exit(1)

    # BUGFIX: create the parent folder when it does NOT exist yet; the
    # original condition was inverted, so makedirs only ran when the
    # folder already existed (and raised).
    if package_folder != '' and not os.path.isdir(package_folder):
        os.makedirs(package_folder)
    if os.path.isdir(package):
        if verbose:
            print('*** ERROR: folder {} already exists, aborting.'.format(package))
        sys.exit(1)

    # Create the project under the wapt/packages group.
    wapt_group = gl.groups.list(search='wapt')[0]
    packages_group = wapt_group.subgroups.list(search='packages')[0]
    project = gl.projects.create({'name': package_name, 'namespace_id': packages_group.id})

    if package_folder != '':
        os.chdir(package_folder)
    command = 'git clone {}:wapt/packages/{}'.format(gl.url.replace('https://', 'git@'), package_name)
    if verbose:
        print('* {}'.format(command))
    os.system(command)
    os.chdir(package_name)

    # README with the pipeline badges, committed before git-flow init.
    with open('README.md', 'w') as file:
        file.write('# WAPT {} package\n'.format(package_name))
        file.write('\n')
        file.write(badge.format(text='PROD', branch='master', url=gl.url, package=package_name))
        file.write(badge.format(text='DEV', branch='develop', url=gl.url, package=package_name))
    os.system('git add -A')
    os.system('git commit -m "Add README.md" README.md')
    os.system('git push')
    os.system('git flow init -d')

    # VSCode workspace configuration.
    # BUGFIX: pkg_resources.resource_string returns ``bytes`` on Python 3;
    # decode before writing to text-mode files (and before ``.format``).
    os.mkdir('.vscode')
    with open(os.path.join('.vscode', 'extensions.json'), 'w') as file:
        file.write(resource_string('wapttools.data', 'vscode_extensions.json').decode('utf-8'))
    with open(os.path.join('.vscode', 'launch.json'), 'w') as file:
        file.write(resource_string('wapttools.data', 'vscode_launch.json').decode('utf-8'))
    with open(os.path.join('.vscode', 'settings.json'), 'w') as file:
        file.write(resource_string('wapttools.data', 'vscode_settings.json').decode('utf-8'))

    # WAPT package skeleton.
    os.mkdir('WAPT')
    with open(os.path.join('WAPT', 'control'), 'w') as file:
        file.write(resource_string('wapttools.data', 'wapt_control.txt').decode('utf-8')
                   .format(url=gl.url, package=package_name))
    os.mkdir('config')
    open(os.path.join('config', '.gitkeep'), 'a').close()
    os.mkdir('sources')
    with open(os.path.join('sources', '.gitignore'), 'w') as file:
        file.write('*\n')
        file.write('!.gitignore\n')
    with open('.editorconfig', 'w') as file:
        file.write(resource_string('wapttools.data', 'dot.editorconfig').decode('utf-8'))
    with open('.env', 'w') as file:
        file.write('PYTHONHOME=C:\\Program Files (x86)\\wapt\n')
        file.write('PYTHONPATH=C:\\Program Files (x86)\\wapt\n')
    with open('.gitignore', 'w') as file:
        file.write('# Ignore genrated files\n')
        file.write('*.pyc\n')
        file.write('WAPT/certificate.crt\n')
        file.write('WAPT/*.sha256\n')
        file.write('WAPT/wapt.psproj\n')
    with open('.gitlab-ci.yml', 'w') as file:
        file.write('include:\n')
        file.write(' - project: \'wapt/packages/template\'\n')
        file.write(' file: \'/ci/build.yml\'\n')
    with open('version-check.json', 'w') as file:
        file.write(resource_string('wapttools.data', 'version-check.json').decode('utf-8'))
    script = resource_string('wapttools.data', 'setup.tmpl').decode('utf-8')
    with open('setup.py', 'w') as file:
        file.write(script)

    os.system('git add .')
    os.system('git commit -m "Skeleton" -a')
    os.system('git push --all')

    # Ensure a daily "AutoVersionChecker" pipeline schedule exists; the
    # random minute spreads the load across packages.
    scheds = project.pipelineschedules.list()
    found = False
    for sched in scheds:
        if sched.description == 'AutoVersionChecker':
            found = True
    if not found:
        sched = project.pipelineschedules.create({
            'ref': 'develop',
            'description': 'AutoVersionChecker',
            'cron_timezone': 'Europe/Paris',
            'cron': '{} 5 * * *'.format(random.randint(0, 59)),
            'active': True
        })
| 2.40625 | 2 |
ENCODN/TOOLS/COMMUNICATION/TELECOM/DECABIT/DECABIT_FRAME.py | akshitadixit/ENCODN | 6 | 12758706 | from tkinter import *
from tkinter import ttk
def DECABIT_FRAME(master=None):
    """Apply the 'awdark' ttk theme to the widget tree rooted at *master*."""
    ttk.Style(master).theme_use('awdark')
polls/serializers.py | awsbreathpanda/drfsite | 0 | 12758707 | <filename>polls/serializers.py<gh_stars>0
from rest_framework import serializers
from polls.models import Question, Choice
class QuestionSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked REST serializer exposing every field of ``Question``."""
    class Meta:
        model = Question
        # '__all__' serializes every model field automatically.
        fields = '__all__'
class ChoiceSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked REST serializer exposing every field of ``Choice``."""
    class Meta:
        model = Choice
        # '__all__' serializes every model field automatically.
        fields = '__all__'
| 2.140625 | 2 |
enigma.py | vmonaco/enigma | 2 | 12758708 | <reponame>vmonaco/enigma
'''
Created on Aug 4, 2012
@author: vinnie
'''
class Rotor(object):
    """A single Enigma rotor: one permutation table per rotor offset."""

    def __init__(self, symbols, permutation):
        """Precompute forward and inverse mappings for every offset."""
        n = len(symbols)
        self.n_symbols = n
        self.states = []
        self.inverse_states = []
        for offset in range(n):
            forward = {symbols[j]: permutation[(j + offset) % n]
                       for j in range(n)}
            self.states.append(forward)
            self.inverse_states.append({v: k for k, v in forward.items()})
        self.odometer = 0

    def state(self):
        """Get the current encryption state."""
        return self.states[self.odometer]

    def inverse_state(self):
        """Get the current decryption state."""
        return self.inverse_states[self.odometer]

    def step(self):
        """Advance the rotor by one position (wraps around)."""
        self.odometer = (self.odometer + 1) % self.n_symbols

    def setOdometer(self, position):
        """Set the rotor position, modulo the number of symbols."""
        self.odometer = position % self.n_symbols

    def permute(self, symbol):
        """Encrypt a symbol in the current state."""
        return self.states[self.odometer][symbol]

    def invert(self, symbol):
        """Decrypt a symbol in the current state."""
        return self.inverse_states[self.odometer][symbol]

    def __str__(self, *args, **kwargs):
        forward = self.states[self.odometer]
        backward = self.inverse_states[self.odometer]
        lines = ["Permute\tInvert"]
        for symbol in forward:
            lines.append("%s => %s\t%s => %s"
                         % (symbol, forward[symbol],
                            symbol, backward[symbol]))
        return "\n".join(lines) + "\n"
class Enigma(object):
    """The Enigma cipher machine built from rotors and a reflector."""

    def __init__(self, rotors, reflector):
        """``rotors`` are ordered left to right; ``reflector`` is a mapping
        used to bounce the signal back through the rotors."""
        self.stecker = {}
        self.dec_stecker = {}
        self.rotors = rotors
        self.reflector = reflector
        self.dec_reflector = {v: k for k, v in reflector.items()}
        self.odometer_start = []

    def configure(self, stecker, odometer_start):
        """Install the plugboard mapping and the rotor turnover positions."""
        assert len(odometer_start) == len(self.rotors) - 1
        self.stecker = stecker
        self.dec_stecker = {v: k for k, v in stecker.items()}
        self.odometer_start = odometer_start

    def set_rotor_positions(self, rotor_positions):
        """Set each rotor's odometer from the given sequence."""
        for rotor, position in zip(self.rotors, rotor_positions):
            rotor.setOdometer(position)

    def get_rotor_positions(self):
        """Return the current odometer of every rotor."""
        return [rotor.odometer for rotor in self.rotors]

    def step_to(self, P):
        """Advance the machine by P key presses."""
        for _ in range(P):
            self.step_rotors()

    def step_rotors(self):
        """Step the rightmost rotor, carrying into the others like an
        odometer when they sit on their turnover position."""
        self.rotors[0].step()
        for i, turnover in enumerate(self.odometer_start):
            if self.rotors[i + 1].odometer == turnover:
                self.rotors[i + 1].step()

    def translate_rotors(self, c):
        """Send a symbol through the rotors, the reflector and back."""
        for rotor in self.rotors:
            c = rotor.permute(c)
        c = self.reflector[c]
        for rotor in reversed(self.rotors):
            c = rotor.invert(c)
        return c

    def encrypt(self, str):
        """Encrypt one symbol at a time, stepping the rotors after each."""
        output = []
        for symbol in str:
            scrambled = self.stecker[symbol]
            scrambled = self.translate_rotors(scrambled)
            scrambled = self.stecker[scrambled]
            self.step_rotors()
            output.append(scrambled)
        return "".join(output)

    def decrypt(self, enc):
        """The machine is reciprocal: decryption is the same as encryption."""
        return self.encrypt(enc)

    def __str__(self, *args, **kwargs):
        parts = []
        for s in sorted(self.reflector.keys()):
            parts.append("%s => %s | " % (s, self.stecker[s]))
            for rotor in self.rotors:
                parts.append("%s => %s " % (s, rotor.permute(s)))
                parts.append("%s => %s | " % (s, rotor.invert(s)))
            parts.append("%s => %s" % (s, self.reflector[s]))
            parts.append("\n")
        return "".join(parts)
| 3.421875 | 3 |
blender_bindings/operators/flex_operators.py | BlenderAddonsArchive/SourceIO | 0 | 12758709 | import bpy
from bpy.props import (StringProperty,
BoolProperty,
CollectionProperty,
IntProperty,
FloatProperty,
PointerProperty
)
from .shared_operators import UITools
from ...library.source1.mdl.structs.flex import FlexController
def update_max_min(self: 'SourceIO_PG_FlexController', _):
    """Clamp the free-range 'valuen*' slots into [value_min, value_max].

    Runs as the update callback of the free-range sliders. Writes go
    through ID-property item access (``self[...]``), presumably to avoid
    re-triggering this update callback -- the usual bpy idiom.
    """
    slots = ('valuen_left', 'valuen_right') if self.stereo else ('valuen',)
    lower, upper = self.value_min, self.value_max
    for slot in slots:
        self[slot] = max(lower, min(upper, self[slot]))
# noinspection PyPep8Naming
class SourceIO_PG_FlexController(bpy.types.PropertyGroup):
    """UI state for a single Source-engine flex controller.

    Blender properties have fixed min/max, so one float slot exists per
    supported value range and ``mode`` selects the active one:
    0 -> free range [-100, 100], 1 -> [0, 1], 2 -> [-1, 1], 3 -> [-1, 0].
    Stereo controllers use the ``*_left``/``*_right`` slot variants.
    The nine near-identical mode-dispatch branches of the original were
    collapsed into the ``_SLOT_BY_MODE`` table; behavior (including
    returning None for an unknown mode) is unchanged.
    """
    name: StringProperty()
    stereo: BoolProperty(name="stereo")
    value_max: FloatProperty(name='max')
    value_min: FloatProperty(name='min')
    mode: IntProperty(name='mode')
    valuen: FloatProperty(name="value", min=-100.0, max=100.0, update=update_max_min)
    valuezo: FloatProperty(name="value", min=0.0, max=1.0)
    valuenoo: FloatProperty(name="value", min=-1.0, max=1.0)
    valuenoz: FloatProperty(name="value", min=-1.0, max=0.0)
    valuen_left: FloatProperty(name="value_left", min=-100.0, max=100.0, update=update_max_min)
    valuezo_left: FloatProperty(name="value_left", min=0.0, max=1.0)
    valuenoo_left: FloatProperty(name="value_left", min=-1.0, max=1.0)
    valuenoz_left: FloatProperty(name="value_left", min=-1.0, max=0.0)
    valuen_right: FloatProperty(name="value_right", min=-100.0, max=100.0, update=update_max_min)
    valuezo_right: FloatProperty(name="value_right", min=0.0, max=1.0)
    valuenoo_right: FloatProperty(name="value_right", min=-1.0, max=1.0)
    valuenoz_right: FloatProperty(name="value_right", min=-1.0, max=0.0)

    # Base slot name per mode; suffixes '_left'/'_right' select stereo sides.
    _SLOT_BY_MODE = {0: 'valuen', 1: 'valuezo', 2: 'valuenoo', 3: 'valuenoz'}

    def set_from_controller(self, controller: FlexController):
        """Copy the controller's range and derive the matching slot mode."""
        self.value_min = controller.min
        self.value_max = controller.max
        if controller.min == 0.0 and controller.max == 1.0:
            self.mode = 1
        elif controller.min == -1.0 and controller.max == 1.0:
            self.mode = 2
        elif controller.min == -1.0 and controller.max == 0:
            self.mode = 3
        else:
            self.mode = 0

    def get_slot_name(self):
        """Return the base name of the active value slot (None if mode unknown)."""
        return self._SLOT_BY_MODE.get(self.mode)

    def _get_slot(self, suffix=''):
        # Shared accessor behind the value/value_left/value_right properties.
        slot = self.get_slot_name()
        return getattr(self, slot + suffix) if slot is not None else None

    def _set_slot(self, new_value, suffix=''):
        slot = self.get_slot_name()
        if slot is not None:
            setattr(self, slot + suffix, new_value)

    @property
    def value(self):
        return self._get_slot()

    @value.setter
    def value(self, new_value):
        self._set_slot(new_value)

    @property
    def value_right(self):
        return self._get_slot('_right')

    @value_right.setter
    def value_right(self, new_value):
        self._set_slot(new_value, '_right')

    @property
    def value_left(self):
        return self._get_slot('_left')

    @value_left.setter
    def value_left(self, new_value):
        self._set_slot(new_value, '_left')

    def draw_item(self, layout, icon):
        """Draw the controller row: name label plus one slider (two when stereo)."""
        split = layout.split(factor=0.3, align=True)
        split.label(text=self.name, icon_value=icon)
        slot = self.get_slot_name()
        if self.stereo:
            row = split.row()
            if slot is not None:
                row.prop(self, slot + '_left', text='', slider=True)
                row.prop(self, slot + '_right', text='', slider=True)
        elif slot is not None:
            split.prop(self, slot, text='', slider=True)
class SOURCEIO_PT_FlexControlPanel(UITools, bpy.types.Panel):
    """Sidebar panel listing the flex controllers of the active object."""
    bl_label = 'Flex controllers'
    bl_idname = 'sourceio.flex_control_panel'
    bl_parent_id = "sourceio.utils"

    @classmethod
    def poll(cls, context):
        """Show the panel only when the active object's data has flex controllers."""
        obj = context.active_object  # type:bpy.types.Object
        # Guard against objects without data (e.g. empties) and data-blocks
        # that never had the flex_controllers collection registered; the
        # previous direct attribute access raised AttributeError there.
        if obj is None or obj.data is None:
            return False
        return getattr(obj.data, 'flex_controllers', None) is not None

    def draw(self, context):
        obj = context.active_object  # type:bpy.types.Object
        self.layout.template_list("SourceIO_UL_FlexControllerList", "",
                                  obj.data, "flex_controllers",
                                  obj.data, "flex_selected_index")
class SourceIO_UL_FlexControllerList(bpy.types.UIList):
    """UIList rendering one SourceIO_PG_FlexController per row.

    The unused local ``operator = data`` was removed.
    """
    def draw_item(self, context, layout, data, item, icon, active_data, active_propname):
        controller_entry: SourceIO_PG_FlexController = item
        layout.use_property_decorate = True
        if self.layout_type in {'DEFAULT', 'COMPACT'}:
            # Delegate row drawing to the property group itself.
            controller_entry.draw_item(layout, icon)
        elif self.layout_type in {'GRID'}:
            layout.alignment = 'CENTER'
            layout.label(text="", icon_value=icon)
# Classes exposed for registration (presumably iterated by a register()
# helper elsewhere in the add-on).
classes = (
    SourceIO_PG_FlexController,
    SourceIO_UL_FlexControllerList,
    SOURCEIO_PT_FlexControlPanel,
)
| 2.328125 | 2 |
imp_flask/models/pos.py | thijsmie/imp_flask | 1 | 12758710 | <reponame>thijsmie/imp_flask<filename>imp_flask/models/pos.py
from imp_flask.models.helpers import Base, many_to_many
from sqlalchemy import Column, String, Integer, DateTime, ForeignKey
from sqlalchemy.orm import relationship
class PosInstance(Base):
    """A point-of-sale instance, with the users allowed to operate it."""
    name = Column(String(80))
    posusers = many_to_many('posusers', 'PosInstance', 'User')
class PosSellable(Base):
    """An item sellable at a POS instance.

    References a Product row and/or carries a free-form ``string_product``
    label; ``price`` is an integer (presumably cents -- confirm with callers).
    """
    pos_id = Column(ForeignKey('posinstance.id'))
    pos = relationship('PosInstance')
    product_id = Column(ForeignKey('product.id'))
    product = relationship('Product')
    string_product = Column(String(80))
    price = Column(Integer)
    identifier = Column(String(256))
class PosSale(Base):
    """A recorded sale of a PosSellable: quantity, total value, timestamp."""
    pos_sellable_id = Column(ForeignKey('possellable.id'))
    pos_sellable = relationship('PosSellable')
    timeofsale = Column(DateTime)
    amount = Column(Integer)
    value = Column(Integer)
| 2.59375 | 3 |
MXXXPXXX/sg/managers.py | seokzin/minepost | 4 | 12758711 | from django.contrib.auth.base_user import BaseUserManager
class UserManager(BaseUserManager):
    """Manager for the custom user model keyed on email/name/userid.

    The ``password`` defaults previously contained a redaction artifact
    (``<PASSWORD>``) that is not valid Python; they now default to None
    (Django's set_password(None) marks the password as unusable).
    """
    use_in_migrations = True

    def create_user(self, name, email, userid=None, password=None):
        """Create and persist a regular user; raises ValueError without an email."""
        if not email:
            raise ValueError('must have user email')
        user = self.model(
            email=self.normalize_email(email),
            name=name,
            userid=userid,
        )
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, name, email, userid, password=None):
        """Create a regular user, then promote it to admin/staff/superuser."""
        user = self.create_user(
            email=self.normalize_email(email),
            name=name,
            userid=userid,
        )
        user.is_admin = True
        user.is_superuser = True
        user.is_staff = True
        # The user was created without a password above; set the real one here.
        user.set_password(password)
        user.save(using=self._db)
        return user
| 2.59375 | 3 |
src/platform/weblogic/deployers/webs_deploy.py | 0x27/clusterd | 539 | 12758712 | <filename>src/platform/weblogic/deployers/webs_deploy.py
from src.platform.weblogic.interfaces import WINTERFACES
import src.platform.weblogic.deployers.web_deploy as web_deploy
# WebLogic versions this deployer applies to and the interface it targets.
versions = ["10", "11", "12"]
title = WINTERFACES.WLS
def deploy(fingerengine, fingerprint):
    """Deploy to the WLS interface by delegating to the plain web-interface
    deployer, which implements the shared upload logic."""
    return web_deploy.deploy(fingerengine, fingerprint)
| 1.554688 | 2 |
# lib/daemonlog.py
import time
import lib.getconfig
import logging.handlers
log_file = lib.getconfig.getparam('daemon', 'log_file')
# BUGFIX: the two config keys were swapped -- the backup count must come
# from 'log_rotate_backups' and the rotation interval from
# 'log_rotate_seconds'.
backupcount = int(lib.getconfig.getparam('daemon', 'log_rotate_backups'))
seconds = int(lib.getconfig.getparam('daemon', 'log_rotate_seconds'))
# Rotate the file every `seconds` seconds, keeping `backupcount` old copies.
log = logging.handlers.TimedRotatingFileHandler(log_file, 's', seconds, backupCount=backupcount)
log.setLevel(logging.INFO)
logger = logging.getLogger('main')
logger.addHandler(log)
logger.setLevel(logging.INFO)
logger.propagate = False
def print_message(message):
    """Write *message* to the daemon log with a timestamp prefix."""
    stamp = time.strftime("[%F %H %M:%S] ")
    logger.info(stamp + message)
| 2.65625 | 3 |
# allennlp/semparse/util.py
from typing import List
def lisp_to_nested_expression(lisp_string: str) -> List:
    """
    Takes a logical form as a lisp string and returns a nested list representation of the lisp.
    For example, "(count (division first))" would get mapped to ['count', ['division', 'first']].
    """
    parents: List = []
    current: List = []
    for raw_token in lisp_string.split():
        # Each leading '(' opens a nested list; each trailing ')' closes one.
        opens = len(raw_token) - len(raw_token.lstrip("("))
        closes = len(raw_token) - len(raw_token.rstrip(")"))
        for _ in range(opens):
            child: List = []
            current.append(child)
            parents.append(current)
            current = child
        current.append(raw_token.strip("()"))
        for _ in range(closes):
            current = parents.pop()
    return current[0]
| 4 | 4 |
# chainer_/datasets/cifar10_cls_dataset.py
"""
CIFAR-10 classification dataset.
"""
import os
import numpy as np
from chainer.dataset import DatasetMixin
from chainer.datasets.cifar import get_cifar10
from chainercv.transforms import random_crop
from chainercv.transforms import random_flip
from .dataset_metainfo import DatasetMetaInfo
class CIFAR10(DatasetMixin):
    """
    CIFAR-10 image classification dataset.
    Parameters:
    ----------
    root : str, default '~/.chainer/datasets/cifar10'
        Path to temp folder for storing data.
    mode : str, default 'train'
        'train', 'val', or 'test'.
    transform : function, default None
        A function that takes data and label and transforms them.
    """
    def __init__(self,
                 root=os.path.join("~", ".chainer", "datasets", "cifar10"),
                 mode="train",
                 transform=None):
        assert (root is not None)
        self.transform = transform
        # Chainer's loader only distinguishes train/test; 'val' and 'test'
        # both map onto the test split here.
        train_ds, test_ds = get_cifar10()
        self.base = train_ds if mode == "train" else test_ds
    def __len__(self):
        # Number of samples in the selected split.
        return len(self.base)
    def get_example(self, i):
        # NOTE(review): assumes `transform` was supplied (the default None
        # would raise TypeError here); the metainfo class always sets one.
        image, label = self.base[i]
        image = self.transform(image)
        return image, label
class CIFAR10MetaInfo(DatasetMetaInfo):
    """Static description of CIFAR-10 for the training/evaluation pipeline."""
    def __init__(self):
        super(CIFAR10MetaInfo, self).__init__()
        self.label = "CIFAR10"
        self.short_label = "cifar"
        self.root_dir_name = "cifar10"
        self.dataset_class = CIFAR10
        self.num_training_samples = 50000
        self.in_channels = 3
        self.num_classes = 10
        self.input_image_size = (32, 32)
        # Metric plumbing: top-1 error for both training and validation.
        self.train_metric_capts = ["Train.Err"]
        self.train_metric_names = ["Top1Error"]
        self.train_metric_extra_kwargs = [{"name": "err"}]
        self.val_metric_capts = ["Val.Err"]
        self.val_metric_names = ["Top1Error"]
        self.val_metric_extra_kwargs = [{"name": "err"}]
        self.saver_acc_ind = 0
        # Augmentation for training; plain normalization for val/test.
        self.train_transform = CIFARTrainTransform
        self.val_transform = CIFARValTransform
        self.test_transform = CIFARValTransform
        self.ml_type = "imgcls"
class CIFARTrainTransform(object):
    """
    CIFAR-10 training transform: random crop and horizontal flip followed
    by in-place per-channel mean/std normalization.
    """
    def __init__(self,
                 ds_metainfo,
                 mean_rgb=(0.4914, 0.4822, 0.4465),
                 std_rgb=(0.2023, 0.1994, 0.2010)):
        assert (ds_metainfo is not None)
        # BUGFIX: `self.resize_value` was read in __call__ but never
        # assigned, so every training transform crashed. Crop to the
        # dataset's native input size (CIFAR10MetaInfo.input_image_size).
        self.resize_value = ds_metainfo.input_image_size
        self.mean = np.array(mean_rgb, np.float32)[:, np.newaxis, np.newaxis]
        self.std = np.array(std_rgb, np.float32)[:, np.newaxis, np.newaxis]
    def __call__(self, img):
        img = random_crop(img=img, size=self.resize_value)
        img = random_flip(img=img, x_random=True)
        img -= self.mean
        img /= self.std
        return img
class CIFARValTransform(object):
    """
    CIFAR-10 validation/test transform: in-place per-channel mean/std
    normalization only (no augmentation).
    """
    def __init__(self,
                 ds_metainfo,
                 mean_rgb=(0.4914, 0.4822, 0.4465),
                 std_rgb=(0.2023, 0.1994, 0.2010)):
        assert (ds_metainfo is not None)
        # Store channel statistics as (C, 1, 1) so they broadcast over CHW.
        self.mean = np.asarray(mean_rgb, dtype=np.float32).reshape(-1, 1, 1)
        self.std = np.asarray(std_rgb, dtype=np.float32).reshape(-1, 1, 1)
    def __call__(self, img):
        # Normalize in place and hand the same array back.
        np.subtract(img, self.mean, out=img)
        np.divide(img, self.std, out=img)
        return img
| 2.640625 | 3 |
# talana_prueba/competition/task.py
#PYTHON
import random
from datetime import timedelta
# DJANGO
from config import celery_app
from celery.decorators import task
from django.db.models import Q
#MODELS
from talana_prueba.competition.models import CompetitionTicketModel as competition
@task(name="EventTask", soft_time_limit=timedelta(minutes=1).seconds, time_limit=timedelta(minutes=1).seconds)
def EventTask():
    """Draw the competition winner.

    Idempotent: does nothing once any ticket is already marked 'winner'.
    BUGFIX: the previous loop drew 10 random values from positional index
    range (not real ticket ids) and marked *each* drawn ticket as winner,
    so up to 10 winners could be saved and `objects.get(id=...)` could
    raise for ids outside the table. A single winner is now drawn
    uniformly from the actual id list.
    """
    if competition.objects.filter(status='winner').exists():
        print("winner exist")
        return
    ticket_ids = list(competition.objects.values_list("id", flat=True))
    if not ticket_ids:
        print("no tickets registered")
        return
    winner = competition.objects.get(id=random.choice(ticket_ids))
    winner.status = 'winner'
    winner.save()
    print(f"winner!! {winner}")
| 2.21875 | 2 |
# dl-mnist/module.py
import numpy as np
import tensorflow as tf
from ops import instance_norm, conv2d, deconv2d, lrelu
######################################################################
def generator_multiunet(image, gf_dim, reuse=False, name="generator", output_c_dim=-1, istraining=True):
    """U-Net generator with three multi-scale tanh outputs.

    Encoder e1..e8 shrinks a 256x256 input down to 1x1 (per the inline
    shape comments); the decoder d1..d7 mirrors it with skip connections
    (concat with the matching encoder feature). Side branches d6_pre /
    d7_pre / d8_pre decode 64 / 128 / 256 px outputs with output_c_dim
    channels; all three are returned (largest first) through tanh.
    Dropout (rate 0.5) is active only while `istraining` is True.
    """
    if istraining:
        dropout_rate = 0.5
    else:
        dropout_rate = 1.0
    with tf.variable_scope(name):
        # image is 256 x 256 x input_c_dim
        if reuse:
            tf.get_variable_scope().reuse_variables()
        else:
            assert tf.get_variable_scope().reuse is False
        # image is (256 x 256 x input_c_dim)
        e1 = instance_norm(conv2d(image, gf_dim, name='g_e1_conv'), 'g_bn_e1')
        # e1 is (128 x 128 x self.gf_dim)
        e2 = instance_norm(conv2d(lrelu(e1), gf_dim * 2, name='g_e2_conv'), 'g_bn_e2')
        # e2 is (64 x 64 x self.gf_dim*2)
        e3 = instance_norm(conv2d(lrelu(e2), gf_dim * 4, name='g_e3_conv'), 'g_bn_e3')
        # e3 is (32 x 32 x self.gf_dim*4)
        e4 = instance_norm(conv2d(lrelu(e3), gf_dim * 8, name='g_e4_conv'), 'g_bn_e4')
        # e4 is (16 x 16 x self.gf_dim*8)
        e5 = instance_norm(conv2d(lrelu(e4), gf_dim * 8, name='g_e5_conv'), 'g_bn_e5')
        # e5 is (8 x 8 x self.gf_dim*8)
        e6 = instance_norm(conv2d(lrelu(e5), gf_dim * 8, name='g_e6_conv'), 'g_bn_e6')
        # e6 is (4 x 4 x self.gf_dim*8)
        e7 = instance_norm(conv2d(lrelu(e6), gf_dim * 8, ks=3, s=1, padding='VALID', name='g_e7_conv'), 'g_bn_e7')
        # e7 is (2 x 2 x self.gf_dim*8)
        e8 = instance_norm(conv2d(lrelu(e7), gf_dim * 16, ks=2, s=1, padding='VALID', name='g_e8_conv'), 'g_bn_e8')
        # e8 is (1 x 1 x self.gf_dim*8)
        d1 = deconv2d(tf.nn.relu(e8), gf_dim * 8, ks=2, s=1, padding='VALID', name='g_d1')
        d1 = tf.nn.dropout(d1, dropout_rate)
        d1 = tf.concat([instance_norm(d1, 'g_bn_d1'), e7], 3)
        # d1 is (2 x 2 x self.gf_dim*8*2)
        d2 = deconv2d(tf.nn.relu(d1), gf_dim * 8, ks=3, s=1, padding='VALID', name='g_d2')
        d2 = tf.nn.dropout(d2, dropout_rate)
        d2 = tf.concat([instance_norm(d2, 'g_bn_d2'), e6], 3)
        # d2 is (4 x 4 x self.gf_dim*8*2)
        d3 = deconv2d(tf.nn.relu(d2), gf_dim * 8, name='g_d3')
        d3 = tf.nn.dropout(d3, dropout_rate)
        d3 = tf.concat([instance_norm(d3, 'g_bn_d3'), e5], 3)
        # d3 is (8 x 8 x self.gf_dim*8*2)
        d4 = deconv2d(tf.nn.relu(d3), gf_dim * 8, name='g_d4')
        d4 = tf.concat([instance_norm(d4, 'g_bn_d4'), e4], 3)
        # d4 is (16 x 16 x self.gf_dim*8*2)
        d5 = deconv2d(tf.nn.relu(d4), gf_dim * 4, name='g_d5')
        d5 = tf.concat([instance_norm(d5, 'g_bn_d5'), e3], 3)
        # d5 is (32 x 32 x self.gf_dim*4*2)
        d6 = deconv2d(tf.nn.relu(d5), gf_dim * 2, name='g_d6')
        d6 = tf.concat([instance_norm(d6, 'g_bn_d6'), e2], 3)
        # d6 is (64 x 64 x self.gf_dim*2*2)
        d7 = deconv2d(tf.nn.relu(d6), gf_dim, name='g_d7')
        d7 = tf.concat([instance_norm(d7, 'g_bn_d7'), e1], 3)
        # d7 is (128 x 128 x self.gf_dim*1*2)
        d6_pre = deconv2d(tf.nn.relu(d5), output_c_dim, name='g_d6_pre')
        # d6_pre is (64 x 64 x output_c_dim)
        d7_pre = deconv2d(tf.nn.relu(d6), output_c_dim, name='g_d7_pre')
        # d7_pre is (128 x 128 x output_c_dim)
        d8_pre = deconv2d(tf.nn.relu(d7), output_c_dim, name='g_d8_pre')
        # d8_pre is (256 x 256 x output_c_dim)
        return tf.nn.tanh(d8_pre), tf.nn.tanh(d7_pre), tf.nn.tanh(d6_pre)
| 2.1875 | 2 |
# cca_zoo/generate_data.py
import numpy as np
from scipy.linalg import toeplitz
def gaussian(x, mu, sig, dn):
    """Normal density N(mu, sig^2) evaluated at x, scaled by the step width dn."""
    z = (x - mu) / sig
    return dn * np.exp(-0.5 * z * z) / (np.sqrt(2 * np.pi) * sig)
def generate_mai(m: int, k: int, N: int, M: int, sparse_variables_1: int = None, sparse_variables_2: int = None,
                 signal: float = None,
                 structure='identity', sigma=0.1, decay=0.5):
    """Simulate two views of multivariate-normal data with k correlated pairs.

    Builds a joint (N+M)x(N+M) covariance whose within-view blocks follow
    `structure` ('identity', 'gaussian' or 'toeplitz') and whose cross block
    encodes k canonical pairs (up, vp) with strengths signal * decay**i.
    Optional contiguous sparsity is imposed on the true weights.

    Returns X (m, N), Y (m, M), true weights up (N, k) / vp (M, k) and the
    joint covariance. `signal` must be provided (the default None raises
    when the cross-covariance is built).

    BUGFIX: the sparse-support offset for the *second* view was drawn with
    np.random.randint(N - sparse_variables_2) -- a copy-paste from the first
    view's block -- which mis-placed the support and crashed whenever
    sparse_variables_2 >= N. It now uses M, the second view's dimension.
    """
    mean = np.zeros(N + M)
    cov = np.zeros((N + M, N + M))
    # Per-pair correlation strengths: decay**0, decay**1, ...
    p = np.arange(0, k)
    p = decay ** p
    # Covariance Bit
    if structure == 'identity':
        cov_1 = np.eye(N)
        cov_2 = np.eye(M)
    elif structure == 'gaussian':
        x = np.linspace(-1, 1, N)
        x_tile = np.tile(x, (N, 1))
        mu_tile = np.transpose(x_tile)
        dn = 2 / (N - 1)
        cov_1 = gaussian(x_tile, mu_tile, sigma, dn)
        cov_1 /= cov_1.max()
        x = np.linspace(-1, 1, M)
        x_tile = np.tile(x, (M, 1))
        mu_tile = np.transpose(x_tile)
        dn = 2 / (M - 1)
        cov_2 = gaussian(x_tile, mu_tile, sigma, dn)
        cov_2 /= cov_2.max()
    elif structure == 'toeplitz':
        c = np.arange(0, N)
        c = sigma ** c
        cov_1 = toeplitz(c, c)
        c = np.arange(0, M)
        c = sigma ** c
        cov_2 = toeplitz(c, c)
    cov[:N, :N] = cov_1
    cov[N:, N:] = cov_2
    # Sparse Bits: draw weights, optionally restrict each column to a
    # random contiguous support, then normalize and deflate.
    res_cov_1 = np.copy(cov_1)
    up = np.random.rand(N, k)
    for _ in range(k):
        if sparse_variables_1 is not None:
            first = np.random.randint(N - sparse_variables_1)
            up[:first, _] = 0
            up[(first + sparse_variables_1):, _] = 0
        up[:, _] /= np.sqrt((up[:, _].T @ res_cov_1 @ up[:, _]))
        res_cov_1 -= np.outer(up[:, _], up[:, _]) @ res_cov_1 @ np.outer(up[:, _], up[:, _])
    # Elimination step:
    for _ in range(k):
        mat_1 = up.T @ cov_1 @ up
        up[:, (_ + 1):] -= np.outer(up[:, _], np.diag(mat_1[_, (_ + 1):]))
    # TODO this is where we could fix to work out how to have more than one orthogonal. Think we should be able to deflate
    res_cov_2 = np.copy(cov_2)
    vp = np.random.rand(M, k)
    for _ in range(k):
        if sparse_variables_2 is not None:
            # BUGFIX: offset must be drawn within this view's dimension M.
            first = np.random.randint(M - sparse_variables_2)
            vp[:first, _] = 0
            vp[(first + sparse_variables_2):, _] = 0
        vp[:, _] /= np.sqrt((vp[:, _].T @ res_cov_2 @ vp[:, _]))
        res_cov_2 -= np.outer(vp[:, _], vp[:, _]) @ res_cov_2 @ np.outer(vp[:, _], vp[:, _])
    for _ in range(k):
        mat_2 = vp.T @ cov_2 @ vp
        vp[:, (_ + 1):] -= np.outer(vp[:, _], np.diag(mat_2[_, (_ + 1):]))
    sparse_vec = np.zeros((N, M))
    for _ in range(k):
        sparse_vec += signal * p[_] * np.outer(up[:, _], vp[:, _])
    # Cross Bit
    cross = cov[:N, :N] @ sparse_vec @ cov[N:, N:]
    cov[N:, :N] = cross.T
    cov[:N, N:] = cross
    X = np.random.multivariate_normal(mean, cov, m)
    Y = X[:, N:]
    X = X[:, :N]
    return X, Y, up, vp, cov
def generate_witten(m, k, N, M, sigma, tau, sparse_variables_1=2, sparse_variables_2=2):
    """Toy two-view factor-model data with sparse loadings.

    A shared latent matrix (m, k) is projected into each view through random
    loadings whose variables beyond the first `sparse_variables_*` are
    zeroed, then Gaussian noise scaled by sigma / tau is added.
    Returns X (m, N), Y (m, M) and the transposed loadings (N, k) / (M, k).
    """
    latent = np.random.rand(m, k)
    loadings_x = np.random.rand(k, N)
    loadings_y = np.random.rand(k, M)
    loadings_x[:, sparse_variables_1:] = 0
    loadings_y[:, sparse_variables_2:] = 0
    noise_x = np.random.normal(0, 1, (m, N))
    noise_y = np.random.normal(0, 1, (m, M))
    X = latent @ loadings_x + sigma * noise_x
    Y = latent @ loadings_y + tau * noise_y
    return X, Y, loadings_x.T, loadings_y.T
def generate_candola(m, k, N, M, sigma, tau, sparse_variables_1=None, sparse_variables_2=None):
    """Two-view latent-variable data with orthonormal true weights.

    m data points, k latent dimensions, view dimensions N and M. Each view's
    weights are the first k columns of a random orthogonal matrix; the
    latent samples are projected through them with decreasing per-dimension
    scales and additive uniform noise (sigma / tau).
    The sparse_variables_* parameters are accepted for signature
    compatibility but unused.
    """
    def random_orthogonal(dim):
        # QR of a random matrix, with R's diagonal signs folded into Q so
        # the result is orthogonal (q @ diag(d/|d|) in the original).
        q, r = np.linalg.qr(np.random.rand(dim, dim))
        d = np.diag(r)
        return q @ np.diag(d / np.abs(d))
    u = random_orthogonal(N)
    v = random_orthogonal(M)
    # Latent samples, (m, k).
    Z = np.random.rand(m, k)
    up = u[:, :k]
    vp = v[:, :k]
    # Per-dimension scales: lam decreases linearly, mu is its square root form.
    lam = (3 * N + 2 - 2 * np.arange(N)) / (2 * N)
    mu = np.sqrt((3 * M + 2 - 2 * np.arange(M)) / (2 * M))
    X = np.diag(lam) @ up @ Z.T + sigma * np.random.rand(N, m)
    Y = np.diag(mu) @ vp @ Z.T + tau * np.random.rand(M, m)
    return X.T, Y.T, up.T, vp.T
| 2.5625 | 3 |
# scripts/extract_ssv2.py
import os
import subprocess
import json
from glob import glob
from joblib import Parallel, delayed
# Something-Something-V2 webm clips are decoded to 256x256 JPEG frames
# at ffmpeg quality 5; split files (*.txt) list the clips per split.
out_h = 256
out_w = 256
in_folder = 'data/ssv2/'
out_folder = 'data/ssv2_{}x{}q5'.format(out_w,out_h)
split_dir = "splits/ssv2_OTAM"
wc = os.path.join(split_dir, "*.txt")
def run_cmd(cmd):
    """Create the target directory and run the ffmpeg command.
    `cmd` is an (ffmpeg_args, extract_dir) pair. Errors are deliberately
    swallowed so one bad clip does not stop the batch (best effort).
    """
    try:
        os.mkdir(cmd[1])
        subprocess.call(cmd[0])
    except:
        pass
# Best-effort creation of the output root.
try:
    os.mkdir(out_folder)
except:
    pass
# For every split file, collect the (class, video) pairs it lists and
# extract frames for each clip not yet processed.
for fn in glob(wc):
    classes = []
    vids = []
    print(fn)
    if "train" in fn:
        cur_split = "train"
    elif "val" in fn:
        cur_split = "val"
    elif "test" in fn:
        cur_split = "test"
    with open(fn, "r") as f:
        data = f.readlines()
    # Each line looks like .../<class>/<video_id>.
    c = [x.split(os.sep)[-2].strip() for x in data]
    v = [x.split(os.sep)[-1].strip() for x in data]
    vids.extend(v)
    classes.extend(c)
    try:
        os.mkdir(os.path.join(out_folder, cur_split))
    except:
        pass
    for c in list(set(classes)):
        try:
            os.mkdir(os.path.join(out_folder, cur_split, c))
        except:
            pass
    cmds = []
    for v, c in zip(vids, classes):
        source_vid = os.path.join(in_folder, "{}.webm".format(v))
        extract_dir = os.path.join(out_folder, cur_split, c, v)
        # Skip clips that already have an extraction directory (resumable).
        if os.path.exists(extract_dir):
            continue
        out_wc = os.path.join(extract_dir, '%08d.jpg')
        print(source_vid, out_wc)
        scale_string = 'scale={}:{}'.format(out_w, out_h)
        os.mkdir(extract_dir)
        try:
            cmd = ['ffmpeg', '-i', source_vid, '-vf', scale_string, '-q:v', '5', out_wc]
            cmds.append((cmd, extract_dir))
            subprocess.call(cmd)
        except:
            pass
#Parallel(n_jobs=8, require='sharedmem')(delayed(run_cmd)(cmds[i]) for i in range(0, len(cmds)))
# Anaconda-files/Program_19d.py
# Program 19d: Generalized synchronization.
# See Figure 19.8(a).
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
# Constants
# mu: Rossler drive parameter; sigma, b, r: Lorenz response parameters;
# g: drive -> response coupling strength.
mu = 5.7
sigma = 16
b = 4
r = 45.92
g = 8  # When g=4, there is no synchronization.
tmax = 100
t = np.arange(0.0, tmax, 0.1)
def rossler_lorenz_odes(X,t):
    """Right-hand side of the coupled system: one Rossler drive (x) and two
    identical Lorenz responses (y, z), both driven by x1 with coupling g.
    Two identical responses from different initial conditions allow the
    script to test generalized synchronization (plotting y2 vs z2)."""
    x1, x2, x3, y1, y2, y3, z1, z2, z3 = X
    dx1 = -(x2 + x3)
    dx2 = x1 + 0.2*x2
    dx3 = 0.2 + x3 * (x1 - mu)
    dy1 = sigma * (y2 - y1) - g * (y1 - x1)
    dy2 = -y1 * y3 + r*y1 - y2
    dy3 = y1 * y2 - b*y3
    dz1 = sigma * (z2 - z1) - g * (z1 - x1)
    dz2 = -z1*z3 + r*z1 - z2
    dz3 = z1*z2 - b*z3
    return (dx1, dx2, dx3, dy1, dy2, dy3, dz1, dz2, dz3)
y0 = [2, -10, 44, 30, 10, 20, 31, 11, 22]
X = odeint(rossler_lorenz_odes, y0, t, rtol=1e-6)
# BUGFIX: the unpack previously listed x1 twice (`x1, ..., x1, z2, z3`),
# clobbering the Rossler x1 with the first Lorenz-response coordinate.
x1, x2, x3, y1, y2, y3, z1, z2, z3 = X.T  # unpack columns
plt.figure(1)
# Delete first 500 iterates (transient before synchronization sets in).
plt.plot(y2[500:len(y2)], z2[500:len(z2)])
plt.xlabel(r'$y_2$', fontsize=15)
plt.ylabel(r'$z_2$', fontsize=15)
plt.show()
# api/config/__init__.py
from pydantic import BaseSettings
class CommonSettings(BaseSettings):
    """App-wide flags shared by every process."""
    APP_NAME: str = 'foodieFOX Authentication'
    DEBUG_MODE: bool = False
class ServerSettings(BaseSettings):
    """Bind address for the HTTP server."""
    HOST: str = '127.0.0.1'
    PORT: int = 5000
class DatabaseSettings(BaseSettings):
    """Database / Realm connection; no defaults, so all values must come
    from the environment or the .env file."""
    REALM_APP_ID: str
    DB_NAME: str
    DB_URL: str
class AuthSettings(BaseSettings):
    """JWT signing key, access-token lifetime and cookie security flag."""
    JWT_SECRET_KEY: str
    JWT_ACCESS_TOKEN_EXPIRE_MINUTES: int = 15
    SECURE_COOKIE: bool = False
class Settings(CommonSettings, ServerSettings, DatabaseSettings, AuthSettings):
    """Aggregated application settings; fields without defaults must be
    present in the environment or the .env file."""
    PHOTON_URL: str
    CORS_URL: str
    APP_URL: str
    API_URL: str
    class Config:
        # pydantic reads unset fields from this dotenv file.
        env_file = '.env'
# Module-level singleton imported by the rest of the app.
settings = Settings()
| 1.921875 | 2 |
# AdvancedJan2022/SetsTuplesStacksQueues/colors.py
from collections import deque
# Judge input: space-separated color fragments that must be re-joined.
line = deque(input().split())
all_colors = {"red", "yellow", "blue", "orange", "purple", "green"}
main_colors = {"red", "yellow", "blue"}
# A secondary color is only kept when both of its primary components
# were also formed.
secondary_colors = {
    "orange": ['red', 'yellow'],
    "purple": ['blue', 'red'],
    "green": ['yellow', 'blue'],
}
formed_colors = []
while line:
    # Pair the leftmost fragment with the rightmost one and try both
    # concatenation orders.
    first = line.popleft()
    second = line.pop() if line else ''
    string_result = first + second
    if string_result in all_colors:
        formed_colors.append(string_result)
        continue
    string_result = second + first
    if string_result in all_colors:
        formed_colors.append(string_result)
        continue
    # No match: strip the last character of each fragment and push the
    # non-empty remainders back into the middle of the deque.
    first = first[:-1]
    second = second[:-1]
    if first:
        line.insert(len(line) // 2, first)
    if second:
        line.insert(len(line) // 2, second)
final_colors = []
for color in formed_colors:
    if color in main_colors:
        final_colors.append(color)
        continue
    if color in secondary_colors.keys():
        is_valid = True
        for required_color in secondary_colors[color]:
            if required_color not in formed_colors:
                is_valid = False
                break
        if is_valid:
            final_colors.append(color)
print(final_colors)
# model-optimizer/extensions/front/ATenToEmbeddingBag.py
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from extensions.ops.embedding_bag import EmbeddingBagOffsetsSum, EmbeddingBagPackedSum
from extensions.ops.rank import Rank
from mo.front.common.partial_infer.utils import int64_array
from mo.front.common.replacement import FrontReplacementPattern
from mo.front.tf.graph_utils import create_op_with_const_inputs
from mo.graph.graph import Graph, rename_node
from mo.ops.broadcast import Broadcast
from mo.ops.concat import Concat
from mo.ops.shape import Shape
from mo.ops.unsqueeze import Unsqueeze
from mo.utils.shape import node_to_get_shape_value_of_indices, get_canonical_axis_index_node, \
get_shape_values_by_indices_node
class AtenToEmbeddingBag(FrontReplacementPattern):
    """
    Converts the ATen layer to EmbeddingBag layer.
    """
    enabled = True
    def find_and_replace_pattern(self, graph: Graph):
        """Rewire every ATen embedding_bag node onto the matching
        EmbeddingBag(Packed|Offsets)Sum operation."""
        for node in graph.get_op_nodes(op='ATen', operator='embedding_bag'):
            # mode 0 corresponds to "sum"; other reduction modes are not handled.
            assert node.soft_get('mode') == 0, 'ATen::embedding_bag has unsupported mode, only "sum" ' \
                                              'mode is supported for node {}.'.format(node.id)
            node_name = node.soft_get('name', node.id)
            rename_node(node, node_name + '/TBR')
            # Packed form: no offsets input (port 2), indices are 2-D.
            is_packed = False
            if len(node.in_ports()) < 3 or node.in_port(2).disconnected():
                is_packed = True
                embedding_bag = EmbeddingBagPackedSum(graph, {'name': node_name}).create_node()
            else:
                embedding_bag = EmbeddingBagOffsetsSum(graph, {'name': node_name}).create_node()
                node.in_port(2).get_connection().set_destination(embedding_bag.in_port(2))
            rename_node(embedding_bag, node_name)
            node.in_port(0).get_connection().set_destination(embedding_bag.in_port(0))
            node.in_port(1).get_connection().set_destination(embedding_bag.in_port(1))
            node.out_port(0).get_connection().set_source(embedding_bag.out_port(0))
            if len(node.in_ports()) == 4 and not node.in_port(3).disconnected():
                if is_packed:
                    node.in_port(3).get_connection().set_destination(embedding_bag.in_port(2))
                else:
                    # connect per_sample_weights
                    node.in_port(3).get_connection().set_destination(embedding_bag.in_port(4))
                    # The sub-graph below pads the embedding table with one
                    # all-zero row and uses the original row count as the
                    # default index, presumably so unset bags read zeros.
                    weights_shape_node = Shape(graph, {'name': node_name + '/WeightsShape'}).create_node()
                    weights_rank_node = Rank(graph, {'name': node_name + '/WeightsRank'}).create_node()
                    last_dim_node = get_canonical_axis_index_node(weights_rank_node, -1)
                    weights_last_dim = get_shape_values_by_indices_node(weights_shape_node, last_dim_node)
                    weights_first_dim = node_to_get_shape_value_of_indices(weights_shape_node, [0])
                    zero_col_node = create_op_with_const_inputs(graph, Broadcast, {0: int64_array([0])},
                                                                {'name': node_name + '/Broadcast'})
                    zero_col_node.in_port(1).connect(weights_last_dim.out_port(0))
                    default_embeddings_node = create_op_with_const_inputs(graph, Unsqueeze, {1: int64_array(0)},
                                                                          {'name': node_name + '/Unsqueeze'})
                    default_embeddings_node.in_port(0).connect(zero_col_node.out_port(0))
                    # expand embedding table with zeros
                    weights_concat = Concat(graph, {'axis': 0, 'in_ports_count': 2,
                                                    'name': node_name + '/Concat'}).create_node()
                    embedding_bag.in_port(0).get_connection().set_destination(weights_concat.in_port(0))
                    weights_concat.in_port(0).get_connection().add_destination(weights_shape_node.in_port(0))
                    weights_concat.in_port(0).get_connection().add_destination(weights_rank_node.in_port(0))
                    weights_concat.in_port(1).connect(default_embeddings_node.out_port(0))
                    weights_concat.out_port(0).connect(embedding_bag.in_port(0))
                    # point default index to expanded part of embedding table
                    weights_first_dim.out_port(0).connect(embedding_bag.in_port(3))
| 1.75 | 2 |
# deafrica/tests/test_s2_gap_report.py
from unittest.mock import patch
import boto3
from moto import mock_s3
from urlpath import URL
from deafrica.monitoring import s2_gap_report
from deafrica.monitoring.s2_gap_report import (
get_and_filter_cogs_keys,
generate_buckets_diff,
)
from deafrica.tests.conftest import (
COGS_REGION,
INVENTORY_BUCKET_NAME,
INVENTORY_MANIFEST_FILE,
INVENTORY_DATA_FILE,
INVENTORY_FOLDER,
INVENTORY_BUCKET_SOURCE_NAME,
REGION,
REPORT_FOLDER,
TEST_DATA_DIR,
)
# Locations of the Sentinel-2 inventory fixtures used by the tests below.
DATA_FOLDER = "sentinel_2"
INVENTORY_MANIFEST_FILE = TEST_DATA_DIR / DATA_FOLDER / INVENTORY_MANIFEST_FILE
INVENTORY_DATA_FILE = TEST_DATA_DIR / DATA_FOLDER / INVENTORY_DATA_FILE
@mock_s3
def test_get_and_filter_cogs_keys(
    s3_inventory_data_file: URL,
    s3_inventory_manifest_file: URL,
):
    """Upload the fixture inventory (manifest + data) into a mocked S3
    bucket and check that get_and_filter_cogs_keys returns the 6 scenes."""
    s3_client = boto3.client("s3", region_name=COGS_REGION)
    s3_client.create_bucket(
        Bucket=INVENTORY_BUCKET_NAME,
        CreateBucketConfiguration={
            "LocationConstraint": COGS_REGION,
        },
    )
    # Upload inventory manifest
    s3_client.upload_file(
        str(INVENTORY_MANIFEST_FILE),
        INVENTORY_BUCKET_NAME,
        str(s3_inventory_manifest_file),
    )
    # Upload inventory data
    s3_client.upload_file(
        str(INVENTORY_DATA_FILE),
        INVENTORY_BUCKET_NAME,
        str(s3_inventory_data_file),
    )
    print(list(boto3.resource("s3").Bucket("test-inventory-bucket").objects.all()))
    s3_inventory_path = URL(
        f"s3://{INVENTORY_BUCKET_NAME}/{INVENTORY_FOLDER}/{INVENTORY_BUCKET_NAME}/"
    )
    # Point the module-level paths at the mocked inventory before calling.
    with patch.object(
        s2_gap_report, "SOURCE_INVENTORY_PATH", str(s3_inventory_path)
    ), patch.object(s2_gap_report, "BASE_FOLDER_NAME", str(INVENTORY_FOLDER)):
        scenes_list = get_and_filter_cogs_keys()
        assert len(scenes_list) == 6
@mock_s3
def test_generate_buckets_diff(
    s3_inventory_data_file: URL,
    s3_inventory_manifest_file: URL,
):
    """Upload the *same* fixture inventory into both the source (COGs) and
    destination buckets; with identical inventories the gap report must be
    empty (no objects written under REPORT_FOLDER)."""
    s3_client_cogs = boto3.client("s3", region_name=COGS_REGION)
    s3_client_cogs.create_bucket(
        Bucket=INVENTORY_BUCKET_SOURCE_NAME,
        CreateBucketConfiguration={
            "LocationConstraint": COGS_REGION,
        },
    )
    # Upload inventory manifest
    s3_client_cogs.upload_file(
        str(INVENTORY_MANIFEST_FILE),
        INVENTORY_BUCKET_SOURCE_NAME,
        str(s3_inventory_manifest_file),
    )
    # Upload inventory data
    s3_client_cogs.upload_file(
        str(INVENTORY_DATA_FILE),
        INVENTORY_BUCKET_SOURCE_NAME,
        str(s3_inventory_data_file),
    )
    print(list(boto3.resource("s3").Bucket("test-cogs-inventory-bucket").objects.all()))
    # Same fixtures again for the destination bucket.
    s3_client = boto3.client("s3", region_name=REGION)
    s3_client.create_bucket(
        Bucket=INVENTORY_BUCKET_NAME,
        CreateBucketConfiguration={
            "LocationConstraint": REGION,
        },
    )
    # Upload inventory manifest
    s3_client.upload_file(
        str(INVENTORY_MANIFEST_FILE),
        INVENTORY_BUCKET_NAME,
        str(s3_inventory_manifest_file),
    )
    # Upload inventory data
    s3_client.upload_file(
        str(INVENTORY_DATA_FILE),
        INVENTORY_BUCKET_NAME,
        str(s3_inventory_data_file),
    )
    print(list(boto3.resource("s3").Bucket("test-inventory-bucket").objects.all()))
    s3_inventory_path = URL(
        f"s3://{INVENTORY_BUCKET_NAME}/{INVENTORY_FOLDER}/{INVENTORY_BUCKET_NAME}/"
    )
    s3_cogs_inventory_path = URL(
        f"s3://{INVENTORY_BUCKET_SOURCE_NAME}/{INVENTORY_FOLDER}/{INVENTORY_BUCKET_NAME}/"
    )
    with patch.object(
        s2_gap_report, "SOURCE_INVENTORY_PATH", str(s3_cogs_inventory_path)
    ), patch.object(
        s2_gap_report, "SENTINEL_2_INVENTORY_PATH", str(s3_inventory_path)
    ), patch.object(
        s2_gap_report, "BASE_FOLDER_NAME", str(INVENTORY_FOLDER)
    ):
        # No differences
        generate_buckets_diff(bucket_name=INVENTORY_BUCKET_NAME)
        assert (
            len(
                s3_client.list_objects_v2(
                    Bucket=INVENTORY_BUCKET_NAME, Prefix=REPORT_FOLDER
                ).get("Contents", [])
            )
            == 0
        )
| 1.945313 | 2 |
# hoodzapp/admin.py
from django.contrib import admin
from .models import Post, Thahood, UserProfile, Business
# Register your models here.
# Expose the app's models in the Django admin
# (Business is registered separately just below).
for hood_model in (Post, UserProfile, Thahood):
    admin.site.register(hood_model)
admin.site.register(Business) | 1.445313 | 1 |
DataLog.py | croomjm/Cubli | 4 | 12758726 | from collections import deque
import os
class DataLog:
    """
    Container for logged controller data, saved to a json file on demand.

    All logged series are bounded to self.__buffer_length records.
    Uses the jsonpickle library (imported lazily) to read/write log data.
    """
    def __init__(self, logDir = None, buffer_length = 5*10**4, logging = True):
        """Create empty, bounded log buffers and resolve the log directory."""
        self.__buffer_length = buffer_length
        # Per-motor series are dicts keyed by 'time' plus motor indices 0-2.
        motor_series = ['counts', 'commanded_velocity',
                        'commanded_throttle', 'measured_velocity']
        latency_series = ['command_latency',
                          'measurement_to_command_latency',
                          'iteration_latency']
        self.log = {
            '__buffer_length': self.__buffer_length, #max number of data points to save
            'start_time': None,
            'velocity_units': None,
            'PID_parameters': None
        }
        for name in motor_series:
            self.log[name] = {i: deque(maxlen = self.__buffer_length)
                              for i in ['time', 0, 1, 2]}
        for name in latency_series:
            self.log[name] = deque(maxlen = self.__buffer_length)
        # Save log files into logDir when it is an existing directory,
        # otherwise fall back to the current working directory.
        self.logDir = os.getcwd() + '/'
        if logDir:
            logDir = str(logDir)
            if os.path.isdir(logDir):
                self.logDir = logDir
    def saveLog(self, fileDir = None, fileName = None, baseName = None):
        """
        Save all log information to a json file.

        fileDir -- directory to write to (must exist); defaults to the
            directory resolved in __init__.
        fileName -- explicit file name (without extension); overrides baseName.
        baseName -- stem used to build a timestamped file name; defaults to
            this source file's name.

        Raises RuntimeError if fileDir is supplied but is not a directory.
        """
        import jsonpickle
        jsonpickle.set_encoder_options('simplejson', sort_keys=True, indent=4)
        jsonpickle.set_preferred_backend('simplejson')
        # Check supplied fileDir (if provided).
        if fileDir:
            fileDir = str(fileDir)
            if not os.path.isdir(fileDir):
                # BUG FIX: the original had an unreachable 'return' after this raise.
                raise RuntimeError('Provided filePath is not an existing directory. Aborting saveLog method.')
        else:
            fileDir = self.logDir
        # Build the output file name from fileName or baseName.
        if fileName:
            fileName = fileDir + str(fileName) + '.json'
        else:
            from datetime import datetime
            if baseName:
                baseName = str(baseName)
            else:
                # Default to the name of this source file.
                baseName = os.path.basename(__file__).split('.')[0]
            fileName = fileDir + baseName + '_' + str(datetime.now()) + '.json'
        # Save the log file. print() calls (instead of py2 print statements)
        # keep this module runnable under both Python 2 and 3.
        try:
            with open(fileName, 'w') as f:
                f.write(jsonpickle.encode(self.log, keys = True))
            print('Log file successfully saved as {0}'.format(fileName))
        except (IOError, OSError):
            # Narrowed from a bare 'except' so programming errors are not hidden.
            print('Error saving log file.')
    def updateLog(self, kwDict):
        """
        Merge kwDict into the log.

        deque-valued entries have the new value appended; dict-valued entries
        (the per-motor series) have each sub-key's value appended; anything
        else is overwritten with the new value.
        """
        for input_key in kwDict:
            entry = self.log[input_key]
            if isinstance(entry, deque):
                entry.append(kwDict[input_key])
            elif isinstance(entry, dict):
                for key in entry:
                    entry[key].append(kwDict[input_key][key])
            else:
                self.log[input_key] = kwDict[input_key]
    def openLog(self, filePath):
        """
        Read a json log file written by saveLog and return the decoded object.

        Raises RuntimeError if the file cannot be opened or decoded.
        """
        import jsonpickle
        jsonpickle.set_encoder_options('simplejson', sort_keys=True, indent=4)
        jsonpickle.set_preferred_backend('simplejson')
        filePath = str(filePath)
        try:
            with open(filePath, 'r') as f:
                return jsonpickle.decode(f.read(), keys = True)
        except Exception:
            # Narrowed from a bare 'except' so KeyboardInterrupt/SystemExit propagate.
            raise RuntimeError('Unable to open and decode json file at {0}'.format(filePath))
| 3.046875 | 3 |
decuen/memories/uniform.py | ziyadedher/decuen | 2 | 12758727 | """Implementation of a possibly bounded uniform experience replay manager."""
import random
from typing import List, Optional
from decuen.memories._memory import Memory
from decuen.structs import Trajectory, Transition
class UniformMemory(Memory):
    """Sized uniform memory mechanism, stores memories up to a maximum amount if specified."""
    # Maximum buffer sizes; None means the corresponding buffer is unbounded.
    _transitions_cap: Optional[int]
    _trajectories_cap: Optional[int]
    def __init__(self, transition_replay_num: int = 1, trajectory_replay_num: int = 1,
                 transitions_cap: Optional[int] = None, trajectories_cap: Optional[int] = None) -> None:
        """Initialize a uniform memory mechanism."""
        super().__init__([], [], transition_replay_num, trajectory_replay_num)
        self._transitions_cap = transitions_cap
        self._trajectories_cap = trajectories_cap
    def store_transition(self, transition: Transition) -> None:
        """Store a transition in this memory mechanism's buffer with any needed associated information."""
        # NOTE(review): keeps only the most recent transition on the instance;
        # presumably read by the Memory base class -- confirm it is not dead code.
        self.transition = transition
        # Evict the oldest entry once the cap is reached (FIFO eviction).
        if self._transitions_cap is not None and len(self._transition_buffer) == self._transitions_cap:
            self._transition_buffer.pop(0)
        self._transition_buffer.append(transition)
    def _replay_transitions(self, num: int) -> List[Transition]:
        # random.choices samples uniformly WITH replacement (duplicates possible).
        return random.choices(self._transition_buffer, k=num)
    def store_trajectory(self, trajectory: Trajectory) -> None:
        """Store a trajectory in this memory mechanism's buffer consisting of a sequence of transitions."""
        # NOTE(review): mirrors store_transition; see the note there.
        self.trajectory = trajectory
        if self._trajectories_cap is not None and len(self._trajectory_buffer) == self._trajectories_cap:
            self._trajectory_buffer.pop(0)
        self._trajectory_buffer.append(trajectory)
    def _replay_trajectories(self, num: int) -> List[Trajectory]:
        # random.choices samples uniformly WITH replacement (duplicates possible).
        return random.choices(self._trajectory_buffer, k=num)
| 3.375 | 3 |
pexels_api/errors.py | quattrococodrilo/unofficial_pexels_api_client | 0 | 12758728 | class BasePexelError(Exception):
pass
class EndpointNotExists(BasePexelError):
    """Raised when a requested endpoint name is not a member of the endpoint enum."""
    def __init__(self, end_point, _enum) -> None:
        valid_names = ", ".join(_enum.__members__.keys())
        self.message = f'Endpoint "{end_point}" not exists. Valid endpoints: {valid_names}'
        super().__init__(self.message)
class ParamNotExists(BasePexelError):
    """Raised when a parameter is not valid for the named endpoint."""
    def __init__(self, name, _enum, param) -> None:
        valid_names = ", ".join(_enum.__members__.keys())
        self.message = f'{param} not exists in {name}. Valid params: {valid_names}'
        super().__init__(self.message)
class IdNotFound(BasePexelError):
    """Raised when an endpoint requires an ID and none was supplied."""
    def __init__(self, end_point) -> None:
        message = f'ID is needed for "{end_point}"'
        self.message = message
        super().__init__(message)
| 2.484375 | 2 |
backend/backend.py | pchamuczynski/sauna | 0 | 12758729 | <reponame>pchamuczynski/sauna
import threading
import inspect
import time
import python_weather
import asyncio
class SaunaBackend:
    """Thread-based controller for house and sauna heating.

    Simulates temperature drift, runs a bang-bang oven control loop, and
    polls an external weather service (python_weather) for the outdoor
    temperature. Mutable settings are guarded by a single lock.
    """
    # Temperature limits (degrees C) for the user-adjustable setpoints.
    MAX_SAUNA_TEMP = 120
    MIN_SAUNA_TEMP = 40
    MAX_HOUSE_TEMP = 30
    MIN_HOUSE_TEMP = 10
    def __init__(self):
        self.lock = threading.Lock()
        # Oven on/off states.
        self.house_oven_on = False
        self.sauna_oven_on = False
        # User setpoints and simulated current temperatures.
        self.sauna_temp_setting = 80
        self.house_temp_setting = 22
        self.current_house_temp = 10
        self.current_sauna_temp = 10
        self.current_external_temp = 20
        self.sauna_heating_enabled = False
        self.house_heating_enabled = False
        # Worker threads run until self.run is cleared by stop().
        self.run = True
        self.temp_update_thread = threading.Thread(target=self.__updateTemp)
        self.temp_update_thread.start()
        self.oven_control_thread = threading.Thread(target=self.__ovenControl)
        self.oven_control_thread.start()
        # The weather poller is a coroutine on a dedicated event loop
        # hosted by a daemon thread.
        self.event_loop = asyncio.new_event_loop()
        self.background_thread = threading.Thread(target=self.startBackgroundLoop, args=(self.event_loop,), daemon=True)
        self.background_thread.start()
        self.task = asyncio.run_coroutine_threadsafe(self.__getWeather(), self.event_loop)
    def stop(self):
        """Stop the worker threads (the daemon weather loop dies with the process)."""
        self.run = False
        self.temp_update_thread.join()
        # BUG FIX: the oven control thread was never joined before.
        self.oven_control_thread.join()
    def increaseHouseTemp(self):
        with self.lock:
            self.house_temp_setting = self.__increment(
                self.house_temp_setting, self.MAX_HOUSE_TEMP)
    def decreaseHouseTemp(self):
        with self.lock:
            self.house_temp_setting = self.__decrement(
                self.house_temp_setting, self.MIN_HOUSE_TEMP)
    def increaseSaunaTemp(self):
        with self.lock:
            # BUG FIX: previously clamped against MAX_HOUSE_TEMP (30).
            self.sauna_temp_setting = self.__increment(
                self.sauna_temp_setting, self.MAX_SAUNA_TEMP)
    def decreaseSaunaTemp(self):
        with self.lock:
            # BUG FIX: previously floored against MIN_HOUSE_TEMP (10).
            self.sauna_temp_setting = self.__decrement(
                self.sauna_temp_setting, self.MIN_SAUNA_TEMP)
    def currentHouseTemp(self):
        with self.lock:
            return self.current_house_temp
    def currentSaunaTemp(self):
        with self.lock:
            return self.current_sauna_temp
    def currentExternalTemp(self):
        with self.lock:
            return self.current_external_temp
    def enableSaunaHeating(self):
        with self.lock:
            self.sauna_heating_enabled = True
    def disableSaunaHeating(self):
        with self.lock:
            self.sauna_heating_enabled = False
            self.__switchSaunaOven(False)
    def enableHouseHeating(self):
        with self.lock:
            self.house_heating_enabled = True
    def disableHouseHeating(self):
        with self.lock:
            self.house_heating_enabled = False
            self.__switchHouseOven(False)
    async def __getWeather(self):
        """Poll the weather service for the outdoor temperature, once per second."""
        while self.run:
            client = python_weather.Client(format=python_weather.METRIC)
            weather = await client.find("Biały Kościół, dolnośląskie, Pl")
            print("Current weather in Biały Kościół: " + str(weather.current.temperature))
            await client.close()
            print(weather)
            print(weather.current)
            self.current_external_temp = int(weather.current.temperature)
            time.sleep(1)
    def __ovenControl(self):
        """Bang-bang control loop: toggle ovens based on current temps vs setpoints."""
        while self.run:
            if self.house_heating_enabled:
                # 1-degree hysteresis band for the house oven.
                if self.current_house_temp < self.house_temp_setting - 1:
                    self.__switchHouseOven(True)
                elif self.current_house_temp > self.house_temp_setting:
                    self.__switchHouseOven(False)
            if self.sauna_heating_enabled:
                # NOTE(review): unlike the house branch, the sauna oven turns
                # off as soon as temp >= setting - 1 -- confirm this asymmetric
                # hysteresis is intended.
                if self.current_sauna_temp < self.sauna_temp_setting - 1:
                    self.__switchSaunaOven(True)
                else:
                    self.__switchSaunaOven(False)
            time.sleep(1)
    def __switchSaunaOven(self, new_state):
        # Log only actual state changes.
        if self.sauna_oven_on != new_state:
            print('switching sauna oven to ' + str(new_state))
            self.sauna_oven_on = new_state
    def __switchHouseOven(self, new_state):
        if self.house_oven_on != new_state:
            print('switching house oven to ' + str(new_state))
            self.house_oven_on = new_state
    def __updateTemp(self):
        """Simulate temperature drift towards the ovens / ambient, once per second."""
        while self.run:
            with self.lock:
                if self.house_oven_on:
                    if self.current_house_temp < self.MAX_HOUSE_TEMP:
                        self.current_house_temp += 1
                elif self.current_house_temp > self.current_external_temp:
                    self.current_house_temp -= 1
                elif self.current_house_temp < self.current_external_temp:
                    self.current_house_temp += 1
                if self.sauna_oven_on:
                    if self.current_sauna_temp < self.MAX_SAUNA_TEMP:
                        self.current_sauna_temp += 1
                elif self.current_sauna_temp > self.current_house_temp:
                    self.current_sauna_temp -= 1
                elif self.current_sauna_temp < self.current_house_temp:
                    self.current_sauna_temp += 1
            time.sleep(1)
    def __increment(self, var, max):
        # Clamp-at-limit increment: only stops when exactly at the limit.
        return var if var == max else var + 1
    def __decrement(self, var, min):
        return var if var == min else var - 1
    def startBackgroundLoop(self, loop: asyncio.AbstractEventLoop):
        """Entry point of the daemon thread hosting the weather event loop."""
        asyncio.set_event_loop(loop)
        loop.run_forever()
| 2.71875 | 3 |
tutorials/Jupyter/inputs/sources/dlmove.py | Oltanis/undergrad_MC_course | 2 | 12758730 | <reponame>Oltanis/undergrad_MC_course<filename>tutorials/Jupyter/inputs/sources/dlmove.py
"""Containers for DL CONTROL file MC move type descriptions
Moves are part of the DL CONTROL file input. Each type of
move gets a class here.
The classification of the available Move types is as follows:
Move
MCMove
AtomMove
MoleculeMove
... [others, some of which are untested in regression tests]
VolumeMove (aka NPT move)
VolumeVectorMove
VolumeOrthoMove
VolumeCubicMove
The dlmove.from_string(dlstr) function provides a factory method to
generate the appropriate Move from DL CONTROL style input.
"""
def parse_atom(dlstr):
    """Parse an atom record of the form 'Name core|shell' into a dict."""
    fields = dlstr.split()
    if len(fields) < 2:
        raise ValueError("Unrecognised Atom: {!s}".format(dlstr))
    return {"id": "{} {}".format(fields[0], fields[1])}
def print_atom(atom):
    """Return atom record for DL CONTROL (the stored 'Name core|shell' id)."""
    return atom["id"]
def parse_molecule(dlstr):
    """Parse a molecule record ('Name' only; extra tokens are ignored).

    Raises ValueError on an empty record, for consistency with the other
    parse_* functions (previously a raw IndexError escaped).
    """
    tokens = dlstr.split()
    if not tokens:
        raise ValueError("Unrecognised Molecule: {!s}".format(dlstr))
    return {"id": tokens[0]}
def print_molecule(molecule):
    """Return molecule record for DL CONTROL (the stored molecule name)."""
    return molecule["id"]
def parse_atom_swap(dlstr):
    """Parse an atom swap record, e.g. 'atom1 core atom2 core'."""
    fields = dlstr.split()
    if len(fields) < 4:
        raise ValueError("Unrecognised atom swap: {!r}".format(dlstr))
    return {"id1": "{} {}".format(fields[0], fields[1]),
            "id2": "{} {}".format(fields[2], fields[3])}
def print_atom_swap(swap):
    """Return the DL CONTROL string for an atom swap record."""
    return " ".join((swap["id1"], swap["id2"]))
def parse_molecule_swap(dlstr):
    """Parse a molecule swap record: two molecule names."""
    fields = dlstr.split()
    if len(fields) < 2:
        raise ValueError("Unrecognised molecule swap: {!r}".format(dlstr))
    return {"id1": fields[0], "id2": fields[1]}
def print_molecule_swap(swap):
    """Return the DL CONTROL string for a molecule swap record."""
    return " ".join((swap["id1"], swap["id2"]))
def parse_atom_gcmc(dlstr):
    """Parse a GCMC atom record: 'Name core|shell chem-potential'.

    Raises ValueError with a uniform diagnostic on any malformed record.
    """
    try:
        tok = dlstr.split()
        atom = {"id": "{} {}".format(tok[0], tok[1]), "molpot": float(tok[2])}
    except (IndexError, TypeError, ValueError):
        # BUG FIX: ValueError added to the tuple; a non-numeric potential
        # previously escaped with float()'s raw message instead of this one.
        raise ValueError("Unrecognised GCMC Atom: {!r}".format(dlstr))
    return atom
def parse_molecule_gcmc(dlstr):
    """Parse a GCMC molecule record: 'Name chem-potential'.

    Raises ValueError with a uniform diagnostic on any malformed record.
    """
    try:
        tok = dlstr.split()
        molecule = {"id": tok[0], "molpot": float(tok[1])}
    except (IndexError, TypeError, ValueError):
        # BUG FIX: ValueError added to the tuple; a non-numeric potential
        # previously escaped with float()'s raw message instead of this one.
        raise ValueError("Unrecognised GCMC Molecule: {!r}".format(dlstr))
    return molecule
def print_gcmc(gcmc):
    """Return the DL CONTROL string for a GCMC (chemical potential) record."""
    return "{id} {molpot}".format(**gcmc)
class Move(object):
    """Base class for all move types."""
    # DL CONTROL keyword for the move type; set by concrete subclasses.
    key = None
    @classmethod
    def from_string(cls, dlstr):
        """Create an instance from DL CONTROL input; subclasses must override."""
        # BUG FIX: the original built the exception but never raised it,
        # so calling the base implementation silently returned None.
        raise NotImplementedError("Should be implemented by subclass")
class MCMove(Move):
    """MC moves involve atoms or molecules"""
    # Placeholders; concrete subclasses override these with the matching
    # module-level parse_*/print_* functions.
    parse_mover = staticmethod(None)
    print_mover = staticmethod(None)
    def __init__(self, pfreq, movers):
        """pfreq (int): percentage probability of move per step
        movers (list): parsed atom/molecule records"""
        self.pfreq = pfreq
        self.movers = movers
    def __str__(self):
        """Return well-formed DL CONTROL block"""
        strme = []
        move = "move {} {} {}".format(self.key, len(self.movers), self.pfreq)
        strme.append(move)
        for mover in self.movers:
            strme.append(self.print_mover(mover))
        return "\n".join(strme)
    def __repr__(self):
        """Return a readable form"""
        repme = "pfreq= {!r}, movers= {!r}".format(self.pfreq, self.movers)
        return "{}({})".format(type(self).__name__, repme)
    @classmethod
    def from_string(cls, dlstr):
        """Generate an instance from a DL CONTROL entry"""
        lines = dlstr.splitlines()
        # First line is the 'move ...' statement; only its pfreq field is
        # used here -- the mover count comes from the remaining lines.
        line = lines.pop(0)
        pfreq = MCMove._parse_move_statement(line)[2]
        movers = []
        for line in lines:
            mover = cls.parse_mover(line)
            movers.append(mover)
        return cls(pfreq, movers)
    @staticmethod
    def _parse_move_statement(dlstr):
        """Parse 'move type nmove pfreq'; return (mtype, nmove, pfreq).

        Raises ValueError on a malformed statement."""
        try:
            tokens = dlstr.lower().split()
            if tokens[0] != "move":
                raise ValueError("Expected 'move' statement")
            mtype, nmove, pfreq = tokens[1], int(tokens[2]), int(tokens[3])
        except IndexError:
            raise ValueError("Badly formed 'move' statement?")
        return mtype, nmove, pfreq
class GCMove(Move):
    """Grand Canonical insertions have an extra minimum insertion
    distance parameter cf standard MCMove types
    """
    # Placeholders; concrete subclasses override with parse_*_gcmc/print_gcmc.
    parse_mover = staticmethod(None)
    print_mover = staticmethod(None)
    def __init__(self, pfreq, rmin, movers):
        """Initialise GCMove parameters
        Arguments:
        pfreq (integer): percentage probability of move per step
        rmin (float): grace distance (minimum insertion distance)
        movers (list): parsed GCMC atom/molecule records
        """
        self.pfreq = pfreq
        self.rmin = rmin
        self.movers = movers
    def __str__(self):
        """Return well-formed DL CONTROL block"""
        strme = []
        move = "move {} {} {} {}".format(self.key, len(self.movers),
                                         self.pfreq, self.rmin)
        strme.append(move)
        for mover in self.movers:
            strme.append(self.print_mover(mover))
        return "\n".join(strme)
    def __repr__(self):
        """Return a GCMove (subclass) representation"""
        repme = "pfreq= {!r}, rmin= {!r}, movers= {!r}"\
            .format(self.pfreq, self.rmin, self.movers)
        return "{}({})".format(type(self).__name__, repme)
    @classmethod
    def from_string(cls, dlstr):
        """Generate instance from well-formed DL CONTROL string"""
        lines = dlstr.splitlines()
        # Only pfreq and rmin are used from the 'move' line; the mover
        # count is implied by the remaining lines.
        line = lines.pop(0)
        pfreq, rmin = GCMove._parse_move_statement(line)[2:]
        movers = []
        for line in lines:
            mover = cls.parse_mover(line)
            movers.append(mover)
        return cls(pfreq, rmin, movers)
    @staticmethod
    def _parse_move_statement(dlstr):
        """Parse 'move type nmove pfreq rmin'; return (mtype, nmove, pfreq, rmin).

        Raises ValueError on a malformed statement."""
        try:
            tokens = dlstr.lower().split()
            if tokens[0] != "move":
                raise ValueError("Expected 'move' statement")
            mtype, nmove, pfreq, rmin = \
                tokens[1], int(tokens[2]), int(tokens[3]), float(tokens[4])
        except IndexError:
            raise ValueError("Badly formed 'move' statement?")
        return mtype, nmove, pfreq, rmin
class VolumeMove(Move):
    """Container for volume (NPT) moves"""
    def __init__(self, pfreq, sampling=None):
        """Initialise container
        Arguments:
        pfreq (integer): percentage probability of move per step
        sampling (string): optional sampling-type description
        """
        self.pfreq = pfreq
        self.sampling = sampling
    def __str__(self):
        """Return well-formed DL CONTROL file string"""
        # The sampling type is optional in the CONTROL file format.
        if self.sampling is not None:
            strme = "move volume {} {} {}"\
                .format(self.key, self.sampling, self.pfreq)
        else:
            strme = "move volume {} {}".format(self.key, self.pfreq)
        return strme
    def __repr__(self):
        """Return a readable string"""
        repme = "pfreq= {!r}, sampling= {!r}".format(self.pfreq, self.sampling)
        return "{}({})".format(type(self).__name__, repme)
    @classmethod
    def from_string(cls, dlstr):
        """E.g., 'move volume vector|ortho|cubic [sampling-type] pfreq' """
        tokens = dlstr.split()
        try:
            sampling = None
            # pfreq is always the final token.
            pfreq = int(tokens[-1])
            # sampling-type is an optional one or two (string) tokens
            if len(tokens) == 5:
                sampling = tokens[3]
            if len(tokens) == 6:
                sampling = "{} {}".format(tokens[3], tokens[4])
        except (IndexError, TypeError):
            raise ValueError("VolumeMove: unrecognised: {!r}".format(dlstr))
        return cls(pfreq, sampling)
# Concrete MCMove subclasses: each supplies the DL CONTROL keyword ('key')
# plus the module-level parse/print functions for its mover records.
class AtomMove(MCMove):
    """Concrete class for atom moves"""
    key = "atom"
    parse_mover = staticmethod(parse_atom)
    print_mover = staticmethod(print_atom)
class MoleculeMove(MCMove):
    """Concrete class for molecule moves"""
    key = "molecule"
    parse_mover = staticmethod(parse_molecule)
    print_mover = staticmethod(print_molecule)
class RotateMoleculeMove(MCMove):
    """Concrete class for rotate molecule moves"""
    key = "rotatemol"
    parse_mover = staticmethod(parse_molecule)
    print_mover = staticmethod(print_molecule)
class SwapAtomMove(MCMove):
    """Concrete class for swap atom moves"""
    key = "swapatoms"
    parse_mover = staticmethod(parse_atom_swap)
    print_mover = staticmethod(print_atom_swap)
class SwapMoleculeMove(MCMove):
    """Concrete class for swap molecule moves"""
    key = "swapmols"
    parse_mover = staticmethod(parse_molecule_swap)
    print_mover = staticmethod(print_molecule_swap)
# Concrete GCMove subclasses for Grand Canonical insertions.
class InsertAtomMove(GCMove):
    """Concrete class for Grand Canonical atom moves"""
    key = "gcinsertatom"
    parse_mover = staticmethod(parse_atom_gcmc)
    print_mover = staticmethod(print_gcmc)
class InsertMoleculeMove(GCMove):
    """Concrete class for Grand Canonical molecule moves"""
    key = "gcinsertmol"
    parse_mover = staticmethod(parse_molecule_gcmc)
    print_mover = staticmethod(print_gcmc)
# Placeholder move types: no parse/print functions are wired up yet.
class SemiWidomAtomMove(MCMove):
    """No examples are available"""
    key = "semiwidomatoms"
    # Format needs to be confirmed
class SemiGrandAtomMove(MCMove):
    """No examples are available"""
    key = "semigrandatoms"
    # Format needs to be confirmed
class SemiGrandMoleculeMove(MCMove):
    """No examples are available"""
    key = "semigrandmol"
    # Format needs to be confirmed
# Concrete VolumeMove subclasses; 'key' selects the box-shape keyword.
class VolumeVectorMove(VolumeMove):
    """Concrete class for vector volume moves"""
    key = "vector"
class VolumeOrthoMove(VolumeMove):
    """Concrete class for ortho volume moves"""
    key = "ortho"
class VolumeCubicMove(VolumeMove):
    """Concrete class for cubic volume moves"""
    key = "cubic"
def from_string(dlstr):
    """Factory method to return an instance from a well-formed
    DL CONTROL file move statement (a block of 1 plus n lines)
    Argument:
    dlstr (string) move statement plus atom/molecule
    descriptions
    """
    moves_volume = {"vector": VolumeVectorMove,
                    "ortho": VolumeOrthoMove,
                    "cubic": VolumeCubicMove}
    # NOTE(review): the swap and semi-grand move classes defined above are
    # not registered here, so their keys are rejected by this factory --
    # confirm whether that is deliberate (their formats are unconfirmed).
    moves_mc = {"atom": AtomMove,
                "molecule": MoleculeMove,
                "rotatemol": RotateMoleculeMove,
                "gcinsertatom": InsertAtomMove,
                "gcinsertmol": InsertMoleculeMove}
    lines = dlstr.splitlines()
    tokens = lines[0].lower().split()
    if tokens[0] != "move" or len(tokens) < 4:
        raise ValueError("Expected: 'move key ...': got {!r}".format(lines[0]))
    key = tokens[1]
    # We need to allow for possible DL key abbreviations
    if key.startswith("atom"):
        key = "atom"
    if key.startswith("molecu"):
        key = "molecule"
    if key.startswith("rotatemol"):
        key = "rotatemol"
    inst = None
    if key == "volume":
        # Volume moves dispatch on the box-shape sub-keyword.
        subkey = tokens[2]
        if subkey in moves_volume:
            inst = moves_volume[subkey].from_string(dlstr)
    else:
        if key in moves_mc:
            inst = moves_mc[key].from_string(dlstr)
    if inst is None:
        raise ValueError("Move unrecognised: {!r}".format(dlstr))
    return inst
| 3.09375 | 3 |
qf_31_对象属性和类属性.py | tianming-jianai/QFPython | 0 | 12758731 | class Person(object):
    type = '人类' # class attribute, shared by all instances (value means "human")
    def __init__(self, name, age):
        """Store the per-instance name and age."""
        self.name = name # instance attribute
        self.age = age # instance attribute
# The object p is created from the Person class.
p = Person('zs', 18)
# Every instance created gets its own 'name' and 'age' attributes.
# Instance attributes are stored separately on each instance object;
# they are independent between instances and do not affect one another.
# Reading a class attribute works through the class object or an instance.
print(Person.type)
print(p.type)
p.type = 'monkey' # does not modify the class attribute; it adds a new instance attribute
print(p.type)
print(Person.type)
# Class attributes can only be modified through the class object;
# assigning through an instance just shadows them.
Person.type = 'human'
print(p.type)
| 4.125 | 4 |
src/haizea/core/scheduler/vm_scheduler.py | Hamdy/haizea | 1 | 12758732 | # -------------------------------------------------------------------------- #
# Copyright 2006-2009, University of Chicago #
# Copyright 2008-2009, Distributed Systems Architecture Group, Universidad #
# Complutense de Madrid (dsa-research.org) #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# -------------------------------------------------------------------------- #
"""This module provides the main classes for Haizea's VM Scheduler. All the
scheduling code that decides when and where a lease is scheduled is contained
in the VMScheduler class (except for the code that specifically decides
what physical machines each virtual machine is mapped to, which is factored out
into the "mapper" module). This module also provides the classes for the
reservations that will be placed in the slot table and correspond to VMs.
"""
import haizea.common.constants as constants
from haizea.common.utils import round_datetime_delta, round_datetime, estimate_transfer_time, pretty_nodemap, get_config, get_clock, get_persistence
from haizea.core.leases import Lease, Capacity
from haizea.core.scheduler.slottable import ResourceReservation, ResourceTuple
from haizea.core.scheduler import ReservationEventHandler, RescheduleLeaseException, NormalEndLeaseException, EnactmentError, NotSchedulableException, InconsistentScheduleError, InconsistentLeaseStateError, MigrationResourceReservation
from operator import attrgetter, itemgetter
from mx.DateTime import TimeDelta
import logging
class VMScheduler(object):
"""The Haizea VM Scheduler
This class is responsible for taking a lease and scheduling VMs to satisfy
the requirements of that lease.
"""
    def __init__(self, slottable, resourcepool, mapper, max_in_future):
        """Constructor
        The constructor does little more than create the VM scheduler's
        attributes. However, it does expect (in the arguments) a fully-constructed
        SlotTable, ResourcePool, and Mapper (these are constructed in the
        Manager's constructor).
        Arguments:
        slottable -- Slot table
        resourcepool -- Resource pool where enactment commands will be sent to
        mapper -- Mapper
        max_in_future -- Maximum number of leases schedulable in the future
            (-1 means unlimited; see can_schedule_in_future)
        """
        self.slottable = slottable
        self.resourcepool = resourcepool
        self.mapper = mapper
        self.logger = logging.getLogger("VMSCHED")
        # Register the handlers for the types of reservations used by
        # the VM scheduler
        self.handlers = {}
        self.handlers[VMResourceReservation] = ReservationEventHandler(
            sched = self,
            on_start = VMScheduler._handle_start_vm,
            on_end = VMScheduler._handle_end_vm)
        self.handlers[ShutdownResourceReservation] = ReservationEventHandler(
            sched = self,
            on_start = VMScheduler._handle_start_shutdown,
            on_end = VMScheduler._handle_end_shutdown)
        self.handlers[SuspensionResourceReservation] = ReservationEventHandler(
            sched = self,
            on_start = VMScheduler._handle_start_suspend,
            on_end = VMScheduler._handle_end_suspend)
        self.handlers[ResumptionResourceReservation] = ReservationEventHandler(
            sched = self,
            on_start = VMScheduler._handle_start_resume,
            on_end = VMScheduler._handle_end_resume)
        self.handlers[MemImageMigrationResourceReservation] = ReservationEventHandler(
            sched = self,
            on_start = VMScheduler._handle_start_migrate,
            on_end = VMScheduler._handle_end_migrate)
        # Backfilling state: set of leases currently scheduled in the future.
        self.max_in_future = max_in_future
        self.future_leases = set()
    def schedule(self, lease, nexttime, earliest):
        """ The scheduling function
        This particular function doesn't do much except call __schedule_asap
        and __schedule_exact (which do all the work).
        Arguments:
        lease -- Lease to schedule
        nexttime -- The next time at which the scheduler can allocate resources.
        earliest -- The earliest possible starting times on each physical node
        """
        # Best-effort leases may be placed in the future (backfilling) if
        # allowed; immediate leases never are.
        if lease.get_type() == Lease.BEST_EFFORT:
            return self.__schedule_asap(lease, nexttime, earliest, allow_in_future = self.can_schedule_in_future())
        elif lease.get_type() == Lease.ADVANCE_RESERVATION:
            return self.__schedule_exact(lease, nexttime, earliest)
        elif lease.get_type() == Lease.IMMEDIATE:
            return self.__schedule_asap(lease, nexttime, earliest, allow_in_future = False)
        # NOTE(review): falls through (returns None) for any other lease
        # type -- presumably validated upstream; confirm.
    def estimate_migration_time(self, lease):
        """ Estimates the time required to migrate a lease's VMs
        This function conservatively estimates that all the VMs are going to
        be migrated to other nodes. Since all the transfers are intra-node,
        the bottleneck is the transfer from whatever node has the most
        memory to transfer.
        Note that this method only estimates the time to migrate the memory
        state files for the VMs. Migrating the software environment (which may
        or may not be a disk image) is the responsibility of the preparation
        scheduler, which has it's own set of migration scheduling methods.
        Arguments:
        lease -- Lease that might be migrated
        """
        migration = get_config().get("migration")
        if migration == constants.MIGRATE_YES:
            vmrr = lease.get_last_vmrr()
            # Sum up the memory each physical node would have to send.
            # NOTE(review): a pnode hosting several vnodes adds its aggregate
            # RES_MEM once per hosted vnode -- looks like an overcount; confirm.
            mem_in_pnode = dict([(pnode,0) for pnode in set(vmrr.nodes.values())])
            for pnode in vmrr.nodes.values():
                mem = vmrr.resources_in_pnode[pnode].get_by_type(constants.RES_MEM)
                mem_in_pnode[pnode] += mem
            # The busiest sender bounds the total migration time.
            max_mem_to_transfer = max(mem_in_pnode.values())
            bandwidth = self.resourcepool.info.get_migration_bandwidth()
            return estimate_transfer_time(max_mem_to_transfer, bandwidth)
        elif migration == constants.MIGRATE_YES_NOTRANSFER:
            return TimeDelta(seconds=0)
        # NOTE(review): returns None implicitly for any other migration
        # setting (e.g. disabled) -- confirm callers guard against this.
    def schedule_migration(self, lease, vmrr, nexttime):
        """ Schedules migrations for a lease
        Arguments:
        lease -- Lease being migrated
        vmrr -- The VM reservation before which the migration will take place
        nexttime -- The next time at which the scheduler can allocate resources.
        """
        # Determine what migrations have to be done. We do this by looking at
        # the mapping in the previous VM RR and in the new VM RR
        last_vmrr = lease.get_last_vmrr()
        vnode_migrations = dict([(vnode, (last_vmrr.nodes[vnode], vmrr.nodes[vnode])) for vnode in vmrr.nodes])
        # Determine if we actually have to migrate
        mustmigrate = False
        for vnode in vnode_migrations:
            if vnode_migrations[vnode][0] != vnode_migrations[vnode][1]:
                mustmigrate = True
                break
        if not mustmigrate:
            return []
        # If Haizea is configured to migrate without doing any transfers,
        # then we just return a nil-duration migration RR
        if get_config().get("migration") == constants.MIGRATE_YES_NOTRANSFER:
            start = nexttime
            end = nexttime
            res = {}
            migr_rr = MemImageMigrationResourceReservation(lease, start, end, res, vmrr, vnode_migrations)
            migr_rr.state = ResourceReservation.STATE_SCHEDULED
            return [migr_rr]
        # Figure out what migrations can be done simultaneously.
        # Greedy rounds: a physical node may participate in at most one
        # migration per round, whether as origin or destination.
        migrations = []
        while len(vnode_migrations) > 0:
            pnodes = set()
            migration = {}
            for vnode in vnode_migrations:
                origin = vnode_migrations[vnode][0]
                dest = vnode_migrations[vnode][1]
                if not origin in pnodes and not dest in pnodes:
                    migration[vnode] = vnode_migrations[vnode]
                    pnodes.add(origin)
                    pnodes.add(dest)
            for vnode in migration:
                del vnode_migrations[vnode]
            migrations.append(migration)
        # Create migration RRs. Rounds run back to back, starting after the
        # last post-RR (e.g. suspension) of the previous VM reservation.
        start = max(last_vmrr.post_rrs[-1].end, nexttime)
        bandwidth = self.resourcepool.info.get_migration_bandwidth()
        migr_rrs = []
        for m in migrations:
            vnodes_to_migrate = m.keys()
            # Each round lasts as long as its largest memory transfer.
            max_mem_to_migrate = max([lease.requested_resources[vnode].get_quantity(constants.RES_MEM) for vnode in vnodes_to_migrate])
            migr_time = estimate_transfer_time(max_mem_to_migrate, bandwidth)
            end = start + migr_time
            res = {}
            for (origin,dest) in m.values():
                # Reserve outbound bandwidth on the origin node and inbound
                # bandwidth on the destination node for this round.
                resorigin = Capacity([constants.RES_NETOUT])
                resorigin.set_quantity(constants.RES_NETOUT, bandwidth)
                resdest = Capacity([constants.RES_NETIN])
                resdest.set_quantity(constants.RES_NETIN, bandwidth)
                res[origin] = self.slottable.create_resource_tuple_from_capacity(resorigin)
                res[dest] = self.slottable.create_resource_tuple_from_capacity(resdest)
            migr_rr = MemImageMigrationResourceReservation(lease, start, start + migr_time, res, vmrr, m)
            migr_rr.state = ResourceReservation.STATE_SCHEDULED
            migr_rrs.append(migr_rr)
            start = end
        return migr_rrs
    def cancel_vm(self, vmrr):
        """ Cancels a VM resource reservation
        Arguments:
        vmrr -- VM RR to be cancelled
        """
        # If this VM RR is part of a lease that was scheduled in the future,
        # remove that lease from the set of future leases.
        if vmrr.lease in self.future_leases:
            self.future_leases.remove(vmrr.lease)
            get_persistence().persist_future_leases(self.future_leases)
        # If there are any pre-RRs that are scheduled, remove them
        # (pre-RRs in other states, e.g. already active, are left alone).
        for rr in vmrr.pre_rrs:
            if rr.state == ResourceReservation.STATE_SCHEDULED:
                self.slottable.remove_reservation(rr)
        # If there are any post RRs, remove them
        for rr in vmrr.post_rrs:
            self.slottable.remove_reservation(rr)
        # Remove the reservation itself
        self.slottable.remove_reservation(vmrr)
def can_suspend_at(self, lease, t):
""" Determines if it is possible to suspend a lease before a given time
Arguments:
vmrr -- VM RR to be preempted
t -- Time by which the VM must be preempted
"""
# TODO: Make more general, should determine vmrr based on current time
# This won't currently break, though, since the calling function
# operates on the last VM RR.
vmrr = lease.get_last_vmrr()
time_until_suspend = t - vmrr.start
min_duration = self.__compute_scheduling_threshold(lease)
can_suspend = time_until_suspend >= min_duration
return can_suspend
    def preempt_vm(self, vmrr, t):
        """ Preempts a VM reservation at a given time
        This method assumes that the lease is, in fact, preemptable,
        that the VMs are running at the given time, and that there is
        enough time to suspend the VMs before the given time (all these
        checks are done in the lease scheduler).
        Arguments:
        vmrr -- VM RR to be preempted
        t -- Time by which the VM must be preempted
        """
        # Save original start and end time of the vmrr
        # (needed so the slot table can relocate the updated reservation).
        old_start = vmrr.start
        old_end = vmrr.end
        # Schedule the VM suspension
        self.__schedule_suspension(vmrr, t)
        # Update the VMRR in the slot table
        self.slottable.update_reservation(vmrr, old_start, old_end)
        # Add the suspension RRs to the VM's post-RRs
        for susprr in vmrr.post_rrs:
            self.slottable.add_reservation(susprr)
def get_future_reschedulable_leases(self):
""" Returns a list of future leases that are reschedulable.
Currently, this list is just the best-effort leases scheduled
in the future as determined by the backfilling algorithm.
Advance reservation leases, by their nature, cannot be
rescheduled to find a "better" starting time.
"""
return list(self.future_leases)
def can_schedule_in_future(self):
    """ Returns True if the backfilling algorithm would allow a lease
    to be scheduled in the future.
    """
    # A max_in_future of -1 means there is no limit on how many leases
    # may be scheduled in the future.
    unlimited = (self.max_in_future == -1)
    return unlimited or len(self.future_leases) < self.max_in_future
def get_utilization(self, time):
    """ Computes resource utilization (currently just CPU-based)

    This utilization information shows what
    portion of the physical resources is used by each type of reservation
    (e.g., 70% are running a VM, 5% are doing suspensions, etc.)

    Arguments:
    time -- Time at which to determine utilization

    Returns a dictionary mapping reservation class -> fraction of total
    CPU capacity (None maps to the unused fraction).
    """
    total = self.slottable.get_total_capacity(restype = constants.RES_CPU)
    util = {}
    reservations = self.slottable.get_reservations_at(time)
    for r in reservations:
        for node in r.resources_in_pnode:
            if isinstance(r, VMResourceReservation):
                use = r.resources_in_pnode[node].get_by_type(constants.RES_CPU)
                util[type(r)] = util.get(type(r), 0.0) + use
            elif isinstance(r, (SuspensionResourceReservation, ResumptionResourceReservation, ShutdownResourceReservation)):
                # Suspend/resume/shutdown RRs occupy the CPU allocated to
                # the VMs they operate on, so look at the associated VMRR.
                use = r.vmrr.resources_in_pnode[node].get_by_type(constants.RES_CPU)
                util[type(r)] = util.get(type(r), 0.0) + use
    # Whatever capacity is left over is unused
    util[None] = total - sum(util.values())
    if total != 0:
        for k in util:
            util[k] /= total
    return util
def __schedule_exact(self, lease, nexttime, earliest):
    """ Schedules VMs that must start at an exact time

    This type of lease is "easy" to schedule because we know the exact
    start time, which means that's the only starting time we have to
    check. So, this method does little more than call the mapper.

    Arguments:
    lease -- Lease to schedule
    nexttime -- The next time at which the scheduler can allocate resources.
    earliest -- The earliest possible starting times on each physical node

    Returns (vmrr, preemptions); raises NotSchedulableException when no
    mapping exists in the requested interval.
    """
    # Determine the start and end time
    start = lease.start.requested
    end = start + lease.duration.requested

    # Convert Capacity objects in lease object into ResourceTuples that
    # we can hand over to the mapper.
    requested_resources = dict([(k, self.slottable.create_resource_tuple_from_capacity(v)) for k, v in lease.requested_resources.items()])

    # Let the mapper do its magic
    mapping, actualend, preemptions = self.mapper.map(lease,
                                                      requested_resources,
                                                      start,
                                                      end,
                                                      strictend = True)

    # If no mapping was found, tell the lease scheduler about it
    if mapping is None:
        raise NotSchedulableException("Not enough resources in specified interval")

    # Create VM resource reservations, aggregating the per-vnode
    # resources of vnodes mapped to the same physical node.
    res = {}
    for (vnode, pnode) in mapping.items():
        vnode_res = requested_resources[vnode]
        if pnode in res:
            res[pnode].incr(vnode_res)
        else:
            res[pnode] = ResourceTuple.copy(vnode_res)

    vmrr = VMResourceReservation(lease, start, end, mapping, res)
    vmrr.state = ResourceReservation.STATE_SCHEDULED

    # Schedule shutdown for the VM
    self.__schedule_shutdown(vmrr)

    return vmrr, preemptions
def __schedule_asap(self, lease, nexttime, earliest, allow_in_future = None):
    """ Schedules VMs as soon as possible

    This method is a bit more complex that __schedule_exact because
    we need to figure out what "as soon as possible" actually is.
    This involves attempting several mappings, at different points
    in time, before we can schedule the lease.

    This method will always check, at least, if the lease can be scheduled
    at the earliest possible moment at which the lease could be prepared
    (e.g., if the lease can't start until 1 hour in the future because that's
    the earliest possible time at which the disk images it requires can
    be transferred, then that's when the scheduler will check). Note, however,
    that this "earliest possible moment" is determined by the preparation
    scheduler.

    Additionally, if the lease can't be scheduled at the earliest
    possible moment, it can also check if the lease can be scheduled
    in the future. This partially implements a backfilling algorithm
    (the maximum number of future leases is stored in the max_in_future
    attribute of VMScheduler), the other part being implemented in the
    __process_queue method of LeaseScheduler.

    Note that, if the method is allowed to scheduled in the future,
    and assuming that the lease doesn't request more resources than
    the site itself, this method will always schedule the VMs succesfully
    (since there's always an empty spot somewhere in the future).

    Arguments:
    lease -- Lease to schedule
    nexttime -- The next time at which the scheduler can allocate resources.
    earliest -- The earliest possible starting times on each physical node
    allow_in_future -- Boolean indicating whether the scheduler is
    allowed to schedule the VMs in the future.
    """

    #
    # STEP 1: PROLEGOMENA
    #

    remaining_duration = lease.duration.get_remaining_duration()
    shutdown_time = self.__estimate_shutdown_time(lease)

    # We might be scheduling a suspended lease. If so, we will
    # also have to schedule its resumption. Right now, just
    # figure out if this is such a lease.
    mustresume = (lease.get_state() in (Lease.STATE_SUSPENDED_PENDING, Lease.STATE_SUSPENDED_QUEUED, Lease.STATE_SUSPENDED_SCHEDULED))

    # This is the minimum duration that we must be able to schedule.
    # See __compute_scheduling_threshold for more details.
    min_duration = self.__compute_scheduling_threshold(lease)

    #
    # STEP 2: FIND THE CHANGEPOINTS
    #

    # Find the changepoints, and the available nodes at each changepoint
    # We need to do this because the preparation scheduler may have
    # determined that some nodes might require more time to prepare
    # than others (e.g., if using disk image caching, some nodes
    # might have the required disk image predeployed, while others
    # may require transferring the image to that node).
    #
    # The end result of this step is a list (cps) where each entry
    # is a (t,nodes) pair, where "t" is the time of the changepoint
    # and "nodes" is the set of nodes that are available at that time.

    if not mustresume:
        # If this is not a suspended lease, then the changepoints
        # are determined based on the "earliest" parameter.
        cps = [(node, e.time) for node, e in earliest.items()]
        cps.sort(key=itemgetter(1))
        curcp = None
        changepoints = []
        nodes = []
        for node, time in cps:
            nodes.append(node)
            if time != curcp:
                changepoints.append([time, set(nodes)])
                curcp = time
            else:
                changepoints[-1][1] = set(nodes)
    else:
        # If the lease is suspended, we take into account that, if
        # migration is disabled, we can only schedule the lease
        # on the nodes it is currently scheduled on.
        if get_config().get("migration") == constants.MIGRATE_NO:
            vmrr = lease.get_last_vmrr()
            onlynodes = set(vmrr.nodes.values())
        else:
            onlynodes = None
        changepoints = list(set([x.time for x in earliest.values()]))
        changepoints.sort()
        changepoints = [(x, onlynodes) for x in changepoints]

    # If we can schedule VMs in the future,
    # we also consider future changepoints
    if allow_in_future:
        res = self.slottable.get_reservations_ending_after(changepoints[-1][0])
        # We really only care about changepoints where VMs end (which is
        # when resources become available)
        futurecp = [r.get_final_end() for r in res if isinstance(r, VMResourceReservation)]
        # Corner case: Sometimes we're right in the middle of a ShutdownReservation, so it won't be
        # included in futurecp.
        futurecp += [r.end for r in res if isinstance(r, ShutdownResourceReservation) and not r.vmrr in res]
        if not mustresume:
            futurecp = [(p, None) for p in futurecp]
        else:
            futurecp = [(p, onlynodes) for p in futurecp]
    else:
        futurecp = []

    #
    # STEP 3: FIND A MAPPING
    #

    # In this step we find a starting time and a mapping for the VMs,
    # which involves going through the changepoints in order and seeing
    # if we can find a mapping.
    # Most of the work is done in the __find_fit_at_points

    # If resuming, we also have to allocate enough time for the resumption
    if mustresume:
        duration = remaining_duration + self.__estimate_resume_time(lease)
    else:
        duration = remaining_duration

    duration += shutdown_time

    in_future = False

    # Convert Capacity objects in lease object into ResourceTuples that
    # we can hand over to the mapper.
    requested_resources = dict([(k, self.slottable.create_resource_tuple_from_capacity(v)) for k, v in lease.requested_resources.items()])

    # First, try to find a mapping assuming we can't schedule in the future
    start, end, mapping, preemptions = self.__find_fit_at_points(lease,
                                                                 requested_resources,
                                                                 changepoints,
                                                                 duration,
                                                                 min_duration)

    if start is None and not allow_in_future:
        # We did not find a suitable starting time. This can happen
        # if we're unable to schedule in the future
        raise NotSchedulableException("Could not find enough resources for this request")

    # If we haven't been able to fit the lease, check if we can
    # reserve it in the future
    if start is None and allow_in_future:
        start, end, mapping, preemptions = self.__find_fit_at_points(lease,
                                                                     requested_resources,
                                                                     futurecp,
                                                                     duration,
                                                                     min_duration
                                                                     )
        # TODO: The following will also raise an exception if a lease
        # makes a request that could *never* be satisfied with the
        # current resources.
        if start is None:
            raise InconsistentScheduleError("Could not find a mapping in the future (this should not happen)")
        in_future = True

    #
    # STEP 4: CREATE RESERVATIONS
    #

    # At this point, the lease is feasible. We just need to create
    # the reservations for the VMs and, possibly, for the VM resumption,
    # suspension, and shutdown.

    # VM resource reservation
    res = {}
    for (vnode, pnode) in mapping.items():
        vnode_res = requested_resources[vnode]
        if pnode in res:
            res[pnode].incr(vnode_res)
        else:
            res[pnode] = ResourceTuple.copy(vnode_res)

    vmrr = VMResourceReservation(lease, start, end, mapping, res)
    vmrr.state = ResourceReservation.STATE_SCHEDULED

    # VM resumption resource reservation
    if mustresume:
        self.__schedule_resumption(vmrr, start)

    # If the mapper couldn't find a mapping for the full duration
    # of the lease, then we need to schedule a suspension.
    mustsuspend = (vmrr.end - vmrr.start) < remaining_duration
    if mustsuspend:
        self.__schedule_suspension(vmrr, end)
    else:
        # Compensate for any overestimation
        if (vmrr.end - vmrr.start) > remaining_duration + shutdown_time:
            vmrr.end = vmrr.start + remaining_duration + shutdown_time
        self.__schedule_shutdown(vmrr)

    if in_future:
        self.future_leases.add(lease)
        get_persistence().persist_future_leases(self.future_leases)

    susp_str = res_str = ""
    if mustresume:
        res_str = " (resuming)"
    if mustsuspend:
        susp_str = " (suspending)"
    self.logger.info("Lease #%i has been scheduled on nodes %s from %s%s to %s%s" % (lease.id, mapping.values(), start, res_str, end, susp_str))

    return vmrr, preemptions
def __find_fit_at_points(self, lease, requested_resources, changepoints, duration, min_duration):
    """ Tries to map a lease in a given list of points in time

    This method goes through a given list of points in time and tries
    to find the earliest time at which that lease can be allocated
    resources.

    Arguments:
    lease -- Lease to schedule
    requested_resources -- A dictionary of lease node -> ResourceTuple.
    changepoints -- The list of changepoints
    duration -- The amount of time requested
    min_duration -- The minimum amount of time that should be allocated

    Returns:
    start -- The time at which resources have been found for the lease
    actualend -- The time at which the resources won't be available. Note
    that this is not necessarily (start + duration) since the mapper
    might be unable to find enough resources for the full requested duration.
    mapping -- A mapping of lease nodes to physical nodes
    preemptions -- A list of reservations that would have to be preempted
    to satisfy the mapping.

    (if no mapping is found, all these values are set to None)
    """
    found = False

    for time, onlynodes in changepoints:
        start = time
        end = start + duration
        self.logger.debug("Attempting to map from %s to %s" % (start, end))

        # If suspension is disabled, we will only accept mappings that go
        # from "start" strictly until "end".
        susptype = get_config().get("suspension")
        if susptype == constants.SUSPENSION_NONE or (lease.numnodes > 1 and susptype == constants.SUSPENSION_SERIAL):
            strictend = True
        else:
            strictend = False

        # Let the mapper work its magic
        mapping, actualend, preemptions = self.mapper.map(lease,
                                                          requested_resources,
                                                          start,
                                                          end,
                                                          strictend = strictend,
                                                          onlynodes = onlynodes)

        # We have a mapping; we still have to check if it satisfies
        # the minimum duration.
        if mapping is not None:
            if actualend < end:
                actualduration = actualend - start
                if actualduration >= min_duration:
                    self.logger.debug("This lease can be scheduled from %s to %s (will require suspension)" % (start, actualend))
                    found = True
                    break
                else:
                    self.logger.debug("This starting time does not allow for the requested minimum duration (%s < %s)" % (actualduration, min_duration))
            else:
                self.logger.debug("This lease can be scheduled from %s to %s (full duration)" % (start, end))
                found = True
                break

    if found:
        return start, actualend, mapping, preemptions
    else:
        return None, None, None, None
def __compute_susprem_times(self, vmrr, time, direction, exclusion, rate, override = None):
    """ Computes the times at which suspend/resume operations would have to start

    When suspending or resuming a VM, the VM's memory is dumped to a
    file on disk. To correctly estimate the time required to suspend
    a lease with multiple VMs, Haizea makes sure that no two
    suspensions/resumptions happen at the same time (e.g., if eight
    memory files were being saved at the same time to disk, the disk's
    performance would be reduced in a way that is not as easy to estimate
    as if only one file were being saved at a time). Based on a number
    of parameters, this method estimates the times at which the
    suspend/resume commands would have to be sent to guarantee this
    exclusion.

    Arguments:
    vmrr -- The VM reservation that will be suspended/resumed
    time -- The time at which the suspend should end or the resume should start.
    direction -- DIRECTION_BACKWARD: start at "time" and compute the times going
    backward (for suspensions) DIRECTION_FORWARD: start at time "time" and compute
    the times going forward.
    exclusion -- SUSPRES_EXCLUSION_GLOBAL (memory is saved to global filesystem)
    or SUSPRES_EXCLUSION_LOCAL (saved to local filesystem)
    rate -- The rate at which an individual VM is suspended/resumed
    override -- If specified, then instead of computing the time to
    suspend/resume VM based on its memory and the "rate" parameter,
    use this override value.

    Returns a list of (start, end, {pnode -> [vnodes]}) entries.
    """
    times = [] # (start, end, {pnode -> vnodes})
    enactment_overhead = get_config().get("enactment-overhead")

    if exclusion == constants.SUSPRES_EXCLUSION_GLOBAL:
        # Global exclusion (which represents, e.g., reading/writing the memory image files
        # from a global file system) meaning no two suspensions/resumptions can happen at
        # the same time in the entire resource pool.
        t = time
        t_prev = None
        for (vnode, pnode) in vmrr.nodes.items():
            if override is None:
                mem = vmrr.lease.requested_resources[vnode].get_quantity(constants.RES_MEM)
                op_time = self.__compute_suspend_resume_time(mem, rate)
            else:
                op_time = override

            op_time += enactment_overhead

            t_prev = t

            if direction == constants.DIRECTION_FORWARD:
                t += op_time
                times.append((t_prev, t, {pnode: [vnode]}))
            elif direction == constants.DIRECTION_BACKWARD:
                t -= op_time
                times.append((t, t_prev, {pnode: [vnode]}))

    elif exclusion == constants.SUSPRES_EXCLUSION_LOCAL:
        # Local exclusion (which represents, e.g., reading the memory image files
        # from a local file system) means no two resumptions can happen at the same
        # time in the same physical node.
        pervnode_times = [] # (start, end, vnode)
        vnodes_in_pnode = {}
        for (vnode, pnode) in vmrr.nodes.items():
            vnodes_in_pnode.setdefault(pnode, []).append(vnode)
        for pnode in vnodes_in_pnode:
            t = time
            t_prev = None
            for vnode in vnodes_in_pnode[pnode]:
                if override is None:
                    mem = vmrr.lease.requested_resources[vnode].get_quantity(constants.RES_MEM)
                    op_time = self.__compute_suspend_resume_time(mem, rate)
                else:
                    op_time = override

                t_prev = t

                if direction == constants.DIRECTION_FORWARD:
                    t += op_time
                    pervnode_times.append((t_prev, t, vnode))
                elif direction == constants.DIRECTION_BACKWARD:
                    t -= op_time
                    pervnode_times.append((t, t_prev, vnode))

        # Consolidate suspend/resume operations happening at the same time
        uniq_times = set([(start, end) for (start, end, vnode) in pervnode_times])
        for (start, end) in uniq_times:
            vnodes = [x[2] for x in pervnode_times if x[0] == start and x[1] == end]
            node_mappings = {}
            for vnode in vnodes:
                pnode = vmrr.nodes[vnode]
                node_mappings.setdefault(pnode, []).append(vnode)
            times.append([start, end, node_mappings])

        # Add the enactment overhead
        for t in times:
            num_vnodes = sum([len(vnodes) for vnodes in t[2].values()])
            overhead = TimeDelta(seconds = num_vnodes * enactment_overhead)
            if direction == constants.DIRECTION_FORWARD:
                t[1] += overhead
            elif direction == constants.DIRECTION_BACKWARD:
                t[0] -= overhead

        # Fix overlaps
        if direction == constants.DIRECTION_FORWARD:
            times.sort(key=itemgetter(0))
        elif direction == constants.DIRECTION_BACKWARD:
            times.sort(key=itemgetter(1))
            times.reverse()

        prev_start = None
        prev_end = None
        for t in times:
            if prev_start is not None:
                start = t[0]
                end = t[1]
                if direction == constants.DIRECTION_FORWARD:
                    if start < prev_end:
                        diff = prev_end - start
                        t[0] += diff
                        t[1] += diff
                elif direction == constants.DIRECTION_BACKWARD:
                    if end > prev_start:
                        diff = end - prev_start
                        t[0] -= diff
                        t[1] -= diff
            prev_start = t[0]
            prev_end = t[1]

    return times
def __schedule_shutdown(self, vmrr):
    """ Schedules the shutdown of a VM reservation

    Arguments:
    vmrr -- The VM reservation that will be shut down
    """
    # The shutdown occupies the tail end of the VM reservation
    duration = self.__estimate_shutdown_time(vmrr.lease)
    shutdown_start = vmrr.end - duration
    shutdown_rr = ShutdownResourceReservation(vmrr.lease, shutdown_start, vmrr.end, vmrr.resources_in_pnode, vmrr.nodes, vmrr)
    shutdown_rr.state = ResourceReservation.STATE_SCHEDULED

    # The VMs now end where the shutdown begins
    vmrr.update_end(shutdown_start)

    # Discard any previously scheduled post-RRs and replace them with
    # the new shutdown reservation
    for old_rr in vmrr.post_rrs:
        self.slottable.remove_reservation(old_rr)
    vmrr.post_rrs = [shutdown_rr]
def __schedule_suspension(self, vmrr, suspend_by):
    """ Schedules the suspension of a VM reservation

    Most of the work is done in __compute_susprem_times. See that
    method's documentation for more details.

    Arguments:
    vmrr -- The VM reservation that will be suspended
    suspend_by -- The time by which the VMs should be suspended.

    Raises InconsistentScheduleError if the suspension cannot fit
    inside the VMRR.
    """
    config = get_config()
    susp_exclusion = config.get("suspendresume-exclusion")
    override = config.get("override-suspend-time")
    rate = config.get("suspend-rate")

    if suspend_by < vmrr.start or suspend_by > vmrr.end:
        raise InconsistentScheduleError("Tried to schedule a suspension by %s, which is outside the VMRR's duration (%s-%s)" % (suspend_by, vmrr.start, vmrr.end))

    # Find the suspension times
    times = self.__compute_susprem_times(vmrr, suspend_by, constants.DIRECTION_BACKWARD, susp_exclusion, rate, override)

    # Create the suspension resource reservations
    suspend_rrs = []
    for (start, end, node_mappings) in times:
        suspres = {}
        all_vnodes = []
        for (pnode, vnodes) in node_mappings.items():
            num_vnodes = len(vnodes)
            r = Capacity([constants.RES_MEM, constants.RES_DISK])
            # Memory image files require both RAM (while dumping) and disk
            mem = 0
            for vnode in vnodes:
                mem += vmrr.lease.requested_resources[vnode].get_quantity(constants.RES_MEM)
            r.set_quantity(constants.RES_MEM, mem * num_vnodes)
            r.set_quantity(constants.RES_DISK, mem * num_vnodes)
            suspres[pnode] = self.slottable.create_resource_tuple_from_capacity(r)
            all_vnodes += vnodes

        susprr = SuspensionResourceReservation(vmrr.lease, start, end, suspres, all_vnodes, vmrr)
        susprr.state = ResourceReservation.STATE_SCHEDULED
        suspend_rrs.append(susprr)

    suspend_rrs.sort(key=attrgetter("start"))

    susp_start = suspend_rrs[0].start
    if susp_start < vmrr.start:
        raise InconsistentScheduleError("Determined suspension should start at %s, before the VMRR's start (%s) -- Suspend time not being properly estimated?" % (susp_start, vmrr.start))

    vmrr.update_end(susp_start)

    # If there are any post RRs, remove them
    for rr in vmrr.post_rrs:
        self.slottable.remove_reservation(rr)
    vmrr.post_rrs = []

    # Add the suspension RRs to the VM RR
    for susprr in suspend_rrs:
        vmrr.post_rrs.append(susprr)
def __schedule_resumption(self, vmrr, resume_at):
    """ Schedules the resumption of a VM reservation

    Most of the work is done in __compute_susprem_times. See that
    method's documentation for more details.

    Arguments:
    vmrr -- The VM reservation that will be resumed
    resume_at -- The time at which the resumption should start

    Raises InconsistentScheduleError if the resumption cannot fit
    inside the VMRR.
    """
    config = get_config()
    resm_exclusion = config.get("suspendresume-exclusion")
    override = config.get("override-resume-time")
    rate = config.get("resume-rate")

    if resume_at < vmrr.start or resume_at > vmrr.end:
        raise InconsistentScheduleError("Tried to schedule a resumption at %s, which is outside the VMRR's duration (%s-%s)" % (resume_at, vmrr.start, vmrr.end))

    # Find the resumption times
    times = self.__compute_susprem_times(vmrr, resume_at, constants.DIRECTION_FORWARD, resm_exclusion, rate, override)

    # Create the resumption resource reservations
    resume_rrs = []
    for (start, end, node_mappings) in times:
        resmres = {}
        all_vnodes = []
        for (pnode, vnodes) in node_mappings.items():
            num_vnodes = len(vnodes)
            r = Capacity([constants.RES_MEM, constants.RES_DISK])
            # Memory image files require both RAM (while restoring) and disk
            mem = 0
            for vnode in vnodes:
                mem += vmrr.lease.requested_resources[vnode].get_quantity(constants.RES_MEM)
            r.set_quantity(constants.RES_MEM, mem * num_vnodes)
            r.set_quantity(constants.RES_DISK, mem * num_vnodes)
            resmres[pnode] = self.slottable.create_resource_tuple_from_capacity(r)
            all_vnodes += vnodes
        resmrr = ResumptionResourceReservation(vmrr.lease, start, end, resmres, all_vnodes, vmrr)
        resmrr.state = ResourceReservation.STATE_SCHEDULED
        resume_rrs.append(resmrr)

    resume_rrs.sort(key=attrgetter("start"))

    resm_end = resume_rrs[-1].end
    if resm_end > vmrr.end:
        raise InconsistentScheduleError("Determined resumption would end at %s, after the VMRR's end (%s) -- Resume time not being properly estimated?" % (resm_end, vmrr.end))

    vmrr.update_start(resm_end)

    # Add the resumption RRs to the VM RR
    for resmrr in resume_rrs:
        vmrr.pre_rrs.append(resmrr)
def __compute_suspend_resume_time(self, mem, rate):
    """ Compute the time to suspend/resume a single VM

    Arguments:
    mem -- Amount of memory used by the VM
    rate -- The rate at which an individual VM is suspended/resumed
    """
    # Time is proportional to the amount of memory that has to be
    # saved/restored, rounded to the clock's granularity.
    seconds = float(mem) / rate
    return round_datetime_delta(TimeDelta(seconds = seconds))
def __estimate_suspend_time(self, lease):
    """ Estimate the time to suspend an entire lease

    Most of the work is done in __estimate_suspend_resume_time. See
    that method's documentation for more details.

    Arguments:
    lease -- Lease that is going to be suspended
    """
    config = get_config()
    rate = config.get("suspend-rate")
    override = config.get("override-suspend-time")
    # A configured override short-circuits the estimation entirely
    if override is not None:
        return override
    else:
        return self.__estimate_suspend_resume_time(lease, rate)
def __estimate_resume_time(self, lease):
    """ Estimate the time to resume an entire lease

    Most of the work is done in __estimate_suspend_resume_time. See
    that method's documentation for more details.

    Arguments:
    lease -- Lease that is going to be resumed
    """
    config = get_config()
    rate = config.get("resume-rate")
    override = config.get("override-resume-time")
    # A configured override short-circuits the estimation entirely
    if override is not None:
        return override
    else:
        return self.__estimate_suspend_resume_time(lease, rate)
def __estimate_suspend_resume_time(self, lease, rate):
    """ Estimate the time to suspend/resume an entire lease

    Note that, unlike __compute_suspend_resume_time, this estimates
    the time to suspend/resume an entire lease (which may involve
    suspending several VMs)

    Arguments:
    lease -- Lease that is going to be suspended/resumed
    rate -- The rate at which an individual VM is suspended/resumed
    """
    susp_exclusion = get_config().get("suspendresume-exclusion")
    enactment_overhead = get_config().get("enactment-overhead")
    # Total memory requested across all the lease's nodes
    total_mem = sum([lease.requested_resources[vnode].get_quantity(constants.RES_MEM) for vnode in lease.requested_resources])
    if susp_exclusion == constants.SUSPRES_EXCLUSION_GLOBAL:
        return lease.numnodes * (self.__compute_suspend_resume_time(total_mem, rate) + enactment_overhead)
    elif susp_exclusion == constants.SUSPRES_EXCLUSION_LOCAL:
        # Overestimating: assume the worst case, where every VM has to be
        # suspended/resumed serially (same formula as the global case)
        return lease.numnodes * (self.__compute_suspend_resume_time(total_mem, rate) + enactment_overhead)
def __estimate_shutdown_time(self, lease):
    """ Estimate the time to shutdown an entire lease

    Arguments:
    lease -- Lease that is going to be shutdown
    """
    # Each VM's shutdown incurs a per-node enactment overhead on top
    # of the configured shutdown time.
    per_node_overhead = get_config().get("enactment-overhead").seconds
    return get_config().get("shutdown-time") + (per_node_overhead * lease.numnodes)
def __compute_scheduling_threshold(self, lease):
    """ Compute the scheduling threshold (the 'minimum duration') of a lease

    To avoid thrashing, Haizea will not schedule a lease unless all overheads
    can be correctly scheduled (which includes image transfers, suspensions, etc.).
    However, this can still result in situations where a lease is prepared,
    and then immediately suspended because of a blocking lease in the future.
    The scheduling threshold is used to specify that a lease must
    not be scheduled unless it is guaranteed to run for a minimum amount of
    time (the rationale behind this is that you ideally don't want leases
    to be scheduled if they're not going to be active for at least as much time
    as was spent in overheads).

    An important part of computing this value is the "scheduling threshold factor".
    The default value is 1, meaning that the lease will be active for at least
    as much time T as was spent on overheads (e.g., if preparing the lease requires
    60 seconds, and we know that it will have to be suspended, requiring 30 seconds,
    Haizea won't schedule the lease unless it can run for at least 90 seconds).
    In other words, a scheduling factor of F requires a minimum duration of
    F*T. A value of 0 could lead to thrashing, since Haizea could end up with
    situations where a lease starts and immediately gets suspended.

    Arguments:
    lease -- Lease for which we want to find the scheduling threshold
    """
    # TODO: Take into account other things like boot overhead, migration overhead, etc.
    config = get_config()
    threshold = config.get("force-scheduling-threshold")
    if threshold is not None:
        # If there is a hard-coded threshold, use that
        return threshold
    else:
        factor = config.get("scheduling-threshold-factor")

        # First, figure out the "safe duration" (the minimum duration
        # so that we at least allocate enough time for all the
        # overheads).
        susp_overhead = self.__estimate_suspend_time(lease)
        safe_duration = susp_overhead

        if lease.get_state() == Lease.STATE_SUSPENDED_QUEUED:
            resm_overhead = self.__estimate_resume_time(lease)
            safe_duration += resm_overhead

        # TODO: Incorporate other overheads into the minimum duration
        min_duration = safe_duration

        # At the very least, we want to allocate enough time for the
        # safe duration (otherwise, we'll end up with incorrect schedules,
        # where a lease is scheduled to suspend, but isn't even allocated
        # enough time to suspend).
        # The factor is assumed to be non-negative. i.e., a factor of 0
        # means we only allocate enough time for potential suspend/resume
        # operations, while a factor of 1 means the lease will get as much
        # running time as spend on the runtime overheads involved in setting
        # it up
        threshold = safe_duration + (min_duration * factor)
        return threshold
#-------------------------------------------------------------------#
# #
# SLOT TABLE EVENT HANDLERS #
# #
#-------------------------------------------------------------------#
def _handle_start_vm(self, l, rr):
    """ Handles the start of a VMResourceReservation

    Arguments:
    l -- Lease the VMResourceReservation belongs to
    rr -- The VMResourceReservation
    """
    self.logger.debug("LEASE-%i Start of handleStartVM" % l.id)
    l.print_contents()
    lease_state = l.get_state()
    if lease_state == Lease.STATE_READY:
        l.set_state(Lease.STATE_ACTIVE)
        rr.state = ResourceReservation.STATE_ACTIVE
        now_time = get_clock().get_time()
        l.start.actual = now_time

        try:
            self.resourcepool.start_vms(l, rr)
        except EnactmentError:
            self.logger.error("Enactment error when starting VMs.")
            # Right now, this is a non-recoverable error, so we just
            # propagate it upwards to the lease scheduler
            # In the future, it may be possible to react to these
            # kind of errors.
            raise
    elif lease_state == Lease.STATE_RESUMED_READY:
        l.set_state(Lease.STATE_ACTIVE)
        rr.state = ResourceReservation.STATE_ACTIVE
        # No enactment to do here, since all the suspend/resume actions are
        # handled during the suspend/resume RRs
    else:
        raise InconsistentLeaseStateError(l, doing = "starting a VM")

    # If this was a future reservation (as determined by backfilling),
    # remove that status, since the future is now.
    if l in self.future_leases:
        self.future_leases.remove(l)
        get_persistence().persist_future_leases(self.future_leases)

    l.print_contents()
    self.logger.debug("LEASE-%i End of handleStartVM" % l.id)
    self.logger.info("Started VMs for lease %i on nodes %s" % (l.id, rr.nodes.values()))
def _handle_end_vm(self, l, rr):
    """ Handles the end of a VMResourceReservation

    Arguments:
    l -- Lease the VMResourceReservation belongs to
    rr -- The VMResourceReservation
    """
    self.logger.debug("LEASE-%i Start of handleEndVM" % l.id)
    self.logger.vdebug("LEASE-%i Before:" % l.id)
    l.print_contents()
    # Credit the time the VMs actually ran towards the lease's duration
    elapsed = round_datetime(get_clock().get_time()) - rr.start
    l.duration.accumulate_duration(elapsed)
    rr.state = ResourceReservation.STATE_DONE
    self.logger.vdebug("LEASE-%i After:" % l.id)
    l.print_contents()
    self.logger.debug("LEASE-%i End of handleEndVM" % l.id)
    self.logger.info("Stopped VMs for lease %i on nodes %s" % (l.id, rr.nodes.values()))
def _handle_unscheduled_end_vm(self, l, vmrr):
    """ Handles the unexpected end of a VMResourceReservation

    Arguments:
    l -- Lease the VMResourceReservation belongs to
    vmrr -- The VMResourceReservation
    """
    self.logger.info("LEASE-%i The VM has ended prematurely." % l.id)
    # Any scheduled post-RRs (suspensions/shutdowns) are now moot
    for post_rr in vmrr.post_rrs:
        self.slottable.remove_reservation(post_rr)
    vmrr.post_rrs = []
    # Truncate the reservation at the current time, then run the
    # regular end-of-VM handling
    vmrr.end = get_clock().get_time()
    self._handle_end_vm(l, vmrr)
def _handle_start_suspend(self, l, rr):
    """ Handles the start of a SuspensionResourceReservation

    Arguments:
    l -- Lease the SuspensionResourceReservation belongs to
    rr -- The SuspensionResourceReservation
    """
    self.logger.debug("LEASE-%i Start of handleStartSuspend" % l.id)
    l.print_contents()
    rr.state = ResourceReservation.STATE_ACTIVE

    try:
        self.resourcepool.suspend_vms(l, rr)
    except EnactmentError:
        self.logger.error("Enactment error when suspending VMs.")
        # Right now, this is a non-recoverable error, so we just
        # propagate it upwards to the lease scheduler
        # In the future, it may be possible to react to these
        # kind of errors.
        raise

    if rr.is_first():
        l.set_state(Lease.STATE_SUSPENDING)
        l.print_contents()
        self.logger.info("Suspending lease %i..." % (l.id))
    self.logger.debug("LEASE-%i End of handleStartSuspend" % l.id)
def _handle_end_suspend(self, l, rr):
    """ Handles the end of a SuspensionResourceReservation

    Arguments:
    l -- Lease the SuspensionResourceReservation belongs to
    rr -- The SuspensionResourceReservation
    """
    self.logger.debug("LEASE-%i Start of handleEndSuspend" % l.id)
    l.print_contents()
    # TODO: React to incomplete suspend
    self.resourcepool.verify_suspend(l, rr)
    rr.state = ResourceReservation.STATE_DONE
    # Once the last suspension RR completes, the whole lease is suspended
    if rr.is_last():
        l.set_state(Lease.STATE_SUSPENDED_PENDING)
    l.print_contents()
    self.logger.debug("LEASE-%i End of handleEndSuspend" % l.id)
    self.logger.info("Lease %i suspended." % (l.id))
    # A fully suspended lease has to be handed back to the lease
    # scheduler so it can be rescheduled
    if l.get_state() == Lease.STATE_SUSPENDED_PENDING:
        raise RescheduleLeaseException
def _handle_start_resume(self, l, rr):
    """ Handles the start of a ResumptionResourceReservation

    Arguments:
    l -- Lease the ResumptionResourceReservation belongs to
    rr -- The ResumptionResourceReservation
    """
    self.logger.debug("LEASE-%i Start of handleStartResume" % l.id)
    l.print_contents()

    try:
        self.resourcepool.resume_vms(l, rr)
    except EnactmentError:
        self.logger.error("Enactment error when resuming VMs.")
        # Right now, this is a non-recoverable error, so we just
        # propagate it upwards to the lease scheduler
        # In the future, it may be possible to react to these
        # kind of errors.
        raise

    rr.state = ResourceReservation.STATE_ACTIVE
    if rr.is_first():
        l.set_state(Lease.STATE_RESUMING)
        l.print_contents()
        self.logger.info("Resuming lease %i..." % (l.id))
    self.logger.debug("LEASE-%i End of handleStartResume" % l.id)
    def _handle_end_resume(self, l, rr):
        """ Handles the end of a ResumptionResourceReservation
        Arguments:
        l -- Lease the ResumptionResourceReservation belongs to
        rr -- The ResumptionResourceReservation
        """
        self.logger.debug("LEASE-%i Start of handleEndResume" % l.id)
        l.print_contents()
        # TODO: React to incomplete resume
        self.resourcepool.verify_resume(l, rr)
        rr.state = ResourceReservation.STATE_DONE
        # Only when the last resumption RR ends is the lease fully resumed.
        if rr.is_last():
            l.set_state(Lease.STATE_RESUMED_READY)
            self.logger.info("Resumed lease %i" % (l.id))
            # The saved memory-image (RAM) files are no longer needed once
            # the VMs are running again; remove them from the physical nodes.
            for vnode, pnode in rr.vmrr.nodes.items():
                self.resourcepool.remove_ramfile(pnode, l.id, vnode)
        l.print_contents()
        self.logger.debug("LEASE-%i End of handleEndResume" % l.id)
def _handle_start_shutdown(self, l, rr):
""" Handles the start of a ShutdownResourceReservation
Arguments:
l -- Lease the SuspensionResourceReservation belongs to
rr -- The SuspensionResourceReservation
"""
self.logger.debug("LEASE-%i Start of handleStartShutdown" % l.id)
l.print_contents()
rr.state = ResourceReservation.STATE_ACTIVE
try:
self.resourcepool.stop_vms(l, rr)
except EnactmentError, exc:
self.logger.error("Enactment error when shutting down VMs.")
# Right now, this is a non-recoverable error, so we just
# propagate it upwards to the lease scheduler
# In the future, it may be possible to react to these
# kind of errors.
raise
l.print_contents()
self.logger.debug("LEASE-%i End of handleStartShutdown" % l.id)
    def _handle_end_shutdown(self, l, rr):
        """ Handles the end of a ShutdownResourceReservation
        Arguments:
        l -- Lease the ShutdownResourceReservation belongs to
        rr -- The ShutdownResourceReservation
        """
        self.logger.debug("LEASE-%i Start of handleEndShutdown" % l.id)
        l.print_contents()
        rr.state = ResourceReservation.STATE_DONE
        l.print_contents()
        self.logger.debug("LEASE-%i End of handleEndShutdown" % l.id)
        self.logger.info("Lease %i's VMs have shutdown." % (l.id))
        # The shutdown completes the lease; signal normal termination to the
        # lease scheduler via an exception.
        raise NormalEndLeaseException
    def _handle_start_migrate(self, l, rr):
        """ Handles the start of a MemImageMigrationResourceReservation
        Arguments:
        l -- Lease the MemImageMigrationResourceReservation belongs to
        rr -- The MemImageMigrationResourceReservation
        """
        self.logger.debug("LEASE-%i Start of handleStartMigrate" % l.id)
        l.print_contents()
        # No enactment action is taken here; the RR is simply activated and
        # the actual RAM-file bookkeeping happens in _handle_end_migrate.
        rr.state = ResourceReservation.STATE_ACTIVE
        l.print_contents()
        self.logger.debug("LEASE-%i End of handleStartMigrate" % l.id)
        self.logger.info("Migrating lease %i..." % (l.id))
    def _handle_end_migrate(self, l, rr):
        """ Handles the end of a MemImageMigrationResourceReservation
        Arguments:
        l -- Lease the MemImageMigrationResourceReservation belongs to
        rr -- The MemImageMigrationResourceReservation
        """
        self.logger.debug("LEASE-%i Start of handleEndMigrate" % l.id)
        l.print_contents()
        # rr.transfers maps vnode -> (origin pnode, destination pnode).
        for vnode in rr.transfers:
            origin = rr.transfers[vnode][0]
            dest = rr.transfers[vnode][1]
            # Update RAM files
            self.resourcepool.remove_ramfile(origin, l.id, vnode)
            self.resourcepool.add_ramfile(dest, l.id, vnode, l.requested_resources[vnode].get_quantity(constants.RES_MEM))
        rr.state = ResourceReservation.STATE_DONE
        l.print_contents()
        self.logger.debug("LEASE-%i End of handleEndMigrate" % l.id)
        self.logger.info("Migrated lease %i..." % (l.id))
class VMResourceReservation(ResourceReservation):
    """Resource reservation for the VM-execution phase of a lease.

    A VM RR may be preceded by resumption/migration RRs (``pre_rrs``) and
    followed by suspension/shutdown RRs (``post_rrs``).
    """
    def __init__(self, lease, start, end, nodes, res):
        ResourceReservation.__init__(self, lease, start, end, res)
        self.nodes = nodes # { vnode -> pnode }
        self.pre_rrs = []
        self.post_rrs = []
        # ONLY for simulation
        self.__update_prematureend()
    def update_start(self, time):
        self.start = time
        # ONLY for simulation
        self.__update_prematureend()
    def update_end(self, time):
        self.end = time
        # ONLY for simulation
        self.__update_prematureend()
    # ONLY for simulation
    def __update_prematureend(self):
        # If the lease's real duration is known (simulation only), compute the
        # time at which the VMs would actually finish inside this RR; None if
        # the RR ends before the remaining duration is used up.
        if self.lease.duration.known != None:
            remdur = self.lease.duration.get_remaining_known_duration()
            rrdur = self.end - self.start
            if remdur < rrdur:
                self.prematureend = self.start + remdur
                # Kludgy, but this corner case actually does happen
                # (because of preemptions, it may turn out that
                # the premature end time coincides with the
                # starting time of the VMRR)
                if self.prematureend == self.start:
                    self.prematureend += 1
            else:
                self.prematureend = None
        else:
            self.prematureend = None
    def get_final_end(self):
        # End of the last post-RR (suspension/shutdown), or of the VM RR
        # itself when there are no post-RRs.
        if len(self.post_rrs) == 0:
            return self.end
        else:
            return self.post_rrs[-1].end
    def is_suspending(self):
        # True when the RR is followed by a suspension.
        return len(self.post_rrs) > 0 and isinstance(self.post_rrs[0], SuspensionResourceReservation)
    def is_shutting_down(self):
        # True when the RR is followed by a shutdown.
        return len(self.post_rrs) > 0 and isinstance(self.post_rrs[0], ShutdownResourceReservation)
    def print_contents(self, loglevel=constants.LOGLEVEL_VDEBUG):
        # Log pre-RRs, then this RR, then post-RRs, separated by "--".
        logger = logging.getLogger("LEASES")
        for resmrr in self.pre_rrs:
            resmrr.print_contents(loglevel)
            logger.log(loglevel, "--")
        logger.log(loglevel, "Type : VM")
        logger.log(loglevel, "Nodes : %s" % pretty_nodemap(self.nodes))
        if self.prematureend != None:
            logger.log(loglevel, "Premature end : %s" % self.prematureend)
        ResourceReservation.print_contents(self, loglevel)
        for susprr in self.post_rrs:
            logger.log(loglevel, "--")
            susprr.print_contents(loglevel)
class SuspensionResourceReservation(ResourceReservation):
    """Resource reservation covering the suspension phase of a VM RR."""
    def __init__(self, lease, start, end, res, vnodes, vmrr):
        ResourceReservation.__init__(self, lease, start, end, res)
        # Back-reference to the VM reservation being suspended, plus the
        # virtual nodes affected by this suspension.
        self.vmrr = vmrr
        self.vnodes = vnodes
    def print_contents(self, loglevel=constants.LOGLEVEL_VDEBUG):
        leases_log = logging.getLogger("LEASES")
        leases_log.log(loglevel, "Type : SUSPEND")
        leases_log.log(loglevel, "Vnodes : %s" % self.vnodes)
        ResourceReservation.print_contents(self, loglevel)
    def is_first(self):
        """True when this is the first post-RR of the associated VM RR."""
        return self.vmrr.post_rrs[0] == self
    def is_last(self):
        """True when this is the last post-RR of the associated VM RR."""
        return self.vmrr.post_rrs[-1] == self
class ResumptionResourceReservation(ResourceReservation):
    """Resource reservation covering the resumption phase of a VM RR."""
    def __init__(self, lease, start, end, res, vnodes, vmrr):
        ResourceReservation.__init__(self, lease, start, end, res)
        # Back-reference to the VM reservation being resumed, plus the
        # virtual nodes affected by this resumption.
        self.vmrr = vmrr
        self.vnodes = vnodes
    def print_contents(self, loglevel=constants.LOGLEVEL_VDEBUG):
        leases_log = logging.getLogger("LEASES")
        leases_log.log(loglevel, "Type : RESUME")
        leases_log.log(loglevel, "Vnodes : %s" % self.vnodes)
        ResourceReservation.print_contents(self, loglevel)
    def is_first(self):
        """True when this is the first resumption among the VM RR's pre-RRs."""
        return self.__resumptions()[0] == self
    def is_last(self):
        """True when this is the last resumption among the VM RR's pre-RRs."""
        return self.__resumptions()[-1] == self
    def __resumptions(self):
        # pre_rrs may also contain migrations; keep only resumption RRs.
        return [rr for rr in self.vmrr.pre_rrs
                if isinstance(rr, ResumptionResourceReservation)]
class ShutdownResourceReservation(ResourceReservation):
    """Resource reservation covering the shutdown phase of a VM RR."""
    def __init__(self, lease, start, end, res, vnodes, vmrr):
        ResourceReservation.__init__(self, lease, start, end, res)
        # Back-reference to the VM reservation being shut down, plus the
        # virtual nodes affected.
        self.vmrr = vmrr
        self.vnodes = vnodes
    def print_contents(self, loglevel=constants.LOGLEVEL_VDEBUG):
        leases_log = logging.getLogger("LEASES")
        leases_log.log(loglevel, "Type : SHUTDOWN")
        ResourceReservation.print_contents(self, loglevel)
class MemImageMigrationResourceReservation(MigrationResourceReservation):
    """Migration reservation for moving a suspended VM's memory image."""
    def __init__(self, lease, start, end, res, vmrr, transfers):
        # Pure delegation; the base class stores vmrr and transfers.
        MigrationResourceReservation.__init__(self, lease, start, end, res, vmrr, transfers)
    def print_contents(self, loglevel=constants.LOGLEVEL_VDEBUG):
        leases_log = logging.getLogger("LEASES")
        leases_log.log(loglevel, "Type : MEM IMAGE MIGRATION")
        leases_log.log(loglevel, "Transfers : %s" % self.transfers)
        ResourceReservation.print_contents(self, loglevel)
| 1.515625 | 2 |
sentience/framework/abc/train.py | jtdutta1/Sentience | 0 | 12758733 | <gh_stars>0
from abc import ABC, abstractmethod
class Trainer(ABC):
    """Abstract base class for model trainers.

    Subclass this and implement the abstract hooks below to define a custom
    training loop with periodic checkpointing of the training state.
    """
    @abstractmethod
    def train(self,
              state_path,
              save_state_interval,
              resume=False,
              model_checkpoint_path=None):
        """Train and evaluate the model.

        Keyword arguments:-
        state_path -- where the state of the training cycle is saved.
        save_state_interval -- number of epochs between state saves.
        resume -- if True, resume from the saved state at ``state_path``;
                  ``model_checkpoint_path`` must also be supplied.
        model_checkpoint_path -- saved weights to load and continue training.
        """
    @abstractmethod
    def check_state_file_path(self, state_path):
        """Check whether a file already exists at ``state_path``.

        If it does, a new file name is derived by appending an integer
        (starting from 1) until an unused name is found.
        """
    @abstractmethod
    def create_state(self):
        """Create a state object describing the current training cycle."""
    @abstractmethod
    def save_state(self, save_path):
        """Persist the state of the training cycle.

        Keyword arguments:-
        save_path -- path to save the state to.
        """
    # A load_state() counterpart was sketched here but is intentionally
    # disabled for now:
    # @abstractmethod
    # def load_state(self, save_path):
    #     """
    #     Load a saved state to resume training from.
    #     Keyword arguments:-
    #     save_path -- Path to load the state from.
    #     """
    #     pass
indico/modules/events/papers/controllers/api.py | Leats/indico | 0 | 12758734 | <gh_stars>0
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from flask import request, session
from marshmallow_enum import EnumField
from webargs import fields, validate
from werkzeug.exceptions import Forbidden
from indico.modules.events.papers.controllers.base import RHPaperBase
from indico.modules.events.papers.models.comments import PaperReviewComment
from indico.modules.events.papers.models.reviews import (PaperAction, PaperCommentVisibility, PaperReview,
PaperReviewType, PaperTypeProxy)
from indico.modules.events.papers.models.revisions import PaperRevisionState
from indico.modules.events.papers.operations import (create_comment, create_paper_revision, create_review,
delete_comment, judge_paper, reset_paper_state, update_comment,
update_review)
from indico.modules.events.papers.schemas import PaperSchema
from indico.modules.events.papers.util import is_type_reviewing_possible
from indico.util.i18n import _
from indico.util.marshmallow import max_words, not_empty
from indico.web.args import parser, use_kwargs
class RHPaperDetails(RHPaperBase):
    """Return the paper serialized for the current user."""
    def _process(self):
        schema = PaperSchema(context={'user': session.user})
        return schema.jsonify(self.paper)
class RHResetPaperState(RHPaperBase):
    """Reset a judged paper back to the plain "submitted" state."""
    def _check_paper_protection(self):
        # Nothing to reset while the paper is still merely submitted.
        if self.paper.state == PaperRevisionState.submitted:
            return False
        # managers and judges can always reset
        if self.paper.event.can_manage(session.user):
            return True
        return self.paper.can_judge(session.user)
    def _process(self):
        # Re-check the state; only act when there is actually something to undo.
        if self.paper.state != PaperRevisionState.submitted:
            reset_paper_state(self.paper)
        return '', 204
class RHCreatePaperComment(RHPaperBase):
    """Add a new comment to a paper."""
    def _check_paper_protection(self):
        return self.paper.can_comment(session.user)
    # `visibility` is optional; `create_comment` receives None when omitted.
    @use_kwargs({
        'comment': fields.String(validate=not_empty),
        'visibility': EnumField(PaperCommentVisibility, missing=None)
    })
    def _process(self, comment, visibility):
        create_comment(self.paper, comment, visibility, session.user)
        return '', 204
class RHCommentActions(RHPaperBase):
    """Delete (DELETE) or edit (PATCH) an existing paper review comment."""
    normalize_url_spec = {
        'locators': {
            lambda self: self.comment
        }
    }
    def _check_access(self):
        RHPaperBase._check_access(self)
        if not self.comment.can_edit(session.user):
            raise Forbidden
    def _process_args(self):
        RHPaperBase._process_args(self)
        # 404 for unknown ids and for soft-deleted comments.
        self.comment = (PaperReviewComment.query
                        .filter(PaperReviewComment.id == request.view_args['comment_id'],
                                ~PaperReviewComment.is_deleted)
                        .first_or_404())
    def _process_DELETE(self):
        delete_comment(self.comment)
        return '', 204
    # partial=True: both fields are optional; only supplied ones are updated.
    @use_kwargs({
        'comment': fields.String(validate=not_empty),
        'visibility': EnumField(PaperCommentVisibility)
    }, partial=True)
    def _process_PATCH(self, comment=None, visibility=None):
        update_comment(self.comment, comment, visibility)
        return '', 204
class RHJudgePaper(RHPaperBase):
    """Record a judgment (accept/reject/...) on a paper."""
    def _check_paper_protection(self):
        return self.paper.can_judge(session.user, check_state=True)
    @use_kwargs({
        'action': EnumField(PaperAction, required=True),
        'comment': fields.String()
    })
    def _process(self, action, comment):
        judge_paper(self.paper, action, comment, judge=session.user)
        return '', 204
class RHSubmitNewRevision(RHPaperBase):
    """Submit a new revision (set of files) for a contribution's paper."""
    # No existing paper is required (this may be the first submission) and
    # the endpoint works even while the event is locked.
    PAPER_REQUIRED = False
    ALLOW_LOCKED = True
    def _check_paper_protection(self):
        # CFP managers bypass the per-user checks; everyone else must pass the
        # base check and be associated with the contribution (or its abstract).
        if not self.event.cfp.is_manager(session.user):
            if not RHPaperBase._check_paper_protection(self):
                return False
            if not self.contribution.is_user_associated(session.user, check_abstract=True):
                return False
        # A revision is only allowed for a first submission (no paper yet) or
        # after the paper was sent back "to be corrected".
        paper = self.contribution.paper
        return paper is None or paper.state == PaperRevisionState.to_be_corrected
    @use_kwargs({
        'files': fields.List(fields.Field(), location='files', required=True)
    })
    def _process(self, files):
        create_paper_revision(self.paper, session.user, files)
        return '', 204
def _parse_review_args(event, review_type):
    """Build and parse the webargs schema of a paper review form.

    Arguments:
    event -- the event whose CFP defines the review questions
    review_type -- the review type (group) whose questions are included

    Returns a ``(questions_data, review_data)`` tuple: the first dict holds
    the per-question answers (keys ``question_<id>``), the second the general
    review fields (``proposed_action``, ``comment``).
    """
    args_schema = {
        'proposed_action': EnumField(PaperAction, required=True),
        'comment': fields.String(missing='')
    }
    # Add one field per configured review question, typed by the question.
    for question in event.cfp.get_questions_for_review_type(review_type):
        attrs = {}
        if question.is_required:
            attrs['required'] = True
        else:
            attrs['missing'] = None
        if question.field_type == 'rating':
            field_cls = fields.Integer
        elif question.field_type == 'text':
            validators = []
            if question.field_data['max_length']:
                validators.append(validate.Length(max=question.field_data['max_length']))
            if question.field_data['max_words']:
                validators.append(max_words(question.field_data['max_words']))
            attrs['validate'] = validators
            field_cls = fields.String
        elif question.field_type == 'bool':
            field_cls = fields.Bool
        else:
            raise Exception('Invalid question field type: {}'.format(question.field_type))
        args_schema['question_{}'.format(question.id)] = field_cls(**attrs)
    data = parser.parse(args_schema)
    # .items() works on both Python 2 and Python 3 (iteritems() is 2-only);
    # these dicts are small, so materializing the items is harmless.
    questions_data = {k: v for k, v in data.items() if k.startswith('question_')}
    review_data = {k: v for k, v in data.items() if not k.startswith('question_')}
    return questions_data, review_data
class RHCreateReview(RHPaperBase):
    """Create a new review of a paper for a given review type."""
    normalize_url_spec = {
        'locators': {
            lambda self: self.paper,
            lambda self: self.type
        }
    }
    def _check_access(self):
        RHPaperBase._check_access(self)
        if not is_type_reviewing_possible(self.event.cfp, self.type.instance):
            raise Forbidden(_('Reviewing is currently not possible'))
    def _check_paper_protection(self):
        # A user may review a revision only once per review type.
        if self.paper.last_revision.get_reviews(user=session.user, group=self.type.instance):
            return False
        return self.paper.can_review(session.user, check_state=True)
    def _process_args(self):
        RHPaperBase._process_args(self)
        self.type = PaperTypeProxy(PaperReviewType[request.view_args['review_type']])
    def _process(self):
        questions_data, review_data = _parse_review_args(self.event, self.type)
        create_review(self.paper, self.type, session.user, review_data, questions_data)
        return '', 204
class RHUpdateReview(RHPaperBase):
    """Update an existing review of a paper."""
    normalize_url_spec = {
        'locators': {
            lambda self: self.review
        }
    }
    def _check_paper_protection(self):
        return self.review.can_edit(session.user, check_state=True)
    def _check_access(self):
        RHPaperBase._check_access(self)
        if not is_type_reviewing_possible(self.event.cfp, self.review.type):
            raise Forbidden(_('Reviewing is currently not possible'))
    def _process_args(self):
        RHPaperBase._process_args(self)
        self.review = (PaperReview.query
                       .filter(PaperReview.id == request.view_args['review_id'])
                       .first_or_404())
    def _process(self):
        questions_data, review_data = _parse_review_args(self.event, self.review.type)
        update_review(self.review, review_data, questions_data)
        return '', 204
| 1.820313 | 2 |
src/unicon/plugins/nxos/service_statements.py | TestingBytes/unicon.plugins | 1 | 12758735 | <filename>src/unicon/plugins/nxos/service_statements.py
"""
Module:
unicon.plugins.service.nxos
Authors:
pyATS TEAM (<EMAIL>, <EMAIL>)
Description:
Module for defining all Services Statement, handlers(callback) and Statement
list for service dialog would be defined here.
"""
from time import sleep
from unicon.eal.dialogs import Statement
from unicon.plugins.nxos.patterns import NxosPatterns
from unicon.plugins.nxos.service_patterns import ReloadPatterns
from unicon.plugins.nxos.service_patterns import HaNxosReloadPatterns
from unicon.plugins.generic.service_statements import send_response,\
login_handler, password_handler
from unicon.plugins.generic.service_statements import save_env,\
auto_provision, reload_proceed, auto_install_dialog, \
setup_dialog, config_byte, login_notready, redundant, confirm_reset,\
press_enter, confirm_config, module_reload, save_module_cfg,\
secure_passwd_std
from unicon.plugins.utils import (get_current_credential,
common_cred_username_handler, common_cred_password_handler, )
def run_level():
    # Dialog action: block for a fixed 100 s, presumably to let the device
    # finish its run-level initialization before the dialog proceeds -- the
    # reason for the specific value is not documented here (TODO confirm).
    sleep(100)
def nxos_system_up():
    # Dialog action: block for a fixed 100 s after the "system up" pattern is
    # seen, presumably to let the NXOS device settle (TODO confirm the value).
    sleep(100)
def admin_password_handler(spawn, context, session):
    """ handles admin password prompt
    """
    credential = get_current_credential(context=context, session=session)
    if credential:
        # Answer with the current credential's password and keep using the
        # same credential for any subsequent prompts.
        common_cred_password_handler(
            spawn=spawn, context=context, credential=credential,
            session=session, reuse_current_credential=True)
    else:
        # Fall back to the legacy tacacs_password from the connection context.
        spawn.sendline(context['tacacs_password'])
# Additional statement specific to nxos
# Each Statement pairs a prompt pattern with either a canned response
# (send_response + args) or a handler callback (args=None).
pat = HaNxosReloadPatterns()
reboot = Statement(pattern=pat.reboot,
                   action=send_response,
                   args={'response': 'y'},
                   loop_continue=True,
                   continue_timer=True)
secure_password = Statement(pattern=pat.secure_password,
                            action=send_response,
                            args={'response': 'n'},
                            loop_continue=True,
                            continue_timer=True)
admin_password = Statement(pattern=pat.admin_password,
                           action=admin_password_handler,
                           args=None,
                           loop_continue=True,
                           continue_timer=False)
enable_vdc = Statement(pattern=pat.enable_vdc,
                       action=send_response,
                       args={'response': 'no'},
                       loop_continue=True,
                       continue_timer=True)
snmp_port = Statement(pattern=pat.snmp_port,
                      action=send_response,
                      args={'response': ''},
                      loop_continue=True,
                      continue_timer=True)
boot_vdc = Statement(pattern=pat.boot_vdc,
                     action=send_response,
                     args={'response': 'yes'},
                     loop_continue=True,
                     continue_timer=True)
login_stmt = Statement(pattern=pat.username,
                       action=login_handler,
                       args=None,
                       loop_continue=True,
                       continue_timer=False)
# Password is the final prompt of the login sequence (loop_continue=False).
password_stmt = Statement(pattern=pat.password,
                          action=password_handler,
                          args=None,
                          loop_continue=False,
                          continue_timer=False)
useracess1 = Statement(pattern=pat.useracess,
                       action=login_handler,
                       args=None,
                       loop_continue=True,
                       continue_timer=False)
run_init = Statement(pattern=pat.run_init,
                     action=run_level,
                     args=None,
                     loop_continue=True,
                     continue_timer=False)
system_up = Statement(pattern=pat.system_up,
                      action=nxos_system_up,
                      args=None,
                      loop_continue=True,
                      continue_timer=False)
# TODO finalise on this step
loader_prompt = None
rommon_prompt = None
# for nxos single rp reload
pat = ReloadPatterns()
reload_confirm_nxos = Statement(pattern=pat.reload_confirm_nxos,
                                action=send_response,
                                args={'response': 'y'},
                                loop_continue=True,
                                continue_timer=False)
# reload statement list for nxos single-rp
nxos_reload_statement_list = [save_env, confirm_reset, reload_confirm_nxos,
                              press_enter, login_stmt, password_stmt,
                              confirm_config, setup_dialog,
                              auto_install_dialog, module_reload,
                              save_module_cfg, secure_passwd_std,
                              admin_password, auto_provision, enable_vdc]
# reload statement list for nxos dual-rp
ha_nxos_reload_statement_list = [save_env, reboot, secure_password,
                                 auto_provision, reload_proceed,
                                 auto_install_dialog, admin_password,
                                 setup_dialog, config_byte, enable_vdc,
                                 snmp_port, boot_vdc, login_notready,
                                 redundant, login_stmt, password_stmt,
                                 system_up, run_init, useracess1]
# Extra prompts that may show up during a normal connection.
additional_connection_dialog = [enable_vdc, boot_vdc, snmp_port,
                                admin_password, secure_password, auto_provision]
# Statements for commit verification on NXOS
pat = NxosPatterns()
commit_verification_stmt = Statement(pattern=pat.commit_verification,
                                     action='sendline()',
                                     args=None, loop_continue=True,
                                     continue_timer=False)
config_commit_stmt_list = [commit_verification_stmt]
tspdb/tests/test_module.py | swipswaps/tspdb | 43 | 12758736 |
import numpy as np
from tspdb.src.pindex.predict import get_prediction_range, get_prediction
from tspdb.src.pindex.pindex_managment import TSPI
from tspdb.src.pindex.pindex_utils import index_ts_mapper
import time
import timeit
import pandas as pd
from tspdb.src.hdf_util import read_data
from tspdb.src.tsUtils import randomlyHideValues
from scipy.stats import norm
from sklearn.metrics import r2_score
import tspdb
def r2_var(y, y_h, X):
    """R^2-style score of predictions ``y_h`` against targets ``y``.

    Computes ``1 - sum((y - y_h)**2) / sum((y - v)**2)`` where ``v`` is the
    population variance of ``X`` (``mean(X**2) - mean(X)**2``), i.e. the
    baseline "prediction" in the denominator is that constant variance.
    """
    baseline = np.mean(X ** 2) - np.mean(X) ** 2
    residual = np.sum((y - y_h) ** 2)
    total = np.sum((y - baseline) ** 2)
    return 1 - residual / total
def create_table_data():
    """Generate the CSV fixture tables used by the tspdb tests.

    Writes ts_basic_5 / ts_basic_ts_5_5 (a synthetic ramp series, without and
    with timestamps) plus three MixtureTS* tables derived from the HDF5 test
    data.  Every table contains the series, its means, 90%- and 70%-observed
    masked copies, and a variance column.
    """
    # Synthetic ramp: mean equals the observation, variance is zero.
    obs = np.arange(10**5).astype('float')
    means = obs
    var = np.zeros(obs.shape)
    obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
    obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
    # (stray debug print of obs_9 removed)
    df = pd.DataFrame(data={'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7, 'var': var})
    df.to_csv('testdata/tables/ts_basic_5.csv', index_label='time')
    # Same table re-indexed with 5-second timestamps.
    timestamps = pd.date_range('2012-10-01 00:00:00', periods=10**5, freq='5s')
    df.index = timestamps
    df.to_csv('testdata/tables/ts_basic_ts_5_5.csv', index_label='time')
    # real time series, constant (unit) variance
    _write_mixture_csv('testdata/MixtureTS2.h5', 'testdata/tables/MixtureTS2.csv', use_recorded_var=False)
    _write_mixture_csv('testdata/MixtureTS.h5', 'testdata/tables/MixtureTS.csv', use_recorded_var=False)
    # real time series with recorded (harmonic) variance
    _write_mixture_csv('testdata/MixtureTS_var.h5', 'testdata/tables/MixtureTS_var.csv', use_recorded_var=True)
def _write_mixture_csv(h5_path, csv_path, use_recorded_var):
    """Read obs/means (and optionally var) from *h5_path*, add 90%/70%
    masked copies of the observations, and write the table to *csv_path*.

    When ``use_recorded_var`` is False the variance column is all ones.
    """
    data = read_data(h5_path)
    obs = data['obs'][:]
    means = data['means'][:]
    var = data['var'][:] if use_recorded_var else np.ones(obs.shape)
    obs_9 = randomlyHideValues(np.array(obs), 0.9)[0]
    obs_7 = randomlyHideValues(np.array(obs), 0.7)[0]
    df = pd.DataFrame(data={'ts': obs, 'means': means, 'ts_9': obs_9, 'ts_7': obs_7, 'var': var})
    # Note: the old code assigned df.index_label = 'time', which is not a
    # DataFrame attribute and had no effect; to_csv's index_label does the job.
    df.to_csv(csv_path, index_label='time')
def create_tables(interface):
    """Load the bundled fixture CSVs and create the DB tables via *interface*."""
    # Fixture tables ship inside the installed tspdb package.
    dir_ = tspdb.__path__[0]+'/tests/'
    for table in ['mixturets2','ts_basic_5','ts_basic_ts_5_5','mixturets_var']:
        df = pd.read_csv(dir_+'testdata/tables/%s.csv'%table, engine = 'python')
        # Only the timestamped table needs its 'time' column parsed as datetimes.
        if table == 'ts_basic_ts_5_5': df['time'] = df['time'].astype('datetime64[ns]')
        interface.create_table(table, df, 'time', include_index = False)
def update_test(interface, init_points = 10**4 , update_points = [1000,100,5000,10000], T = 1000, direct_var = True ,index_name = 'ts_basic_test_pindex'):
    """Create a ramp table, build a pindex over it, then bulk-insert batches.

    NOTE(review): update_points is a mutable default argument; it is never
    mutated here, but replacing it with a tuple would be safer.
    """
    df = pd.DataFrame(data ={'ts': np.arange(init_points).astype('float')})
    interface.create_table('ts_basic_test', df, 'row_id', index_label='row_id')
    time_series_table = ['ts_basic_test','ts', 'row_id']
    T0 = 1000
    gamma = 0.5
    k = 2
    k_var = 1
    agg_interval = 1.
    # Build the prediction index through the DB-side create_pindex() function.
    conn = interface.engine.raw_connection()
    cur = conn.cursor()
    cur.execute('''SELECT create_pindex('%s','%s','%s','%s', "T" => %s, k => %s, k_var => %s, agg_interval => %s, var_direct => %s)'''%('ts_basic_test','row_id','ts', index_name, T, k,k_var, agg_interval, direct_var))
    cur.close()
    conn.commit()
    conn.close()
    # Append successive batches and let the index update incrementally.
    for points in update_points:
        df = pd.DataFrame(data = {'ts':np.arange(init_points,points+init_points).astype('float')}, index = np.arange(init_points,points+init_points) )
        interface.bulk_insert('ts_basic_test', df, index_label='row_id')
        init_points += points
        print ('successfully updated %s points' %points)
def ts_table_tests(init_points = 10**4 , update_points = [1000,100,5000,10000], T = 1000, direct_var = True ,index_name = 'ts_basic_ts_pindex'):
    """Same as update_test but with a timestamp-indexed table (5 s interval).

    NOTE(review): SqlImplementation is not imported in this module, so calling
    this function raises NameError; the hard-coded credentials contain a
    "<PASSWORD>" placeholder and a fixed local database.
    """
    interface = SqlImplementation(driver="postgresql", host="localhost", database="querytime_test",user="aalomar",password="<PASSWORD>")
    df = pd.DataFrame(data ={'ts': np.arange(init_points).astype('float')})
    # One extra period so `end` marks the first timestamp after the data.
    timestamps = pd.date_range('2012-10-01 00:00:00', periods = init_points+1, freq='5s')
    end = timestamps[-1]
    df.index = timestamps[:-1]
    interface.create_table('ts_basic_ts', df, 'timestamp', index_label='timestamp')
    time_series_table = ['ts_basic_ts','ts', 'timestamp']
    T0 = 1000
    gamma = 0.5
    k = 2
    k_var = 1
    TSPD = TSPI(_dir = 'C:/Program Files/PostgreSQL/10/data/', agg_interval = 5, T = T,T_var = T, rank = k, rank_var = k_var, col_to_row_ratio = 10, index_name = index_name,gamma = gamma, interface= interface ,time_series_table = time_series_table, direct_var = direct_var )
    TSPD.create_index()
    interface = SqlImplementation(driver="postgresql", host="localhost", database="querytime_test",user="aalomar",password="<PASSWORD>")
    # Append successive timestamped batches, continuing from `end`.
    for points in update_points:
        df = pd.DataFrame(data = {'ts':np.arange(init_points,points+init_points).astype('float')} )
        timestamps = pd.date_range(end, periods = points+1, freq='5s')
        end = timestamps[-1]
        df.index = timestamps[:-1]
        interface.bulk_insert('ts_basic_ts', df, index_label='timestamp')
        init_points += points
        print ('successfully updated %s points' %points)
def create_pindex_test(interface,table_name, T,T_var, k ,k_var, direct_var,value_column= ['ts'], index_name = None , agg_interval = 1., col_to_row_ratio= 10, time_column = 'row_id'):
    """Invoke the DB-side create_pindex() over *table_name* with the given
    rank/window parameters.

    NOTE(review): the SQL is built via %-interpolation from the arguments;
    acceptable for test fixtures but not safe for untrusted input.
    """
    T0 = 1000
    gamma = 0.5
    if index_name is None: index_name = 'pindex'
    value_column = ','.join(value_column)
    interface.engine.execute('''SELECT create_pindex('%s','%s','{%s}','%s', T => %s,t_var =>%s, k => %s, k_var => %s, agg_interval => %s, var_direct => %s, col_to_row_ratio => %s)'''%(table_name,time_column, value_column, index_name, T, T_var, k,k_var, agg_interval, direct_var, col_to_row_ratio))
| 2.3125 | 2 |
fn/stream.py | bmintz/fn.py | 2,260 | 12758737 | from sys import version_info
if version_info[0] == 2:
from sys import maxint
else:
from sys import maxsize as maxint
from itertools import chain
from .iters import map, range
class Stream(object):
    """Lazy, memoizing sequence fed from one or more chained iterables.

    Elements are pulled from the origin iterator(s) only when accessed and
    are cached in ``_collection`` so repeated access is cheap.  Additional
    iterables (or zero-argument callables producing them) can be appended
    with ``<<``.
    """

    __slots__ = ("_last", "_collection", "_origin")

    class _StreamIterator(object):
        """Iterator over a Stream that forces evaluation as it advances."""

        __slots__ = ("_stream", "_position")

        def __init__(self, stream):
            self._stream = stream
            self._position = -1  # iteration has not started yet

        def __next__(self):
            # Advance, forcing the stream to evaluate up to the new position;
            # stop once no more elements can be produced.
            self._position += 1
            stream = self._stream
            if (len(stream._collection) > self._position
                    or stream._fill_to(self._position)):
                return stream._collection[self._position]
            raise StopIteration()

        if version_info[0] == 2:
            next = __next__

    def __init__(self, *origin):
        self._collection = []
        self._last = -1  # nothing evaluated yet
        self._origin = iter(origin) if origin else []

    def __lshift__(self, rvalue):
        # Accept either an iterable or a zero-argument callable producing one,
        # and chain it after whatever is still unevaluated.
        tail = rvalue() if callable(rvalue) else rvalue
        self._origin = chain(self._origin, tail)
        return self

    def cursor(self):
        """Return position of next evaluated element"""
        return self._last + 1

    def _fill_to(self, index):
        # Force evaluation up to and including ``index``.  Returns whether
        # enough elements were available.
        if self._last >= index:
            return True
        while self._last < index:
            try:
                value = next(self._origin)
            except StopIteration:
                return False
            self._last += 1
            self._collection.append(value)
        return True

    def __iter__(self):
        return self._StreamIterator(self)

    def __getitem__(self, index):
        if isinstance(index, int):
            # todo: i'm not sure what to do with negative indices
            if index < 0:
                raise TypeError("Invalid argument type")
            self._fill_to(index)
            return self._collection.__getitem__(index)
        if isinstance(index, slice):
            low, high, step = index.indices(maxint)
            if step == 0:
                raise ValueError("Step must not be 0")
            # Slicing yields a new lazy Stream over the selected positions.
            return self.__class__() << map(self.__getitem__,
                                           range(low, high, step or 1))
        raise TypeError("Invalid argument type")
| 3.0625 | 3 |
CNN/Predict_cnn.py | phillnguyen/schnablelab | 0 | 12758738 | # 10/4/18
# chenyong
# predict leaf counts using trained model
"""
Make predictions of Leaf counts using trained models
"""
import os.path as op
import sys
import numpy as np
import pandas as pd
import pickle
import matplotlib.pyplot as plt
import matplotlib as mpl
from PIL import Image
from schnablelab.apps.base import ActionDispatcher, OptionParser, glob
from schnablelab.apps.headers import Slurm_header, Slurm_gpu_constraint_header, Slurm_gpu_header
from schnablelab.apps.natsort import natsorted
from glob import glob
from PIL import Image
import cv2
from pathlib import Path
def main():
    """Dispatch the `keras`/`dpp` sub-commands of this module."""
    actions = (
        ('keras', 'using keras model to make prediction'),
        ('dpp', 'using dpp model to make prediction'),
    )
    p = ActionDispatcher(actions)
    p.dispatch(globals())
def dpp(args):
    """
    %prog model_dir img_dir output_prefix
    using your trained dpp model to make predictions.
    """
    p = OptionParser(dpp.__doc__)
    p.set_slurm_opts(jn=True, gpu=True)
    opts, args = p.parse_args(args)
    if len(args) == 0:
        sys.exit(not p.print_help())
    model_dir, img_dir, otp = args
    # Choose the GPU-constrained slurm header when a specific GPU is requested.
    header = Slurm_gpu_constraint_header%(opts.time, opts.memory, otp, otp, otp, opts.gpu) \
        if opts.gpu \
        else Slurm_gpu_header%(opts.time, opts.memory, otp, otp, otp)
    if opts.env:
        header += 'ml anaconda \nsource activate %s\n'%opts.env
    # The slurm job runs the prediction module and writes <prefix>.csv.
    cmd = "python -m schnablelab.CNN.CNN_LeafCount_Predict %s %s %s.csv\n"%(model_dir, img_dir, otp)
    header += cmd
    f0 = open('%s.slurm'%otp, 'w')
    f0.write(header)
    f0.close()
    print('%s.slurm generate, you can submit to a gpu node now.'%otp)
def keras(args):
    """
    %prog model_name img_dir target_size output_prefix
    using your trained model to make predictions. Target size is the input_shape when
    you train your model. A valid target_size example is 224,224,3
    """
    from keras.models import load_model
    p = OptionParser(keras.__doc__)
    p.set_slurm_opts()
    p.add_option('--img_num', default='all',
                 help='specify how many images used for prediction in the dir')
    opts, args = p.parse_args(args)
    if len(args) == 0:
        sys.exit(not p.print_help())
    model, img_dir, ts, otp = args
    # Drop the trailing channel number: cv2.resize expects (width, height).
    ts = tuple([int(i) for i in ts.split(',')][:-1])
    print(ts)
    # Use a dedicated name for the image directory; the previous code reused
    # (shadowed) the OptionParser variable ``p`` here.
    img_root = Path(img_dir)
    ps = list(img_root.glob('*.png'))[:int(opts.img_num)] \
        if opts.img_num != 'all' \
        else list(img_root.glob('*.png'))
    imgs = []
    fns = []
    for img_file in ps:
        print(img_file.name)
        fns.append(img_file.name)
        img = cv2.imread(str(img_file))
        img = cv2.resize(img, ts)
        imgs.append(img)
    imgs_arr = np.asarray(imgs)
    my_model = load_model(model)
    pre_prob = my_model.predict(imgs_arr)
    # One probability column per class, plus the source image name.
    df = pd.DataFrame(pre_prob)
    clss = df.shape[1]
    headers = ['class_%s' % i for i in range(1, clss + 1)]
    df.columns = headers
    df['image'] = fns
    headers.insert(0, 'image')
    df_final = df[headers]
    df_final.to_csv('%s.csv' % otp, sep='\t', index=False)
if __name__ == "__main__":
main()
| 3.046875 | 3 |
src/spyd/server/binding/binding_service.py | DanSeraf/spyd | 4 | 12758739 | import traceback
from twisted.application import service
from twisted.internet import reactor, task
from spyd.server.binding.binding import Binding
from spyd.server.metrics.rate_aggregator import RateAggregator
class BindingService(service.Service):
    """Twisted service that owns the listening Bindings and flushes them
    periodically (every 33 ms) via the 'flush_bindings' system event."""
    def __init__(self, client_protocol_factory, metrics_service):
        self.bindings = set()
        self.client_protocol_factory = client_protocol_factory
        self.metrics_service = metrics_service
        # Reports the flush rate as the 'flush_all_rate' metric.
        self.flush_rate_aggregator = RateAggregator(metrics_service, 'flush_all_rate', 1.0)
        reactor.addSystemEventTrigger('during', 'flush_bindings', self.flush_all)
        self.flush_looping_call = task.LoopingCall(reactor.fireSystemEvent, 'flush_bindings')
    def startService(self):
        # Start listening on every registered binding and begin the ~30 Hz
        # flush loop.
        for binding in self.bindings:
            binding.listen(self.client_protocol_factory)
        self.flush_looping_call.start(0.033)
        service.Service.startService(self)
    def stopService(self):
        self.flush_looping_call.stop()
        service.Service.stopService(self)
    def add_binding(self, interface, port, maxclients, maxdown, maxup, max_duplicate_peers):
        # Note: bindings added after startService() are not listen()ed here.
        binding = Binding(reactor, self.metrics_service, interface, port, maxclients=maxclients, channels=2, maxdown=maxdown, maxup=maxup, max_duplicate_peers=max_duplicate_peers)
        self.bindings.add(binding)
    def flush_all(self):
        # Re-register the trigger for the next 'flush_bindings' event (system
        # event triggers appear to be one-shot here -- TODO confirm).
        reactor.callLater(0, reactor.addSystemEventTrigger, 'during', 'flush_bindings', self.flush_all)
        try:
            for binding in self.bindings:
                binding.flush()
            self.flush_rate_aggregator.tick()
        # NOTE(review): bare except keeps the flush loop alive on any error,
        # but it also swallows SystemExit/KeyboardInterrupt.
        except:
            traceback.print_exc()
| 2.15625 | 2 |
ics/structures/srad_star2_settings.py | hollinsky-intrepid/python_ics | 0 | 12758740 | # This file was auto generated; Do not modify, if you value your sanity!
import ctypes
try: # 10
from op_eth_general_settings import op_eth_general_settings
from op_eth_settings import op_eth_settings
from can_settings import can_settings
from canfd_settings import canfd_settings
from lin_settings import lin_settings
from iso9141_keyword2000_settings import iso9141_keyword2000_settings
from s_text_api_settings import s_text_api_settings
from timesync_icshardware_settings import timesync_icshardware_settings
from rad_reporting_settings import rad_reporting_settings
from ethernet_settings2 import ethernet_settings2
except:
from ics.structures.op_eth_general_settings import op_eth_general_settings
from ics.structures.op_eth_settings import op_eth_settings
from ics.structures.can_settings import can_settings
from ics.structures.canfd_settings import canfd_settings
from ics.structures.lin_settings import lin_settings
from ics.structures.iso9141_keyword2000_settings import iso9141_keyword2000_settings
from ics.structures.s_text_api_settings import s_text_api_settings
from ics.structures.timesync_icshardware_settings import timesync_icshardware_settings
from ics.structures.rad_reporting_settings import rad_reporting_settings
from ics.structures.ethernet_settings2 import ethernet_settings2
class srad_star2_settings(ctypes.Structure):
    """Auto-generated ctypes mirror of a RAD-Star 2 device settings block.

    The field order, field types, and 2-byte packing must match the
    driver/firmware C struct exactly -- do not edit fields by hand
    (regenerate instead, per the header of this file).
    """
    # 2-byte alignment to match the packed on-device C struct layout.
    _pack_ = 2
    _fields_ = [
        ('perf_en', ctypes.c_uint16),
        ('opEthGen', op_eth_general_settings),
        ('opEth1', op_eth_settings),
        ('opEth2', op_eth_settings),
        ('can1', can_settings),
        ('canfd1', canfd_settings),
        ('can2', can_settings),
        ('canfd2', canfd_settings),
        ('network_enables', ctypes.c_uint16),
        ('network_enables_2', ctypes.c_uint16),
        ('lin1', lin_settings),
        ('misc_io_initial_ddr', ctypes.c_uint16),
        ('misc_io_initial_latch', ctypes.c_uint16),
        ('misc_io_report_period', ctypes.c_uint16),
        ('misc_io_on_report_events', ctypes.c_uint16),
        ('misc_io_analog_enable', ctypes.c_uint16),
        ('ain_sample_period', ctypes.c_uint16),
        ('ain_threshold', ctypes.c_uint16),
        ('pwr_man_timeout', ctypes.c_uint32),
        ('pwr_man_enable', ctypes.c_uint16),
        ('network_enabled_on_boot', ctypes.c_uint16),
        ('iso15765_separation_time_offset', ctypes.c_uint16),
        ('iso_9141_kwp_enable_reserved', ctypes.c_uint16),
        ('iso9141_kwp_settings_1', iso9141_keyword2000_settings),
        ('iso_parity_1', ctypes.c_uint16),
        ('iso_msg_termination_1', ctypes.c_uint16),
        ('idle_wakeup_network_enables_1', ctypes.c_uint16),
        ('idle_wakeup_network_enables_2', ctypes.c_uint16),
        ('network_enables_3', ctypes.c_uint16),
        ('idle_wakeup_network_enables_3', ctypes.c_uint16),
        ('can_switch_mode', ctypes.c_uint16),
        ('text_api', s_text_api_settings),
        ('pc_com_mode', ctypes.c_uint16),
        ('timeSyncSettings', timesync_icshardware_settings),
        ('hwComLatencyTestEn', ctypes.c_uint16),
        ('reporting', rad_reporting_settings),
        ('ethernet', ethernet_settings2),
    ]
# Extra names go here:
# CamelCase alias for the generated class name.
SRADStar2Settings = srad_star2_settings
# End of extra names
# End of extra names
| 1.5 | 2 |
AlgorithmTest/BOJ_STEP_PYTHON/Step8/BOJ1712.py | bluesky0960/AlgorithmTest | 0 | 12758741 | #https://www.acmicpc.net/problem/1712
a, b, b2 = map(int, input().split())
if b >= b2:
print(-1)
else:
bx = b2 - b
count = a // bx + 1
print(count) | 3.21875 | 3 |
tests/ad/profiles/test_profiles_api.py | Rogdham/pyTenable | 1 | 12758742 | import responses
from tests.ad.conftest import RE_BASE
@responses.activate
def test_profiles_list(api):
    """Listing profiles returns the mocked payload with snake_cased keys."""
    mock_profile = {
        'id': 1,
        'name': 'profile name',
        'deleted': False,
        'directories': [1, 2],
        'dirty': True,
        'hasEverBeenCommitted': True
    }
    responses.add(responses.GET, f'{RE_BASE}/profiles', json=[mock_profile])
    profiles = api.profiles.list()
    assert isinstance(profiles, list)
    assert len(profiles) == 1
    first = profiles[0]
    assert first['id'] == 1
    assert first['name'] == 'profile name'
    assert first['deleted'] is False
    assert first['directories'] == [1, 2]
    assert first['dirty'] is True
    assert first['has_ever_been_committed'] is True
@responses.activate
def test_profiles_create(api):
    """Creating a profile POSTs and returns the mocked list payload."""
    mock_profile = {
        'id': 1,
        'name': 'profile name',
        'deleted': False,
        'directories': [1, 2],
        'dirty': True,
        'hasEverBeenCommitted': True
    }
    responses.add(responses.POST, f'{RE_BASE}/profiles', json=[mock_profile])
    profiles = api.profiles.create(name='profile name',
                                   directories=[1, 2])
    assert isinstance(profiles, list)
    assert len(profiles) == 1
    first = profiles[0]
    assert first['id'] == 1
    assert first['name'] == 'profile name'
    assert first['deleted'] is False
    assert first['directories'] == [1, 2]
    assert first['dirty'] is True
    assert first['has_ever_been_committed'] is True
@responses.activate
def test_profiles_details(api):
    """Fetching a single profile returns a dict with snake_cased keys."""
    mock_profile = {
        'id': 1,
        'name': 'profile name',
        'deleted': False,
        'directories': [1, 2],
        'dirty': True,
        'hasEverBeenCommitted': True
    }
    responses.add(responses.GET, f'{RE_BASE}/profiles/1', json=mock_profile)
    profile = api.profiles.details(profile_id='1')
    assert isinstance(profile, dict)
    assert profile['id'] == 1
    assert profile['name'] == 'profile name'
    assert profile['deleted'] is False
    assert profile['directories'] == [1, 2]
    assert profile['dirty'] is True
    assert profile['has_ever_been_committed'] is True
@responses.activate
def test_profiles_update(api):
    """Updating a profile PATCHes and returns the mocked (unchanged) payload."""
    mock_profile = {
        'id': 1,
        'name': 'profile name',
        'deleted': False,
        'directories': [1, 2],
        'dirty': True,
        'hasEverBeenCommitted': True
    }
    responses.add(responses.PATCH, f'{RE_BASE}/profiles/1', json=mock_profile)
    profile = api.profiles.update(profile_id='1',
                                  name='profile name',
                                  deleted=True,
                                  directories=[1, 2])
    assert isinstance(profile, dict)
    assert profile['id'] == 1
    assert profile['name'] == 'profile name'
    # The response body, not the request, drives the returned fields.
    assert profile['deleted'] is False
    assert profile['directories'] == [1, 2]
    assert profile['dirty'] is True
    assert profile['has_ever_been_committed'] is True
@responses.activate
def test_profiles_delete(api):
    """Deleting a profile returns None."""
    responses.add(responses.DELETE, f'{RE_BASE}/profiles/1', json=None)
    assert api.profiles.delete(profile_id='1') is None
@responses.activate
def test_profiles_copy_profile(api):
    """Copying a profile POSTs to /profiles/from/{id} and returns the copy."""
    mock_profile = {
        'id': 1,
        'name': 'copied profile',
        'deleted': False,
        'directories': [1, 2],
        'dirty': True,
        'hasEverBeenCommitted': True
    }
    responses.add(responses.POST, f'{RE_BASE}/profiles/from/1', json=mock_profile)
    profile = api.profiles.copy_profile(from_id='1',
                                        name='copied profile',
                                        directories=[1, 2])
    assert isinstance(profile, dict)
    assert profile['id'] == 1
    assert profile['name'] == 'copied profile'
    assert profile['deleted'] is False
    assert profile['directories'] == [1, 2]
    assert profile['dirty'] is True
    assert profile['has_ever_been_committed'] is True
@responses.activate
def test_profiles_commit(api):
    """Committing staged changes returns None."""
    responses.add(responses.POST, f'{RE_BASE}/profiles/1/commit', json=None)
    assert api.profiles.commit(profile_id='1') is None
@responses.activate
def test_profiles_unstage(api):
    """Unstaging pending changes returns None."""
    responses.add(responses.POST, f'{RE_BASE}/profiles/1/unstage', json=None)
    assert api.profiles.unstage(profile_id='1') is None
| 2.234375 | 2 |
google/ads/google_ads/v6/proto/common/real_time_bidding_setting_pb2.py | arammaliachi/google-ads-python | 0 | 12758743 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/common/real_time_bidding_setting.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/common/real_time_bidding_setting.proto',
package='google.ads.googleads.v6.common',
syntax='proto3',
serialized_options=b'\n\"com.google.ads.googleads.v6.commonB\033RealTimeBiddingSettingProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v6/common;common\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V6.Common\312\002\036Google\\Ads\\GoogleAds\\V6\\Common\352\002\"Google::Ads::GoogleAds::V6::Common',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n>google/ads/googleads/v6/common/real_time_bidding_setting.proto\x12\x1egoogle.ads.googleads.v6.common\x1a\x1cgoogle/api/annotations.proto\"8\n\x16RealTimeBiddingSetting\x12\x13\n\x06opt_in\x18\x02 \x01(\x08H\x00\x88\x01\x01\x42\t\n\x07_opt_inB\xf6\x01\n\"com.google.ads.googleads.v6.commonB\x1bRealTimeBiddingSettingProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v6/common;common\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V6.Common\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V6\\Common\xea\x02\"Google::Ads::GoogleAds::V6::Commonb\x06proto3'
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
# Descriptor for the single message type in this generated module; the
# authoritative definition lives in the serialized descriptor registered above.
_REALTIMEBIDDINGSETTING = _descriptor.Descriptor(
  name='RealTimeBiddingSetting',
  full_name='google.ads.googleads.v6.common.RealTimeBiddingSetting',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='opt_in', full_name='google.ads.googleads.v6.common.RealTimeBiddingSetting.opt_in', index=0,
      number=2, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
    _descriptor.OneofDescriptor(
      name='_opt_in', full_name='google.ads.googleads.v6.common.RealTimeBiddingSetting._opt_in',
      index=0, containing_type=None,
      create_key=_descriptor._internal_create_key,
      fields=[]),
  ],
  serialized_start=128,
  serialized_end=184,
)
# Wire the optional proto3 `opt_in` field into its synthetic `_opt_in` oneof.
_REALTIMEBIDDINGSETTING.oneofs_by_name['_opt_in'].fields.append(
  _REALTIMEBIDDINGSETTING.fields_by_name['opt_in'])
_REALTIMEBIDDINGSETTING.fields_by_name['opt_in'].containing_oneof = _REALTIMEBIDDINGSETTING.oneofs_by_name['_opt_in']
DESCRIPTOR.message_types_by_name['RealTimeBiddingSetting'] = _REALTIMEBIDDINGSETTING
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message class built from the descriptor via the reflection API.
RealTimeBiddingSetting = _reflection.GeneratedProtocolMessageType('RealTimeBiddingSetting', (_message.Message,), {
  'DESCRIPTOR' : _REALTIMEBIDDINGSETTING,
  '__module__' : 'google.ads.googleads.v6.common.real_time_bidding_setting_pb2'
  # @@protoc_insertion_point(class_scope:google.ads.googleads.v6.common.RealTimeBiddingSetting)
  })
_sym_db.RegisterMessage(RealTimeBiddingSetting)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 1.117188 | 1 |
Pipeline/display.py | siddharthbharthulwar/Synthetic-Vision-System | 0 | 12758744 | <filename>Pipeline/display.py
from terraingrid import TerrainGrid
import matplotlib.pyplot as plt
import cv2 as cv
import numpy as np
from skimage.measure import _structural_similarity as ssim
import os
def mse(imageA, imageB):
    """Compute the Mean Squared Error between two images.

    Both images must have identical dimensions. A lower value means the
    two images are more similar (0.0 for identical inputs).
    """
    diff = imageA.astype("float") - imageB.astype("float")
    total = np.sum(diff ** 2)
    return total / float(imageA.shape[0] * imageA.shape[1])
def compare_images(num, imageA, imageB, title, template):
    """Save both images plus the template to disk, then plot all three side
    by side with MSE/SSIM scores in the figure title.

    num      -- index used to build the output file names
    imageA   -- reference image (grayscale array); presumably ground truth
    imageB   -- image compared against the reference; presumably a prediction
    title    -- matplotlib figure window title
    template -- source tile shown alongside the comparison
    """
    # NOTE(review): hard-coded, user-specific output directory -- consider
    # making this a parameter.
    pt = r"C:\Users\siddh\Desktop\IEEE\resultimg"
    plt.imshow(imageA, cmap = 'gist_gray')
    plt.imsave(os.path.join(pt, (str(num) + "org.png")), imageA, cmap = 'gist_gray')
    plt.imshow(imageB, cmap = 'gist_gray')
    plt.imsave(os.path.join(pt, (str(num) + "pre.png")), imageB, cmap = 'gist_gray')
    plt.imshow(template, cmap = 'gist_gray')
    plt.imsave(os.path.join(pt, (str(num) + "tem.png")), template, cmap = 'gist_gray')
    # compute the mean squared error and structural similarity
    # index for the images
    m = mse(imageA, imageB)
    # NOTE(review): this calls compare_ssim through skimage's private
    # _structural_similarity module (see import at top); newer scikit-image
    # exposes it as skimage.metrics.structural_similarity -- confirm version.
    s = ssim.compare_ssim(imageA, imageB)
    # setup the figure
    fig = plt.figure(title)
    plt.suptitle("MSE: %.2f, SSIM: %.2f" % (m, s))
    #show template
    ax = fig.add_subplot(1, 3, 1)
    plt.imshow(template, cmap = plt.cm.gray)
    plt.axis("off")
    # show first image
    ax = fig.add_subplot(1, 3, 2)
    plt.imshow(imageA, cmap = plt.cm.gray)
    plt.axis("off")
    # show the second image
    ax = fig.add_subplot(1, 3, 3)
    plt.imshow(imageB, cmap = plt.cm.gray)
    plt.axis("off")
    # show the images
    plt.show()
# Absolute, user-specific paths to DSM/DEM raster tiles used as inputs.
rd0 = r"D:\Documents\School\2019-20\ISEF 2020\HighProcessed\r_37ez2.tif"
rd1 = r"D:\Documents\School\2019-20\ISEF 2020\HighProcessed\r_37fz1.tif"
rasdf = r"D:\Documents\School\2019-20\ISEF 2020\HighProcessed\r_37hn2.tif"
ehamr = r"D:\Documents\School\2019-20\ISEF 2020\HighProcessed\r_25dn1.tif"
r2 = r"D:\Documents\School\2019-20\ISEF 2020\HighProcessed\r_37fz2.tif"
path = r"C:\Users\siddh\Documents\DSMS\R_25GN1\r_25gn1.tif"
rotterdam = r"D:\Documents\School\2019-20\ISEF 2020\HighProcessed\r_51bz2.tif"
# Ground-truth building mask used to score predictions in compare_dem.
truthpath = r"D:\Documents\School\2019-20\ISEF 2020\TRUTHS\buildings.png"
#~~~~~~~~~~~~~~~~~~~~Testing Environments~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Crop windows ([row_start:row_stop, col_start:col_stop]) per environment:
# #1: [7200:8200, 4600:5600]
# #2: [7200:8200, 5600:6600]
# #3: [6700:7700, 1500:2500]
# #4: [5250:6250, 5250, 6250]
# #5:
def compare_dem(number, bound1, bound2, bound3, bound4):
    """Run the building-extraction pipeline on a crop of the rd0 tile and
    compare the predicted building mask against the ground-truth mask.

    number         -- test-environment index, used for output file names
    bound1, bound2 -- row slice (start, stop) of the crop window
    bound3, bound4 -- column slice (start, stop) of the crop window
    """
    a = TerrainGrid((rd0), (1,1), 1)
    a.show(-5, 50)
    # Restrict processing to the requested crop window.
    a.arrayValues = a.arrayValues[bound1:bound2, bound3: bound4]
    # NOTE(review): kernelclassv2's positional arguments are undocumented
    # here -- presumably kernel size / thresholds; confirm against the
    # terraingrid module before changing them.
    a.kernelclassv2(3, 100, 200, 50000, 0.5, True, 11, 2, 20, False)
    fb = a.final_building_labels
    plt.imshow(fb)
    plt.title('fb')
    plt.show()
    # Midpoint of the label range, used as the binarisation threshold.
    num = (np.amax(fb) + np.amin(fb)) / 2
    fb = cv.threshold(a.final_building_labels, num, 1, cv.THRESH_BINARY_INV)[1].astype('uint8')
    # Crop the ground-truth mask to the same window and binarise it too.
    truths = cv.imread(truthpath, cv.IMREAD_GRAYSCALE)[bound1:bound2, bound3: bound4]
    truths = cv.threshold(truths, 10, 1, cv.THRESH_BINARY)[1].astype('uint8')
    compare_images(number, truths, fb, "Ground Truths vs Prediction", a.arrayValues)
# Previously exercised crop windows (see the environment table above):
#compare_dem(1, 7200,8200, 4600,5600)
#compare_dem(2, 7200,8200, 5600,6600)
#compare_dem(3, 6700,7700, 1500,2500)
#compare_dem(4, 5250,6250, 5250, 6250)
# Active run: environment 5.
compare_dem(5, 8200, 9000, 7400, 8200)
| 2.625 | 3 |
The_Basics/The_Basics_Functions_and_Conditionals.py | vishu22314558/py-code | 0 | 12758745 | <filename>The_Basics/The_Basics_Functions_and_Conditionals.py
# Demo: compute the average of a list of grades step by step.
grade_1 = [9.5,8.5,6.45,21]
grade_1_sum = sum(grade_1)
print(grade_1_sum)
grade_1_len = len(grade_1)
print(grade_1_len)
# Average = total divided by the number of grades.
grade_avg = grade_1_sum/grade_1_len
print(grade_avg)
# create Function
def mean(myList):
    """Return the arithmetic mean of a sequence of numbers."""
    return sum(myList) / len(myList)


print(mean([10, 1, 1, 10]))
def mean_1(value):
    """Average the values of a dict, or the items of any other collection.

    Uses an exact type check, so only plain dicts take the mapping path.
    """
    if type(value) == dict:
        return sum(value.values()) / len(value)
    return sum(value) / len(value)


dic = {"1": 10, "2": 20}
LIS = [10, 20]
print(mean_1(dic))
print(mean_1(LIS))
def mean_2(value):
    """Average the values of a dict, or the items of any other collection.

    Unlike mean_1's exact type check, isinstance() also treats dict
    subclasses as mappings.
    """
    if isinstance(value, dict):
        the_mean = sum(value.values()) / len(value)
        return the_mean
    else:
        the_mean = sum(value) / len(value)
        return the_mean


dic_1 = {"1": 10, "2": 20}
LIS_1 = [10, 20]
# Bug fix: the original printed mean_2(dic) -- the variable from the mean_1
# demo above -- instead of the dic_1 defined here. The printed value is the
# same, but this demo now uses its own data.
print(mean_2(dic_1))
print(mean_2(LIS_1))
def foo(temperature):
    """Classify a temperature: strictly above 7 is "Warm", otherwise "Cold"."""
    return "Warm" if temperature > 7 else "Cold"
| 4.03125 | 4 |
python/testData/psi/IfStatement.py | jnthn/intellij-community | 2 | 12758746 | <gh_stars>1-10
if 0:
pass
if 1:
pass
else:
pass
if 2:
pass
elif 3:
pass
if 4:
pass
elif 5:
pass
else:
1
| 1.914063 | 2 |
Leetcode/leetcode/editor/cn/[2]两数相加.py | MathAdventurer/Python-Algorithms-and-Data-Structures | 1 | 12758747 | <filename>Leetcode/leetcode/editor/cn/[2]两数相加.py
# 给你两个 非空 的链表,表示两个非负的整数。它们每位数字都是按照 逆序 的方式存储的,并且每个节点只能存储 一位 数字。
#
# 请你将两个数相加,并以相同形式返回一个表示和的链表。
#
# 你可以假设除了数字 0 之外,这两个数都不会以 0 开头。
#
#
#
# 示例 1:
#
#
# 输入:l1 = [2,4,3], l2 = [5,6,4]
# 输出:[7,0,8]
# 解释:342 + 465 = 807.
#
#
# 示例 2:
#
#
# 输入:l1 = [0], l2 = [0]
# 输出:[0]
#
#
# 示例 3:
#
#
# 输入:l1 = [9,9,9,9,9,9,9], l2 = [9,9,9,9]
# 输出:[8,9,9,9,0,0,0,1]
#
#
#
#
# 提示:
#
#
# 每个链表中的节点数在范围 [1, 100] 内
# 0 <= Node.val <= 9
# 题目数据保证列表表示的数字不含前导零
#
# Related Topics 递归 链表 数学
# 👍 5586 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two non-negative integers stored as reversed-digit linked
        lists and return their sum in the same reversed-digit form.

        Bug fix: the original body only introspected ListNode's source via
        ``inspect`` and returned None instead of computing the sum.
        """
        dummy = ListNode()  # sentinel head; result starts at dummy.next
        tail = dummy
        carry = 0
        # Walk both lists in lockstep; continue while any digits or a
        # carry remain.
        while l1 or l2 or carry:
            total = carry
            if l1:
                total += l1.val
                l1 = l1.next
            if l2:
                total += l2.val
                l2 = l2.next
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return dummy.next
# leetcode submit region end(Prohibit modification and deletion)
| 3.859375 | 4 |
tests/classification/dataset_readers/boolq.py | shunk031/allennlp-models | 402 | 12758748 | <reponame>shunk031/allennlp-models
# -*- coding: utf-8 -*-
from allennlp.common.util import ensure_list
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp_models.classification import BoolQDatasetReader
from tests import FIXTURES_ROOT
class TestBoolqReader:
    """Unit tests for BoolQDatasetReader against a 5-example JSONL fixture."""
    boolq_path = FIXTURES_ROOT / "classification" / "boolq.jsonl"
    def test_boolq_dataset_reader_default_setting(self):
        """Default tokenizer: plain word tokens, integer labels."""
        reader = BoolQDatasetReader()
        instances = reader.read(self.boolq_path)
        instances = ensure_list(instances)
        assert len(instances) == 5
        fields = instances[0].fields
        # First five tokens of the first fixture example.
        assert [t.text for t in fields["tokens"].tokens][:5] == [
            "Persian",
            "language",
            "--",
            "Persian",
            "(/ˈpɜːrʒən,",
        ]
        assert fields["label"].label == 1
        fields = instances[1].fields
        assert [t.text for t in fields["tokens"].tokens][:5] == [
            "Epsom",
            "railway",
            "station",
            "--",
            "Epsom",
        ]
        assert fields["label"].label == 0
    def test_boolq_dataset_reader_roberta_setting(self):
        """roberta-base tokenizer: BPE subword tokens wrapped in <s>...</s>."""
        reader = BoolQDatasetReader(
            tokenizer=PretrainedTransformerTokenizer("roberta-base", add_special_tokens=False),
            token_indexers={"tokens": PretrainedTransformerIndexer("roberta-base")},
        )
        instances = reader.read(self.boolq_path)
        instances = ensure_list(instances)
        assert len(instances) == 5
        fields = instances[0].fields
        # Leading special token plus the first BPE pieces ("Ġ" marks a space).
        assert [t.text for t in fields["tokens"].tokens][:5] == [
            "<s>",
            "Pers",
            "ian",
            "Ġlanguage",
            "Ġ--",
        ]
        assert [t.text for t in fields["tokens"].tokens][-5:] == [
            "Ġspeak",
            "Ġthe",
            "Ġsame",
            "Ġlanguage",
            "</s>",
        ]
        assert fields["label"].label == 1
        fields = instances[1].fields
        assert [t.text for t in fields["tokens"].tokens][:5] == [
            "<s>",
            "E",
            "ps",
            "om",
            "Ġrailway",
        ]
        assert [t.text for t in fields["tokens"].tokens][-5:] == [
            "Ġe",
            "ps",
            "om",
            "Ġstation",
            "</s>",
        ]
        assert fields["label"].label == 0
| 2.421875 | 2 |
halo.py | heffra/superheffra | 0 | 12758749 | print("Halo")
print("This is my program in vscode ")
name = input("what your name : ")
if name == "Hero":
print("Wow your name {} ? , my name is Hero too, nice to meet you ! ".format(name))
else:
print("Halo {} , nice to meet you friend".format(name))
age = input("how old are you : ")
if age == "21":
print("wow it's same my age 21 too")
else:
print("wow that's great ~") | 4 | 4 |
d2_muotitJaLomakkeet/variables/flaskVars.py | Nuuttu/Koulu_PythonWeppiAppi | 0 | 12758750 | from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
    """Render the landing page with a title, a name and some example items."""
    site_title = 'siteIndex'
    visitor = 'Tuomo'
    things = ['A thing', 'The Thing', 'Thing', 'A Big Thing']
    return render_template(
        'base.html',
        name=visitor,
        siteTitle=site_title,
        listOfThings=things,
    )
@app.route('/child')
def child():
    """Render the child page with its own title."""
    return render_template('child.html', siteTitle='Child page')
# Run the Flask development server (debug mode: auto-reload + tracebacks)
# only when this module is executed directly.
if __name__ == '__main__':
    app.run(debug=True)