Dataset schema (one row per source file):

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |

Each data row below lists these fields, followed by the file `content` and the three trailing statistics.
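Rows with this schema are typically consumed through the `datasets` library. The sketch below is illustrative only: the dataset path `bigcode/the-stack` and the `data_dir` value are assumptions used as placeholders, since the dump itself does not name its source.

```python
# Illustrative sketch: stream rows that follow the schema above and filter on
# a few of its columns. The dataset path and data_dir are assumed placeholders.
from datasets import load_dataset

rows = load_dataset(
    "bigcode/the-stack",   # assumption: any dataset exposing the columns above would do
    data_dir="data/python",
    split="train",
    streaming=True,
)

for row in rows:
    # Keep small, mostly alphanumeric Python files.
    if row["size"] < 25_000 and row["alphanum_fraction"] > 0.5:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
        break
```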

| field | value |
|---|---|
| hexsha | 4836790f7f179775fa2922a17970fb8bf8a4796d |
| size | 95 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks repo_path | test_tools.py |
| max_stars / max_issues / max_forks repo_name | dahcase/gee_tools |
| max_stars / max_issues / max_forks repo_head_hexsha | 4aa445e3a25ef2b9dd19762dc21ef5dcc9debc04 |
| max_stars / max_issues / max_forks repo_licenses | ["MIT"] |
| max_stars_count, max_issues_count | null |
| stars / issues event datetimes | null |
| max_forks_count | 1 |
| max_forks_repo_forks_event_min_datetime | 2022-01-13T23:39:05.000Z |
| max_forks_repo_forks_event_max_datetime | 2022-01-13T23:39:05.000Z |

content:

```python
# coding=utf-8
from geetools.tests import test_tools
import unittest
unittest.main(test_tools)
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 19 | 37 | 0.831579 |
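The file above drives a test run by handing a module object to `unittest.main`, whose first parameter is `module`; passing it positionally, as the snippet does, is equivalent to `module=...`. A minimal self-contained sketch of the same pattern, using a hypothetical in-file test module instead of `geetools.tests.test_tools`:

```python
import types
import unittest

# Hypothetical stand-in for a test module such as geetools.tests.test_tools.
sample_tests = types.ModuleType("sample_tests")

class UpperCaseTest(unittest.TestCase):
    def test_upper(self):
        self.assertEqual("gee".upper(), "GEE")

# Attach the TestCase to the module object so unittest can discover it there.
sample_tests.UpperCaseTest = UpperCaseTest

if __name__ == "__main__":
    # Equivalent to unittest.main(module=sample_tests): collect and run every
    # unittest.TestCase found in the given module.
    unittest.main(module=sample_tests, exit=False)
```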

| field | value |
|---|---|
| hexsha | 28d0fb86c9812083762bb3cabbe48c954743b199 |
| size | 22,075 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks repo_path | dis_snek/api/http/http_requests/guild.py |
| max_stars / max_issues / max_forks repo_name | ShardlessBun/Dis-Snek |
| max_stars / max_issues / max_forks repo_head_hexsha | fd15b06c531e82a9682a7b139deec0df7b2aa2d0 |
| max_stars / max_issues / max_forks repo_licenses | ["MIT"] |
| star / issue / fork counts and event datetimes | null |

content:

```python
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
from dis_snek.client.const import Absent, MISSING
from dis_snek.client.utils.serializer import dict_filter_none
from ..route import Route
if TYPE_CHECKING:
from dis_snek.models.discord.snowflake import Snowflake_Type
class GuildRequests:
request: Any
async def get_guilds(
self, limit: int = 200, before: Optional["Snowflake_Type"] = None, after: Optional["Snowflake_Type"] = None
) -> List[Dict]:
"""
Get a list of partial guild objects the current user is a member of. Requires the `guilds` OAuth2 scope.
parameters:
limit: max number of guilds to return (1-200)
before: get guilds before this guild ID
after: get guilds after this guild ID
returns:
List[guilds]
"""
params: Dict[str, Union[int, str]] = {"limit": limit}
if before:
params["before"] = before
if after:
params["after"] = after
return await self.request(Route("GET", "/users/@me/guilds", params=params))
async def get_guild(self, guild_id: "Snowflake_Type", with_counts: Optional[bool] = True) -> dict:
"""
Get the guild object for the given ID.
parameters:
guild_id: the id of the guild
with_counts: when `true`, will return approximate member and presence counts for the guild
returns:
a guild object
"""
return await self.request(
Route("GET", f"/guilds/{guild_id}"), params={"with_counts": int(with_counts)} # type: ignore
)
async def get_guild_preview(self, guild_id: "Snowflake_Type") -> dict:
"""
Get a guild's preview.
parameters:
guild_id: the guilds ID
returns:
guild preview object
"""
return await self.request(Route("GET", f"/guilds/{guild_id}/preview"))
async def get_channels(self, guild_id: "Snowflake_Type") -> List[Dict]:
"""
Get a guilds channels.
parameters:
guild_id: the id of the guild
returns:
List of channels
"""
return await self.request(Route("GET", f"/guilds/{guild_id}/channels"))
async def get_roles(self, guild_id: "Snowflake_Type") -> List[Dict]:
"""
Get a guild's roles.
parameters:
guild_id: The ID of the guild
returns:
List of roles
"""
return await self.request(Route("GET", f"/guilds/{guild_id}/roles"))
async def modify_guild(self, guild_id: "Snowflake_Type", reason: Absent[str] = MISSING, **kwargs) -> None:
"""
Modify a guild's attributes.
parameters:
guild_id: The ID of the guild we want to modify
reason: The reason for this change
kwargs: The params to change
"""
expected = [
"name",
"region",
"verification_level",
"default_message_notifications",
"explicit_content_filter",
"afk_channel_id",
"afk_timeout",
"icon",
"owner_id",
"splash",
"discovery_splash",
"banner",
"system_channel_id",
"system_channel_flags",
"rules_channel_id",
"public_updates_channel_id",
"preferred_locale",
"features",
"description",
]
kwargs_copy = kwargs.copy()
for key, value in kwargs.items():
if key not in expected or value is MISSING:
del kwargs_copy[key]
# only do the request if there is something to modify
if kwargs_copy:
await self.request(Route("PATCH", f"/guilds/{guild_id}"), data=kwargs_copy, reason=reason)
async def delete_guild(self, guild_id: "Snowflake_Type") -> None:
"""
Delete the guild.
parameters:
guild_id: The ID of the guild that we want to delete
"""
return await self.request(Route("DELETE", f"/guilds/{guild_id}"))
async def add_guild_member(
self,
guild_id: "Snowflake_Type",
user_id: "Snowflake_Type",
access_token: str,
nick: str = None,
roles: List["Snowflake_Type"] = None,
mute: bool = False,
deaf: bool = False,
) -> dict:
"""
Add a user to the guild. All parameters to this endpoint except for `access_token`, `guild_id` and `user_id` are optional.
parameters:
guild_id: The ID of the guild
user_id: The ID of the user to add
access_token: The access token of the user
nick: value to set users nickname to
roles: array of role ids the member is assigned
mute: whether the user is muted in voice channels
deaf: whether the user is deafened in voice channels
returns:
Guild Member Object
"""
return await self.request(
Route("PUT", f"/guilds/{guild_id}/members/{user_id}"),
data=dict_filter_none(
{"access_token": access_token, "nick": nick, "roles": roles, "mute": mute, "deaf": deaf}
),
)
async def remove_guild_member(
self, guild_id: "Snowflake_Type", user_id: "Snowflake_Type", reason: Absent[str] = MISSING
) -> None:
"""
Remove a member from a guild.
parameters:
guild_id: The ID of the guild
user_id: The ID of the user to remove
reason: The reason for this action
"""
return await self.request(Route("DELETE", f"/guilds/{guild_id}/members/{user_id}"), reason=reason)
async def get_guild_bans(self, guild_id: "Snowflake_Type") -> List[dict]:
"""
Return a list of ban objects for the users banned from this guild.
parameters:
guild_id: The ID of the guild to query
returns:
List of ban objects
"""
return await self.request(Route("GET", f"/guilds/{guild_id}/bans"))
async def get_guild_ban(self, guild_id: "Snowflake_Type", user_id: "Snowflake_Type") -> Optional[dict]:
"""
Returns a ban object for the given user or a 404 not found if the ban cannot be found.
parameters:
guild_id: The ID of the guild to query
user_id: The ID of the user to query
returns:
Ban object if exists
raises:
Not found error if no ban exists
"""
return await self.request(Route("GET", f"/guilds/{guild_id}/bans/{user_id}"))
async def create_guild_ban(
self,
guild_id: "Snowflake_Type",
user_id: "Snowflake_Type",
delete_message_days: int = 0,
reason: Absent[str] = MISSING,
) -> None:
"""
Create a guild ban, and optionally delete previous messages sent by the banned user.
parameters:
guild_id: The ID of the guild to create the ban in
user_id: The ID of the user to ban
delete_message_days: number of days to delete messages for (0-7)
reason: The reason for this action
"""
return await self.request(
Route("PUT", f"/guilds/{guild_id}/bans/{user_id}"),
data={"delete_message_days": delete_message_days},
reason=reason,
)
async def remove_guild_ban(
self, guild_id: "Snowflake_Type", user_id: "Snowflake_Type", reason: Absent[str] = MISSING
) -> None:
"""
Remove a guild ban.
parameters:
guild_id: The ID of the guild to remove the ban in
user_id: The ID of the user to unban
reason: The reason for this action
"""
return await self.request(Route("DELETE", f"/guilds/{guild_id}/bans/{user_id}"), reason=reason)
async def get_guild_prune_count(
self, guild_id: "Snowflake_Type", days: int = 7, include_roles: List["Snowflake_Type"] = None
) -> dict:
"""
Returns an object with one 'pruned' key indicating the number of members that would be removed in a prune operation.
parameters:
guild_id: The ID of the guild to query
days: number of days to count prune for (1-30)
include_roles: role(s) to include
returns:
{"pruned": int}
"""
payload = {"days": days}
if include_roles:
payload["include_roles"] = ", ".join(include_roles)
return await self.request(Route("GET", f"/guilds/{guild_id}/prune"), params=payload)
async def begin_guild_prune(
self,
guild_id: "Snowflake_Type",
days: int = 7,
include_roles: Optional[List["Snowflake_Type"]] = None,
compute_prune_count: bool = True,
reason: Absent[str] = MISSING,
) -> dict:
"""
Begin a prune operation.
parameters:
guild_id: The ID of the guild to query
days: number of days to count prune for (1-30)
include_roles: role(s) to include
compute_prune_count: whether 'pruned' is returned, discouraged for large guilds
reason: The reason for this action
returns:
{"pruned": Optional[int]}
"""
payload = {"days": days, "compute_prune_count": compute_prune_count}
if include_roles:
payload["include_roles"] = ", ".join(include_roles)
return await self.request(Route("POST", f"/guilds/{guild_id}/prune"), data=payload, reason=reason)
async def get_guild_invites(self, guild_id: "Snowflake_Type") -> List[dict]:
"""
Returns a list of invite objects (with invite metadata) for the guild.
parameters:
guild_id: The ID of the guild to query
returns:
List of invite objects
"""
return await self.request(Route("GET", f"/guilds/{guild_id}/invites"))
async def create_guild_role(self, guild_id: "Snowflake_Type", payload: dict, reason: Absent[str] = MISSING) -> dict:
"""
Create a new role for the guild.
parameters:
guild_id: The ID of the guild
payload: A dict representing the role to add
reason: The reason for this action
returns:
Role object
"""
return await self.request(Route("POST", f"/guilds/{guild_id}/roles"), data=payload, reason=reason)
async def modify_guild_role_positions(
self, guild_id: "Snowflake_Type", role_id: "Snowflake_Type", position: int, reason: Absent[str] = MISSING
) -> List[dict]:
"""
Modify the position of a role in the guild.
parameters:
guild_id: The ID of the guild
role_id: The ID of the role to move
position: The new position of this role in the hierarchy
reason: The reason for this action
returns:
List of guild roles
"""
return await self.request(
Route("PATCH", f"/guilds/{guild_id}/roles"), data={"id": role_id, "position": position}, reason=reason
)
async def modify_guild_role(
self, guild_id: "Snowflake_Type", role_id: "Snowflake_Type", payload: dict, reason: Absent[str] = MISSING
) -> dict:
"""
Modify an existing role for the guild.
parameters:
guild_id: The ID of the guild
role_id: The ID of the role to move
payload: A dict representing the role to add
reason: The reason for this action
returns:
Role object
"""
return await self.request(Route("PATCH", f"/guilds/{guild_id}/roles/{role_id}"), data=payload, reason=reason)
async def delete_guild_role(
self, guild_id: "Snowflake_Type", role_id: "Snowflake_Type", reason: Absent[str] = MISSING
) -> None:
"""
Delete a guild role.
parameters:
role_id: The ID of the role to delete
reason: The reason for this action
guild_id: The ID of the guild
"""
return await self.request(Route("DELETE", f"/guilds/{guild_id}/roles/{role_id}"), reason=reason)
async def get_guild_voice_regions(self, guild_id: "Snowflake_Type") -> List[dict]:
"""
Returns a list of voice region objects for the guild. Unlike the similar /voice route, this returns VIP servers when the guild is VIP-enabled.
parameters:
guild_id: The ID of the guild to query
returns:
List of voice region objects
"""
return await self.request(Route("GET", f"/guilds/{guild_id}/regions"))
async def get_guild_integrations(self, guild_id: "Snowflake_Type") -> List[dict]:
"""
Returns a list of integration objects for the guild.
parameters:
guild_id: The ID of the guild to query
returns:
list of integration objects
"""
return await self.request(Route("GET", f"/guilds/{guild_id}/integrations"))
async def delete_guild_integration(
self, guild_id: "Snowflake_Type", integration_id: "Snowflake_Type", reason: Absent[str] = MISSING
) -> None:
"""
Delete an integration from the guild.
parameters:
guild_id: The ID of the guild
integration_id: The ID of the integration to remove
"""
return await self.request(Route("DELETE", f"/guilds/{guild_id}/integrations/{integration_id}"), reason=reason)
async def get_guild_widget_settings(self, guild_id: "Snowflake_Type") -> dict:
"""
Get guild widget settings.
parameters:
guild_id: The ID of the guild to query
returns:
guild widget object
"""
return await self.request(Route("GET", f"/guilds/{guild_id}/widget"))
async def get_guild_widget(self, guild_id: "Snowflake_Type") -> dict:
"""
Returns the widget for the guild.
parameters:
guild_id: The ID of the guild to query
returns:
Guild widget
"""
return await self.request(Route("GET", f"/guilds/{guild_id}/widget.json"))
async def get_guild_widget_image(self, guild_id: "Snowflake_Type", style: str = None) -> str:
"""
Get a url representing a png image widget for the guild.
For styles see: https://discord.com/developers/docs/resources/guild#get-guild-widget-image
parameters:
guild_id: The guild to query
style: The style of widget required.
returns:
A url pointing to this image
"""
route = Route("GET", f"/guilds/{guild_id}/widget.png{f'?style={style}' if style else ''}")
return route.url
async def get_guild_welcome_screen(self, guild_id: "Snowflake_Type") -> dict:
"""
Get the welcome screen for this guild.
parameters:
guild_id: The ID of the guild to query
returns:
Welcome screen object
"""
return await self.request(Route("GET", f"/guilds/{guild_id}/welcome-screen"))
async def get_guild_vanity_url(self, guild_id: "Snowflake_Type") -> dict:
"""
Get a partial invite object for the guilds vanity invite url.
parameters:
guild_id: The ID of the guild to query
returns:
`{"code": "abc", "uses": 420}` or `None`
"""
return await self.request(Route("GET", f"/guilds/{guild_id}/vanity-url"))
async def modify_guild_widget(
self, guild_id: "Snowflake_Type", enabled: bool = None, channel_id: "Snowflake_Type" = None
) -> dict:
"""
Modify a guild widget.
Args:
guild_id: The ID of the guild to modify.
enabled: Should the guild widget be enabled
channel_id: The widget's channel ID
returns:
Updated guild widget.
"""
return await self.request(
Route("PATCH", f"/guilds/{guild_id}/widget"),
data=dict_filter_none({"enabled": enabled, "channel_id": channel_id}),
)
async def modify_guild_welcome_screen(
self, guild_id: "Snowflake_Type", enabled: bool, welcome_channels: List["Snowflake_Type"], description: str
) -> dict:
"""
Modify the guild's welcome screen.
parameters:
guild_id: The ID of the guild.
enabled: Whether the welcome screen is enabled
welcome_channels: Channels linked in the welcome screen and their display options
description: The server description to show in the welcome screen
returns:
Updated welcome screen object
"""
return await self.request(
Route("PATCH", f"/guilds/{guild_id}/welcome-screen"),
data={"enabled": enabled, "welcome_channels": welcome_channels, "description": description},
)
async def modify_current_user_voice_state(
self,
guild_id: "Snowflake_Type",
channel_id: "Snowflake_Type",
suppress: bool = None,
request_to_speak_timestamp: str = None,
) -> None:
"""
Update the current user voice state.
parameters:
guild_id: The ID of the guild to update.
channel_id: The id of the channel the user is currently in
suppress: Toggle the user's suppress state.
request_to_speak_timestamp: Sets the user's request to speak
"""
return await self.request(
Route("PATCH", f"/guilds/{guild_id}/voice-states/@me"),
data=dict_filter_none(
{
"channel_id": channel_id,
"suppress": suppress,
"request_to_speak_timestamp": request_to_speak_timestamp,
}
),
)
async def modify_user_voice_state(
self, guild_id: "Snowflake_Type", user_id: "Snowflake_Type", channel_id: "Snowflake_Type", suppress: bool = None
) -> None:
"""
Modify the voice state of a user.
parameters:
guild_id: The ID of the guild.
user_id: The ID of the user to modify.
channel_id: The ID of the channel the user is currently in.
suppress: Toggles the user's suppress state.
"""
return await self.request(
Route("PATCH", f"/guilds/{guild_id}/voice-states/{user_id}"),
data=dict_filter_none({"channel_id": channel_id, "suppress": suppress}),
)
async def create_guild_from_guild_template(self, template_code: str, name: str, icon: str) -> dict:
"""
Creates a new guild based on a template.
note:
This endpoint can only be used by bots in less than 10 guilds.
parameters:
template_code: The code of the template to use.
name: The name of the guild (2-100 characters)
icon: Data URI scheme
returns:
The newly created guild object
"""
return await self.request(
Route("POST", f"/guilds/templates/{template_code}"), data={"name": name, "icon": icon}
)
async def get_guild_templates(self, guild_id: "Snowflake_Type") -> List[dict]:
"""
Returns an array of guild templates.
parameters:
guild_id: The ID of the guild to query.
returns:
An array of guild templates
"""
return await self.request(Route("GET", f"/guilds/{guild_id}/templates"))
async def create_guild_template(self, guild_id: "Snowflake_Type", name: str, description: str = None) -> dict:
"""
Create a guild template for the guild.
parameters:
guild_id: The ID of the guild to create a template for.
name: The name of the template
description: The description of the template
returns:
The created guild template
"""
return await self.request(
Route("POST", f"/guilds/{guild_id}/templates"),
data=dict_filter_none({"name": name, "description": description}),
)
async def sync_guild_template(self, guild_id: "Snowflake_Type", template_code: str) -> dict:
"""
Sync the template to the guild's current state.
parameters:
guild_id: The ID of the guild
template_code: The code for the template to sync
returns:
The updated guild template
"""
return await self.request(Route("PUT", f"/guilds/{guild_id}/templates/{template_code}"))
async def modify_guild_template(
self, guild_id: "Snowflake_Type", template_code: str, name: str = None, description: str = None
) -> dict:
"""
Modifies the template's metadata.
parameters:
guild_id: The ID of the guild
template_code: The template code
name: The name of the template
description: The description of the template
returns:
The updated guild template
"""
return await self.request(
Route("PATCH", f"/guilds/{guild_id}/templates/{template_code}"),
data=dict_filter_none({"name": name, "description": description}),
)
async def delete_guild_template(self, guild_id: "Snowflake_Type", template_code: str) -> dict:
"""
Delete the guild template.
parameters:
guild_id: The ID of the guild
template_code: The ID of the template
returns:
The deleted template object
"""
# why on earth does this return the deleted template object?
return await self.request(Route("DELETE", f"/guilds/{guild_id}/templates/{template_code}"))
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 33.497724 | 151 | 0.58872 |
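`GuildRequests` above is structured as a mixin: each endpoint method builds a `Route` and awaits `self.request(...)`, and the class only declares `request: Any` so the concrete HTTP client supplies the actual transport. The sketch below mirrors that contract under stated assumptions: the simplified `Route` and the `FakeClient` recorder are hypothetical stand-ins, not part of Dis-Snek.

```python
import asyncio
from typing import Any, Dict


class Route:
    """Simplified stand-in for dis_snek's Route: an HTTP verb plus a path."""

    def __init__(self, method: str, path: str) -> None:
        self.method = method
        self.path = path


class GuildRequestsSketch:
    """Mirrors the mixin contract above: endpoint methods only delegate to self.request."""

    request: Any

    async def get_guild(self, guild_id: int, with_counts: bool = True) -> Dict:
        # Same shape as the real method: build a Route and pass query params through.
        return await self.request(
            Route("GET", f"/guilds/{guild_id}"), params={"with_counts": int(with_counts)}
        )


class FakeClient(GuildRequestsSketch):
    """Hypothetical transport that records the request instead of calling Discord."""

    async def request(self, route: Route, **kwargs) -> Dict:
        return {"method": route.method, "path": route.path, **kwargs}


async def main() -> None:
    client = FakeClient()
    print(await client.get_guild(1234567890))
    # {'method': 'GET', 'path': '/guilds/1234567890', 'params': {'with_counts': 1}}


if __name__ == "__main__":
    asyncio.run(main())
```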

| field | value |
|---|---|
| hexsha | e730b1dbd6eee3e1d7e20a1747b3cae6737a7fc1 |
| size | 3,128 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks repo_path | Python/NeonOcean.S4.Order/NeonOcean/S4/Order/Tools/Once.py |
| max_stars / max_issues / max_forks repo_name | NeonOcean/Order |
| max_stars / max_issues / max_forks repo_head_hexsha | 7e7cbdb26e98bb276c7b27cedc75164634e64148 |
| max_stars / max_issues / max_forks repo_licenses | ["CC-BY-4.0"] |
| star / issue / fork counts and event datetimes | null |

content:

```python
from __future__ import annotations

import typing

from NeonOcean.S4.Order.Tools import Exceptions

class Once:
    def __init__ (self):
        """
        An object for tracking whether or not sections of code that are only supposed to run once have done so already.
        """

        self.Triggered = dict()  # type: typing.Dict[str, typing.Set[typing.Any]]

    def Block (self, identifier: str, reference: typing.Any = None) -> None:
        """
        Signal that a section of code is blocked off.
        :param identifier: This identifier should be unique to the section of code meant to be blocked. The line number and module name together should be sufficient.
        :type identifier: str
        :param reference: If you want the section of code to run once for every object, set that object as the reference. Otherwise, leave this as None.
        """

        if not isinstance(identifier, str):
            raise Exceptions.IncorrectTypeException(identifier, "identifier", (str,))

        if identifier not in self.Triggered:
            identifierReferences = set()  # type: typing.Set[typing.Any]
            identifierReferences.add(reference)
            self.Triggered[identifier] = set()

        self.Triggered[identifier].add(reference)

    def Unblock (self, identifier: str, reference: typing.Any = None) -> None:
        """
        Unblock a previously blocked identifier and reference combination. If it is not blocked, nothing will happen.
        :param identifier: This identifier should be unique to the section of code meant to be blocked. The line number and module name together should be sufficient.
        :type identifier: str
        :param reference: If you want the section of code to run once for every object, set that object as the reference. Otherwise, leave this as None.
        """

        if identifier not in self.Triggered:
            return

        blockedReferences = self.Triggered[identifier]

        if reference in blockedReferences:
            blockedReferences.remove(reference)

    def UnblockIdentifier (self, identifier: str) -> None:
        """
        Remove all blocked references for this identifier.
        :param identifier: This identifier should be unique to the section of code meant to be blocked. The line number and module name together should be sufficient.
        :type identifier: str
        """

        if identifier not in self.Triggered:
            return

        self.Triggered[identifier] = set()

    def UnblockAll (self) -> None:
        """
        Remove all blocked identifier and reference combinations.
        """

        self.Triggered = dict()

    def IsBlocked (self, identifier: str, reference: typing.Any = None) -> bool:
        """
        Get whether or not this combination of identifier and reference has been blocked.
        :param identifier: This identifier should be unique to the section of code meant to be blocked. The line number and module name together should be sufficient.
        :type identifier: str
        :param reference: If you want the section of code to run once for every object, set that object as the reference. Otherwise, leave this as None.
        """

        if not isinstance(identifier, str):
            raise Exceptions.IncorrectTypeException(identifier, "identifier", (str,))

        if identifier in self.Triggered:
            if reference in self.Triggered[identifier]:
                return True

        return False
```

| avg_line_length | max_line_length | alphanum_fraction |
|---|---|---|
| 37.238095 | 160 | 0.745524 |
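The `Once` tracker above gates run-once code sections by an identifier, optionally per reference object. A short usage sketch follows, assuming the class is importable as it would be inside the mod's runtime; the `_runOnceTracker` instance and `ShowStartupWarning` helper are hypothetical:

```python
# Hypothetical usage sketch; the import only resolves inside the NeonOcean mod runtime.
from NeonOcean.S4.Order.Tools.Once import Once

_runOnceTracker = Once()

def ShowStartupWarning () -> None:
    identifier = "ShowStartupWarning"  # unique to this run-once section

    # Skip the body if this identifier has already been blocked.
    if _runOnceTracker.IsBlocked(identifier):
        return

    print("This warning is only shown the first time the function runs.")

    # Block the identifier so later calls become no-ops until it is unblocked.
    _runOnceTracker.Block(identifier)

ShowStartupWarning()
ShowStartupWarning()                            # no output; the section is blocked
_runOnceTracker.Unblock("ShowStartupWarning")
ShowStartupWarning()                            # prints again after unblocking
```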

| field | value |
|---|---|
| hexsha | 2f162433572bf74f8d9cc228a9e32411493bc956 |
| size | 60,905 |
| ext | py |
| lang | Python |
| max_stars / max_issues / max_forks repo_path | isstools/widgets/widget_batch_mode.py |
| max_stars / max_issues / max_forks repo_name | elistavitski/isstools |
| max_stars / max_issues / max_forks repo_head_hexsha | 482b40bb0017b1a480ff862c1a53e89dfac947ec |
| max_stars / max_issues / max_forks repo_licenses | ["BSD-3-Clause"] |
| star / issue / fork counts and event datetimes | null |

content:
import inspect
import re
import pkg_resources
from PyQt5 import uic, QtWidgets, QtCore
from matplotlib.backends.backend_qt5agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
from matplotlib.widgets import Cursor
from PyQt5 import uic, QtGui, QtCore, QtWidgets
from PyQt5.QtCore import QThread
import numpy as np
import collections
import time as ttime
from isstools.elements import elements
from isstools.trajectory.trajectory import trajectory_manager
from isstools.batch.batch import BatchManager
ui_path = pkg_resources.resource_filename('isstools', 'ui/ui_batch_mode.ui')
import json
import pandas as pd
class UIBatchMode(*uic.loadUiType(ui_path)):
def __init__(self,
plan_funcs,
motors_dict,
hhm,
RE,
db,
gen_parser,
adc_list,
enc_list,
xia,
run_prep_traj,
scan_figure,
create_log_scan,
sample_stages,
parent_gui,
*args, **kwargs):
super().__init__(*args, **kwargs)
self.setupUi(self)
self.addCanvas()
self.plan_funcs = plan_funcs
self.plan_funcs_names = [plan.__name__ for plan in plan_funcs]
self.motors_dict = motors_dict
self.mot_list = self.motors_dict.keys()
self.mot_sorted_list = list(self.mot_list)
self.mot_sorted_list.sort()
self.traj_manager = trajectory_manager(hhm)
self.create_log_scan = create_log_scan
self.RE = RE
self.db = db
self.figure = scan_figure
self.run_prep_traj = run_prep_traj
self.gen_parser = gen_parser
self.sample_stages = sample_stages
self.parent_gui = parent_gui
self.batch_mode_uids = []
self.treeView_batch = elements.TreeView(self, 'all')
self.treeView_samples_loop = elements.TreeView(self, 'sample')
self.treeView_samples_loop_scans = elements.TreeView(self, 'scan', unique_elements=False)
self.treeView_samples = elements.TreeView(self, 'sample')
self.treeView_scans = elements.TreeView(self, 'scan')
self.push_batch_delete_all.clicked.connect(self.delete_all_batch)
self.gridLayout_22.addWidget(self.treeView_samples_loop, 1, 0)
self.gridLayout_22.addWidget(self.treeView_samples_loop_scans, 1, 1)
self.gridLayout_23.addWidget(self.treeView_samples, 0, 0)
self.gridLayout_24.addWidget(self.treeView_batch, 0, 0)
self.gridLayout_26.addWidget(self.treeView_scans, 0, 0)
self.treeView_batch.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
# self.treeView_samples.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
self.treeView_samples.setDragDropMode(QtWidgets.QAbstractItemView.DragOnly)
self.treeView_scans.setDragDropMode(QtWidgets.QAbstractItemView.DragOnly)
self.treeView_samples_loop.setDragDropMode(QtWidgets.QAbstractItemView.DropOnly)
self.treeView_samples_loop_scans.setDragDropMode(QtWidgets.QAbstractItemView.DropOnly)
self.batch_running = False
self.batch_pause = False
self.batch_abort = False
self.batch_results = {}
self.push_batch_pause.clicked.connect(self.pause_unpause_batch)
self.push_batch_abort.clicked.connect(self.abort_batch)
self.push_replot_batch.clicked.connect(self.plot_batches)
self.last_num_batch_text = 'i0'
self.last_den_batch_text = 'it'
self.analog_samp_time = '1'
self.enc_samp_time = '1'
self.adc_list = adc_list
self.enc_list = enc_list
self.xia = xia
self.treeView_batch.header().hide()
self.treeView_samples.header().hide()
self.treeView_scans.header().hide()
self.treeView_samples_loop.header().hide()
self.treeView_samples_loop_scans.header().hide()
self.push_create_sample.clicked.connect(self.create_new_sample_func)
self.push_get_sample.clicked.connect(self.get_sample_pos)
self.model_samples = QtGui.QStandardItemModel(self)
self.treeView_samples.setModel(self.model_samples)
self.push_add_sample.clicked.connect(self.add_new_sample_func)
self.push_delete_sample.clicked.connect(self.delete_current_sample)
self.model_batch = QtGui.QStandardItemModel(self)
self.treeView_batch.setModel(self.model_batch)
self.push_add_sample_loop.clicked.connect(self.add_new_sample_loop_func)
self.push_delete_sample_loop.clicked.connect(self.delete_current_samples_loop)
self.model_samples_loop = QtGui.QStandardItemModel(self)
self.treeView_samples_loop.setModel(self.model_samples_loop)
self.push_delete_sample_loop_scan.clicked.connect(self.delete_current_samples_loop_scans)
self.model_samples_loop_scans = QtGui.QStandardItemModel(self)
self.treeView_samples_loop_scans.setModel(self.model_samples_loop_scans)
self.push_create_scan.clicked.connect(self.create_new_scan_func)
self.push_delete_scan.clicked.connect(self.delete_current_scan)
self.push_add_scan.clicked.connect(self.add_new_scan_func)
self.model_scans = QtGui.QStandardItemModel(self)
self.treeView_scans.setModel(self.model_scans)
self.push_batch_run.clicked.connect(self.start_batch)
self.push_batch_print_steps.clicked.connect(self.print_batch)
self.push_batch_delete.clicked.connect(self.delete_current_batch)
self.comboBox_scans.addItems(self.plan_funcs_names)
self.comboBox_scans.currentIndexChanged.connect(self.populateParams_batch)
self.push_create_scan_update.clicked.connect(self.update_batch_traj)
try:
self.update_batch_traj()
except OSError as err:
print('Error loading:', err)
self.params1_batch = []
self.params2_batch = []
self.params3_batch = []
if len(self.plan_funcs) != 0:
self.populateParams_batch(0)
self.comboBox_sample_loop_motor.addItems(self.mot_sorted_list)
self.comboBox_sample_loop_motor.currentTextChanged.connect(self.update_loop_values)
self.spinBox_sample_loop_rep.valueChanged.connect(self.restore_add_loop)
self.spinBox_sample_loop_rep.valueChanged.connect(self.comboBox_sample_loop_motor.setDisabled)
self.spinBox_sample_loop_rep.valueChanged.connect(self.doubleSpinBox_motor_range_start.setDisabled)
self.spinBox_sample_loop_rep.valueChanged.connect(self.doubleSpinBox_motor_range_stop.setDisabled)
self.spinBox_sample_loop_rep.valueChanged.connect(self.doubleSpinBox_motor_range_step.setDisabled)
self.spinBox_sample_loop_rep.valueChanged.connect(self.radioButton_sample_rel.setDisabled)
self.spinBox_sample_loop_rep.valueChanged.connect(self.radioButton_sample_abs.setDisabled)
self.radioButton_sample_rel.toggled.connect(self.set_loop_values)
self.last_lut = 0
self.push_load_csv.clicked.connect(self.load_csv)
self.push_save_csv.clicked.connect(self.save_csv)
# Checking which XY sample stage to use:
self.stage_x = ''
self.stage_y = ''
for stage in self.sample_stages:
if stage['x'] in self.motors_dict and stage['y'] in self.motors_dict:
if self.motors_dict[stage['x']]['object'].connected and\
self.motors_dict[stage['y']]['object'].connected:
self.stage_x = stage['x']
self.stage_y = stage['y']
break
if self.stage_x == '' or self.stage_y == '':
print('No stage set! Batch mode will not work!')
def addCanvas(self):
self.figure_batch_waterfall = Figure()
self.figure_batch_waterfall.set_facecolor(color='#FcF9F6')
self.canvas_batch_waterfall = FigureCanvas(self.figure_batch_waterfall)
self.canvas_batch_waterfall.motor = ''
self.figure_batch_waterfall.ax = self.figure_batch_waterfall.add_subplot(111)
self.toolbar_batch_waterfall = NavigationToolbar(self.canvas_batch_waterfall, self, coordinates=True)
self.plot_batch_waterfall.addWidget(self.toolbar_batch_waterfall)
self.plot_batch_waterfall.addWidget(self.canvas_batch_waterfall)
self.canvas_batch_waterfall.draw_idle()
self.cursor_batch_waterfall = Cursor(self.figure_batch_waterfall.ax, useblit=True, color='green',
linewidth=0.75)
self.figure_batch_average = Figure()
self.figure_batch_average.set_facecolor(color='#FcF9F6')
self.canvas_batch_average = FigureCanvas(self.figure_batch_average)
self.canvas_batch_average.motor = ''
self.figure_batch_average.ax = self.figure_batch_average.add_subplot(111)
self.toolbar_batch_average = NavigationToolbar(self.canvas_batch_average, self, coordinates=True)
self.plot_batch_average.addWidget(self.toolbar_batch_average)
self.plot_batch_average.addWidget(self.canvas_batch_average)
self.canvas_batch_average.draw_idle()
self.cursor_batch_average = Cursor(self.figure_batch_average.ax, useblit=True, color='green', linewidth=0.75)
def pause_unpause_batch(self):
if self.batch_running == True:
self.batch_pause = not self.batch_pause
if self.batch_pause:
print('Pausing batch run... It will pause in the next step.')
self.push_batch_pause.setText('Unpause')
else:
print('Unpausing batch run...')
self.push_batch_pause.setText('Pause')
self.label_batch_step.setText(self.label_batch_step.text()[9:])
def abort_batch(self):
if self.batch_running == True:
self.batch_abort = True
self.re_abort()
def plot_batches(self, data):
if self.parent_gui.run_mode == 'batch':
self.figure_batch_waterfall.ax.clear()
self.toolbar_batch_waterfall._views.clear()
self.toolbar_batch_waterfall._positions.clear()
self.toolbar_batch_waterfall._update_view()
self.canvas_batch_waterfall.draw_idle()
self.figure_batch_average.ax.clear()
self.toolbar_batch_average._views.clear()
self.toolbar_batch_average._positions.clear()
self.toolbar_batch_average._update_view()
self.canvas_batch_average.draw_idle()
df = pd.read_msgpack(data['processing_ret']['data'])
#df = pd.DataFrame.from_dict(json.loads(data['processing_ret']['data']))
df = df.sort_values('energy')
self.df = df
md = data['processing_ret']['metadata']
trajectory_name = md['trajectory_name']
scan_name = md['name']
sample_name = scan_name.split(' - ')[0]
e0 = int(md['e0'])
if sample_name in self.batch_results:
self.batch_results[sample_name]['data'].append(df)
self.batch_results[sample_name]['orig_all'] = self.batch_results[sample_name]['orig_all'].append(df,
ignore_index=True)
self.gen_parser.interp_arrays = self.batch_results[sample_name]['orig_all']
binned = self.gen_parser.bin(e0,
e0 - 30,
e0 + 50,
10,
0.2,
0.04)
self.batch_results[sample_name]['data_all'] = binned
else:
self.batch_results[sample_name] = {'data': [df]}
self.batch_results[sample_name]['orig_all'] = df
self.gen_parser.interp_df = self.batch_results[sample_name]['orig_all']
binned = self.gen_parser.bin(e0,
e0 - 30,
e0 + 50,
10,
0.2,
0.04)
self.batch_results[sample_name]['data_all'] = binned
largest_range = 0
for sample_index, sample in enumerate(self.batch_results):
for data_index, data_set in enumerate(self.batch_results[sample]['data']):
if self.listWidget_numerator_batch.count() == 0:
self.listWidget_numerator_batch.insertItems(0, list(data_set.keys()))
self.listWidget_denominator_batch.insertItems(0, list(data_set.keys()))
if len(data_set.keys()):
while self.listWidget_numerator_batch.count() == 0 or self.listWidget_denominator_batch.count() == 0:
QtCore.QCoreApplication.processEvents()
index_num = [index for index, item in enumerate(
[self.listWidget_numerator_batch.item(index) for index in
range(self.listWidget_numerator_batch.count())]) if item.text() == self.last_num_batch_text]
if len(index_num):
self.listWidget_numerator_batch.setCurrentRow(index_num[0])
index_den = [index for index, item in enumerate(
[self.listWidget_denominator_batch.item(index) for index in
range(self.listWidget_denominator_batch.count())]) if item.text() == self.last_den_batch_text]
if len(index_den):
self.listWidget_denominator_batch.setCurrentRow(index_den[0])
else:
if self.listWidget_numerator_batch.currentRow() != -1:
self.last_num_batch_text = self.listWidget_numerator_batch.currentItem().text()
if self.listWidget_denominator_batch.currentRow() != -1:
self.last_den_batch_text = self.listWidget_denominator_batch.currentItem().text()
energy_string = 'energy'
result = data_set[self.last_num_batch_text] / data_set[self.last_den_batch_text]
if self.checkBox_log_batch.checkState() > 0:
result = np.log(result)
if result.max() - result.min() > largest_range:
largest_range = result.max() - result.min()
for sample_index, sample in enumerate(self.batch_results):
for data_index, data_set in enumerate(self.batch_results[sample]['data']):
energy_string = 'energy'
result = data_set[self.last_num_batch_text] / data_set[self.last_den_batch_text]
data_set_all = self.batch_results[sample]['data_all']
result_all = data_set_all[self.last_num_batch_text] / data_set_all[self.last_den_batch_text]
# print('data_set', len(data_set['i0']))
if self.checkBox_log_batch.checkState() > 0:
result = np.log(result)
result_all = np.log(result_all)
distance_multiplier = 1.25
if data_index == 0:
text_y = (sample_index * largest_range * distance_multiplier) + (result.max() + result.min()) / 2
bbox_props = dict(boxstyle="round,pad=0.3", fc="white", ec="black", lw=1.3)
self.figure_batch_waterfall.ax.text(data_set[energy_string].iloc[-1], text_y, sample, size=11,
horizontalalignment='right', clip_on=True, bbox=bbox_props)
self.figure_batch_average.ax.text(data_set_all[energy_string].iloc[-1], text_y, sample, size=11,
horizontalalignment='right', clip_on=True, bbox=bbox_props)
self.figure_batch_waterfall.ax.plot(data_set[energy_string].iloc[:len(result)],
(sample_index * largest_range * distance_multiplier) + result)
self.figure_batch_average.ax.plot(data_set_all[energy_string].iloc[:len(result_all)],
(sample_index * largest_range * distance_multiplier) + result_all)
self.canvas_batch_waterfall.draw_idle()
self.canvas_batch_average.draw_idle()
def create_new_sample_func(self):
self.create_new_sample(self.lineEdit_sample_name.text(), self.doubleSpinBox_sample_x.value(),
self.doubleSpinBox_sample_y.value())
def get_sample_pos(self):
if self.stage_x not in self.mot_list:
raise Exception('Stage X was not passed to the GUI')
if self.stage_y not in self.mot_list:
raise Exception('Stage Y was not passed to the GUI')
if not self.motors_dict[self.stage_x]['object'].connected or \
not self.motors_dict[self.stage_y]['object'].connected:
raise Exception('Stage IOC not connected')
x_value = self.motors_dict[self.stage_x]['object'].position
y_value = self.motors_dict[self.stage_y]['object'].position
self.doubleSpinBox_sample_x.setValue(x_value)
self.doubleSpinBox_sample_y.setValue(y_value)
def add_new_sample_func(self):
indexes = self.treeView_samples.selectedIndexes()
for index in indexes:
item = index.model().itemFromIndex(index)
self.add_new_sample(item)
def delete_current_sample(self):
view = self.treeView_samples
index = view.currentIndex()
if index.row() < view.model().rowCount():
view.model().removeRows(index.row(), 1)
def add_new_sample_loop_func(self):
model_samples = self.treeView_samples_loop.model()
data_samples = []
for row in range(model_samples.rowCount()):
index = model_samples.index(row, 0)
data_samples.append(str(model_samples.data(index)))
model_scans = self.treeView_samples_loop_scans.model()
data_scans = []
for row in range(model_scans.rowCount()):
index = model_scans.index(row, 0)
data_scans.append(str(model_scans.data(index)))
self.add_new_sample_loop(data_samples, data_scans)
def delete_current_samples_loop(self):
view = self.treeView_samples_loop
index = view.currentIndex()
if index.row() < view.model().rowCount():
view.model().removeRows(index.row(), 1)
def delete_current_samples_loop_scans(self):
view = self.treeView_samples_loop_scans
index = view.currentIndex()
if index.row() < view.model().rowCount():
view.model().removeRows(index.row(), 1)
def delete_current_scan(self):
view = self.treeView_scans
index = view.currentIndex()
if index.row() < view.model().rowCount():
view.model().removeRows(index.row(), 1)
def create_new_scan_func(self):
self.create_new_scan(self.comboBox_scans.currentText(), self.comboBox_lut.currentText())
def add_new_scan_func(self):
indexes = self.treeView_scans.selectedIndexes()
for index in indexes:
item = index.model().itemFromIndex(index)
self.add_new_scan(item)
def start_batch(self):
print('[Launching Threads]')
self.listWidget_numerator_batch.clear()
self.listWidget_denominator_batch.clear()
self.figure_batch_waterfall.ax.clear()
self.canvas_batch_waterfall.draw_idle()
self.figure_batch_average.ax.clear()
self.canvas_batch_average.draw_idle()
self.run_batch()
def print_batch(self):
print('\n***** Printing Batch Steps *****')
self.run_batch(print_only=True)
print('***** Finished Batch Steps *****')
def delete_current_batch(self):
view = self.treeView_batch
index = view.currentIndex()
if index.row() < view.model().rowCount():
view.model().removeRows(index.row(), 1)
def delete_all_batch(self):
view = self.treeView_samples
if view.model().hasChildren():
view.model().removeRows(0, view.model().rowCount())
view = self.treeView_scans
if view.model().hasChildren():
view.model().removeRows(0, view.model().rowCount())
view = self.treeView_samples_loop
if view.model().hasChildren():
view.model().removeRows(0, view.model().rowCount())
view = self.treeView_samples_loop_scans
if view.model().hasChildren():
view.model().removeRows(0, view.model().rowCount())
view = self.treeView_batch
if view.model().hasChildren():
view.model().removeRows(0, view.model().rowCount())
def create_new_sample(self, name, x, y):
parent = self.model_samples.invisibleRootItem()
item = QtGui.QStandardItem('{} X:{} Y:{}'.format(name, x, y))
item.setDropEnabled(False)
item.item_type = 'sample'
item.x = x
item.y = y
# subitem = QtGui.QStandardItem('X: {}'.format(x))
# subitem.setEnabled(False)
# item.appendRow(subitem)
# subitem = QtGui.QStandardItem('Y: {}'.format(y))
# subitem.setEnabled(False)
# item.appendRow(subitem)
parent.appendRow(item)
self.treeView_samples.expand(self.model_samples.indexFromItem(item))
def add_new_sample(self, item):
parent = self.model_batch.invisibleRootItem()
new_item = item.clone()
new_item.item_type = 'sample'
new_item.x = item.x
new_item.y = item.y
new_item.setEditable(False)
new_item.setDropEnabled(False)
name = new_item.text()[:new_item.text().find(' X:')] # .split()[0]
new_item.setText('Move to "{}" X:{} Y:{}'.format(name, item.x, item.y))
for index in range(item.rowCount()):
subitem = QtGui.QStandardItem(item.child(index))
subitem.setEnabled(False)
subitem.setDropEnabled(False)
new_item.appendRow(subitem)
parent.appendRow(new_item)
def select_all_samples(self):
if len(self.treeView_samples.selectedIndexes()) < self.model_samples.rowCount():
self.treeView_samples.selectAll()
else:
self.treeView_samples.clearSelection()
def create_new_scan(self, curr_type, traj):
run_params = {}
for i in range(len(self.params1_batch)):
if (self.param_types_batch[i] == int):
run_params[self.params3_batch[i].text().split('=')[0]] = self.params2_batch[i].value()
elif (self.param_types_batch[i] == float):
run_params[self.params3_batch[i].text().split('=')[0]] = self.params2_batch[i].value()
elif (self.param_types_batch[i] == bool):
run_params[self.params3_batch[i].text().split('=')[0]] = bool(self.params2_batch[i].checkState())
elif (self.param_types_batch[i] == str):
run_params[self.params3_batch[i].text().split('=')[0]] = self.params2_batch[i].text()
params = str(run_params)[1:-1].replace(': ', ':').replace(',', '').replace("'", "")
parent = self.model_scans.invisibleRootItem()
if self.comboBox_lut.isEnabled():
item = QtGui.QStandardItem('{} Traj:{} {}'.format(curr_type, traj, params))
else:
item = QtGui.QStandardItem('{} {}'.format(curr_type, params))
item.setDropEnabled(False)
item.item_type = 'sample'
parent.appendRow(item)
self.treeView_samples.expand(self.model_samples.indexFromItem(item))
def add_new_scan(self, item):
parent = self.model_batch.invisibleRootItem()
new_item = item.clone()
new_item.item_type = 'scan'
new_item.setEditable(False)
new_item.setDropEnabled(False)
name = new_item.text().split()[0]
new_item.setText('Run {}'.format(new_item.text()))
for index in range(item.rowCount()):
subitem = QtGui.QStandardItem(item.child(index))
subitem.setEnabled(False)
subitem.setDropEnabled(False)
new_item.appendRow(subitem)
parent.appendRow(new_item)
def update_loop_values(self, text):
for motor in self.motors_dict:
if self.comboBox_sample_loop_motor.currentText() == self.motors_dict[motor]['name']:
curr_mot = self.motors_dict[motor]['object']
break
if self.radioButton_sample_rel.isChecked():
if curr_mot.connected == True:
self.push_add_sample_loop.setEnabled(True)
self.doubleSpinBox_motor_range_start.setValue(-0.5)
self.doubleSpinBox_motor_range_stop.setValue(0.5)
self.doubleSpinBox_motor_range_step.setValue(0.25)
self.push_add_sample_loop.setEnabled(True)
else:
self.push_add_sample_loop.setEnabled(False)
self.doubleSpinBox_motor_range_start.setValue(0)
self.doubleSpinBox_motor_range_stop.setValue(0)
self.doubleSpinBox_motor_range_step.setValue(0.025)
else:
if curr_mot.connected == True:
self.push_add_sample_loop.setEnabled(True)
curr_pos = curr_mot.read()[curr_mot.name]['value']
self.doubleSpinBox_motor_range_start.setValue(curr_pos - 0.1)
self.doubleSpinBox_motor_range_stop.setValue(curr_pos + 0.1)
self.doubleSpinBox_motor_range_step.setValue(0.025)
else:
self.push_add_sample_loop.setEnabled(False)
self.doubleSpinBox_motor_range_start.setValue(0)
self.doubleSpinBox_motor_range_stop.setValue(0)
self.doubleSpinBox_motor_range_step.setValue(0.025)
def restore_add_loop(self, value):
if value:
self.push_add_sample_loop.setEnabled(True)
def set_loop_values(self, checked):
if checked:
self.doubleSpinBox_motor_range_start.setValue(-0.5)
self.doubleSpinBox_motor_range_stop.setValue(0.5)
self.doubleSpinBox_motor_range_step.setValue(0.25)
self.push_add_sample_loop.setEnabled(True)
else:
motor_text = self.comboBox_sample_loop_motor.currentText()
self.update_loop_values(motor_text)
def add_new_sample_loop(self, samples, scans):
parent = self.model_batch.invisibleRootItem()
new_item = QtGui.QStandardItem('Sample Loop')
new_item.setEditable(False)
if self.spinBox_sample_loop_rep.value():
repetitions_item = QtGui.QStandardItem('Repetitions:{}'.format(self.spinBox_sample_loop_rep.value()))
else:
repetitions_item = QtGui.QStandardItem(
'Motor:{} Start:{} Stop:{} Step:{}'.format(self.comboBox_sample_loop_motor.currentText(),
self.doubleSpinBox_motor_range_start.value(),
self.doubleSpinBox_motor_range_stop.value(),
self.doubleSpinBox_motor_range_step.value()))
new_item.appendRow(repetitions_item)
if self.radioButton_sample_loop.isChecked():
primary = 'Samples'
else:
primary = 'Scans'
primary_item = QtGui.QStandardItem('Primary:{}'.format(primary))
new_item.appendRow(primary_item)
samples_item = QtGui.QStandardItem('Samples')
samples_item.setDropEnabled(False)
for index in range(len(samples)):
subitem = QtGui.QStandardItem(samples[index])
subitem.setDropEnabled(False)
samples_item.appendRow(subitem)
new_item.appendRow(samples_item)
scans_item = QtGui.QStandardItem('Scans')
scans_item.setDropEnabled(False)
for index in range(len(scans)):
subitem = QtGui.QStandardItem(scans[index])
subitem.setDropEnabled(False)
scans_item.appendRow(subitem)
new_item.appendRow(scans_item)
parent.appendRow(new_item)
self.treeView_batch.expand(self.model_batch.indexFromItem(new_item))
for index in range(new_item.rowCount()):
self.treeView_batch.expand(new_item.child(index).index())
def populateParams_batch(self, index):
if self.comboBox_scans.currentText()[: 5] != 'tscan':
self.comboBox_lut.setEnabled(False)
else:
self.comboBox_lut.setEnabled(True)
for i in range(len(self.params1_batch)):
self.gridLayout_31.removeWidget(self.params1_batch[i])
self.gridLayout_31.removeWidget(self.params2_batch[i])
self.gridLayout_31.removeWidget(self.params3_batch[i])
self.params1_batch[i].deleteLater()
self.params2_batch[i].deleteLater()
self.params3_batch[i].deleteLater()
self.params1_batch = []
self.params2_batch = []
self.params3_batch = []
self.param_types_batch = []
plan_func = self.plan_funcs[index]
signature = inspect.signature(plan_func)
for i in range(0, len(signature.parameters)):
default = re.sub(r':.*?=', '=', str(signature.parameters[list(signature.parameters)[i]]))
if default == str(signature.parameters[list(signature.parameters)[i]]):
default = re.sub(r':.*', '', str(signature.parameters[list(signature.parameters)[i]]))
self.addParamControl(list(signature.parameters)[i], default,
signature.parameters[list(signature.parameters)[i]].annotation,
grid=self.gridLayout_31,
params=[self.params1_batch, self.params2_batch, self.params3_batch])
self.param_types_batch.append(signature.parameters[list(signature.parameters)[i]].annotation)
def addParamControl(self, name, default, annotation, grid, params):
rows = int((grid.count()) / 3)
param1 = QtWidgets.QLabel(str(rows + 1))
param2 = None
def_val = ''
if default.find('=') != -1:
def_val = re.sub(r'.*=', '', default)
if annotation == int:
param2 = QtWidgets.QSpinBox()
param2.setMaximum(100000)
param2.setMinimum(-100000)
def_val = int(def_val)
param2.setValue(def_val)
elif annotation == float:
param2 = QtWidgets.QDoubleSpinBox()
param2.setMaximum(100000)
param2.setMinimum(-100000)
def_val = float(def_val)
param2.setValue(def_val)
elif annotation == bool:
param2 = QtWidgets.QCheckBox()
if def_val == 'True':
def_val = True
else:
def_val = False
param2.setCheckState(def_val)
param2.setTristate(False)
elif annotation == str:
param2 = QtWidgets.QLineEdit()
def_val = str(def_val)
param2.setText(def_val)
if param2 is not None:
param3 = QtWidgets.QLabel(default)
grid.addWidget(param1, rows, 0, QtCore.Qt.AlignTop)
grid.addWidget(param2, rows, 1, QtCore.Qt.AlignTop)
grid.addWidget(param3, rows, 2, QtCore.Qt.AlignTop)
params[0].append(param1)
params[1].append(param2)
params[2].append(param3)
def update_batch_traj(self):
self.trajectories = self.traj_manager.read_info(silent=True)
self.comboBox_lut.clear()
self.comboBox_lut.addItems(
['{}-{}'.format(lut, self.trajectories[lut]['name']) for lut in self.trajectories if lut != '9'])
def load_csv(self):
user_filepath = '/GPFS/xf08id/User Data/{}.{}.{}/'.format(self.RE.md['year'],
self.RE.md['cycle'],
self.RE.md['PROPOSAL'])
filename = QtWidgets.QFileDialog.getOpenFileName(caption='Select file to load',
directory=user_filepath,
filter='*.csv',
parent=self)[0]
if filename:
batman = BatchManager(self)
batman.load_csv(filename)
def save_csv(self):
user_filepath = '/GPFS/xf08id/User Data/{}.{}.{}/'.format(self.RE.md['year'],
self.RE.md['cycle'],
self.RE.md['PROPOSAL'])
filename = QtWidgets.QFileDialog.getSaveFileName(caption='Select file to save',
directory=user_filepath,
filter='*.csv',
parent=self)[0]
if filename:
if filename[-4:] != '.csv':
filename += '.csv'
batman = BatchManager(self)
batman.save_csv(filename)
def check_pause_abort_batch(self):
if self.batch_abort:
print('**** Aborting Batch! ****')
raise Exception('Abort button pressed by user')
elif self.batch_pause:
self.label_batch_step.setText('[Paused] {}'.format(self.label_batch_step.text()))
while self.batch_pause:
QtCore.QCoreApplication.processEvents()
def run_batch(self, print_only=False):
try:
self.last_lut = 0
current_index = 0
self.current_uid_list = []
if print_only is False:
self.parent_gui.run_mode = 'batch'
self.batch_running = True
self.batch_pause = False
self.batch_abort = False
# Send sampling time to the pizzaboxes:
value = int(
round(float(self.analog_samp_time) / self.adc_list[0].sample_rate.value * 100000))
for adc in self.adc_list:
adc.averaging_points.put(str(value))
for enc in self.enc_list:
enc.filter_dt.put(float(self.enc_samp_time) * 100000)
if self.xia.input_trigger is not None:
self.xia.input_trigger.unit_sel.put(1) # ms, not us
self.xia.input_trigger.period_sp.put(int(self.xia_samp_time))
self.batch_results = {}
for batch_index in range(self.model_batch.rowCount()):
index = self.model_batch.index(batch_index, 0)
text = str(self.model_batch.data(index))
item = self.model_batch.item(batch_index)
font = QtGui.QFont()
font.setWeight(QtGui.QFont.Bold)
item.setFont(font)
item.setText(text)
if text.find('Move to ') == 0:
name = text[text.find('"') + 1:text.rfind('"')]
item_x = text[text.find('" X:') + 4:text.find(' Y:')]
item_y = text[text.find(' Y:') + 3:]
print('Move to sample "{}" (X: {}, Y: {})'.format(name, item_x, item_y))
### Uncomment
if print_only == False:
self.label_batch_step.setText('Move to sample "{}" (X: {}, Y: {})'.format(name, item_x, item_y))
self.check_pause_abort_batch()
self.motors_dict[self.stage_x]['object'].move(item_x, wait=False)
self.motors_dict[self.stage_y]['object'].move(item_y, wait=False)
ttime.sleep(0.2)
while (self.motors_dict[self.stage_x]['object'].moving or \
self.motors_dict[self.stage_y]['object'].moving):
QtCore.QCoreApplication.processEvents()
### Uncomment
if text.find('Run ') == 0:
scan_type = text.split()[0]
scans = collections.OrderedDict({})
scans_text = text[text.find(' ') + 1:] # scans_tree.child(scans_index).text()
scan_name = scans_text[:scans_text.find(' ')]
scans_text = scans_text[scans_text.find(' ') + 1:]
i = 2
if scan_name in scans:
sn = scan_name
while sn in scans:
sn = '{}-{}'.format(scan_name, i)
i += 1
scan_name = sn
scans[scan_name] = collections.OrderedDict((k.strip(), v.strip()) for k, v in
(item.split(':') for item in scans_text.split(' ') if
len(item) > 1))
# print(json.dumps(scans, indent=2))
for scan in scans:
if 'Traj' in scans[scan]:
lut = scans[scan]['Traj'][:scans[scan]['Traj'].find('-')]
traj_name = scans[scan]['Traj'][scans[scan]['Traj'].find('-') + 1:]
### Uncomment
if self.last_lut != lut:
print('Init trajectory {} - {}'.format(lut, traj_name))
if print_only == False:
self.label_batch_step.setText('Init trajectory {} - {}'.format(lut, traj_name))
self.check_pause_abort_batch()
self.traj_manager.init(int(lut))
self.last_lut = lut
print('Prepare trajectory {} - {}'.format(lut, traj_name))
if print_only == False:
self.label_batch_step.setText('Prepare trajectory {} - {}'.format(lut, traj_name))
self.check_pause_abort_batch()
self.run_prep_traj()
if 'name' in scans[scan]:
old_name = scans[scan]['name']
scans[scan]['name'] = '{}-{}'.format(scans[scan]['name'],
traj_name[:traj_name.find('.txt')])
if scan.find('-') != -1:
scan_name = scan[:scan.find('-')]
else:
scan_name = scan
### Uncomment
if print_only == False:
if 'name' in scans[scan]:
self.label_batch_step.setText(
'Execute {} - name: {}'.format(scan_name, scans[scan]['name']))
self.check_pause_abort_batch()
else:
self.label_batch_step.setText('Execute {}'.format(scan_name))
self.check_pause_abort_batch()
uid = self.plan_funcs[self.plan_funcs_names.index(scan_name)](**scans[scan])
if uid:
self.batch_mode_uids.extend(uid)
### Uncomment (previous line)
if 'name' in scans[scan]:
print('Execute {} - name: {}'.format(scan_name, scans[scan]['name']))
scans[scan]['name'] = old_name
else:
print('Execute {}'.format(scan_name))
if text == 'Sample Loop':
print('Running Sample Loop...')
repetitions = item.child(0).text()
rep_type = repetitions[:repetitions.find(':')]
if rep_type == 'Repetitions':
repetitions = np.arange(int(repetitions[repetitions.find(':') + 1:]))
elif rep_type == 'Motor':
repetitions = repetitions.split(' ')
rep_motor = repetitions[0][repetitions[0].find(':') + 1:]
rep_motor = self.motors_dict[rep_motor]['object']
rep_start = float(repetitions[1][repetitions[1].find(':') + 1:])
rep_stop = float(repetitions[2][repetitions[2].find(':') + 1:])
rep_step = float(repetitions[3][repetitions[3].find(':') + 1:])
repetitions = np.arange(rep_start, rep_stop + rep_step, rep_step)
primary = item.child(1).text()
primary = primary[primary.find(':') + 1:]
samples = collections.OrderedDict({})
if item.child(2).text() != 'Samples':
raise Exception('Where are the samples?')
samples_tree = item.child(2)
for sample_index in range(samples_tree.rowCount()):
sample_text = samples_tree.child(sample_index).text()
sample_name = sample_text[:sample_text.find(' X:')]
sample_text = sample_text[sample_text.find(' X:') + 1:].split()
samples[sample_name] = collections.OrderedDict({sample_text[0][
0:sample_text[0].find(':')]: float(
sample_text[0][sample_text[0].find(':') + 1:]), sample_text[1][
0:sample_text[1].find(':')]: float(
sample_text[1][sample_text[1].find(':') + 1:])})
scans = collections.OrderedDict({})
if item.child(3).text() != 'Scans':
raise Exception('Where are the scans?')
scans_tree = item.child(3)
for scans_index in range(scans_tree.rowCount()):
scans_text = scans_tree.child(scans_index).text()
scan_name = scans_text[:scans_text.find(' ')]
scans_text = scans_text[scans_text.find(' ') + 1:]
i = 2
if scan_name in scans:
sn = scan_name
while sn in scans:
sn = '{}-{}'.format(scan_name, i)
i += 1
scan_name = sn
scans[scan_name] = collections.OrderedDict((k.strip(), v.strip()) for k, v in
(item.split(':') for item in scans_text.split(' ') if
len(item) > 1))
# print(json.dumps(samples, indent=2))
# print(json.dumps(scans, indent=2))
print('-' * 40)
for step_number, rep in enumerate(repetitions):
print('Step #{}'.format(step_number + 1))
if rep_type == 'Motor':
print('Move {} to {} {}'.format(rep_motor.name, rep, rep_motor.egu))
### Uncomment
if print_only == False:
self.label_batch_step.setText(
'Move {} to {} {} | Loop step number: {}/{}'.format(rep_motor.name, rep,
rep_motor.egu,
step_number + 1,
len(repetitions)))
self.check_pause_abort_batch()
if hasattr(rep_motor, 'move'):
rep_motor.move(rep)
elif hasattr(rep_motor, 'put'):
rep_motor.put(rep)
### Uncomment
if primary == 'Samples':
for index, sample in enumerate(samples):
print('-' * 40)
print('Move to sample {} (X: {}, Y: {})'.format(sample, samples[sample]['X'],
samples[sample]['Y']))
### Uncomment
if print_only == False:
self.label_batch_step.setText(
'Move to sample {} (X: {}, Y: {}) | Loop step number: {}/{}'.format(sample,
samples[
sample][
'X'],
samples[
sample][
'Y'],
step_number + 1,
len(
repetitions)))
self.check_pause_abort_batch()
self.motors_dict[self.stage_x]['object'].move(samples[sample]['X'], wait=False)
self.motors_dict[self.stage_y]['object'].move(samples[sample]['Y'], wait=False)
ttime.sleep(0.2)
while (self.motors_dict[self.stage_x]['object'].moving or \
self.motors_dict[self.stage_y]['object'].moving):
QtCore.QCoreApplication.processEvents()
### Uncomment
for scan in scans:
if 'Traj' in scans[scan]:
lut = scans[scan]['Traj'][:scans[scan]['Traj'].find('-')]
traj_name = scans[scan]['Traj'][scans[scan]['Traj'].find('-') + 1:]
### Uncomment
if self.last_lut != lut:
print('Init trajectory {} - {}'.format(lut, traj_name))
if print_only == False:
self.label_batch_step.setText(
'Init trajectory {} - {} | Loop step number: {}/{}'.format(lut,
traj_name,
step_number + 1,
len(
repetitions)))
self.check_pause_abort_batch()
self.traj_manager.init(int(lut))
self.last_lut = lut
print('Prepare trajectory {} - {}'.format(lut, traj_name))
if print_only == False:
self.label_batch_step.setText(
'Prepare trajectory {} - {} | Loop step number: {}/{}'.format(lut,
traj_name,
step_number + 1,
len(
repetitions)))
self.check_pause_abort_batch()
self.run_prep_traj()
if 'name' in scans[scan]:
old_name = scans[scan]['name']
scans[scan]['name'] = '{} - {} - {} - {}'.format(sample, scans[scan]['name'],
traj_name[
:traj_name.find('.txt')],
rep + 1)
if scan.find('-') != -1:
scan_name = scan[:scan.find('-')]
else:
scan_name = scan
### Uncomment
if print_only == False:
if 'name' in scans[scan]:
self.label_batch_step.setText(
'Execute {} - name: {} | Loop step number: {}/{}'.format(scan_name,
scans[scan][
'name'],
step_number + 1,
len(
repetitions)))
self.check_pause_abort_batch()
else:
self.label_batch_step.setText(
'Execute {} | Loop step number: {}'.format(scan_name, step_number + 1))
self.check_pause_abort_batch()
uid = self.plan_funcs[self.plan_funcs_names.index(scan_name)](**scans[scan])
if uid:
self.batch_mode_uids.extend(uid)
### Uncomment (previous line)
if 'name' in scans[scan]:
print('Execute {} - name: {}'.format(scan_name, scans[scan]['name']))
scans[scan]['name'] = old_name
else:
print('Execute {}'.format(scan_name))
elif primary == 'Scans':
for index_scan, scan in enumerate(scans):
for index, sample in enumerate(samples):
print('-' * 40)
print('Move to sample {} (X: {}, Y: {})'.format(sample, samples[sample]['X'],
samples[sample]['Y']))
### Uncomment
if print_only == False:
self.label_batch_step.setText(
'Move to sample {} (X: {}, Y: {}) | Loop step number: {}/{}'.format(sample,
samples[
sample][
'X'],
samples[
sample][
'Y'],
step_number + 1,
len(
repetitions)))
self.check_pause_abort_batch()
self.motors_dict[self.stage_x]['object'].move(samples[sample]['X'], wait=False)
self.motors_dict[self.stage_y]['object'].move(samples[sample]['Y'], wait=False)
ttime.sleep(0.2)
while (self.motors_dict[self.stage_x]['object'].moving or \
self.motors_dict[self.stage_y]['object'].moving):
QtCore.QCoreApplication.processEvents()
### Uncomment
lut = scans[scan]['Traj'][:scans[scan]['Traj'].find('-')]
traj_name = scans[scan]['Traj'][scans[scan]['Traj'].find('-') + 1:]
if self.last_lut != lut:
print('Init trajectory {} - {}'.format(lut, traj_name))
if print_only == False:
self.label_batch_step.setText(
'Init trajectory {} - {} | Loop step number: {}/{}'.format(lut,
traj_name,
step_number + 1,
len(
repetitions)))
self.check_pause_abort_batch()
self.traj_manager.init(int(lut))
self.last_lut = lut
print('Prepare trajectory {} - {}'.format(lut, traj_name))
if print_only == False:
self.label_batch_step.setText(
'Prepare trajectory {} - {} | Loop step number: {}/{}'.format(lut,
traj_name,
step_number + 1,
len(
repetitions)))
self.check_pause_abort_batch()
self.run_prep_traj()
old_name = scans[scan]['name']
scans[scan]['name'] = '{} - {} - {} - {}'.format(sample, scans[scan]['name'],
traj_name[:traj_name.find('.txt')],
rep + 1)
if scan.find('-') != -1:
scan_name = scan[:scan.find('-')]
else:
scan_name = scan
print('Execute {} - name: {}'.format(scan_name, scans[scan]['name']))
### Uncomment
if print_only == False:
self.label_batch_step.setText(
'Execute {} - name: {} | Loop step number: {}/{}'.format(scan_name,
scans[scan][
'name'],
step_number + 1,
len(repetitions)))
self.check_pause_abort_batch()
uid = self.plan_funcs[self.plan_funcs_names.index(scan_name)](**scans[scan])
if uid:
self.batch_mode_uids.extend(uid)
### Uncomment (previous line)
scans[scan]['name'] = old_name
print('-' * 40)
font = QtGui.QFont()
item.setFont(font)
item.setText(text)
if print_only == False:
self.batch_running = False
self.batch_processor.go = 0
self.label_batch_step.setText('Finished (Idle)')
except Exception as e:
print(e)
print('Batch run aborted!')
font = QtGui.QFont()
item.setFont(font)
item.setText(text)
self.batch_running = False
self.batch_processor.go = 0
self.label_batch_step.setText('Aborted! (Idle)')
return
def setAnalogSampTime(self, text):
self.analog_samp_time = text
def setEncSampTime(self, text):
self.enc_samp_time = text
def setXiaSampTime(self, text):
self.xia_samp_time = text
def re_abort(self):
if self.RE.state != 'idle':
self.RE.abort()
self.RE.is_aborted = True
| 53.803004
| 131
| 0.482686
|
09f88ec52cb4b1182dc69684d2728fe4ffa1f97e
| 5,464
|
py
|
Python
|
clifford/tools/g3c/object_fitting.py
|
rotu/clifford
|
7a0a0e83be9e2b67da2681d66e8cb4ede912fa51
|
[
"BSD-3-Clause"
] | null | null | null |
clifford/tools/g3c/object_fitting.py
|
rotu/clifford
|
7a0a0e83be9e2b67da2681d66e8cb4ede912fa51
|
[
"BSD-3-Clause"
] | null | null | null |
clifford/tools/g3c/object_fitting.py
|
rotu/clifford
|
7a0a0e83be9e2b67da2681d66e8cb4ede912fa51
|
[
"BSD-3-Clause"
] | null | null | null |
from . import *
@numba.njit
def val_fit_circle(point_list):
"""
    Performs Leo Dorst's circle fitting technique
"""
# Check if there are just 3 points
if point_list.shape[0] == 3:
best_obj = point_list[0, :]
for i in range(1, 3):
best_obj = omt_func(best_obj, point_list[i, :])
return val_normalised(best_obj)
# Loop over our points and construct the matrix
accumulator_matrix = np.zeros((32, 32))
for i in range(point_list.shape[0]):
# Get the point as a left gmt matrix
P_i_l = get_left_gmt_matrix(point_list[i,:])
# Multiply and add
accumulator_matrix += P_i_l @ mask0 @ P_i_l
accumulator_matrix = accumulator_matrix @ mask1
# Find the eigenvalues of the matrix
e_vals, e_vecs = np.linalg.eig(accumulator_matrix)
    # Find the smallest and second smallest non-negative eigenvalues
min_eval = np.inf
min_eval_index = -1
min_eval_index2 = -1
for i in range(len(e_vals)):
this_e_val = e_vals[i]
if this_e_val < min_eval and this_e_val > 0:
min_eval = this_e_val
min_eval_index2 = min_eval_index
min_eval_index = i
best_sphere = val_normalised(mask1@np.real(e_vecs[:, min_eval_index]))
second_best_sphere = val_normalised(mask1@np.real(e_vecs[:, min_eval_index2]))
best_circle = val_normalised(mask3@dual_func(omt_func(best_sphere,second_best_sphere)))
return best_circle
def fit_circle(point_list):
"""
    Performs Leo Dorst's circle fitting technique
"""
return layout.MultiVector(value=val_fit_circle(np.array([p.value for p in point_list])))
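# --- Added illustration (hedged), not part of the original module ---
# A minimal usage sketch: assuming the star import above exposes
# `random_conformal_point` from clifford.tools.g3c, a circle can be fit to a
# list of conformal points. This helper is illustrative only and never called.
def _example_fit_circle_usage():
    points = [random_conformal_point() for _ in range(10)]
    return fit_circle(points)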
@numba.njit
def val_fit_line(point_list):
"""
    Does line fitting using a combination of J. Lasenby's and L. Dorst's methods
"""
# Check if there are just 2 points
if point_list.shape[0] == 2:
best_obj = point_list[0, :]
for i in range(1, 2):
best_obj = omt_func(best_obj, point_list[i, :])
return val_normalised(omt_func(best_obj, ninf_val))
accumulator_matrix = np.zeros((32, 32))
for i in range(point_list.shape[0]):
P_i_l = get_left_gmt_matrix(point_list[i, :])
P_i_r = get_right_gmt_matrix(point_list[i, :])
accumulator_matrix += mask3@P_i_l@P_i_r
# Find the eigenvalues of the matrix
e_vals, e_vecs = np.linalg.eig(accumulator_matrix)
    # Find the smallest non-negative eigenvalue
min_eval = np.inf
min_eval_index = -1
for i in range(len(e_vals)):
if e_vals[i] < min_eval and e_vals[i] > 0:
min_eval = e_vals[i]
min_eval_index = i
best_line = mask3@omt_func(dual_func(e_vecs[:, min_eval_index]),ninf_val)
return val_normalised(best_line)
def fit_line(point_list):
"""
    Does line fitting using a combination of J. Lasenby's and L. Dorst's methods
"""
return layout.MultiVector(value=val_fit_line(np.array([p.value for p in point_list])))
@numba.njit
def val_fit_sphere(point_list):
"""
    Performs Leo Dorst's sphere fitting technique
"""
# Check if there are just 4 points
if point_list.shape[0] == 4:
best_sphere = point_list[0, :]
for i in range(1, 4):
best_sphere = omt_func(best_sphere, point_list[i, :])
return val_normalised(best_sphere)
# Loop over our points and construct the matrix
accumulator_matrix = np.zeros((32, 32))
for i in range(point_list.shape[0]):
# Get the point as a left gmt matrix
P_i_l = get_left_gmt_matrix(point_list[i, :])
# Multiply and add
accumulator_matrix += P_i_l @ mask0 @ P_i_l
accumulator_matrix = accumulator_matrix @ mask1
# Find the eigenvalues of the matrix
e_vals, e_vecs = np.linalg.eig(accumulator_matrix)
    # Find the smallest non-negative eigenvalue
min_eval = np.inf
min_eval_index = -1
for i in range(len(e_vals)):
if e_vals[i] < min_eval and e_vals[i] > 0:
min_eval = e_vals[i]
min_eval_index = i
best_sphere = val_normalised(mask4@dual_func(np.real(e_vecs[:, min_eval_index])))
return best_sphere
def fit_sphere(point_list):
"""
    Performs Leo Dorst's sphere fitting technique
"""
return layout.MultiVector(value=val_fit_sphere(np.array([p.value for p in point_list])))
@numba.njit
def val_fit_plane(point_list):
"""
    Does plane fitting using a combination of J. Lasenby's and L. Dorst's methods
"""
# Check if there are just 3 points
if point_list.shape[0] == 3:
best_obj = point_list[0, :]
for i in range(1, 3):
best_obj = omt_func(best_obj, point_list[i, :])
return val_normalised(omt_func(best_obj, ninf_val))
accumulator_matrix = np.zeros((32, 32))
for i in range(point_list.shape[0]):
P_i_l = get_left_gmt_matrix(point_list[i, :])
P_i_r = get_right_gmt_matrix(point_list[i, :])
accumulator_matrix += mask4@P_i_l@P_i_r
e_vals, e_vecs = np.linalg.eig(accumulator_matrix)
min_eval = np.inf
min_eval_index = -1
for i in range(len(e_vals)):
if e_vals[i] < min_eval and e_vals[i] > 0:
min_eval = e_vals[i]
min_eval_index = i
best_plane = val_normalised(mask4@e_vecs[:, min_eval_index])
return best_plane
def fit_plane(point_list):
"""
    Does plane fitting using a combination of J. Lasenby's and L. Dorst's methods
"""
return layout.MultiVector(value=val_fit_plane(np.array([p.value for p in point_list])))
| 35.025641
| 92
| 0.660322
|
e9ee0b3ef0f9af7cce9474de078f5f83a0f73c38
| 77,753
|
py
|
Python
|
examples/python/bus_driver_scheduling_flow_sat.py
|
mingodad/or-tools
|
2c67e00f3a8a861440f0b8016e209c51990a0403
|
[
"Apache-2.0"
] | null | null | null |
examples/python/bus_driver_scheduling_flow_sat.py
|
mingodad/or-tools
|
2c67e00f3a8a861440f0b8016e209c51990a0403
|
[
"Apache-2.0"
] | null | null | null |
examples/python/bus_driver_scheduling_flow_sat.py
|
mingodad/or-tools
|
2c67e00f3a8a861440f0b8016e209c51990a0403
|
[
"Apache-2.0"
] | 2
|
2020-02-26T18:11:33.000Z
|
2020-12-02T07:44:34.000Z
|
# Copyright 2010-2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This model implements a bus driver scheduling problem.
Constraints:
- max driving time per driver <= 9h
- max working time per driver <= 12h
- min working time per driver >= 6.5h (soft)
- 30 min break after each 4h of driving time per driver
- 10 min preparation time before the first shift
- 15 min cleaning time after the last shift
- 2 min waiting time after each shift for passenger boarding and alighting
"""
from __future__ import print_function
import argparse
import collections
import math
from ortools.sat.python import cp_model
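# --- Added illustration (hedged), not part of the original example ---
# A minimal sketch of how the per-driver time caps listed in the module
# docstring could be expressed as CP-SAT constraints. All names here are
# hypothetical and this helper is never called by the model below.
def _illustrate_driver_time_caps():
    model = cp_model.CpModel()
    work_start = model.NewIntVar(0, 24 * 60, 'work_start')
    work_end = model.NewIntVar(0, 24 * 60, 'work_end')
    driving_time = model.NewIntVar(0, 24 * 60, 'driving_time')
    model.Add(work_end >= work_start)
    model.Add(work_end - work_start <= 12 * 60)  # max working time <= 12h
    model.Add(driving_time <= 9 * 60)            # max driving time <= 9h
    return model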
PARSER = argparse.ArgumentParser()
PARSER.add_argument(
'--instance', default=1, type=int, help='Instance number (1..3).')
PARSER.add_argument(
'--output_proto',
default="",
    help='Output file to write the cp_model '
'proto to.')
PARSER.add_argument('--params', default="", help='Sat solver parameters.')
SAMPLE_SHIFTS_SMALL = [
#
# column description:
# - shift id
# - shift start time as hh:mm string (for logging and readability purposes)
# - shift end time as hh:mm string (for logging and readability purposes)
# - shift start minute
# - shift end minute
# - shift duration in minutes
#
[0, '05:18', '06:00', 318, 360, 42],
[1, '05:26', '06:08', 326, 368, 42],
[2, '05:40', '05:56', 340, 356, 16],
[3, '06:06', '06:51', 366, 411, 45],
[4, '06:40', '07:52', 400, 472, 72],
[5, '06:42', '07:13', 402, 433, 31],
[6, '06:48', '08:15', 408, 495, 87],
[7, '06:59', '08:07', 419, 487, 68],
[8, '07:20', '07:36', 440, 456, 16],
[9, '07:35', '08:22', 455, 502, 47],
[10, '07:50', '08:55', 470, 535, 65],
[11, '08:00', '09:05', 480, 545, 65],
[12, '08:00', '08:35', 480, 515, 35],
[13, '08:11', '09:41', 491, 581, 90],
[14, '08:28', '08:50', 508, 530, 22],
[15, '08:35', '08:45', 515, 525, 10],
[16, '08:40', '08:50', 520, 530, 10],
[17, '09:03', '10:28', 543, 628, 85],
[18, '09:23', '09:49', 563, 589, 26],
[19, '09:30', '09:40', 570, 580, 10],
[20, '09:57', '10:20', 597, 620, 23],
[21, '10:09', '11:03', 609, 663, 54],
[22, '10:20', '10:30', 620, 630, 10],
[23, '11:00', '11:10', 660, 670, 10],
[24, '11:45', '12:24', 705, 744, 39],
[25, '12:18', '13:00', 738, 780, 42],
[26, '13:18', '14:44', 798, 884, 86],
[27, '13:53', '14:49', 833, 889, 56],
[28, '14:03', '14:50', 843, 890, 47],
[29, '14:28', '15:15', 868, 915, 47],
[30, '14:30', '15:41', 870, 941, 71],
[31, '14:48', '15:35', 888, 935, 47],
[32, '15:03', '15:50', 903, 950, 47],
[33, '15:28', '16:54', 928, 1014, 86],
[34, '15:38', '16:25', 938, 985, 47],
[35, '15:40', '15:56', 940, 956, 16],
[36, '15:58', '16:45', 958, 1005, 47],
[37, '16:04', '17:30', 964, 1050, 86],
[38, '16:28', '17:15', 988, 1035, 47],
[39, '16:36', '17:21', 996, 1041, 45],
[40, '16:50', '17:00', 1010, 1020, 10],
[41, '16:54', '18:20', 1014, 1100, 86],
[42, '17:01', '17:13', 1021, 1033, 12],
[43, '17:19', '18:31', 1039, 1111, 72],
[44, '17:23', '18:10', 1043, 1090, 47],
[45, '17:34', '18:15', 1054, 1095, 41],
[46, '18:04', '19:29', 1084, 1169, 85],
[47, '18:34', '19:58', 1114, 1198, 84],
[48, '19:56', '20:34', 1196, 1234, 38],
[49, '20:05', '20:48', 1205, 1248, 43]
] # yapf:disable
SAMPLE_SHIFTS_MEDIUM = [
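    # Same column layout as SAMPLE_SHIFTS_SMALL above: shift id, start/end as hh:mm strings, start/end minutes, duration in minutes.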
[0, '04:30', '04:53', 270, 293, 23],
[1, '04:46', '04:56', 286, 296, 10],
[2, '04:52', '05:56', 292, 356, 64],
[3, '04:53', '05:23', 293, 323, 30],
[4, '05:07', '05:44', 307, 344, 37],
[5, '05:10', '06:06', 310, 366, 56],
[6, '05:18', '06:03', 318, 363, 45],
[7, '05:30', '05:40', 330, 340, 10],
[8, '05:30', '05:40', 330, 340, 10],
[9, '05:33', '06:15', 333, 375, 42],
[10, '05:40', '05:50', 340, 350, 10],
[11, '05:43', '06:08', 343, 368, 25],
[12, '05:54', '07:20', 354, 440, 86],
[13, '06:04', '06:37', 364, 397, 33],
[14, '06:13', '06:58', 373, 418, 45],
[15, '06:14', '07:40', 374, 460, 86],
[16, '06:15', '07:15', 375, 435, 60],
[17, '06:16', '06:26', 376, 386, 10],
[18, '06:17', '06:34', 377, 394, 17],
[19, '06:20', '06:36', 380, 396, 16],
[20, '06:22', '07:06', 382, 426, 44],
[21, '06:24', '07:50', 384, 470, 86],
[22, '06:27', '06:44', 387, 404, 17],
[23, '06:30', '06:40', 390, 400, 10],
[24, '06:31', '06:43', 391, 403, 12],
[25, '06:33', '07:53', 393, 473, 80],
[26, '06:34', '07:09', 394, 429, 35],
[27, '06:40', '06:56', 400, 416, 16],
[28, '06:44', '07:17', 404, 437, 33],
[29, '06:46', '06:58', 406, 418, 12],
[30, '06:49', '07:43', 409, 463, 54],
[31, '06:50', '07:05', 410, 425, 15],
[32, '06:52', '07:36', 412, 456, 44],
[33, '06:54', '07:27', 414, 447, 33],
[34, '06:56', '08:23', 416, 503, 87],
[35, '07:04', '07:44', 424, 464, 40],
[36, '07:11', '08:36', 431, 516, 85],
[37, '07:17', '07:35', 437, 455, 18],
[38, '07:22', '08:06', 442, 486, 44],
[39, '07:27', '08:15', 447, 495, 48],
[40, '07:35', '07:45', 455, 465, 10],
[41, '07:43', '08:08', 463, 488, 25],
[42, '07:50', '08:37', 470, 517, 47],
[43, '07:58', '08:45', 478, 525, 47],
[44, '08:00', '08:35', 480, 515, 35],
[45, '08:06', '08:51', 486, 531, 45],
[46, '08:10', '08:45', 490, 525, 35],
[47, '08:15', '08:30', 495, 510, 15],
[48, '08:16', '09:00', 496, 540, 44],
[49, '08:18', '09:16', 498, 556, 58],
[50, '08:20', '08:36', 500, 516, 16],
[51, '08:27', '09:07', 507, 547, 40],
[52, '08:30', '08:45', 510, 525, 15],
[53, '08:35', '09:15', 515, 555, 40],
[54, '08:46', '09:30', 526, 570, 44],
[55, '08:51', '09:17', 531, 557, 26],
[56, '08:55', '09:15', 535, 555, 20],
[57, '08:58', '09:38', 538, 578, 40],
[58, '09:00', '09:35', 540, 575, 35],
[59, '09:00', '09:16', 540, 556, 16],
[60, '09:20', '09:36', 560, 576, 16],
[61, '09:31', '09:43', 571, 583, 12],
[62, '09:33', '10:15', 573, 615, 42],
[63, '09:54', '10:05', 594, 605, 11],
[64, '10:11', '10:38', 611, 638, 27],
[65, '10:18', '11:00', 618, 660, 42],
[66, '10:21', '10:47', 621, 647, 26],
[67, '10:25', '11:04', 625, 664, 39],
[68, '10:26', '11:08', 626, 668, 42],
[69, '10:44', '12:11', 644, 731, 87],
[70, '11:00', '11:16', 660, 676, 16],
[71, '11:15', '11:54', 675, 714, 39],
[72, '11:16', '11:28', 676, 688, 12],
[73, '11:20', '11:30', 680, 690, 10],
[74, '11:21', '11:47', 681, 707, 26],
[75, '11:25', '12:04', 685, 724, 39],
[76, '11:34', '11:45', 694, 705, 11],
[77, '11:35', '12:14', 695, 734, 39],
[78, '11:41', '12:23', 701, 743, 42],
[79, '11:44', '12:35', 704, 755, 51],
[80, '11:46', '11:58', 706, 718, 12],
[81, '12:00', '12:10', 720, 730, 10],
[82, '12:04', '12:15', 724, 735, 11],
[83, '12:04', '13:04', 724, 784, 60],
[84, '12:11', '12:38', 731, 758, 27],
[85, '12:15', '12:54', 735, 774, 39],
[86, '12:25', '13:10', 745, 790, 45],
[87, '12:30', '12:40', 750, 760, 10],
[88, '12:34', '13:58', 754, 838, 84],
[89, '12:38', '13:25', 758, 805, 47],
[90, '12:48', '13:35', 768, 815, 47],
[91, '13:00', '13:16', 780, 796, 16],
[92, '13:05', '13:44', 785, 824, 39],
[93, '13:08', '13:55', 788, 835, 47],
[94, '13:14', '14:38', 794, 878, 84],
[95, '13:23', '13:49', 803, 829, 26],
[96, '13:25', '14:04', 805, 844, 39],
[97, '13:28', '14:54', 808, 894, 86],
[98, '13:31', '13:43', 811, 823, 12],
[99, '13:34', '14:58', 814, 898, 84],
[100, '13:38', '14:25', 818, 865, 47],
[101, '13:38', '15:04', 818, 904, 86],
[102, '13:39', '14:33', 819, 873, 54],
[103, '13:40', '13:50', 820, 830, 10],
[104, '13:43', '14:10', 823, 850, 27],
[105, '13:48', '14:35', 828, 875, 47],
[106, '13:48', '14:35', 828, 875, 47],
[107, '13:53', '14:40', 833, 880, 47],
[108, '13:58', '15:24', 838, 924, 86],
[109, '13:58', '14:25', 838, 865, 27],
[110, '14:00', '14:16', 840, 856, 16],
[111, '14:13', '15:00', 853, 900, 47],
[112, '14:20', '15:31', 860, 931, 71],
[113, '14:25', '15:02', 865, 902, 37],
[114, '14:34', '14:45', 874, 885, 11],
[115, '14:40', '15:51', 880, 951, 71],
[116, '14:40', '14:56', 880, 896, 16],
[117, '14:46', '14:58', 886, 898, 12],
[118, '14:49', '15:43', 889, 943, 54],
[119, '14:52', '15:21', 892, 921, 29],
[120, '14:58', '16:24', 898, 984, 86],
[121, '14:59', '15:53', 899, 953, 54],
[122, '15:00', '15:10', 900, 910, 10],
[123, '15:00', '15:35', 900, 935, 35],
[124, '15:08', '15:45', 908, 945, 37],
[125, '15:12', '15:36', 912, 936, 24],
[126, '15:18', '16:05', 918, 965, 47],
[127, '15:24', '16:05', 924, 965, 41],
[128, '15:31', '15:43', 931, 943, 12],
[129, '15:35', '15:54', 935, 954, 19],
[130, '15:36', '16:21', 936, 981, 45],
[131, '15:39', '16:33', 939, 993, 54],
[132, '15:48', '16:35', 948, 995, 47],
[133, '15:50', '17:01', 950, 1021, 71],
[134, '16:03', '16:50', 963, 1010, 47],
[135, '16:18', '17:44', 978, 1064, 86],
[136, '16:24', '17:05', 984, 1025, 41],
[137, '16:28', '17:15', 988, 1035, 47],
[138, '16:34', '17:15', 994, 1035, 41],
[139, '16:38', '17:25', 998, 1045, 47],
[140, '16:40', '16:56', 1000, 1016, 16],
[141, '16:45', '17:04', 1005, 1024, 19],
[142, '16:52', '17:36', 1012, 1056, 44],
[143, '16:58', '17:45', 1018, 1065, 47],
[144, '17:04', '18:30', 1024, 1110, 86],
[145, '17:04', '17:45', 1024, 1065, 41],
[146, '17:09', '18:03', 1029, 1083, 54],
[147, '17:18', '18:44', 1038, 1124, 86],
[148, '17:28', '18:15', 1048, 1095, 47],
[149, '17:29', '18:41', 1049, 1121, 72],
[150, '17:36', '18:21', 1056, 1101, 45],
[151, '17:38', '18:25', 1058, 1105, 47],
[152, '17:40', '17:56', 1060, 1076, 16],
[153, '17:45', '18:04', 1065, 1084, 19],
[154, '17:46', '17:58', 1066, 1078, 12],
[155, '17:48', '18:35', 1068, 1115, 47],
[156, '17:49', '18:43', 1069, 1123, 54],
[157, '17:55', '18:14', 1075, 1094, 19],
[158, '17:58', '18:45', 1078, 1125, 47],
[159, '18:00', '19:11', 1080, 1151, 71],
[160, '18:04', '18:45', 1084, 1125, 41],
[161, '18:09', '19:03', 1089, 1143, 54],
[162, '18:13', '19:00', 1093, 1140, 47],
[163, '18:13', '18:40', 1093, 1120, 27],
[164, '18:19', '19:13', 1099, 1153, 54],
[165, '18:28', '19:25', 1108, 1165, 57],
[166, '18:48', '19:28', 1128, 1168, 40],
[167, '19:03', '19:45', 1143, 1185, 42],
[168, '19:20', '19:36', 1160, 1176, 16],
[169, '19:21', '19:31', 1161, 1171, 10],
[170, '19:25', '20:04', 1165, 1204, 39],
[171, '19:26', '20:08', 1166, 1208, 42],
[172, '19:30', '19:40', 1170, 1180, 10],
[173, '19:44', '20:33', 1184, 1233, 49],
[174, '19:48', '21:09', 1188, 1269, 81],
[175, '19:53', '21:02', 1193, 1262, 69],
[176, '20:04', '20:29', 1204, 1229, 25],
[177, '20:17', '21:03', 1217, 1263, 46],
[178, '20:20', '20:57', 1220, 1257, 37],
[179, '20:29', '21:18', 1229, 1278, 49],
[180, '20:35', '21:54', 1235, 1314, 79],
[181, '20:40', '20:50', 1240, 1250, 10],
[182, '20:47', '21:42', 1247, 1302, 55],
[183, '21:00', '21:10', 1260, 1270, 10],
[184, '21:07', '21:44', 1267, 1304, 37],
[185, '21:14', '22:03', 1274, 1323, 49],
[186, '21:39', '21:55', 1299, 1315, 16],
[187, '21:40', '22:17', 1300, 1337, 37],
[188, '21:40', '21:50', 1300, 1310, 10],
[189, '21:48', '22:03', 1308, 1323, 15],
[190, '22:17', '23:03', 1337, 1383, 46],
[191, '22:43', '23:08', 1363, 1388, 25],
[192, '23:35', '01:05', 1415, 1505, 90],
[193, '23:46', '00:01', 1426, 1441, 15],
[194, '23:47', '00:33', 1427, 1473, 46],
[195, '23:52', '00:24', 1432, 1464, 32],
[196, '23:58', '00:38', 1438, 1478, 40],
[197, '00:02', '00:12', 1442, 1452, 10],
[198, '00:07', '00:39', 1447, 1479, 32],
[199, '00:25', '01:12', 1465, 1512, 47]
] # yapf:disable
SAMPLE_SHIFTS_LARGE = [
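    # Same column layout as SAMPLE_SHIFTS_SMALL above: shift id, start/end as hh:mm strings, start/end minutes, duration in minutes.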
[0, '04:18', '05:00', 258, 300, 42],
[1, '04:27', '05:08', 267, 308, 41],
[2, '04:29', '05:26', 269, 326, 57],
[3, '04:29', '04:55', 269, 295, 26],
[4, '04:30', '04:53', 270, 293, 23],
[5, '04:30', '04:51', 270, 291, 21],
[6, '04:31', '04:53', 271, 293, 22],
[7, '04:33', '05:15', 273, 315, 42],
[8, '04:34', '04:44', 274, 284, 10],
[9, '04:34', '05:03', 274, 303, 29],
[10, '04:35', '04:50', 275, 290, 15],
[11, '04:36', '04:46', 276, 286, 10],
[12, '04:37', '05:18', 277, 318, 41],
[13, '04:41', '05:13', 281, 313, 32],
[14, '04:42', '05:23', 282, 323, 41],
[15, '04:43', '04:53', 283, 293, 10],
[16, '04:44', '05:45', 284, 345, 61],
[17, '04:45', '05:11', 285, 311, 26],
[18, '04:46', '05:01', 286, 301, 15],
[19, '04:46', '04:56', 286, 296, 10],
[20, '04:47', '05:14', 287, 314, 27],
[21, '04:48', '05:30', 288, 330, 42],
[22, '04:49', '05:41', 289, 341, 52],
[23, '04:49', '05:18', 289, 318, 29],
[24, '04:50', '05:33', 290, 333, 43],
[25, '04:52', '05:56', 292, 356, 64],
[26, '04:52', '05:07', 292, 307, 15],
[27, '04:53', '05:19', 293, 319, 26],
[28, '04:53', '05:23', 293, 323, 30],
[29, '04:55', '05:27', 295, 327, 32],
[30, '04:57', '05:38', 297, 338, 41],
[31, '05:00', '06:00', 300, 360, 60],
[32, '05:00', '05:54', 300, 354, 54],
[33, '05:01', '05:33', 301, 333, 32],
[34, '05:01', '05:26', 301, 326, 25],
[35, '05:02', '05:29', 302, 329, 27],
[36, '05:02', '05:12', 302, 312, 10],
[37, '05:03', '05:45', 303, 345, 42],
[38, '05:03', '05:18', 303, 318, 15],
[39, '05:03', '06:28', 303, 388, 85],
[40, '05:03', '05:13', 303, 313, 10],
[41, '05:04', '06:24', 304, 384, 80],
[42, '05:07', '05:44', 307, 344, 37],
[43, '05:08', '05:48', 308, 348, 40],
[44, '05:10', '06:06', 310, 366, 56],
[45, '05:11', '05:37', 311, 337, 26],
[46, '05:11', '05:53', 311, 353, 42],
[47, '05:13', '06:15', 313, 375, 62],
[48, '05:13', '05:38', 313, 338, 25],
[49, '05:16', '05:44', 316, 344, 28],
[50, '05:17', '05:27', 317, 327, 10],
[51, '05:18', '06:40', 318, 400, 82],
[52, '05:18', '06:03', 318, 363, 45],
[53, '05:18', '06:11', 318, 371, 53],
[54, '05:18', '06:00', 318, 360, 42],
[55, '05:19', '06:34', 319, 394, 75],
[56, '05:20', '06:17', 320, 377, 57],
[57, '05:22', '05:59', 322, 359, 37],
[58, '05:24', '05:48', 324, 348, 24],
[59, '05:25', '05:40', 325, 340, 15],
[60, '05:26', '06:08', 326, 368, 42],
[61, '05:27', '06:30', 327, 390, 63],
[62, '05:27', '05:54', 327, 354, 27],
[63, '05:28', '05:53', 328, 353, 25],
[64, '05:29', '05:44', 329, 344, 15],
[65, '05:30', '05:40', 330, 340, 10],
[66, '05:30', '05:40', 330, 340, 10],
[67, '05:30', '05:40', 330, 340, 10],
[68, '05:32', '06:53', 332, 413, 81],
[69, '05:33', '07:00', 333, 420, 87],
[70, '05:33', '06:15', 333, 375, 42],
[71, '05:33', '05:47', 333, 347, 14],
[72, '05:37', '06:13', 337, 373, 36],
[73, '05:37', '06:05', 337, 365, 28],
[74, '05:38', '06:33', 338, 393, 55],
[75, '05:38', '06:04', 338, 364, 26],
[76, '05:38', '06:18', 338, 378, 40],
[77, '05:39', '05:54', 339, 354, 15],
[78, '05:40', '05:56', 340, 356, 16],
[79, '05:40', '06:41', 340, 401, 61],
[80, '05:40', '05:50', 340, 350, 10],
[81, '05:41', '06:23', 341, 383, 42],
[82, '05:41', '06:01', 341, 361, 20],
[83, '05:43', '06:08', 343, 368, 25],
[84, '05:44', '07:10', 344, 430, 86],
[85, '05:44', '05:55', 344, 355, 11],
[86, '05:45', '06:44', 345, 404, 59],
[87, '05:47', '06:17', 347, 377, 30],
[88, '05:48', '07:08', 348, 428, 80],
[89, '05:48', '06:30', 348, 390, 42],
[90, '05:50', '06:50', 350, 410, 60],
[91, '05:50', '06:00', 350, 360, 10],
[92, '05:50', '06:00', 350, 360, 10],
[93, '05:50', '06:51', 350, 411, 61],
[94, '05:52', '06:33', 352, 393, 41],
[95, '05:52', '06:36', 352, 396, 44],
[96, '05:52', '06:23', 352, 383, 31],
[97, '05:54', '06:14', 354, 374, 20],
[98, '05:54', '07:20', 354, 440, 86],
[99, '05:55', '06:40', 355, 400, 45],
[100, '05:55', '06:27', 355, 387, 32],
[101, '05:56', '06:35', 356, 395, 39],
[102, '05:56', '06:06', 356, 366, 10],
[103, '05:57', '06:21', 357, 381, 24],
[104, '05:58', '07:23', 358, 443, 85],
[105, '05:58', '06:23', 358, 383, 25],
[106, '05:58', '06:08', 358, 368, 10],
[107, '05:58', '06:43', 358, 403, 45],
[108, '06:00', '06:10', 360, 370, 10],
[109, '06:00', '06:16', 360, 376, 16],
[110, '06:00', '07:01', 360, 421, 61],
[111, '06:01', '07:00', 361, 420, 59],
[112, '06:01', '06:13', 361, 373, 12],
[113, '06:01', '06:45', 361, 405, 44],
[114, '06:03', '06:50', 363, 410, 47],
[115, '06:04', '06:37', 364, 397, 33],
[116, '06:04', '07:30', 364, 450, 86],
[117, '06:05', '06:24', 365, 384, 19],
[118, '06:06', '06:51', 366, 411, 45],
[119, '06:07', '06:43', 367, 403, 36],
[120, '06:08', '07:30', 368, 450, 82],
[121, '06:10', '06:20', 370, 380, 10],
[122, '06:10', '07:17', 370, 437, 67],
[123, '06:11', '06:54', 371, 414, 43],
[124, '06:11', '06:21', 371, 381, 10],
[125, '06:13', '06:38', 373, 398, 25],
[126, '06:13', '06:58', 373, 418, 45],
[127, '06:13', '06:53', 373, 413, 40],
[128, '06:14', '07:03', 374, 423, 49],
[129, '06:14', '06:47', 374, 407, 33],
[130, '06:14', '07:40', 374, 460, 86],
[131, '06:15', '07:15', 375, 435, 60],
[132, '06:16', '06:28', 376, 388, 12],
[133, '06:16', '06:26', 376, 386, 10],
[134, '06:17', '06:34', 377, 394, 17],
[135, '06:18', '07:06', 378, 426, 48],
[136, '06:18', '07:38', 378, 458, 80],
[137, '06:18', '07:02', 378, 422, 44],
[138, '06:19', '06:53', 379, 413, 34],
[139, '06:20', '07:25', 380, 445, 65],
[140, '06:20', '06:36', 380, 396, 16],
[141, '06:20', '06:30', 380, 390, 10],
[142, '06:20', '06:30', 380, 390, 10],
[143, '06:21', '06:49', 381, 409, 28],
[144, '06:22', '07:06', 382, 426, 44],
[145, '06:24', '07:50', 384, 470, 86],
[146, '06:24', '06:57', 384, 417, 33],
[147, '06:26', '07:45', 386, 465, 79],
[148, '06:26', '07:10', 386, 430, 44],
[149, '06:27', '06:44', 387, 404, 17],
[150, '06:28', '06:53', 388, 413, 25],
[151, '06:28', '07:14', 388, 434, 46],
[152, '06:29', '07:03', 389, 423, 34],
[153, '06:30', '06:40', 390, 400, 10],
[154, '06:30', '07:37', 390, 457, 67],
[155, '06:31', '06:43', 391, 403, 12],
[156, '06:33', '07:14', 393, 434, 41],
[157, '06:33', '07:53', 393, 473, 80],
[158, '06:34', '08:16', 394, 496, 102],
[159, '06:34', '07:09', 394, 429, 35],
[160, '06:34', '07:07', 394, 427, 33],
[161, '06:36', '07:21', 396, 441, 45],
[162, '06:37', '07:22', 397, 442, 45],
[163, '06:37', '06:54', 397, 414, 17],
[164, '06:38', '07:30', 398, 450, 52],
[165, '06:38', '07:18', 398, 438, 40],
[166, '06:39', '07:33', 399, 453, 54],
[167, '06:40', '07:52', 400, 472, 72],
[168, '06:40', '06:50', 400, 410, 10],
[169, '06:40', '07:22', 400, 442, 42],
[170, '06:40', '06:56', 400, 416, 16],
[171, '06:41', '08:00', 401, 480, 79],
[172, '06:42', '07:26', 402, 446, 44],
[173, '06:42', '07:13', 402, 433, 31],
[174, '06:43', '07:08', 403, 428, 25],
[175, '06:43', '07:30', 403, 450, 47],
[176, '06:43', '07:23', 403, 443, 40],
[177, '06:44', '07:17', 404, 437, 33],
[178, '06:44', '08:13', 404, 493, 89],
[179, '06:46', '07:01', 406, 421, 15],
[180, '06:46', '06:58', 406, 418, 12],
[181, '06:47', '07:04', 407, 424, 17],
[182, '06:48', '08:15', 408, 495, 87],
[183, '06:48', '07:34', 408, 454, 46],
[184, '06:48', '07:37', 408, 457, 49],
[185, '06:49', '07:43', 409, 463, 54],
[186, '06:50', '08:00', 410, 480, 70],
[187, '06:50', '07:00', 410, 420, 10],
[188, '06:50', '07:05', 410, 425, 15],
[189, '06:51', '07:18', 411, 438, 27],
[190, '06:52', '07:36', 412, 456, 44],
[191, '06:53', '07:37', 413, 457, 44],
[192, '06:54', '08:20', 414, 500, 86],
[193, '06:54', '07:27', 414, 447, 33],
[194, '06:54', '07:20', 414, 440, 26],
[195, '06:56', '08:23', 416, 503, 87],
[196, '06:57', '07:12', 417, 432, 15],
[197, '06:57', '07:58', 417, 478, 61],
[198, '06:57', '07:45', 417, 465, 48],
[199, '06:57', '07:40', 417, 460, 43],
[200, '06:58', '07:23', 418, 443, 25],
[201, '06:59', '07:53', 419, 473, 54],
[202, '06:59', '08:07', 419, 487, 68],
[203, '07:00', '07:10', 420, 430, 10],
[204, '07:00', '07:16', 420, 436, 16],
[205, '07:01', '08:30', 421, 510, 89],
[206, '07:01', '07:13', 421, 433, 12],
[207, '07:01', '07:43', 421, 463, 42],
[208, '07:03', '08:30', 423, 510, 87],
[209, '07:04', '07:37', 424, 457, 33],
[210, '07:04', '07:44', 424, 464, 40],
[211, '07:05', '07:52', 425, 472, 47],
[212, '07:05', '08:05', 425, 485, 60],
[213, '07:05', '07:46', 425, 466, 41],
[214, '07:06', '07:51', 426, 471, 45],
[215, '07:07', '08:08', 427, 488, 61],
[216, '07:07', '07:52', 427, 472, 45],
[217, '07:07', '08:16', 427, 496, 69],
[218, '07:07', '07:27', 427, 447, 20],
[219, '07:09', '07:50', 429, 470, 41],
[220, '07:09', '08:40', 429, 520, 91],
[221, '07:09', '08:03', 429, 483, 54],
[222, '07:10', '07:20', 430, 440, 10],
[223, '07:11', '08:36', 431, 516, 85],
[224, '07:12', '08:00', 432, 480, 48],
[225, '07:12', '07:47', 432, 467, 35],
[226, '07:13', '07:54', 433, 474, 41],
[227, '07:13', '07:38', 433, 458, 25],
[228, '07:14', '07:59', 434, 479, 45],
[229, '07:16', '08:50', 436, 530, 94],
[230, '07:16', '07:28', 436, 448, 12],
[231, '07:17', '07:35', 437, 455, 18],
[232, '07:17', '07:58', 437, 478, 41],
[233, '07:18', '08:06', 438, 486, 48],
[234, '07:18', '08:44', 438, 524, 86],
[235, '07:19', '08:13', 439, 493, 54],
[236, '07:20', '08:02', 440, 482, 42],
[237, '07:20', '08:07', 440, 487, 47],
[238, '07:20', '07:30', 440, 450, 10],
[239, '07:20', '07:57', 440, 477, 37],
[240, '07:20', '07:36', 440, 456, 16],
[241, '07:21', '07:48', 441, 468, 27],
[242, '07:22', '08:06', 442, 486, 44],
[243, '07:22', '08:25', 442, 505, 63],
[244, '07:24', '08:27', 444, 507, 63],
[245, '07:24', '08:05', 444, 485, 41],
[246, '07:26', '08:23', 446, 503, 57],
[247, '07:26', '08:52', 446, 532, 86],
[248, '07:27', '08:07', 447, 487, 40],
[249, '07:27', '07:42', 447, 462, 15],
[250, '07:27', '08:15', 447, 495, 48],
[251, '07:28', '07:53', 448, 473, 25],
[252, '07:28', '08:09', 448, 489, 41],
[253, '07:28', '07:38', 448, 458, 10],
[254, '07:30', '08:35', 450, 515, 65],
[255, '07:31', '07:43', 451, 463, 12],
[256, '07:32', '08:13', 452, 493, 41],
[257, '07:34', '09:00', 454, 540, 86],
[258, '07:34', '08:33', 454, 513, 59],
[259, '07:34', '09:04', 454, 544, 90],
[260, '07:35', '08:22', 455, 502, 47],
[261, '07:35', '07:45', 455, 465, 10],
[262, '07:35', '08:16', 455, 496, 41],
[263, '07:36', '08:17', 456, 497, 41],
[264, '07:36', '08:36', 456, 516, 60],
[265, '07:37', '07:50', 457, 470, 13],
[266, '07:40', '07:56', 460, 476, 16],
[267, '07:40', '08:20', 460, 500, 40],
[268, '07:40', '08:45', 460, 525, 65],
[269, '07:41', '08:39', 461, 519, 58],
[270, '07:41', '07:51', 461, 471, 10],
[271, '07:42', '08:30', 462, 510, 48],
[272, '07:42', '08:21', 462, 501, 39],
[273, '07:43', '08:08', 463, 488, 25],
[274, '07:43', '08:24', 463, 504, 41],
[275, '07:44', '09:10', 464, 550, 86],
[276, '07:44', '08:43', 464, 523, 59],
[277, '07:46', '08:28', 466, 508, 42],
[278, '07:46', '07:58', 466, 478, 12],
[279, '07:47', '08:00', 467, 480, 13],
[280, '07:48', '09:14', 468, 554, 86],
[281, '07:49', '08:32', 469, 512, 43],
[282, '07:50', '08:55', 470, 535, 65],
[283, '07:50', '08:00', 470, 480, 10],
[284, '07:50', '08:37', 470, 517, 47],
[285, '07:50', '08:26', 470, 506, 36],
[286, '07:51', '08:18', 471, 498, 27],
[287, '07:52', '08:21', 472, 501, 29],
[288, '07:53', '08:35', 473, 515, 42],
[289, '07:54', '09:19', 474, 559, 85],
[290, '07:55', '08:53', 475, 533, 58],
[291, '07:56', '08:54', 476, 534, 58],
[292, '07:57', '08:39', 477, 519, 42],
[293, '07:57', '08:10', 477, 490, 13],
[294, '07:58', '08:45', 478, 525, 47],
[295, '07:58', '08:23', 478, 503, 25],
[296, '08:00', '08:10', 480, 490, 10],
[297, '08:00', '09:05', 480, 545, 65],
[298, '08:00', '08:16', 480, 496, 16],
[299, '08:00', '08:35', 480, 515, 35],
[300, '08:01', '08:13', 481, 493, 12],
[301, '08:01', '08:43', 481, 523, 42],
[302, '08:03', '09:26', 483, 566, 83],
[303, '08:04', '09:29', 484, 569, 85],
[304, '08:05', '08:21', 485, 501, 16],
[305, '08:05', '08:47', 485, 527, 42],
[306, '08:06', '08:51', 486, 531, 45],
[307, '08:06', '09:03', 486, 543, 57],
[308, '08:07', '08:20', 487, 500, 13],
[309, '08:08', '08:55', 488, 535, 47],
[310, '08:08', '08:50', 488, 530, 42],
[311, '08:10', '08:45', 490, 525, 35],
[312, '08:10', '09:15', 490, 555, 65],
[313, '08:10', '08:20', 490, 500, 10],
[314, '08:11', '09:41', 491, 581, 90],
[315, '08:12', '08:55', 492, 535, 43],
[316, '08:13', '08:38', 493, 518, 25],
[317, '08:14', '09:38', 494, 578, 84],
[318, '08:15', '08:30', 495, 510, 15],
[319, '08:16', '08:30', 496, 510, 14],
[320, '08:16', '08:28', 496, 508, 12],
[321, '08:16', '09:00', 496, 540, 44],
[322, '08:17', '09:13', 497, 553, 56],
[323, '08:18', '09:16', 498, 556, 58],
[324, '08:18', '09:05', 498, 545, 47],
[325, '08:20', '08:36', 500, 516, 16],
[326, '08:20', '08:55', 500, 535, 35],
[327, '08:20', '09:05', 500, 545, 45],
[328, '08:20', '08:30', 500, 510, 10],
[329, '08:20', '09:25', 500, 565, 65],
[330, '08:21', '08:38', 501, 518, 17],
[331, '08:21', '08:47', 501, 527, 26],
[332, '08:22', '08:45', 502, 525, 23],
[333, '08:23', '09:10', 503, 550, 47],
[334, '08:24', '09:48', 504, 588, 84],
[335, '08:26', '08:46', 506, 526, 20],
[336, '08:27', '09:07', 507, 547, 40],
[337, '08:28', '08:50', 508, 530, 22],
[338, '08:28', '09:56', 508, 596, 88],
[339, '08:28', '09:23', 508, 563, 55],
[340, '08:29', '09:20', 509, 560, 51],
[341, '08:30', '09:05', 510, 545, 35],
[342, '08:30', '08:45', 510, 525, 15],
[343, '08:30', '08:40', 510, 520, 10],
[344, '08:30', '09:35', 510, 575, 65],
[345, '08:31', '08:43', 511, 523, 12],
[346, '08:31', '09:13', 511, 553, 42],
[347, '08:34', '09:58', 514, 598, 84],
[348, '08:35', '08:55', 515, 535, 20],
[349, '08:35', '09:15', 515, 555, 40],
[350, '08:35', '08:45', 515, 525, 10],
[351, '08:36', '08:46', 516, 526, 10],
[352, '08:36', '09:00', 516, 540, 24],
[353, '08:38', '09:20', 518, 560, 42],
[354, '08:38', '09:35', 518, 575, 57],
[355, '08:38', '09:14', 518, 554, 36],
[356, '08:39', '09:33', 519, 573, 54],
[357, '08:40', '09:45', 520, 585, 65],
[358, '08:40', '08:50', 520, 530, 10],
[359, '08:40', '08:56', 520, 536, 16],
[360, '08:42', '09:25', 522, 565, 43],
[361, '08:43', '09:08', 523, 548, 25],
[362, '08:44', '09:35', 524, 575, 51],
[363, '08:45', '09:00', 525, 540, 15],
[364, '08:45', '09:05', 525, 545, 20],
[365, '08:46', '09:24', 526, 564, 38],
[366, '08:46', '08:58', 526, 538, 12],
[367, '08:46', '09:30', 526, 570, 44],
[368, '08:48', '10:11', 528, 611, 83],
[369, '08:48', '10:13', 528, 613, 85],
[370, '08:49', '09:43', 529, 583, 54],
[371, '08:50', '09:30', 530, 570, 40],
[372, '08:50', '10:00', 530, 600, 70],
[373, '08:50', '09:00', 530, 540, 10],
[374, '08:51', '09:17', 531, 557, 26],
[375, '08:53', '09:20', 533, 560, 27],
[376, '08:53', '09:35', 533, 575, 42],
[377, '08:55', '09:34', 535, 574, 39],
[378, '08:55', '09:15', 535, 555, 20],
[379, '08:58', '09:38', 538, 578, 40],
[380, '08:58', '10:26', 538, 626, 88],
[381, '08:59', '09:53', 539, 593, 54],
[382, '08:59', '09:50', 539, 590, 51],
[383, '09:00', '09:35', 540, 575, 35],
[384, '09:00', '09:16', 540, 556, 16],
[385, '09:00', '09:10', 540, 550, 10],
[386, '09:00', '09:16', 540, 556, 16],
[387, '09:01', '09:13', 541, 553, 12],
[388, '09:03', '09:45', 543, 585, 42],
[389, '09:03', '10:28', 543, 628, 85],
[390, '09:05', '09:44', 545, 584, 39],
[391, '09:05', '09:25', 545, 565, 20],
[392, '09:08', '09:53', 548, 593, 45],
[393, '09:08', '10:04', 548, 604, 56],
[394, '09:09', '10:03', 549, 603, 54],
[395, '09:10', '10:15', 550, 615, 65],
[396, '09:10', '09:20', 550, 560, 10],
[397, '09:11', '09:38', 551, 578, 27],
[398, '09:13', '10:00', 553, 600, 47],
[399, '09:14', '09:39', 554, 579, 25],
[400, '09:14', '10:05', 554, 605, 51],
[401, '09:15', '09:54', 555, 594, 39],
[402, '09:16', '09:28', 556, 568, 12],
[403, '09:18', '10:43', 558, 643, 85],
[404, '09:18', '10:41', 558, 641, 83],
[405, '09:18', '09:58', 558, 598, 40],
[406, '09:19', '10:13', 559, 613, 54],
[407, '09:20', '09:30', 560, 570, 10],
[408, '09:20', '09:36', 560, 576, 16],
[409, '09:21', '09:47', 561, 587, 26],
[410, '09:23', '10:30', 563, 630, 67],
[411, '09:23', '10:05', 563, 605, 42],
[412, '09:23', '09:49', 563, 589, 26],
[413, '09:24', '09:35', 564, 575, 11],
[414, '09:25', '09:35', 565, 575, 10],
[415, '09:25', '10:04', 565, 604, 39],
[416, '09:28', '10:08', 568, 608, 40],
[417, '09:29', '09:45', 569, 585, 16],
[418, '09:29', '10:20', 569, 620, 51],
[419, '09:29', '10:56', 569, 656, 87],
[420, '09:29', '10:23', 569, 623, 54],
[421, '09:30', '09:40', 570, 580, 10],
[422, '09:31', '09:43', 571, 583, 12],
[423, '09:33', '10:58', 573, 658, 85],
[424, '09:33', '10:15', 573, 615, 42],
[425, '09:34', '09:45', 574, 585, 11],
[426, '09:35', '10:14', 575, 614, 39],
[427, '09:38', '10:45', 578, 645, 67],
[428, '09:39', '10:33', 579, 633, 54],
[429, '09:40', '09:56', 580, 596, 16],
[430, '09:40', '09:50', 580, 590, 10],
[431, '09:41', '10:08', 581, 608, 27],
[432, '09:41', '10:23', 581, 623, 42],
[433, '09:44', '10:35', 584, 635, 51],
[434, '09:44', '11:11', 584, 671, 87],
[435, '09:44', '09:55', 584, 595, 11],
[436, '09:45', '10:24', 585, 624, 39],
[437, '09:46', '09:58', 586, 598, 12],
[438, '09:48', '10:30', 588, 630, 42],
[439, '09:48', '11:13', 588, 673, 85],
[440, '09:48', '10:04', 588, 604, 16],
[441, '09:49', '10:43', 589, 643, 54],
[442, '09:50', '10:00', 590, 600, 10],
[443, '09:51', '10:17', 591, 617, 26],
[444, '09:53', '10:49', 593, 649, 56],
[445, '09:53', '11:00', 593, 660, 67],
[446, '09:54', '10:05', 594, 605, 11],
[447, '09:55', '10:34', 595, 634, 39],
[448, '09:56', '10:38', 596, 638, 42],
[449, '09:57', '10:20', 597, 620, 23],
[450, '09:59', '11:26', 599, 686, 87],
[451, '09:59', '10:50', 599, 650, 51],
[452, '09:59', '10:53', 599, 653, 54],
[453, '10:00', '10:16', 600, 616, 16],
[454, '10:00', '10:10', 600, 610, 10],
[455, '10:01', '10:13', 601, 613, 12],
[456, '10:03', '11:28', 603, 688, 85],
[457, '10:03', '10:45', 603, 645, 42],
[458, '10:04', '10:15', 604, 615, 11],
[459, '10:05', '10:44', 605, 644, 39],
[460, '10:08', '11:15', 608, 675, 67],
[461, '10:09', '11:03', 609, 663, 54],
[462, '10:10', '10:20', 610, 620, 10],
[463, '10:11', '10:38', 611, 638, 27],
[464, '10:11', '10:53', 611, 653, 42],
[465, '10:14', '11:05', 614, 665, 51],
[466, '10:14', '11:41', 614, 701, 87],
[467, '10:14', '10:25', 614, 625, 11],
[468, '10:15', '10:54', 615, 654, 39],
[469, '10:16', '10:28', 616, 628, 12],
[470, '10:18', '11:43', 618, 703, 85],
[471, '10:18', '11:00', 618, 660, 42],
[472, '10:19', '11:13', 619, 673, 54],
[473, '10:20', '10:30', 620, 630, 10],
[474, '10:20', '10:36', 620, 636, 16],
[475, '10:21', '10:47', 621, 647, 26],
[476, '10:23', '11:30', 623, 690, 67],
[477, '10:23', '10:45', 623, 645, 22],
[478, '10:24', '10:35', 624, 635, 11],
[479, '10:25', '11:04', 625, 664, 39],
[480, '10:26', '11:08', 626, 668, 42],
[481, '10:29', '11:20', 629, 680, 51],
[482, '10:29', '11:23', 629, 683, 54],
[483, '10:29', '11:56', 629, 716, 87],
[484, '10:30', '10:40', 630, 640, 10],
[485, '10:31', '10:43', 631, 643, 12],
[486, '10:33', '11:15', 633, 675, 42],
[487, '10:33', '11:58', 633, 718, 85],
[488, '10:34', '10:45', 634, 645, 11],
[489, '10:35', '11:14', 635, 674, 39],
[490, '10:38', '11:45', 638, 705, 67],
[491, '10:39', '11:33', 639, 693, 54],
[492, '10:40', '10:50', 640, 650, 10],
[493, '10:40', '10:56', 640, 656, 16],
[494, '10:41', '11:23', 641, 683, 42],
[495, '10:41', '11:08', 641, 668, 27],
[496, '10:44', '12:11', 644, 731, 87],
[497, '10:44', '11:35', 644, 695, 51],
[498, '10:44', '10:55', 644, 655, 11],
[499, '10:45', '11:24', 645, 684, 39],
[500, '10:46', '10:58', 646, 658, 12],
[501, '10:48', '12:13', 648, 733, 85],
[502, '10:48', '11:30', 648, 690, 42],
[503, '10:49', '11:43', 649, 703, 54],
[504, '10:50', '11:00', 650, 660, 10],
[505, '10:51', '11:17', 651, 677, 26],
[506, '10:53', '12:00', 653, 720, 67],
[507, '10:53', '11:20', 653, 680, 27],
[508, '10:54', '11:05', 654, 665, 11],
[509, '10:55', '11:34', 655, 694, 39],
[510, '10:56', '11:38', 656, 698, 42],
[511, '10:59', '11:14', 659, 674, 15],
[512, '10:59', '12:26', 659, 746, 87],
[513, '10:59', '11:53', 659, 713, 54],
[514, '10:59', '11:50', 659, 710, 51],
[515, '11:00', '11:16', 660, 676, 16],
[516, '11:00', '11:10', 660, 670, 10],
[517, '11:01', '11:13', 661, 673, 12],
[518, '11:03', '11:45', 663, 705, 42],
[519, '11:03', '12:28', 663, 748, 85],
[520, '11:04', '11:15', 664, 675, 11],
[521, '11:05', '11:44', 665, 704, 39],
[522, '11:08', '12:15', 668, 735, 67],
[523, '11:09', '12:03', 669, 723, 54],
[524, '11:10', '11:20', 670, 680, 10],
[525, '11:11', '11:38', 671, 698, 27],
[526, '11:11', '11:53', 671, 713, 42],
[527, '11:14', '11:25', 674, 685, 11],
[528, '11:14', '12:05', 674, 725, 51],
[529, '11:14', '12:38', 674, 758, 84],
[530, '11:14', '12:41', 674, 761, 87],
[531, '11:15', '11:54', 675, 714, 39],
[532, '11:16', '11:28', 676, 688, 12],
[533, '11:18', '12:00', 678, 720, 42],
[534, '11:19', '12:13', 679, 733, 54],
[535, '11:20', '11:30', 680, 690, 10],
[536, '11:20', '11:36', 680, 696, 16],
[537, '11:21', '11:47', 681, 707, 26],
[538, '11:23', '12:30', 683, 750, 67],
[539, '11:23', '11:49', 683, 709, 26],
[540, '11:24', '12:48', 684, 768, 84],
[541, '11:24', '11:35', 684, 695, 11],
[542, '11:25', '12:04', 685, 724, 39],
[543, '11:26', '12:08', 686, 728, 42],
[544, '11:29', '11:44', 689, 704, 15],
[545, '11:29', '12:23', 689, 743, 54],
[546, '11:29', '12:20', 689, 740, 51],
[547, '11:29', '12:54', 689, 774, 85],
[548, '11:30', '11:40', 690, 700, 10],
[549, '11:31', '11:43', 691, 703, 12],
[550, '11:33', '12:15', 693, 735, 42],
[551, '11:34', '12:58', 694, 778, 84],
[552, '11:34', '11:45', 694, 705, 11],
[553, '11:35', '12:14', 695, 734, 39],
[554, '11:38', '12:45', 698, 765, 67],
[555, '11:39', '12:33', 699, 753, 54],
[556, '11:40', '11:56', 700, 716, 16],
[557, '11:40', '11:50', 700, 710, 10],
[558, '11:41', '12:08', 701, 728, 27],
[559, '11:41', '12:23', 701, 743, 42],
[560, '11:44', '11:55', 704, 715, 11],
[561, '11:44', '13:14', 704, 794, 90],
[562, '11:44', '13:08', 704, 788, 84],
[563, '11:44', '12:35', 704, 755, 51],
[564, '11:45', '12:24', 705, 744, 39],
[565, '11:46', '11:58', 706, 718, 12],
[566, '11:48', '12:30', 708, 750, 42],
[567, '11:49', '12:43', 709, 763, 54],
[568, '11:50', '12:00', 710, 720, 10],
[569, '11:51', '12:17', 711, 737, 26],
[570, '11:53', '12:49', 713, 769, 56],
[571, '11:53', '13:00', 713, 780, 67],
[572, '11:54', '13:18', 714, 798, 84],
[573, '11:54', '12:05', 714, 725, 11],
[574, '11:55', '12:40', 715, 760, 45],
[575, '11:55', '12:34', 715, 754, 39],
[576, '11:56', '12:35', 716, 755, 39],
[577, '11:57', '12:20', 717, 740, 23],
[578, '11:58', '12:29', 718, 749, 31],
[579, '11:59', '12:50', 719, 770, 51],
[580, '11:59', '12:53', 719, 773, 54],
[581, '11:59', '13:24', 719, 804, 85],
[582, '11:59', '12:14', 719, 734, 15],
[583, '12:00', '12:16', 720, 736, 16],
[584, '12:00', '12:10', 720, 730, 10],
[585, '12:01', '12:45', 721, 765, 44],
[586, '12:01', '12:13', 721, 733, 12],
[587, '12:03', '12:50', 723, 770, 47],
[588, '12:04', '12:15', 724, 735, 11],
[589, '12:04', '13:04', 724, 784, 60],
[590, '12:04', '13:28', 724, 808, 84],
[591, '12:05', '12:44', 725, 764, 39],
[592, '12:08', '13:11', 728, 791, 63],
[593, '12:08', '12:39', 728, 759, 31],
[594, '12:09', '13:03', 729, 783, 54],
[595, '12:10', '12:20', 730, 740, 10],
[596, '12:11', '12:55', 731, 775, 44],
[597, '12:11', '12:38', 731, 758, 27],
[598, '12:14', '13:05', 734, 785, 51],
[599, '12:14', '12:25', 734, 745, 11],
[600, '12:14', '13:44', 734, 824, 90],
[601, '12:14', '13:38', 734, 818, 84],
[602, '12:15', '12:54', 735, 774, 39],
[603, '12:16', '12:28', 736, 748, 12],
[604, '12:18', '13:00', 738, 780, 42],
[605, '12:19', '13:13', 739, 793, 54],
[606, '12:20', '12:30', 740, 750, 10],
[607, '12:20', '13:31', 740, 811, 71],
[608, '12:20', '12:30', 740, 750, 10],
[609, '12:20', '12:36', 740, 756, 16],
[610, '12:21', '12:47', 741, 767, 26],
[611, '12:23', '12:45', 743, 765, 22],
[612, '12:24', '12:35', 744, 755, 11],
[613, '12:24', '13:48', 744, 828, 84],
[614, '12:25', '13:10', 745, 790, 45],
[615, '12:25', '13:04', 745, 784, 39],
[616, '12:26', '13:05', 746, 785, 39],
[617, '12:28', '13:54', 748, 834, 86],
[618, '12:28', '12:38', 748, 758, 10],
[619, '12:28', '13:15', 748, 795, 47],
[620, '12:29', '13:23', 749, 803, 54],
[621, '12:30', '13:41', 750, 821, 71],
[622, '12:30', '12:40', 750, 760, 10],
[623, '12:31', '13:15', 751, 795, 44],
[624, '12:31', '12:43', 751, 763, 12],
[625, '12:33', '12:48', 753, 768, 15],
[626, '12:33', '13:20', 753, 800, 47],
[627, '12:34', '13:58', 754, 838, 84],
[628, '12:34', '13:34', 754, 814, 60],
[629, '12:34', '12:45', 754, 765, 11],
[630, '12:35', '13:14', 755, 794, 39],
[631, '12:38', '13:25', 758, 805, 47],
[632, '12:38', '13:25', 758, 805, 47],
[633, '12:38', '14:04', 758, 844, 86],
[634, '12:39', '13:33', 759, 813, 54],
[635, '12:40', '13:51', 760, 831, 71],
[636, '12:40', '12:50', 760, 770, 10],
[637, '12:40', '12:56', 760, 776, 16],
[638, '12:41', '13:08', 761, 788, 27],
[639, '12:43', '13:30', 763, 810, 47],
[640, '12:44', '12:55', 764, 775, 11],
[641, '12:44', '14:08', 764, 848, 84],
[642, '12:45', '13:24', 765, 804, 39],
[643, '12:46', '12:58', 766, 778, 12],
[644, '12:46', '13:21', 766, 801, 35],
[645, '12:48', '14:14', 768, 854, 86],
[646, '12:48', '13:35', 768, 815, 47],
[647, '12:48', '12:58', 768, 778, 10],
[648, '12:48', '13:35', 768, 815, 47],
[649, '12:49', '13:43', 769, 823, 54],
[650, '12:50', '14:01', 770, 841, 71],
[651, '12:50', '13:00', 770, 780, 10],
[652, '12:50', '13:00', 770, 780, 10],
[653, '12:51', '13:17', 771, 797, 26],
[654, '12:53', '13:20', 773, 800, 27],
[655, '12:53', '13:24', 773, 804, 31],
[656, '12:53', '13:40', 773, 820, 47],
[657, '12:54', '14:18', 774, 858, 84],
[658, '12:54', '13:05', 774, 785, 11],
[659, '12:55', '13:34', 775, 814, 39],
[660, '12:58', '14:24', 778, 864, 86],
[661, '12:58', '13:25', 778, 805, 27],
[662, '12:58', '13:45', 778, 825, 47],
[663, '12:58', '13:45', 778, 825, 47],
[664, '12:59', '13:53', 779, 833, 54],
[665, '13:00', '13:10', 780, 790, 10],
[666, '13:00', '13:16', 780, 796, 16],
[667, '13:00', '14:11', 780, 851, 71],
[668, '13:01', '13:13', 781, 793, 12],
[669, '13:03', '13:34', 783, 814, 31],
[670, '13:03', '13:50', 783, 830, 47],
[671, '13:04', '13:15', 784, 795, 11],
[672, '13:04', '14:28', 784, 868, 84],
[673, '13:05', '13:44', 785, 824, 39],
[674, '13:08', '13:55', 788, 835, 47],
[675, '13:08', '14:34', 788, 874, 86],
[676, '13:08', '13:55', 788, 835, 47],
[677, '13:09', '14:03', 789, 843, 54],
[678, '13:10', '13:20', 790, 800, 10],
[679, '13:10', '14:21', 790, 861, 71],
[680, '13:13', '14:00', 793, 840, 47],
[681, '13:13', '13:40', 793, 820, 27],
[682, '13:14', '14:38', 794, 878, 84],
[683, '13:14', '13:25', 794, 805, 11],
[684, '13:15', '13:54', 795, 834, 39],
[685, '13:16', '13:28', 796, 808, 12],
[686, '13:18', '14:05', 798, 845, 47],
[687, '13:18', '14:44', 798, 884, 86],
[688, '13:18', '14:05', 798, 845, 47],
[689, '13:19', '14:13', 799, 853, 54],
[690, '13:20', '13:36', 800, 816, 16],
[691, '13:20', '14:31', 800, 871, 71],
[692, '13:20', '13:30', 800, 810, 10],
[693, '13:21', '13:47', 801, 827, 26],
[694, '13:23', '14:10', 803, 850, 47],
[695, '13:23', '13:49', 803, 829, 26],
[696, '13:24', '14:48', 804, 888, 84],
[697, '13:24', '13:35', 804, 815, 11],
[698, '13:25', '14:04', 805, 844, 39],
[699, '13:28', '14:15', 808, 855, 47],
[700, '13:28', '14:54', 808, 894, 86],
[701, '13:28', '13:55', 808, 835, 27],
[702, '13:28', '14:15', 808, 855, 47],
[703, '13:29', '14:23', 809, 863, 54],
[704, '13:30', '13:40', 810, 820, 10],
[705, '13:30', '14:41', 810, 881, 71],
[706, '13:31', '13:43', 811, 823, 12],
[707, '13:33', '14:20', 813, 860, 47],
[708, '13:34', '14:58', 814, 898, 84],
[709, '13:34', '13:45', 814, 825, 11],
[710, '13:35', '14:14', 815, 854, 39],
[711, '13:38', '14:25', 818, 865, 47],
[712, '13:38', '14:25', 818, 865, 47],
[713, '13:38', '15:04', 818, 904, 86],
[714, '13:39', '14:33', 819, 873, 54],
[715, '13:40', '13:50', 820, 830, 10],
[716, '13:40', '13:56', 820, 836, 16],
[717, '13:40', '14:51', 820, 891, 71],
[718, '13:43', '14:30', 823, 870, 47],
[719, '13:43', '14:10', 823, 850, 27],
[720, '13:44', '15:09', 824, 909, 85],
[721, '13:44', '13:55', 824, 835, 11],
[722, '13:45', '14:24', 825, 864, 39],
[723, '13:46', '13:58', 826, 838, 12],
[724, '13:48', '14:35', 828, 875, 47],
[725, '13:48', '15:14', 828, 914, 86],
[726, '13:48', '14:35', 828, 875, 47],
[727, '13:49', '14:43', 829, 883, 54],
[728, '13:50', '14:00', 830, 840, 10],
[729, '13:50', '15:01', 830, 901, 71],
[730, '13:51', '14:17', 831, 857, 26],
[731, '13:53', '14:40', 833, 880, 47],
[732, '13:53', '14:49', 833, 889, 56],
[733, '13:54', '14:05', 834, 845, 11],
[734, '13:54', '15:19', 834, 919, 85],
[735, '13:55', '14:34', 835, 874, 39],
[736, '13:57', '14:20', 837, 860, 23],
[737, '13:58', '15:24', 838, 924, 86],
[738, '13:58', '14:45', 838, 885, 47],
[739, '13:58', '14:45', 838, 885, 47],
[740, '13:58', '14:25', 838, 865, 27],
[741, '13:59', '14:53', 839, 893, 54],
[742, '14:00', '14:16', 840, 856, 16],
[743, '14:00', '14:10', 840, 850, 10],
[744, '14:00', '15:11', 840, 911, 71],
[745, '14:01', '14:13', 841, 853, 12],
[746, '14:03', '14:50', 843, 890, 47],
[747, '14:04', '14:15', 844, 855, 11],
[748, '14:04', '15:29', 844, 929, 85],
[749, '14:05', '14:44', 845, 884, 39],
[750, '14:08', '14:55', 848, 895, 47],
[751, '14:08', '14:55', 848, 895, 47],
[752, '14:08', '15:34', 848, 934, 86],
[753, '14:09', '15:03', 849, 903, 54],
[754, '14:10', '15:21', 850, 921, 71],
[755, '14:10', '14:20', 850, 860, 10],
[756, '14:13', '15:00', 853, 900, 47],
[757, '14:13', '14:40', 853, 880, 27],
[758, '14:14', '15:40', 854, 940, 86],
[759, '14:14', '14:25', 854, 865, 11],
[760, '14:15', '14:54', 855, 894, 39],
[761, '14:16', '14:28', 856, 868, 12],
[762, '14:18', '15:05', 858, 905, 47],
[763, '14:18', '15:44', 858, 944, 86],
[764, '14:18', '15:05', 858, 905, 47],
[765, '14:19', '15:13', 859, 913, 54],
[766, '14:20', '15:31', 860, 931, 71],
[767, '14:20', '14:30', 860, 870, 10],
[768, '14:20', '14:36', 860, 876, 16],
[769, '14:21', '14:47', 861, 887, 26],
[770, '14:23', '15:10', 863, 910, 47],
[771, '14:23', '14:45', 863, 885, 22],
[772, '14:24', '15:50', 864, 950, 86],
[773, '14:24', '14:35', 864, 875, 11],
[774, '14:25', '15:02', 865, 902, 37],
[775, '14:26', '14:52', 866, 892, 26],
[776, '14:28', '15:15', 868, 915, 47],
[777, '14:28', '14:55', 868, 895, 27],
[778, '14:28', '15:54', 868, 954, 86],
[779, '14:28', '15:15', 868, 915, 47],
[780, '14:29', '15:23', 869, 923, 54],
[781, '14:30', '15:41', 870, 941, 71],
[782, '14:30', '14:40', 870, 880, 10],
[783, '14:31', '14:43', 871, 883, 12],
[784, '14:33', '15:20', 873, 920, 47],
[785, '14:34', '16:00', 874, 960, 86],
[786, '14:34', '14:45', 874, 885, 11],
[787, '14:35', '15:11', 875, 911, 36],
[788, '14:38', '15:25', 878, 925, 47],
[789, '14:38', '15:25', 878, 925, 47],
[790, '14:38', '16:04', 878, 964, 86],
[791, '14:39', '15:33', 879, 933, 54],
[792, '14:40', '14:50', 880, 890, 10],
[793, '14:40', '15:51', 880, 951, 71],
[794, '14:40', '14:56', 880, 896, 16],
[795, '14:43', '15:30', 883, 930, 47],
[796, '14:43', '15:10', 883, 910, 27],
[797, '14:44', '15:00', 884, 900, 16],
[798, '14:44', '16:10', 884, 970, 86],
[799, '14:45', '15:19', 885, 919, 34],
[800, '14:46', '14:58', 886, 898, 12],
[801, '14:48', '15:35', 888, 935, 47],
[802, '14:48', '15:35', 888, 935, 47],
[803, '14:48', '17:04', 888, 1024, 136],
[804, '14:49', '15:43', 889, 943, 54],
[805, '14:50', '16:01', 890, 961, 71],
[806, '14:50', '15:00', 890, 900, 10],
[807, '14:51', '15:17', 891, 917, 26],
[808, '14:52', '15:27', 892, 927, 35],
[809, '14:52', '15:21', 892, 921, 29],
[810, '14:53', '15:40', 893, 940, 47],
[811, '14:54', '15:08', 894, 908, 14],
[812, '14:54', '16:20', 894, 980, 86],
[813, '14:58', '16:24', 898, 984, 86],
[814, '14:58', '15:45', 898, 945, 47],
[815, '14:58', '15:25', 898, 925, 27],
[816, '14:58', '15:45', 898, 945, 47],
[817, '14:59', '15:53', 899, 953, 54],
[818, '15:00', '15:10', 900, 910, 10],
[819, '15:00', '15:35', 900, 935, 35],
[820, '15:00', '16:11', 900, 971, 71],
[821, '15:00', '15:16', 900, 916, 16],
[822, '15:01', '15:13', 901, 913, 12],
[823, '15:02', '15:16', 902, 916, 14],
[824, '15:03', '15:50', 903, 950, 47],
[825, '15:04', '16:30', 904, 990, 86],
[826, '15:08', '16:34', 908, 994, 86],
[827, '15:08', '15:55', 908, 955, 47],
[828, '15:08', '15:55', 908, 955, 47],
[829, '15:08', '15:45', 908, 945, 37],
[830, '15:09', '16:14', 909, 974, 65],
[831, '15:09', '16:03', 909, 963, 54],
[832, '15:10', '16:21', 910, 981, 71],
[833, '15:10', '15:20', 910, 920, 10],
[834, '15:11', '15:24', 911, 924, 13],
[835, '15:12', '15:36', 912, 936, 24],
[836, '15:13', '16:00', 913, 960, 47],
[837, '15:13', '15:40', 913, 940, 27],
[838, '15:14', '16:40', 914, 1000, 86],
[839, '15:16', '15:28', 916, 928, 12],
[840, '15:16', '15:55', 916, 955, 39],
[841, '15:18', '16:05', 918, 965, 47],
[842, '15:18', '16:44', 918, 1004, 86],
[843, '15:18', '16:05', 918, 965, 47],
[844, '15:19', '16:13', 919, 973, 54],
[845, '15:19', '15:34', 919, 934, 15],
[846, '15:20', '15:30', 920, 930, 10],
[847, '15:20', '16:31', 920, 991, 71],
[848, '15:20', '15:36', 920, 936, 16],
[849, '15:21', '15:47', 921, 947, 26],
[850, '15:21', '16:06', 921, 966, 45],
[851, '15:23', '16:10', 923, 970, 47],
[852, '15:24', '16:50', 924, 1010, 86],
[853, '15:24', '16:05', 924, 965, 41],
[854, '15:27', '15:51', 927, 951, 24],
[855, '15:27', '15:44', 927, 944, 17],
[856, '15:28', '16:15', 928, 975, 47],
[857, '15:28', '16:54', 928, 1014, 86],
[858, '15:28', '16:15', 928, 975, 47],
[859, '15:28', '15:55', 928, 955, 27],
[860, '15:29', '16:23', 929, 983, 54],
[861, '15:30', '16:41', 930, 1001, 71],
[862, '15:30', '15:40', 930, 940, 10],
[863, '15:31', '15:43', 931, 943, 12],
[864, '15:33', '16:20', 933, 980, 47],
[865, '15:34', '17:00', 934, 1020, 86],
[866, '15:34', '16:15', 934, 975, 41],
[867, '15:35', '15:54', 935, 954, 19],
[868, '15:36', '16:21', 936, 981, 45],
[869, '15:38', '16:25', 938, 985, 47],
[870, '15:38', '16:25', 938, 985, 47],
[871, '15:38', '16:39', 938, 999, 61],
[872, '15:39', '16:33', 939, 993, 54],
[873, '15:40', '15:50', 940, 950, 10],
[874, '15:40', '16:51', 940, 1011, 71],
[875, '15:40', '15:56', 940, 956, 16],
[876, '15:43', '16:10', 943, 970, 27],
[877, '15:43', '16:30', 943, 990, 47],
[878, '15:44', '17:10', 944, 1030, 86],
[879, '15:44', '16:25', 944, 985, 41],
[880, '15:45', '16:04', 945, 964, 19],
[881, '15:46', '15:58', 946, 958, 12],
[882, '15:48', '16:35', 948, 995, 47],
[883, '15:48', '16:35', 948, 995, 47],
[884, '15:48', '17:14', 948, 1034, 86],
[885, '15:49', '16:43', 949, 1003, 54],
[886, '15:50', '16:00', 950, 960, 10],
[887, '15:50', '17:01', 950, 1021, 71],
[888, '15:51', '16:18', 951, 978, 27],
[889, '15:52', '16:36', 952, 996, 44],
[890, '15:53', '16:40', 953, 1000, 47],
[891, '15:54', '17:20', 954, 1040, 86],
[892, '15:54', '16:35', 954, 995, 41],
[893, '15:55', '16:14', 955, 974, 19],
[894, '15:58', '16:25', 958, 985, 27],
[895, '15:58', '16:45', 958, 1005, 47],
[896, '15:58', '16:45', 958, 1005, 47],
[897, '15:58', '17:24', 958, 1044, 86],
[898, '15:59', '17:11', 959, 1031, 72],
[899, '15:59', '16:53', 959, 1013, 54],
[900, '16:00', '16:10', 960, 970, 10],
[901, '16:00', '16:16', 960, 976, 16],
[902, '16:01', '16:13', 961, 973, 12],
[903, '16:03', '16:50', 963, 1010, 47],
[904, '16:04', '17:30', 964, 1050, 86],
[905, '16:04', '16:45', 964, 1005, 41],
[906, '16:05', '16:24', 965, 984, 19],
[907, '16:06', '16:51', 966, 1011, 45],
[908, '16:08', '16:55', 968, 1015, 47],
[909, '16:08', '17:34', 968, 1054, 86],
[910, '16:08', '16:55', 968, 1015, 47],
[911, '16:09', '17:03', 969, 1023, 54],
[912, '16:09', '17:21', 969, 1041, 72],
[913, '16:10', '16:20', 970, 980, 10],
[914, '16:13', '16:40', 973, 1000, 27],
[915, '16:13', '17:00', 973, 1020, 47],
[916, '16:14', '16:55', 974, 1015, 41],
[917, '16:14', '17:40', 974, 1060, 86],
[918, '16:15', '16:34', 975, 994, 19],
[919, '16:16', '16:28', 976, 988, 12],
[920, '16:18', '17:05', 978, 1025, 47],
[921, '16:18', '17:05', 978, 1025, 47],
[922, '16:18', '17:44', 978, 1064, 86],
[923, '16:19', '17:31', 979, 1051, 72],
[924, '16:19', '17:13', 979, 1033, 54],
[925, '16:20', '16:30', 980, 990, 10],
[926, '16:20', '16:36', 980, 996, 16],
[927, '16:21', '16:48', 981, 1008, 27],
[928, '16:22', '17:06', 982, 1026, 44],
[929, '16:23', '17:10', 983, 1030, 47],
[930, '16:24', '17:05', 984, 1025, 41],
[931, '16:24', '17:50', 984, 1070, 86],
[932, '16:25', '16:44', 985, 1004, 19],
[933, '16:28', '17:15', 988, 1035, 47],
[934, '16:28', '17:15', 988, 1035, 47],
[935, '16:28', '16:55', 988, 1015, 27],
[936, '16:28', '17:54', 988, 1074, 86],
[937, '16:29', '17:23', 989, 1043, 54],
[938, '16:29', '17:41', 989, 1061, 72],
[939, '16:30', '16:40', 990, 1000, 10],
[940, '16:31', '16:43', 991, 1003, 12],
[941, '16:33', '17:20', 993, 1040, 47],
[942, '16:34', '17:15', 994, 1035, 41],
[943, '16:34', '18:00', 994, 1080, 86],
[944, '16:35', '16:54', 995, 1014, 19],
[945, '16:36', '17:21', 996, 1041, 45],
[946, '16:38', '17:25', 998, 1045, 47],
[947, '16:38', '17:25', 998, 1045, 47],
[948, '16:38', '18:04', 998, 1084, 86],
[949, '16:39', '17:33', 999, 1053, 54],
[950, '16:39', '17:51', 999, 1071, 72],
[951, '16:40', '16:56', 1000, 1016, 16],
[952, '16:40', '16:50', 1000, 1010, 10],
[953, '16:43', '17:10', 1003, 1030, 27],
[954, '16:43', '17:30', 1003, 1050, 47],
[955, '16:44', '17:25', 1004, 1045, 41],
[956, '16:44', '18:10', 1004, 1090, 86],
[957, '16:45', '17:04', 1005, 1024, 19],
[958, '16:46', '16:58', 1006, 1018, 12],
[959, '16:48', '18:14', 1008, 1094, 86],
[960, '16:48', '17:35', 1008, 1055, 47],
[961, '16:48', '17:35', 1008, 1055, 47],
[962, '16:49', '18:01', 1009, 1081, 72],
[963, '16:49', '17:43', 1009, 1063, 54],
[964, '16:50', '17:00', 1010, 1020, 10],
[965, '16:51', '17:18', 1011, 1038, 27],
[966, '16:52', '17:36', 1012, 1056, 44],
[967, '16:53', '17:40', 1013, 1060, 47],
[968, '16:54', '18:20', 1014, 1100, 86],
[969, '16:54', '17:35', 1014, 1055, 41],
[970, '16:55', '17:14', 1015, 1034, 19],
[971, '16:58', '17:25', 1018, 1045, 27],
[972, '16:58', '17:45', 1018, 1065, 47],
[973, '16:58', '17:45', 1018, 1065, 47],
[974, '16:58', '18:24', 1018, 1104, 86],
[975, '16:59', '18:11', 1019, 1091, 72],
[976, '16:59', '17:53', 1019, 1073, 54],
[977, '17:00', '17:16', 1020, 1036, 16],
[978, '17:00', '17:10', 1020, 1030, 10],
[979, '17:01', '17:13', 1021, 1033, 12],
[980, '17:03', '17:50', 1023, 1070, 47],
[981, '17:04', '18:30', 1024, 1110, 86],
[982, '17:04', '17:45', 1024, 1065, 41],
[983, '17:05', '17:24', 1025, 1044, 19],
[984, '17:06', '17:51', 1026, 1071, 45],
[985, '17:08', '17:55', 1028, 1075, 47],
[986, '17:08', '17:55', 1028, 1075, 47],
[987, '17:08', '18:34', 1028, 1114, 86],
[988, '17:09', '18:03', 1029, 1083, 54],
[989, '17:09', '18:21', 1029, 1101, 72],
[990, '17:10', '17:20', 1030, 1040, 10],
[991, '17:13', '17:40', 1033, 1060, 27],
[992, '17:13', '18:00', 1033, 1080, 47],
[993, '17:14', '17:55', 1034, 1075, 41],
[994, '17:14', '18:40', 1034, 1120, 86],
[995, '17:15', '17:34', 1035, 1054, 19],
[996, '17:16', '17:28', 1036, 1048, 12],
[997, '17:18', '18:05', 1038, 1085, 47],
[998, '17:18', '18:05', 1038, 1085, 47],
[999, '17:18', '18:44', 1038, 1124, 86],
[1000, '17:19', '18:31', 1039, 1111, 72],
[1001, '17:19', '18:13', 1039, 1093, 54],
[1002, '17:20', '17:36', 1040, 1056, 16],
[1003, '17:20', '17:30', 1040, 1050, 10],
[1004, '17:21', '17:47', 1041, 1067, 26],
[1005, '17:22', '18:06', 1042, 1086, 44],
[1006, '17:23', '18:10', 1043, 1090, 47],
[1007, '17:24', '18:50', 1044, 1130, 86],
[1008, '17:24', '18:05', 1044, 1085, 41],
[1009, '17:25', '17:44', 1045, 1064, 19],
[1010, '17:28', '17:55', 1048, 1075, 27],
[1011, '17:28', '18:15', 1048, 1095, 47],
[1012, '17:28', '18:15', 1048, 1095, 47],
[1013, '17:28', '18:54', 1048, 1134, 86],
[1014, '17:29', '18:41', 1049, 1121, 72],
[1015, '17:29', '18:23', 1049, 1103, 54],
[1016, '17:30', '17:40', 1050, 1060, 10],
[1017, '17:31', '17:43', 1051, 1063, 12],
[1018, '17:33', '18:20', 1053, 1100, 47],
[1019, '17:34', '18:15', 1054, 1095, 41],
[1020, '17:34', '19:00', 1054, 1140, 86],
[1021, '17:35', '17:54', 1055, 1074, 19],
[1022, '17:36', '18:21', 1056, 1101, 45],
[1023, '17:38', '18:25', 1058, 1105, 47],
[1024, '17:38', '19:04', 1058, 1144, 86],
[1025, '17:38', '18:25', 1058, 1105, 47],
[1026, '17:39', '18:51', 1059, 1131, 72],
[1027, '17:39', '18:33', 1059, 1113, 54],
[1028, '17:40', '17:56', 1060, 1076, 16],
[1029, '17:40', '17:50', 1060, 1070, 10],
[1030, '17:43', '18:10', 1063, 1090, 27],
[1031, '17:43', '18:30', 1063, 1110, 47],
[1032, '17:44', '18:25', 1064, 1105, 41],
[1033, '17:44', '19:14', 1064, 1154, 90],
[1034, '17:45', '18:04', 1065, 1084, 19],
[1035, '17:46', '17:58', 1066, 1078, 12],
[1036, '17:48', '18:35', 1068, 1115, 47],
[1037, '17:48', '18:35', 1068, 1115, 47],
[1038, '17:48', '19:14', 1068, 1154, 86],
[1039, '17:49', '19:01', 1069, 1141, 72],
[1040, '17:49', '18:43', 1069, 1123, 54],
[1041, '17:50', '18:00', 1070, 1080, 10],
[1042, '17:51', '18:17', 1071, 1097, 26],
[1043, '17:52', '18:36', 1072, 1116, 44],
[1044, '17:53', '18:40', 1073, 1120, 47],
[1045, '17:54', '18:35', 1074, 1115, 41],
[1046, '17:54', '18:57', 1074, 1137, 63],
[1047, '17:55', '18:14', 1075, 1094, 19],
[1048, '17:58', '18:45', 1078, 1125, 47],
[1049, '17:58', '18:45', 1078, 1125, 47],
[1050, '17:58', '18:25', 1078, 1105, 27],
[1051, '17:58', '19:26', 1078, 1166, 88],
[1052, '17:59', '18:53', 1079, 1133, 54],
[1053, '18:00', '19:11', 1080, 1151, 71],
[1054, '18:00', '18:10', 1080, 1090, 10],
[1055, '18:00', '18:16', 1080, 1096, 16],
[1056, '18:01', '18:13', 1081, 1093, 12],
[1057, '18:03', '18:50', 1083, 1130, 47],
[1058, '18:04', '18:45', 1084, 1125, 41],
[1059, '18:04', '19:29', 1084, 1169, 85],
[1060, '18:05', '18:24', 1085, 1104, 19],
[1061, '18:06', '18:51', 1086, 1131, 45],
[1062, '18:08', '18:55', 1088, 1135, 47],
[1063, '18:08', '19:06', 1088, 1146, 58],
[1064, '18:08', '18:55', 1088, 1135, 47],
[1065, '18:09', '19:03', 1089, 1143, 54],
[1066, '18:10', '18:20', 1090, 1100, 10],
[1067, '18:10', '19:21', 1090, 1161, 71],
[1068, '18:13', '19:00', 1093, 1140, 47],
[1069, '18:13', '18:40', 1093, 1120, 27],
[1070, '18:14', '19:43', 1094, 1183, 89],
[1071, '18:14', '18:55', 1094, 1135, 41],
[1072, '18:15', '18:34', 1095, 1114, 19],
[1073, '18:16', '18:28', 1096, 1108, 12],
[1074, '18:17', '18:27', 1097, 1107, 10],
[1075, '18:18', '19:41', 1098, 1181, 83],
[1076, '18:18', '18:58', 1098, 1138, 40],
[1077, '18:18', '19:05', 1098, 1145, 47],
[1078, '18:19', '19:13', 1099, 1153, 54],
[1079, '18:20', '19:31', 1100, 1171, 71],
[1080, '18:20', '18:36', 1100, 1116, 16],
[1081, '18:20', '18:30', 1100, 1110, 10],
[1082, '18:22', '19:05', 1102, 1145, 43],
[1083, '18:23', '19:05', 1103, 1145, 42],
[1084, '18:24', '19:27', 1104, 1167, 63],
[1085, '18:24', '19:05', 1104, 1145, 41],
[1086, '18:25', '18:44', 1105, 1124, 19],
[1087, '18:28', '19:25', 1108, 1165, 57],
[1088, '18:28', '18:55', 1108, 1135, 27],
[1089, '18:28', '19:08', 1108, 1148, 40],
[1090, '18:28', '19:15', 1108, 1155, 47],
[1091, '18:29', '19:23', 1109, 1163, 54],
[1092, '18:30', '19:05', 1110, 1145, 35],
[1093, '18:30', '18:40', 1110, 1120, 10],
[1094, '18:31', '18:43', 1111, 1123, 12],
[1095, '18:33', '19:15', 1113, 1155, 42],
[1096, '18:34', '19:58', 1114, 1198, 84],
[1097, '18:34', '19:14', 1114, 1154, 40],
[1098, '18:35', '18:55', 1115, 1135, 20],
[1099, '18:36', '19:20', 1116, 1160, 44],
[1100, '18:38', '19:25', 1118, 1165, 47],
[1101, '18:38', '19:23', 1118, 1163, 45],
[1102, '18:38', '19:56', 1118, 1196, 78],
[1103, '18:39', '19:33', 1119, 1173, 54],
[1104, '18:40', '18:50', 1120, 1130, 10],
[1105, '18:40', '19:45', 1120, 1185, 65],
[1106, '18:40', '18:56', 1120, 1136, 16],
[1107, '18:43', '19:10', 1123, 1150, 27],
[1108, '18:43', '19:30', 1123, 1170, 47],
[1109, '18:44', '19:24', 1124, 1164, 40],
[1110, '18:45', '19:05', 1125, 1145, 20],
[1111, '18:46', '18:58', 1126, 1138, 12],
[1112, '18:48', '19:35', 1128, 1175, 47],
[1113, '18:48', '20:12', 1128, 1212, 84],
[1114, '18:48', '20:11', 1128, 1211, 83],
[1115, '18:48', '19:28', 1128, 1168, 40],
[1116, '18:49', '19:43', 1129, 1183, 54],
[1117, '18:50', '19:00', 1130, 1140, 10],
[1118, '18:51', '19:01', 1131, 1141, 10],
[1119, '18:53', '19:35', 1133, 1175, 42],
[1120, '18:53', '19:15', 1133, 1155, 22],
[1121, '18:53', '20:00', 1133, 1200, 67],
[1122, '18:55', '19:15', 1135, 1155, 20],
[1123, '18:55', '19:34', 1135, 1174, 39],
[1124, '18:58', '19:38', 1138, 1178, 40],
[1125, '18:59', '19:53', 1139, 1193, 54],
[1126, '18:59', '19:50', 1139, 1190, 51],
[1127, '18:59', '19:53', 1139, 1193, 54],
[1128, '19:00', '19:16', 1140, 1156, 16],
[1129, '19:00', '19:10', 1140, 1150, 10],
[1130, '19:00', '19:16', 1140, 1156, 16],
[1131, '19:01', '19:13', 1141, 1153, 12],
[1132, '19:03', '20:26', 1143, 1226, 83],
[1133, '19:03', '19:45', 1143, 1185, 42],
[1134, '19:05', '19:44', 1145, 1184, 39],
[1135, '19:05', '19:25', 1145, 1165, 20],
[1136, '19:08', '20:15', 1148, 1215, 67],
[1137, '19:08', '19:35', 1148, 1175, 27],
[1138, '19:09', '19:49', 1149, 1189, 40],
[1139, '19:09', '20:03', 1149, 1203, 54],
[1140, '19:10', '19:20', 1150, 1160, 10],
[1141, '19:10', '19:20', 1150, 1160, 10],
[1142, '19:11', '19:53', 1151, 1193, 42],
[1143, '19:14', '20:26', 1154, 1226, 72],
[1144, '19:14', '19:35', 1154, 1175, 21],
[1145, '19:14', '19:24', 1154, 1164, 10],
[1146, '19:14', '20:05', 1154, 1205, 51],
[1147, '19:15', '19:30', 1155, 1170, 15],
[1148, '19:15', '19:54', 1155, 1194, 39],
[1149, '19:18', '20:39', 1158, 1239, 81],
[1150, '19:18', '20:00', 1158, 1200, 42],
[1151, '19:19', '20:14', 1159, 1214, 55],
[1152, '19:20', '19:30', 1160, 1170, 10],
[1153, '19:20', '19:36', 1160, 1176, 16],
[1154, '19:21', '19:31', 1161, 1171, 10],
[1155, '19:23', '20:30', 1163, 1230, 67],
[1156, '19:23', '19:35', 1163, 1175, 12],
[1157, '19:24', '19:45', 1164, 1185, 21],
[1158, '19:24', '19:45', 1164, 1185, 21],
[1159, '19:25', '20:04', 1165, 1204, 39],
[1160, '19:26', '20:08', 1166, 1208, 42],
[1161, '19:29', '20:02', 1169, 1202, 33],
[1162, '19:29', '20:18', 1169, 1218, 49],
[1163, '19:29', '20:41', 1169, 1241, 72],
[1164, '19:30', '19:40', 1170, 1180, 10],
[1165, '19:33', '20:54', 1173, 1254, 81],
[1166, '19:33', '20:17', 1173, 1217, 44],
[1167, '19:34', '19:55', 1174, 1195, 21],
[1168, '19:35', '20:14', 1175, 1214, 39],
[1169, '19:38', '20:05', 1178, 1205, 27],
[1170, '19:38', '20:45', 1178, 1245, 67],
[1171, '19:39', '20:12', 1179, 1212, 33],
[1172, '19:40', '19:50', 1180, 1190, 10],
[1173, '19:40', '19:56', 1180, 1196, 16],
[1174, '19:41', '20:27', 1181, 1227, 46],
[1175, '19:43', '19:55', 1183, 1195, 12],
[1176, '19:44', '20:05', 1184, 1205, 21],
[1177, '19:44', '20:33', 1184, 1233, 49],
[1178, '19:44', '21:00', 1184, 1260, 76],
[1179, '19:45', '20:24', 1185, 1224, 39],
[1180, '19:48', '20:37', 1188, 1237, 49],
[1181, '19:48', '21:09', 1188, 1269, 81],
[1182, '19:50', '20:00', 1190, 1200, 10],
[1183, '19:52', '20:29', 1192, 1229, 37],
[1184, '19:53', '20:08', 1193, 1208, 15],
[1185, '19:53', '21:02', 1193, 1262, 69],
[1186, '19:53', '20:20', 1193, 1220, 27],
[1187, '19:54', '20:19', 1194, 1219, 25],
[1188, '19:55', '20:34', 1195, 1234, 39],
[1189, '19:56', '20:34', 1196, 1234, 38],
[1190, '19:59', '20:48', 1199, 1248, 49],
[1191, '19:59', '21:20', 1199, 1280, 81],
[1192, '20:00', '20:16', 1200, 1216, 16],
[1193, '20:00', '20:10', 1200, 1210, 10],
[1194, '20:03', '20:42', 1203, 1242, 39],
[1195, '20:03', '21:24', 1203, 1284, 81],
[1196, '20:04', '20:29', 1204, 1229, 25],
[1197, '20:05', '20:48', 1205, 1248, 43],
[1198, '20:07', '20:44', 1207, 1244, 37],
[1199, '20:08', '20:40', 1208, 1240, 32],
[1200, '20:08', '20:35', 1208, 1235, 27],
[1201, '20:10', '20:20', 1210, 1220, 10],
[1202, '20:10', '20:22', 1210, 1222, 12],
[1203, '20:11', '20:47', 1211, 1247, 36],
[1204, '20:14', '21:04', 1214, 1264, 50],
[1205, '20:14', '21:03', 1214, 1263, 49],
[1206, '20:17', '21:03', 1217, 1263, 46],
[1207, '20:18', '21:39', 1218, 1299, 81],
[1208, '20:20', '20:30', 1220, 1230, 10],
[1209, '20:20', '20:57', 1220, 1257, 37],
[1210, '20:20', '20:36', 1220, 1236, 16],
[1211, '20:22', '20:59', 1222, 1259, 37],
[1212, '20:22', '20:42', 1222, 1242, 20],
[1213, '20:24', '20:49', 1224, 1249, 25],
[1214, '20:27', '21:22', 1227, 1282, 55],
[1215, '20:29', '21:18', 1229, 1278, 49],
[1216, '20:30', '21:07', 1230, 1267, 37],
[1217, '20:30', '20:40', 1230, 1240, 10],
[1218, '20:30', '20:40', 1230, 1240, 10],
[1219, '20:30', '21:40', 1230, 1300, 70],
[1220, '20:32', '21:18', 1232, 1278, 46],
[1221, '20:35', '21:54', 1235, 1314, 79],
[1222, '20:37', '21:14', 1237, 1274, 37],
[1223, '20:38', '21:08', 1238, 1268, 30],
[1224, '20:40', '20:50', 1240, 1250, 10],
[1225, '20:40', '21:17', 1240, 1277, 37],
[1226, '20:40', '20:56', 1240, 1256, 16],
[1227, '20:44', '21:33', 1244, 1293, 49],
[1228, '20:47', '21:33', 1247, 1293, 46],
[1229, '20:47', '21:42', 1247, 1302, 55],
[1230, '20:50', '21:00', 1250, 1260, 10],
[1231, '20:50', '22:00', 1250, 1320, 70],
[1232, '20:50', '22:09', 1250, 1329, 79],
[1233, '20:50', '21:27', 1250, 1287, 37],
[1234, '20:52', '21:29', 1252, 1289, 37],
[1235, '20:53', '21:20', 1253, 1280, 27],
[1236, '20:56', '21:11', 1256, 1271, 15],
[1237, '20:59', '21:48', 1259, 1308, 49],
[1238, '21:00', '21:10', 1260, 1270, 10],
[1239, '21:00', '21:37', 1260, 1297, 37],
[1240, '21:02', '21:48', 1262, 1308, 46],
[1241, '21:05', '22:24', 1265, 1344, 79],
[1242, '21:07', '21:44', 1267, 1304, 37],
[1243, '21:07', '22:02', 1267, 1322, 55],
[1244, '21:08', '21:38', 1268, 1298, 30],
[1245, '21:10', '22:25', 1270, 1345, 75],
[1246, '21:10', '21:20', 1270, 1280, 10],
[1247, '21:10', '21:47', 1270, 1307, 37],
[1248, '21:14', '22:03', 1274, 1323, 49],
[1249, '21:17', '22:03', 1277, 1323, 46],
[1250, '21:20', '22:18', 1280, 1338, 58],
[1251, '21:20', '21:57', 1280, 1317, 37],
[1252, '21:20', '21:30', 1280, 1290, 10],
[1253, '21:22', '21:59', 1282, 1319, 37],
[1254, '21:24', '21:49', 1284, 1309, 25],
[1255, '21:27', '22:21', 1287, 1341, 54],
[1256, '21:30', '22:07', 1290, 1327, 37],
[1257, '21:30', '22:20', 1290, 1340, 50],
[1258, '21:30', '21:40', 1290, 1300, 10],
[1259, '21:32', '22:18', 1292, 1338, 46],
[1260, '21:32', '22:01', 1292, 1321, 29],
[1261, '21:35', '22:54', 1295, 1374, 79],
[1262, '21:37', '22:14', 1297, 1334, 37],
[1263, '21:39', '21:55', 1299, 1315, 16],
[1264, '21:40', '22:17', 1300, 1337, 37],
[1265, '21:40', '21:50', 1300, 1310, 10],
[1266, '21:41', '22:08', 1301, 1328, 27],
[1267, '21:47', '22:16', 1307, 1336, 29],
[1268, '21:47', '22:51', 1307, 1371, 64],
[1269, '21:47', '22:33', 1307, 1353, 46],
[1270, '21:48', '22:03', 1308, 1323, 15],
[1271, '21:50', '22:55', 1310, 1375, 65],
[1272, '21:50', '22:27', 1310, 1347, 37],
[1273, '21:50', '22:00', 1310, 1320, 10],
[1274, '21:52', '22:29', 1312, 1349, 37],
[1275, '21:53', '22:19', 1313, 1339, 26],
[1276, '22:00', '22:38', 1320, 1358, 38],
[1277, '22:00', '22:10', 1320, 1330, 10],
[1278, '22:02', '22:12', 1322, 1332, 10],
[1279, '22:02', '22:48', 1322, 1368, 46],
[1280, '22:04', '22:31', 1324, 1351, 27],
[1281, '22:05', '23:24', 1325, 1404, 79],
[1282, '22:07', '22:44', 1327, 1364, 37],
[1283, '22:07', '22:39', 1327, 1359, 32],
[1284, '22:09', '22:25', 1329, 1345, 16],
[1285, '22:10', '23:25', 1330, 1405, 75],
[1286, '22:13', '22:38', 1333, 1358, 25],
[1287, '22:13', '22:53', 1333, 1373, 40],
[1288, '22:17', '22:27', 1337, 1347, 10],
[1289, '22:17', '23:03', 1337, 1383, 46],
[1290, '22:19', '22:46', 1339, 1366, 27],
[1291, '22:22', '22:59', 1342, 1379, 37],
[1292, '22:24', '22:48', 1344, 1368, 24],
[1293, '22:27', '22:52', 1347, 1372, 25],
[1294, '22:27', '23:21', 1347, 1401, 54],
[1295, '22:28', '23:08', 1348, 1388, 40],
[1296, '22:30', '23:17', 1350, 1397, 47],
[1297, '22:32', '22:42', 1352, 1362, 10],
[1298, '22:32', '23:11', 1352, 1391, 39],
[1299, '22:34', '23:01', 1354, 1381, 27],
[1300, '22:35', '23:54', 1355, 1434, 79],
[1301, '22:37', '23:14', 1357, 1394, 37],
[1302, '22:43', '23:23', 1363, 1403, 40],
[1303, '22:43', '23:08', 1363, 1388, 25],
[1304, '22:47', '23:33', 1367, 1413, 46],
[1305, '22:47', '22:57', 1367, 1377, 10],
[1306, '22:49', '23:16', 1369, 1396, 27],
[1307, '22:52', '23:29', 1372, 1409, 37],
[1308, '22:53', '23:15', 1373, 1395, 22],
[1309, '22:55', '23:55', 1375, 1435, 60],
[1310, '22:57', '23:51', 1377, 1431, 54],
[1311, '22:58', '23:38', 1378, 1418, 40],
[1312, '23:02', '23:41', 1382, 1421, 39],
[1313, '23:02', '23:12', 1382, 1392, 10],
[1314, '23:04', '23:31', 1384, 1411, 27],
[1315, '23:05', '00:24', 1385, 1464, 79],
[1316, '23:07', '23:44', 1387, 1424, 37],
[1317, '23:13', '23:53', 1393, 1433, 40],
[1318, '23:13', '23:38', 1393, 1418, 25],
[1319, '23:17', '00:03', 1397, 1443, 46],
[1320, '23:17', '23:27', 1397, 1407, 10],
[1321, '23:19', '23:46', 1399, 1426, 27],
[1322, '23:22', '23:59', 1402, 1439, 37],
[1323, '23:25', '00:25', 1405, 1465, 60],
[1324, '23:27', '00:21', 1407, 1461, 54],
[1325, '23:28', '00:08', 1408, 1448, 40],
[1326, '23:32', '23:42', 1412, 1422, 10],
[1327, '23:34', '00:01', 1414, 1441, 27],
[1328, '23:35', '01:05', 1415, 1505, 90],
[1329, '23:37', '00:09', 1417, 1449, 32],
[1330, '23:43', '00:23', 1423, 1463, 40],
[1331, '23:43', '00:08', 1423, 1448, 25],
[1332, '23:46', '00:01', 1426, 1441, 15],
[1333, '23:47', '23:57', 1427, 1437, 10],
[1334, '23:47', '00:33', 1427, 1473, 46],
[1335, '23:52', '00:24', 1432, 1464, 32],
[1336, '23:55', '00:49', 1435, 1489, 54],
[1337, '23:57', '00:57', 1437, 1497, 60],
[1338, '23:58', '00:38', 1438, 1478, 40],
[1339, '00:02', '00:12', 1442, 1452, 10],
[1340, '00:07', '00:39', 1447, 1479, 32],
[1341, '00:13', '00:38', 1453, 1478, 25],
[1342, '00:13', '00:51', 1453, 1491, 38],
[1343, '00:15', '01:14', 1455, 1514, 59],
[1344, '00:17', '01:23', 1457, 1523, 66],
[1345, '00:23', '00:33', 1463, 1473, 10],
[1346, '00:24', '00:40', 1464, 1480, 16],
[1347, '00:25', '01:12', 1465, 1512, 47],
[1348, '00:28', '01:07', 1468, 1507, 39],
[1349, '00:33', '01:05', 1473, 1505, 32],
[1350, '00:43', '01:21', 1483, 1521, 38],
[1351, '00:44', '00:54', 1484, 1494, 10],
[1352, '00:47', '01:09', 1487, 1509, 22],
[1353, '00:47', '01:26', 1487, 1526, 39],
[1354, '00:54', '01:04', 1494, 1504, 10],
[1355, '00:57', '01:07', 1497, 1507, 10]
] # yapf:disable
def find_minimum_number_of_drivers(shifts, params):
"""Minimize the number of needed drivers."""
num_shifts = len(shifts)
# All durations are in minutes.
max_driving_time = 540 # 8 hours.
max_driving_time_without_pauses = 240 # 4 hours
min_pause_after_4h = 30
    min_delay_between_shifts = 2  # Minimum gap between two chained shifts.
    max_working_time = 720  # 12 hours.
min_working_time = 390 # 6.5 hours
    extra_time = 10 + 25  # Fixed per-driver overhead added to the working time.
# Computed data.
total_driving_time = sum(shift[5] for shift in shifts)
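    # Each driver can drive at most max_driving_time minutes, so dividing the
    # total driving time by that cap and rounding up gives a valid lower bound
    # on the number of drivers.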
min_num_drivers = int(
math.ceil(total_driving_time * 1.0 / max_driving_time))
min_start_time = min(shift[3] for shift in shifts)
max_end_time = max(shift[4] for shift in shifts)
print('Bus driver scheduling')
print(' num shifts =', num_shifts)
print(' total driving time =', total_driving_time, 'minutes')
print(' min num drivers =', min_num_drivers)
print(' min start time =', min_start_time)
print(' max end time =', max_end_time)
    # We are going to build a flow from the start of the day to the end of
    # the day.
    #
    # Along the path, we accumulate total driving time, driving time accrued
    # since the last break, and total working time.
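    #
    # Each shift is a node; a boolean arc literal selects which shift (if any)
    # the same driver performs next. Every shift gets exactly one incoming and
    # one outgoing arc, and driving time strictly increases along arcs, so the
    # selected arcs form disjoint source-to-sink paths, one per driver. The
    # number of drivers is then the number of arcs leaving the source.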
model = cp_model.CpModel()
# Per node info
driving_time = {}
working_time = {}
no_break_driving_time = {}
incoming_literals = collections.defaultdict(list)
outgoing_literals = collections.defaultdict(list)
outgoing_source_literals = []
incoming_sink_literals = []
# Create all the shift variables before iterating on the transitions
# between these shifts.
for shift in range(num_shifts):
driving_time[shift] = model.NewIntVar(0, max_driving_time, 'dt_%i' % shift)
no_break_driving_time[shift] = model.NewIntVar(
0, max_driving_time_without_pauses, 'nbdt_%i' % shift)
working_time[shift] = model.NewIntVar(
0, max_working_time, 'wt_%i' % shift)
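    # Now create the arcs: source -> shift (driver starts the day),
    # shift -> sink (driver ends the day), and shift -> shift transitions.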
for shift in range(num_shifts):
duration = shifts[shift][5]
# Arc from source to shift.
# - set the working time of the driver
# - increase driving time and driving time since the last break
source_lit = model.NewBoolVar('from source to %i' % shift)
outgoing_source_literals.append(source_lit)
incoming_literals[shift].append(source_lit)
model.Add(driving_time[shift] == duration).OnlyEnforceIf(source_lit)
model.Add(no_break_driving_time[shift] == duration).OnlyEnforceIf(
source_lit)
model.Add(working_time[shift] == duration + extra_time).OnlyEnforceIf(
source_lit)
# Arc from shift to sink
        # - optionally checks that working time is at least min_working_time
        #   (the two constraints just below are currently disabled)
sink_lit = model.NewBoolVar('from %i to sink' % shift)
outgoing_literals[shift].append(sink_lit)
incoming_sink_literals.append(sink_lit)
#model.Add(working_time[shift] >= min_working_time).OnlyEnforceIf(sink_lit)
#model.Add(working_time[shift] < min_working_time).OnlyEnforceIf(sink_lit.Not())
for other in range(num_shifts):
delay = shifts[other][3] - shifts[shift][4]
if delay < min_delay_between_shifts:
continue
other_duration = shifts[other][5]
lit = model.NewBoolVar('from %i to %i' % (shift, other))
# Increase driving time
model.Add(driving_time[other] ==
driving_time[shift] + other_duration).OnlyEnforceIf(lit)
            # Reset no_break_driving to the new shift's duration after a long
            # enough pause, otherwise keep accumulating it.
if delay >= min_pause_after_4h:
model.Add(no_break_driving_time[other] ==
other_duration).OnlyEnforceIf(lit)
else:
model.Add(
no_break_driving_time[other] ==
no_break_driving_time[shift] + other_duration).OnlyEnforceIf(lit)
# Increase working time
model.Add(working_time[other] == working_time[shift] + delay +
other_duration).OnlyEnforceIf(lit)
# Add arc
outgoing_literals[shift].append(lit)
incoming_literals[other].append(lit)
# Create dag constraint.
for shift in range(num_shifts):
model.Add(sum(outgoing_literals[shift]) == 1)
model.Add(sum(incoming_literals[shift]) == 1)
# Num drivers
num_drivers = model.NewIntVar(min_num_drivers, min_num_drivers * 3, 'num_drivers')
model.Add(sum(incoming_sink_literals) == num_drivers)
model.Add(sum(outgoing_source_literals) == num_drivers)
model.Minimize(num_drivers)
# Solve model.
solver = cp_model.CpSolver()
solver.parameters.log_search_progress = True
solver.parameters.num_search_workers = 8
status = solver.Solve(model)
if status != cp_model.OPTIMAL and status != cp_model.FEASIBLE:
return -1
# Display solution
optimal_num_drivers = int(solver.ObjectiveValue())
print('minimal number of drivers =', optimal_num_drivers)
return optimal_num_drivers
def main(args):
"""Optimize the bus driver allocation in two passes."""
print('----------- first pass: minimize the number of drivers')
shifts = []
if args.instance == 1:
shifts = SAMPLE_SHIFTS_SMALL
elif args.instance == 2:
shifts = SAMPLE_SHIFTS_MEDIUM
elif args.instance == 3:
shifts = SAMPLE_SHIFTS_LARGE
num_drivers = find_minimum_number_of_drivers(shifts, args.params)
print('----------- second pass: minimize the sum of working times')
#bus_driver_scheduling(False, num_drivers)
if __name__ == '__main__':
main(PARSER.parse_args())
| 42.768427
| 88
| 0.4722
|
42c017f2704ef055179c438cf9da10b67cb5e942
| 195
|
py
|
Python
|
Thesis@3.9.1/Lib/site-packages/django/contrib/sitemaps/apps.py
|
nverbois/TFE21-232
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
[
"MIT"
] | null | null | null |
Thesis@3.9.1/Lib/site-packages/django/contrib/sitemaps/apps.py
|
nverbois/TFE21-232
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
[
"MIT"
] | null | null | null |
Thesis@3.9.1/Lib/site-packages/django/contrib/sitemaps/apps.py
|
nverbois/TFE21-232
|
7113837b5263b5c508bfc6903cb6982b48aa7ee4
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class SiteMapsConfig(AppConfig):
name = "django.contrib.sitemaps"
verbose_name = _("Site Maps")
| 24.375
| 54
| 0.769231
|
011c49aa9a44446c1dea7cd8168289b9f4d4a546
| 495
|
py
|
Python
|
example/ABC119-D.py
|
koba-e964/SortedSet
|
f614979eb8ba3370707b8f599ade5b9fc4c3fe87
|
[
"Unlicense"
] | 43
|
2021-09-05T11:23:23.000Z
|
2022-03-31T04:21:31.000Z
|
example/ABC119-D.py
|
koba-e964/SortedSet
|
f614979eb8ba3370707b8f599ade5b9fc4c3fe87
|
[
"Unlicense"
] | 2
|
2022-03-04T14:10:32.000Z
|
2022-03-07T17:45:28.000Z
|
example/ABC119-D.py
|
koba-e964/SortedSet
|
f614979eb8ba3370707b8f599ade5b9fc4c3fe87
|
[
"Unlicense"
] | 11
|
2022-01-20T13:36:35.000Z
|
2022-03-20T12:22:46.000Z
|
# https://atcoder.jp/contests/abc119/submissions/28441723
# paste SortedSet here
import sys
input = sys.stdin.readline
INF = 1 << 60
A, B, Q = map(int, input().split())
s = SortedSet([-INF] + [int(input()) for i in range(A)] + [INF])
t = SortedSet([-INF] + [int(input()) for i in range(B)] + [INF])
for i in range(Q):
x = int(input())
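    # Distances from x to the nearest shrine on the left/right (s0/s1) and to
    # the nearest temple on the left/right (t0/t1).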
s0 = x - s.le(x)
s1 = s.ge(x) - x
t0 = x - t.le(x)
t1 = t.ge(x) - x
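    # Visit both a shrine and a temple: go only left, only right, or go one
    # way first and backtrack; backtracking doubles the nearer leg, hence the
    # min() inside the mixed terms.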
print(min(max(s0, t0), max(s1, t1), s0 + t1 + min(s0, t1), s1 + t0 + min(s1, t0)))
| 26.052632
| 83
| 0.575758
|
dca99a25d0b354777b6ac1ea3a5b36e54ed762e6
| 15,085
|
py
|
Python
|
concordia/admin/__init__.py
|
Violet26/concordia
|
d126f80808b22e428881fb6c429f0dcb695fc8b2
|
[
"CC0-1.0"
] | 1
|
2020-02-03T22:23:27.000Z
|
2020-02-03T22:23:27.000Z
|
concordia/admin/__init__.py
|
Violet26/concordia
|
d126f80808b22e428881fb6c429f0dcb695fc8b2
|
[
"CC0-1.0"
] | 9
|
2021-03-19T12:43:20.000Z
|
2022-02-10T15:40:48.000Z
|
concordia/admin/__init__.py
|
Violet26/concordia
|
d126f80808b22e428881fb6c429f0dcb695fc8b2
|
[
"CC0-1.0"
] | null | null | null |
from urllib.parse import urljoin
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.decorators import permission_required
from django.contrib.auth.models import User
from django.db.models import Count
from django.shortcuts import get_object_or_404, render
from django.template.defaultfilters import truncatechars
from django.urls import path
from django.utils.decorators import method_decorator
from django.utils.html import format_html
from django_admin_multiple_choice_list_filter.list_filters import (
MultipleChoiceListFilter,
)
from tabular_export.admin import export_to_csv_action, export_to_excel_action
from exporter import views as exporter_views
from importer.tasks import import_items_into_project_from_url
from ..models import (
Asset,
Campaign,
CarouselSlide,
Item,
Project,
Resource,
SimpleContentBlock,
SimplePage,
SiteReport,
Tag,
Topic,
Transcription,
UserAssetTagCollection,
)
from ..views import ReportCampaignView
from .actions import (
anonymize_action,
publish_action,
publish_item_action,
reopen_asset_action,
unpublish_action,
unpublish_item_action,
)
from .filters import AcceptedFilter, RejectedFilter, SubmittedFilter
from .forms import (
AdminItemImportForm,
BleachedDescriptionAdminForm,
SimpleContentBlockAdminForm,
)
class ProjectListFilter(MultipleChoiceListFilter):
title = "Project"
def lookups(self, request, model_admin):
choices = Project.objects.values_list("pk", "title")
return tuple(choices)
class AssetProjectListFilter(ProjectListFilter):
parameter_name = "item__project__in"
class ItemProjectListFilter(ProjectListFilter):
parameter_name = "project__in"
class ConcordiaUserAdmin(UserAdmin):
list_display = UserAdmin.list_display + ("date_joined", "transcription_count")
def get_queryset(self, request):
qs = super().get_queryset(request)
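        # Annotate each user with their transcription count so the changelist
        # can display and sort by it without issuing per-row queries.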
qs = qs.annotate(Count("transcription"))
return qs
def transcription_count(self, obj):
return obj.transcription__count
EXPORT_FIELDS = (
"username",
"email",
"first_name",
"last_name",
"is_active",
"is_staff",
"is_superuser",
"date_joined",
"last_login",
"transcription__count",
)
def export_users_as_csv(self, request, queryset):
return export_to_csv_action(
self, request, queryset, field_names=self.EXPORT_FIELDS
)
def export_users_as_excel(self, request, queryset):
return export_to_excel_action(
self, request, queryset, field_names=self.EXPORT_FIELDS
)
transcription_count.admin_order_field = "transcription__count"
actions = (anonymize_action, export_users_as_csv, export_users_as_excel)
admin.site.unregister(User)
admin.site.register(User, ConcordiaUserAdmin)
class CustomListDisplayFieldsMixin:
"""
Mixin which provides some custom text formatters for list display fields
used on multiple models
"""
def truncated_description(self, obj):
return truncatechars(obj.description, 200)
truncated_description.short_description = "Description"
def truncated_metadata(self, obj):
if obj.metadata:
return format_html("<code>{}</code>", truncatechars(obj.metadata, 200))
else:
return ""
truncated_metadata.allow_tags = True
truncated_metadata.short_description = "Metadata"
@admin.register(Campaign)
class CampaignAdmin(admin.ModelAdmin, CustomListDisplayFieldsMixin):
form = BleachedDescriptionAdminForm
list_display = (
"title",
"short_description",
"published",
"display_on_homepage",
"ordering",
"truncated_metadata",
)
list_editable = ("display_on_homepage", "ordering", "published")
list_display_links = ("title",)
prepopulated_fields = {"slug": ("title",)}
search_fields = ["title", "description"]
list_filter = ("published", "display_on_homepage")
actions = (publish_action, unpublish_action)
def get_urls(self):
urls = super().get_urls()
app_label = self.model._meta.app_label
model_name = self.model._meta.model_name
custom_urls = [
path(
"exportCSV/<path:campaign_slug>",
exporter_views.ExportCampaignToCSV.as_view(),
name=f"{app_label}_{model_name}_export-csv",
),
path(
"exportBagIt/<path:campaign_slug>",
exporter_views.ExportCampaignToBagIt.as_view(),
name=f"{app_label}_{model_name}_export-bagit",
),
path(
"report/<path:campaign_slug>",
ReportCampaignView.as_view(),
name=f"{app_label}_{model_name}_report",
),
]
return custom_urls + urls
@admin.register(Resource)
class ResourceAdmin(admin.ModelAdmin, CustomListDisplayFieldsMixin):
list_display = ("campaign", "topic", "sequence", "title", "resource_url")
list_display_links = ("campaign", "topic", "sequence", "title")
@admin.register(Topic)
class TopicAdmin(admin.ModelAdmin):
form = BleachedDescriptionAdminForm
list_display = ("id", "title", "slug")
list_display_links = ("id", "title", "slug")
prepopulated_fields = {"slug": ("title",)}
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin, CustomListDisplayFieldsMixin):
form = BleachedDescriptionAdminForm
# todo: add foreignKey link for campaign
list_display = ("id", "title", "slug", "campaign", "published")
list_display_links = ("id", "title", "slug")
prepopulated_fields = {"slug": ("title",)}
search_fields = ["title", "campaign__title"]
list_filter = ("published", "topics", "campaign")
actions = (publish_action, unpublish_action)
def get_urls(self):
urls = super().get_urls()
app_label = self.model._meta.app_label
model_name = self.model._meta.model_name
custom_urls = [
path(
"<path:object_id>/item-import/",
self.admin_site.admin_view(self.item_import_view),
name=f"{app_label}_{model_name}_item-import",
)
]
return custom_urls + urls
@method_decorator(permission_required("concordia.add_campaign"))
@method_decorator(permission_required("concordia.change_campaign"))
@method_decorator(permission_required("concordia.add_project"))
@method_decorator(permission_required("concordia.change_project"))
@method_decorator(permission_required("concordia.add_item"))
@method_decorator(permission_required("concordia.change_item"))
def item_import_view(self, request, object_id):
project = get_object_or_404(Project, pk=object_id)
        import_job = None  # ensure it is defined even if the form is invalid
        if request.method == "POST":
form = AdminItemImportForm(request.POST)
if form.is_valid():
import_url = form.cleaned_data["import_url"]
import_job = import_items_into_project_from_url(
request.user, project, import_url
)
else:
form = AdminItemImportForm()
import_job = None
media = self.media
context = {
**self.admin_site.each_context(request),
"app_label": self.model._meta.app_label,
"add": False,
"change": False,
"save_as": False,
"save_on_top": False,
"opts": self.model._meta,
"title": f"Import Items into “{project.title}”",
"object_id": object_id,
"original": project,
"media": media,
"preserved_filters": self.get_preserved_filters(request),
"is_popup": False,
"has_view_permission": True,
"has_add_permission": True,
"has_change_permission": True,
"has_delete_permission": False,
"has_editable_inline_admin_formsets": False,
"project": project,
"form": form,
"import_job": import_job,
}
return render(request, "admin/concordia/project/item_import.html", context)
@admin.register(Item)
class ItemAdmin(admin.ModelAdmin):
list_display = ("title", "item_id", "campaign_title", "project", "published")
list_display_links = ("title", "item_id")
search_fields = [
"title",
"item_id",
"item_url",
"project__campaign__title",
"project__title",
]
list_filter = (
"published",
"project__topics",
"project__campaign",
ItemProjectListFilter,
)
actions = (publish_item_action, unpublish_item_action)
def get_queryset(self, request):
qs = super().get_queryset(request)
qs = qs.select_related("project", "project__campaign")
return qs
def campaign_title(self, obj):
return obj.project.campaign.title
@admin.register(Asset)
class AssetAdmin(admin.ModelAdmin, CustomListDisplayFieldsMixin):
list_display = (
"published",
"transcription_status",
"item_id",
"year",
"sequence",
"difficulty",
"truncated_media_url",
"media_type",
"truncated_metadata",
)
list_display_links = ("item_id", "sequence")
prepopulated_fields = {"slug": ("title",)}
search_fields = [
"title",
"media_url",
"item__project__campaign__title",
"item__project__title",
"item__item_id",
]
list_filter = (
"transcription_status",
"published",
"item__project__topics",
"item__project__campaign",
AssetProjectListFilter,
"media_type",
)
actions = (
publish_action,
reopen_asset_action,
unpublish_action,
export_to_csv_action,
export_to_excel_action,
)
autocomplete_fields = ("item",)
ordering = ("item__item_id", "sequence")
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.select_related("item").order_by("item__item_id", "sequence")
def lookup_allowed(self, key, value):
        if key in ("item__project__id__exact",):
return True
else:
return super().lookup_allowed(key, value)
def item_id(self, obj):
return obj.item.item_id
def truncated_media_url(self, obj):
return format_html(
'<a target="_blank" href="{}">{}</a>',
urljoin(settings.MEDIA_URL, obj.media_url),
truncatechars(obj.media_url, 100),
)
truncated_media_url.allow_tags = True
truncated_media_url.short_description = "Media URL"
def get_readonly_fields(self, request, obj=None):
if obj:
return self.readonly_fields + ("item",)
return self.readonly_fields
def change_view(self, request, object_id, extra_context=None, **kwargs):
if object_id:
if extra_context is None:
extra_context = {}
extra_context["transcriptions"] = (
Transcription.objects.filter(asset__pk=object_id)
.select_related("user", "reviewed_by")
.order_by("-pk")
)
return super().change_view(
request, object_id, extra_context=extra_context, **kwargs
)
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
list_display = ("id", "value")
list_display_links = ("id", "value")
search_fields = ["value"]
@admin.register(UserAssetTagCollection)
class UserAssetTagCollectionAdmin(admin.ModelAdmin):
list_display = ("id", "asset", "user", "created_on", "updated_on")
list_display_links = ("id", "asset")
date_hierarchy = "created_on"
search_fields = ["asset__title", "asset__campaign__title", "asset__project__title"]
list_filter = (
"asset__item__project__campaign",
"asset__item__project",
"user__is_staff",
)
@admin.register(Transcription)
class TranscriptionAdmin(admin.ModelAdmin):
list_display = (
"id",
"asset",
"user",
"truncated_text",
"created_on",
"updated_on",
"accepted",
"rejected",
)
list_display_links = ("id", "asset")
list_filter = (
SubmittedFilter,
AcceptedFilter,
RejectedFilter,
"asset__item__project__campaign",
"asset__item__project",
)
search_fields = ["text", "user__username", "user__email"]
readonly_fields = (
"asset",
"user",
"created_on",
"updated_on",
"submitted",
"accepted",
"rejected",
"reviewed_by",
"supersedes",
"text",
)
actions = (export_to_csv_action, export_to_excel_action)
def truncated_text(self, obj):
return truncatechars(obj.text, 100)
truncated_text.short_description = "Text"
@admin.register(SimpleContentBlock)
class SimpleContentBlockAdmin(admin.ModelAdmin):
form = SimpleContentBlockAdminForm
list_display = ("slug", "created_on", "updated_on")
readonly_fields = ("created_on", "updated_on")
fieldsets = (
(None, {"fields": ("created_on", "updated_on", "slug")}),
("Body", {"classes": ("markdown-preview",), "fields": ("body",)}),
)
@admin.register(CarouselSlide)
class CarouselSlideAdmin(admin.ModelAdmin):
list_display = ("headline", "published", "ordering")
readonly_fields = ("created_on", "updated_on")
@admin.register(SimplePage)
class SimplePageAdmin(admin.ModelAdmin):
list_display = ("path", "title", "created_on", "updated_on")
readonly_fields = ("created_on", "updated_on")
fieldsets = (
(None, {"fields": ("created_on", "updated_on", "path", "title")}),
("Body", {"classes": ("markdown-preview",), "fields": ("body",)}),
)
@admin.register(SiteReport)
class SiteReportAdmin(admin.ModelAdmin):
list_display = ("created_on", "campaign", "topic")
list_filter = ("campaign", "topic")
def export_to_csv(self, request, queryset):
return export_to_csv_action(
self, request, queryset, field_names=SiteReport.DEFAULT_EXPORT_FIELDNAMES
)
def export_to_excel(self, request, queryset):
return export_to_excel_action(
self, request, queryset, field_names=SiteReport.DEFAULT_EXPORT_FIELDNAMES
)
actions = (export_to_csv, export_to_excel)
FIELDNAME_SORT_KEYS = [
"created",
"user",
"campaign",
"topic",
"project",
"item",
"asset",
"transcription",
"tag",
]
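    # Sort-key helper: field names are ranked by the first entity prefix they
    # contain (campaign before project before item, ...); names matching no
    # prefix sort last via the for-else.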
def fieldname_sort_key(self, key):
for i, prefix in enumerate(self.FIELDNAME_SORT_KEYS):
if prefix in key:
return (i, key)
else:
return (1024, key)
| 28.898467
| 87
| 0.638117
|
0737bb4aaabbea17fc036794c0f0477485f296db
| 7,283
|
py
|
Python
|
business_rules/operators.py
|
Buzzvil/business-rules
|
1b47959008a5e4f31f0d655756d34436de6785a6
|
[
"MIT"
] | null | null | null |
business_rules/operators.py
|
Buzzvil/business-rules
|
1b47959008a5e4f31f0d655756d34436de6785a6
|
[
"MIT"
] | 2
|
2021-02-02T01:32:46.000Z
|
2021-02-02T05:45:09.000Z
|
business_rules/operators.py
|
Buzzvil/business-rules
|
1b47959008a5e4f31f0d655756d34436de6785a6
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import inspect
import re
from decimal import Decimal
from functools import wraps
from .fields import FIELD_NO_INPUT, FIELD_NUMERIC, FIELD_SELECT, FIELD_SELECT_MULTIPLE, FIELD_TEXT
from .utils import float_to_decimal, fn_name_to_pretty_label
class BaseType(object):
def __init__(self, value):
self.value = self._assert_valid_value_and_cast(value)
def _assert_valid_value_and_cast(self, value):
        raise NotImplementedError()
@classmethod
def get_all_operators(cls):
methods = inspect.getmembers(cls)
return [
{'name': m[0], 'label': m[1].label, 'input_type': m[1].input_type}
for m in methods
if getattr(m[1], 'is_operator', False)
]
def export_type(cls):
""" Decorator to expose the given class to business_rules.export_rule_data. """
cls.export_in_rule_data = True
return cls
def type_operator(input_type, label=None, assert_type_for_arguments=True):
"""Decorator to make a function into a type operator.
- assert_type_for_arguments - if True this patches the operator function
so that arguments passed to it will have _assert_valid_value_and_cast
called on them to make type errors explicit.
"""
def wrapper(func):
func.is_operator = True
func.label = label or fn_name_to_pretty_label(func.__name__)
func.input_type = input_type
@wraps(func)
def inner(self, *args, **kwargs):
if assert_type_for_arguments:
args = [self._assert_valid_value_and_cast(arg) for arg in args]
kwargs = dict((k, self._assert_valid_value_and_cast(v)) for k, v in kwargs.items())
return func(self, *args, **kwargs)
return inner
return wrapper
@export_type
class StringType(BaseType):
name = "string"
def _assert_valid_value_and_cast(self, value):
value = value or ""
if not isinstance(value, str):
raise AssertionError("{0} is not a valid string type.".format(value))
return value
@type_operator(FIELD_TEXT)
def equal_to(self, other_string):
return self.value == other_string
@type_operator(FIELD_TEXT, label="Equal To (case insensitive)")
def equal_to_case_insensitive(self, other_string):
return self.value.lower() == other_string.lower()
@type_operator(FIELD_TEXT)
def starts_with(self, other_string):
return self.value.startswith(other_string)
@type_operator(FIELD_TEXT)
def ends_with(self, other_string):
return self.value.endswith(other_string)
@type_operator(FIELD_TEXT)
def contains(self, other_string):
return other_string in self.value
@type_operator(FIELD_TEXT)
def matches_regex(self, regex):
return re.search(regex, self.value)
@type_operator(FIELD_NO_INPUT)
def non_empty(self):
return bool(self.value)
@export_type
class NumericType(BaseType):
EPSILON = Decimal('0.000001')
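    # Comparisons use an epsilon tolerance so values that differ only by
    # floating point noise still compare as equal.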
name = "numeric"
@staticmethod
def _assert_valid_value_and_cast(value):
if isinstance(value, float):
# In python 2.6, casting float to Decimal doesn't work
return float_to_decimal(value)
if isinstance(value, int):
return Decimal(value)
if isinstance(value, Decimal):
return value
else:
raise AssertionError("{0} is not a valid numeric type.".format(value))
@type_operator(FIELD_NUMERIC)
def equal_to(self, other_numeric):
return abs(self.value - other_numeric) <= self.EPSILON
@type_operator(FIELD_NUMERIC)
def greater_than(self, other_numeric):
return (self.value - other_numeric) > self.EPSILON
@type_operator(FIELD_NUMERIC)
def greater_than_or_equal_to(self, other_numeric):
return self.greater_than(other_numeric) or self.equal_to(other_numeric)
@type_operator(FIELD_NUMERIC)
def less_than(self, other_numeric):
return (other_numeric - self.value) > self.EPSILON
@type_operator(FIELD_NUMERIC)
def less_than_or_equal_to(self, other_numeric):
return self.less_than(other_numeric) or self.equal_to(other_numeric)
@export_type
class BooleanType(BaseType):
name = "boolean"
def _assert_valid_value_and_cast(self, value):
if type(value) != bool:
raise AssertionError("{0} is not a valid boolean type".format(value))
return value
@type_operator(FIELD_NO_INPUT)
def is_true(self):
return self.value
@type_operator(FIELD_NO_INPUT)
def is_false(self):
return not self.value
@export_type
class SelectType(BaseType):
name = "select"
def _assert_valid_value_and_cast(self, value):
if not hasattr(value, '__iter__'):
raise AssertionError("{0} is not a valid select type".format(value))
return value
@staticmethod
def _case_insensitive_equal_to(value_from_list, other_value):
if isinstance(value_from_list, str) and isinstance(other_value, str):
return value_from_list.lower() == other_value.lower()
else:
return value_from_list == other_value
@type_operator(FIELD_SELECT, assert_type_for_arguments=False)
def contains(self, other_value):
for val in self.value:
if self._case_insensitive_equal_to(val, other_value):
return True
return False
@type_operator(FIELD_SELECT, assert_type_for_arguments=False)
def does_not_contain(self, other_value):
for val in self.value:
if self._case_insensitive_equal_to(val, other_value):
return False
return True
@export_type
class SelectMultipleType(BaseType):
name = "select_multiple"
def _assert_valid_value_and_cast(self, value):
if not hasattr(value, '__iter__'):
raise AssertionError("{0} is not a valid select multiple type".format(value))
return value
@type_operator(FIELD_SELECT_MULTIPLE)
def contains_all(self, other_value):
select = SelectType(self.value)
for other_val in other_value:
if not select.contains(other_val):
return False
return True
@type_operator(FIELD_SELECT_MULTIPLE)
def is_contained_by(self, other_value):
other_select_multiple = SelectMultipleType(other_value)
return other_select_multiple.contains_all(self.value)
@type_operator(FIELD_SELECT_MULTIPLE)
def shares_at_least_one_element_with(self, other_value):
select = SelectType(self.value)
for other_val in other_value:
if select.contains(other_val):
return True
return False
@type_operator(FIELD_SELECT_MULTIPLE)
def shares_exactly_one_element_with(self, other_value):
found_one = False
select = SelectType(self.value)
for other_val in other_value:
if select.contains(other_val):
if found_one:
return False
found_one = True
return found_one
@type_operator(FIELD_SELECT_MULTIPLE)
def shares_no_elements_with(self, other_value):
return not self.shares_at_least_one_element_with(other_value)
| 30.991489
| 99
| 0.679528
|
5fecd6797fa75051cc09a88bfbdb9dc80a0548c2
| 18,339
|
py
|
Python
|
prcslib/sexpdata.py
|
kazssym/prcslib-python
|
ce8d00b7d3ab9b5df39b5abca417e751999830c4
|
[
"MIT"
] | 1
|
2019-09-21T14:17:44.000Z
|
2019-09-21T14:17:44.000Z
|
prcslib/sexpdata.py
|
kazssym/prcslib-python
|
ce8d00b7d3ab9b5df39b5abca417e751999830c4
|
[
"MIT"
] | 2
|
2021-02-13T04:11:10.000Z
|
2022-03-05T02:43:47.000Z
|
prcslib/sexpdata.py
|
kazssym/prcslib-python
|
ce8d00b7d3ab9b5df39b5abca417e751999830c4
|
[
"MIT"
] | null | null | null |
# [[[cog import cog; cog.outl('"""\n%s\n"""' % file('README.rst').read()) ]]]
"""
S-expression parser for Python
==============================
`sexpdata` is a simple S-expression parser/serializer. It has
simple `load` and `dump` functions like `pickle`, `json` or `PyYAML`
module.
>>> from sexpdata import loads, dumps
>>> loads('("a" "b")')
['a', 'b']
>>> print(dumps(['a', 'b']))
("a" "b")
You can install `sexpdata` from PyPI_::
pip install sexpdata
Links:
* `Documentation (at Read the Docs) <http://sexpdata.readthedocs.org/>`_
* `Repository (at GitHub) <https://github.com/tkf/sexpdata>`_
* `Issue tracker (at GitHub) <https://github.com/tkf/sexpdata/issues>`_
* `PyPI <http://pypi.python.org/pypi/sexpdata>`_
* `Travis CI <https://travis-ci.org/#!/tkf/sexpdata>`_
License
-------
`sexpdata` is licensed under the terms of the BSD 2-Clause License.
See the source code for more information.
"""
# [[[end]]]
# Copyright (c) 2012 Takafumi Arakaki
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__version__ = '0.0.3'
__author__ = 'Takafumi Arakaki'
__license__ = 'BSD License'
__all__ = [
# API functions:
'load', 'loads', 'dump', 'dumps',
# Utility functions:
'car', 'cdr',
# S-expression classes:
'Symbol', 'String', 'Quoted',
]
import re
from string import whitespace
import functools
BRACKETS = {'(': ')', '[': ']'}
### Python 3 compatibility
try:
unicode
PY3 = False
except NameError:
basestring = unicode = str # Python 3
PY3 = True
def uformat(s, *args, **kwds):
"""Alias of ``unicode(s).format(...)``."""
return tounicode(s).format(*args, **kwds)
### Utility
def tounicode(string):
"""
Decode `string` if it is not unicode. Do nothing in Python 3.
"""
if not isinstance(string, unicode):
string = unicode(string, 'utf-8')
return string
def return_as(converter):
"""
Decorator to convert result of a function.
It is just a function composition. The following two codes are
equivalent.
Using `@return_as`::
@return_as(converter)
def generator(args):
...
result = generator(args)
Manually do the same::
def generator(args):
...
result = converter(generator(args))
Example:
>>> @return_as(list)
... def f():
... for i in range(3):
... yield i
...
>>> f() # this gives a list, not an iterator
[0, 1, 2]
"""
def wrapper(generator):
@functools.wraps(generator)
def func(*args, **kwds):
return converter(generator(*args, **kwds))
return func
return wrapper
### Interface
def load(filelike, **kwds):
"""
Load object from S-expression stored in `filelike`.
:arg filelike: A text stream object.
See :func:`loads` for valid keyword arguments.
>>> import io
>>> fp = io.StringIO()
>>> sexp = [Symbol('a'), Symbol('b')] # let's dump and load this object
>>> dump(sexp, fp)
>>> _ = fp.seek(0)
>>> load(fp) == sexp
True
"""
return loads(filelike.read(), **kwds)
def loads(string, **kwds):
"""
Load object from S-expression `string`.
:arg string: String containing an S-expression.
:type nil: str or None
:keyword nil: A symbol interpreted as an empty list.
Default is ``'nil'``.
:type true: str or None
:keyword true: A symbol interpreted as True.
Default is ``'t'``.
:type false: str or None
:keyword false: A symbol interpreted as False.
Default is ``None``.
:type line_comment: str
:keyword line_comment: Beginning of line comment.
Default is ``';'``.
>>> loads("(a b)")
[Symbol('a'), Symbol('b')]
>>> loads("a")
Symbol('a')
>>> loads("(a 'b)")
[Symbol('a'), Quoted(Symbol('b'))]
>>> loads("(a '(b))")
[Symbol('a'), Quoted([Symbol('b')])]
>>> loads('''
... ;; This is a line comment.
... ("a" "b") ; this is also a comment.
... ''')
['a', 'b']
>>> loads('''
... # This is a line comment.
... ("a" "b") # this is also a comment.
... ''', line_comment='#')
['a', 'b']
``nil`` is converted to an empty list by default. You can use
keyword argument `nil` to change what symbol must be interpreted
as nil:
>>> loads("nil")
[]
>>> loads("null", nil='null')
[]
>>> loads("nil", nil=None)
Symbol('nil')
``t`` is converted to True by default. You can use keyword
argument `true` to change what symbol must be converted to True.:
>>> loads("t")
True
>>> loads("#t", true='#t')
True
>>> loads("t", true=None)
Symbol('t')
No symbol is converted to False by default. You can use keyword
argument `false` to convert a symbol to False.
>>> loads("#f")
Symbol('#f')
>>> loads("#f", false='#f')
False
>>> loads("nil", false='nil', nil=None)
False
"""
obj = parse(string, **kwds)
assert len(obj) == 1 # FIXME: raise an appropriate error
return obj[0]
def dump(obj, filelike, **kwds):
"""
Write `obj` as an S-expression into given stream `filelike`.
:arg obj: A Python object.
:arg filelike: A text stream object.
See :func:`dumps` for valid keyword arguments.
>>> import io
>>> fp = io.StringIO()
>>> dump([Symbol('a'), Symbol('b')], fp)
>>> print(fp.getvalue())
(a b)
"""
filelike.write(unicode(dumps(obj)))
def dumps(obj, **kwds):
"""
Convert python object into an S-expression.
:arg obj: A Python object.
:type str_as: ``'symbol'`` or ``'string'``
:keyword str_as: How string should be interpreted.
Default is ``'string'``.
:type tuple_as: ``'list'`` or ``'array'``
:keyword tuple_as: How tuple should be interpreted.
Default is ``'list'``.
:type true_as: str
:keyword true_as: How True should be interpreted.
Default is ``'t'``
:type false_as: str
:keyword false_as: How False should be interpreted.
Default is ``'()'``
:type none_as: str
:keyword none_as: How None should be interpreted.
Default is ``'()'``
Basic usage:
>>> print(dumps(['a', 'b']))
("a" "b")
>>> print(dumps(['a', 'b'], str_as='symbol'))
(a b)
>>> print(dumps(dict(a=1, b=2)))
(:a 1 :b 2)
>>> print(dumps([None, True, False, ()]))
(() t () ())
>>> print(dumps([None, True, False, ()],
... none_as='null', true_as='#t', false_as='#f'))
(null #t #f ())
>>> print(dumps(('a', 'b')))
("a" "b")
>>> print(dumps(('a', 'b'), tuple_as='array'))
["a" "b"]
More verbose usage:
>>> print(dumps([Symbol('a'), Symbol('b')]))
(a b)
>>> print(dumps(Symbol('a')))
a
>>> print(dumps([Symbol('a'), Quoted(Symbol('b'))]))
(a 'b)
>>> print(dumps([Symbol('a'), Quoted([Symbol('b')])]))
(a '(b))
"""
return tosexp(obj, **kwds)
def car(obj):
"""
Alias of ``obj[0]``.
>>> car(loads('(a . b)'))
Symbol('a')
>>> car(loads('(a b)'))
Symbol('a')
"""
return obj[0]
def cdr(obj):
"""
`cdr`-like function.
>>> cdr(loads('(a . b)'))
Symbol('b')
>>> cdr(loads('(a b)'))
[Symbol('b')]
>>> cdr(loads('(a . (b))'))
[Symbol('b')]
>>> cdr(loads('(a)'))
[]
>>> cdr(loads('(a . nil)'))
[]
"""
    # This is a very lazy implementation. Probably the best way to do
# it is to define `Cons` S-expression class.
if len(obj) > 2:
dot = obj[1]
if isinstance(dot, Symbol) and dot.value() == '.':
return obj[2]
return obj[1:]
### Core
def tosexp(obj, str_as='string', tuple_as='list',
true_as='t', false_as='()', none_as='()'):
"""
Convert an object to an S-expression (`dumps` is just calling this).
See this table for comparison of lispy languages, to support them
as much as possible:
`Lisp: Common Lisp, Scheme/Racket, Clojure, Emacs Lisp - Hyperpolyglot
<http://hyperpolyglot.org/lisp>`_
"""
_tosexp = lambda x: tosexp(
x, str_as=str_as, tuple_as=tuple_as,
true_as=true_as, false_as=false_as, none_as=none_as)
if isinstance(obj, list):
return Bracket(obj, '(').tosexp(_tosexp)
elif isinstance(obj, tuple):
if tuple_as == 'list':
return Bracket(obj, '(').tosexp(_tosexp)
elif tuple_as == 'array':
return Bracket(obj, '[').tosexp(_tosexp)
else:
raise ValueError(uformat("tuple_as={0!r} is not valid", tuple_as))
elif obj is True: # must do this before ``isinstance(obj, int)``
return true_as
elif obj is False:
return false_as
elif obj is None:
return none_as
elif isinstance(obj, (int, float)):
return str(obj)
elif isinstance(obj, basestring):
if str_as == 'symbol':
return obj
elif str_as == 'string':
return String(obj).tosexp()
else:
raise ValueError(uformat("str_as={0!r} is not valid", str_as))
elif isinstance(obj, dict):
return _tosexp(dict_to_plist(obj))
elif isinstance(obj, SExpBase):
return obj.tosexp(_tosexp)
else:
raise TypeError(uformat(
"Object of type '{0}' cannot be converted by `tosexp`. "
"It's value is '{1!r}'", type(obj), obj))
@return_as(list)
def dict_to_plist(obj):
for key in obj:
yield Symbol(uformat(":{0}", key))
yield obj[key]
class SExpBase(object):
def __init__(self, val):
self._val = val
def __repr__(self):
return uformat("{0}({1!r})", self.__class__.__name__, self._val)
def __eq__(self, other):
if isinstance(other, self.__class__):
return self._val == other._val
else:
return False
def value(self):
return self._val
def tosexp(self, tosexp=tosexp):
"""
Decode this object into an S-expression string.
:arg tosexp: A function to be used when converting sub S-expression.
"""
raise NotImplementedError
@classmethod
def quote(cls, string):
for (s, q) in cls._lisp_quoted_specials:
string = string.replace(s, q)
return tounicode(string)
@classmethod
def unquote(cls, string):
return cls._lisp_quoted_to_raw.get(string, string)
class Symbol(SExpBase):
_lisp_quoted_specials = [
('\\', '\\\\'), # must come first to avoid doubly quoting "\"
("'", r"\'"), ("`", r"\`"), ('"', r'\"'),
('(', r'\('), (')', r'\)'), ('[', r'\['), (']', r'\]'),
(' ', r'\ '), ('.', r'\.'), (',', r'\,'), ('?', r'\?'),
(';', r'\;'), ('#', r'\#'),
]
_lisp_quoted_to_raw = dict((q, r) for (r, q) in _lisp_quoted_specials)
def tosexp(self, tosexp=None):
return self.quote(self._val)
class String(SExpBase):
_lisp_quoted_specials = [ # from Pymacs
('\\', '\\\\'), # must come first to avoid doubly quoting "\"
('"', '\\"'), ('\b', '\\b'), ('\f', '\\f'),
('\n', '\\n'), ('\r', '\\r'), ('\t', '\\t')]
_lisp_quoted_to_raw = dict((q, r) for (r, q) in _lisp_quoted_specials)
def tosexp(self, tosexp=None):
return uformat('"{0}"', self.quote(self._val))
class Quoted(SExpBase):
def tosexp(self, tosexp=tosexp):
return uformat("'{0}", tosexp(self._val))
class Bracket(SExpBase):
def __init__(self, val, bra):
assert bra in BRACKETS # FIXME: raise an appropriate error
super(Bracket, self).__init__(val)
self._bra = bra
def __repr__(self):
return uformat("{0}({1!r}, {2!r})",
self.__class__.__name__, self._val, self._bra)
def tosexp(self, tosexp=tosexp):
bra = self._bra
ket = BRACKETS[self._bra]
c = ' '.join(tosexp(v) for v in self._val)
return uformat("{0}{1}{2}", bra, c, ket)
def bracket(val, bra):
if bra == '(':
return val
else:
return Bracket(val, bra)
class ExpectClosingBracket(Exception):
def __init__(self, got, expect):
super(ExpectClosingBracket, self).__init__(uformat(
"Not enough closing brackets. "
"Expected {0!r} to be the last letter in the sexp. "
"Got: {1!r}", expect, got))
class ExpectNothing(Exception):
def __init__(self, got):
super(ExpectNothing, self).__init__(uformat(
"Too many closing brackets. "
"Expected no character left in the sexp. "
"Got: {0!r}", got))
class Parser(object):
closing_brackets = set(BRACKETS.values())
atom_end = \
set(BRACKETS) | set(closing_brackets) | set('"\'') | set(whitespace)
atom_end_or_escape_re = re.compile("|".join(map(re.escape,
atom_end | set('\\'))))
quote_or_escape_re = re.compile(r'"|\\')
def __init__(self, string, string_to=None, nil='nil', true='t', false=None,
line_comment=';'):
self.string = string
self.nil = nil
self.true = true
self.false = false
self.string_to = (lambda x: x) if string_to is None else string_to
self.line_comment = line_comment
def parse_str(self, i):
string = self.string
chars = []
append = chars.append
search = self.quote_or_escape_re.search
assert string[i] == '"' # never fail
while True:
i += 1
match = search(string, i)
end = match.start()
append(string[i:end])
c = match.group()
if c == '"':
i = end + 1
break
elif c == '\\':
i = end + 1
append(String.unquote(c + string[i]))
else:
raise ExpectClosingBracket('"', None)
return (i, ''.join(chars))
def parse_atom(self, i):
string = self.string
chars = []
append = chars.append
search = self.atom_end_or_escape_re.search
atom_end = self.atom_end
while True:
match = search(string, i)
if not match:
append(string[i:])
i = len(string)
break
end = match.start()
append(string[i:end])
c = match.group()
if c in atom_end:
i = end # this is different from str
break
elif c == '\\':
i = end + 1
append(Symbol.unquote(c + string[i]))
i += 1
else:
raise ExpectClosingBracket('"', None)
return (i, self.atom(''.join(chars)))
def atom(self, token):
if token == self.nil:
return []
if token == self.true:
return True
if token == self.false:
return False
return Symbol(token)
def parse_sexp(self, i):
string = self.string
len_string = len(self.string)
sexp = []
append = sexp.append
while i < len_string:
c = string[i]
if c == '"':
(i, subsexp) = self.parse_str(i)
append(self.string_to(subsexp))
elif c in whitespace:
i += 1
continue
elif c in BRACKETS:
close = BRACKETS[c]
(i, subsexp) = self.parse_sexp(i + 1)
append(bracket(subsexp, c))
try:
nc = string[i]
except IndexError:
nc = None
if nc != close:
raise ExpectClosingBracket(nc, close)
i += 1
elif c in self.closing_brackets:
break
elif c == "'":
(i, subsexp) = self.parse_sexp(i + 1)
append(Quoted(subsexp[0]))
sexp.extend(subsexp[1:])
elif c == self.line_comment:
i = string.find('\n', i) + 1
if i <= 0:
i = len_string
break
else:
(i, subsexp) = self.parse_atom(i)
append(subsexp)
return (i, sexp)
def parse(self):
(i, sexp) = self.parse_sexp(0)
if i < len(self.string):
raise ExpectNothing(self.string[i:])
return sexp
def parse(string, **kwds):
"""
Parse s-expression.
>>> parse("(a b)")
[[Symbol('a'), Symbol('b')]]
>>> parse("a")
[Symbol('a')]
>>> parse("(a 'b)")
[[Symbol('a'), Quoted(Symbol('b'))]]
>>> parse("(a '(b))")
[[Symbol('a'), Quoted([Symbol('b')])]]
"""
return Parser(string, **kwds).parse()
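# A few extra usage sketches (not from the original test suite), derived from the
# Parser options wired through `parse(string, **kwds)`; the square-bracket case
# assumes the module-level BRACKETS map includes '[' -> ']', as the
# tuple_as='array' branch of `tosexp` implies.
#
#     >>> parse("(a t nil)")                   # 't' -> True, 'nil' -> []
#     [[Symbol('a'), True, []]]
#     >>> parse("[a b]")                       # non-'(' brackets are preserved
#     [Bracket([Symbol('a'), Symbol('b')], '[')]
#     >>> parse("(a b) ; trailing comment")    # ';' starts a line comment
#     [[Symbol('a'), Symbol('b')]]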
| 27.371642
| 79
| 0.537925
|
d9bd8deb88faf0310ae2f10260c80cf96678032b
| 1,925
|
py
|
Python
|
aiida_abacus/calculations/functions.py
|
materials-science/aiida-abacus
|
b59e56f92b31dbe5ddf317bc4ab74eda6c3d3ea7
|
[
"MIT"
] | null | null | null |
aiida_abacus/calculations/functions.py
|
materials-science/aiida-abacus
|
b59e56f92b31dbe5ddf317bc4ab74eda6c3d3ea7
|
[
"MIT"
] | null | null | null |
aiida_abacus/calculations/functions.py
|
materials-science/aiida-abacus
|
b59e56f92b31dbe5ddf317bc4ab74eda6c3d3ea7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from aiida.engine import calcfunction
@calcfunction
def create_kpoints_from_distance(structure, distance, force_parity, system_2d):
"""[Refer to `aiida_quantumespresso/calculations/functions/create_kpoints_from_distance`, v.3.4.2] Calculation function to compute a k-point mesh for a structure with a guaranteed minimum k-point distance.
Generate a uniformly spaced kpoint mesh for a given structure.
The spacing between kpoints in reciprocal space is guaranteed to be at least the defined distance.
:param structure: the StructureData to which the mesh should apply
:param distance: a Float with the desired distance between kpoints in reciprocal space
:param force_parity: a Bool to specify whether the generated mesh should maintain parity
    :param system_2d: a Bool; when true, the generated mesh is collapsed to a single k-point along the third reciprocal axis
    :returns: a KpointsData with the generated mesh
"""
from numpy import linalg
from aiida.orm import KpointsData
epsilon = 1e-5
kpoints = KpointsData()
kpoints.set_cell_from_structure(structure)
kpoints.set_kpoints_mesh_from_density(
distance.value, force_parity=force_parity.value
)
lengths_vector = [linalg.norm(vector) for vector in structure.cell]
lengths_kpoint = kpoints.get_kpoints_mesh()[0]
is_symmetric_cell = all(
abs(length - lengths_vector[0]) < epsilon for length in lengths_vector
)
is_symmetric_mesh = all(
length == lengths_kpoint[0] for length in lengths_kpoint
)
# If the vectors of the cell all have the same length, the kpoint mesh should be isotropic as well
if is_symmetric_cell and not is_symmetric_mesh:
nkpoints = max(lengths_kpoint)
kpoints.set_kpoints_mesh([nkpoints, nkpoints, nkpoints])
# TODO: cope with 2d system structures
if system_2d.value is True:
mesh, off = kpoints.get_kpoints_mesh()
mesh[2] = 1
kpoints.set_kpoints_mesh(mesh)
return kpoints
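# Minimal usage sketch (not part of the original module). It assumes a loaded
# AiiDA profile and an existing `structure` StructureData node; the calcfunction
# is called with AiiDA data nodes:
#
#     from aiida.orm import Float, Bool
#     kpoints = create_kpoints_from_distance(
#         structure,     # StructureData node
#         Float(0.3),    # desired k-point distance in reciprocal space
#         Bool(False),   # force_parity
#         Bool(True),    # system_2d: collapse the mesh along the third axis
#     )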
| 37.745098
| 209
| 0.735065
|
4cac96847cd151b377797181ae95a15356a20e93
| 14,638
|
py
|
Python
|
GitHub_RankingsScraperShell.py
|
cassandrarogers/Rankings
|
64ae37ecc3009b59c672b6ccf63813fcac054abc
|
[
"MIT"
] | 2
|
2018-06-21T20:36:44.000Z
|
2019-05-02T12:49:39.000Z
|
GitHub_RankingsScraperShell.py
|
cassandrarogers/Rankings
|
64ae37ecc3009b59c672b6ccf63813fcac054abc
|
[
"MIT"
] | null | null | null |
GitHub_RankingsScraperShell.py
|
cassandrarogers/Rankings
|
64ae37ecc3009b59c672b6ccf63813fcac054abc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon May 21 15:30:15 2018
Rankings Scraper Shell
@author: rogers
"""
def GetRankings(ranking, **kwargs):
import pandas as pd
import datetime
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from itertools import islice
import urllib.request as urllib2
from bs4 import BeautifulSoup
from DemoFunctions_GitHub import DEMO_append_metadata, DEMO_export_to_csv
date = datetime.date.today()
url = kwargs.get('url', None)
ranking_year = kwargs.get('ranking_year', date.year)
publication_year = kwargs.get('publication_year', date.year)
publication_month = kwargs.get('publication_month', date.month)
publication_day = kwargs.get('publication_day', date.day)
ranking_scope = kwargs.get('ranking_scope', 'National')
primary_ranking = kwargs.get('primary_ranking', 1)
publication_date = datetime.date(month = int(publication_month), day = int(publication_day), year = int(publication_year))
print('\n','Ranking Metadata: ', '\n\n' ,
'Ranking Year: ', ranking_year, '\n' ,
'Publication Date: ', publication_date, '\n' ,
'Scope: ', ranking_scope, '\n' ,
'Primary: ', primary_ranking, '\n\n')
print('Initiating web scraping...')
driver = webdriver.Chrome('C:/Users/rogers/chromedriver.exe')
if ranking == "US_News_Best_Colleges":
driver.switch_to_window(driver.current_window_handle)
##UNCOMMENT THIS SECTION TO LOGIN TO US NEWS COMPASS
#driver.get("https://secure.usnews.com/member/login")
#username = driver.find_element_by_xpath('//*[@id="username"]')
#password = driver.find_element_by_xpath('//*[@id="password"]')
# ENTER US NEWS COMPASS LOGIN DETAILS
#username.send_keys('') #USERNAME
#password.send_keys('') #PASSWORD
#driver.find_element_by_xpath('//*[@id="login_form"]/input[3]').click()
driver.get(str(url)) # navigate to the college profile
college_name = driver.find_element_by_xpath('/html/body/div[2]/div/div/div[2]/div[3]/div/div[1]/div[2]/h1').text
ul = driver.find_elements_by_xpath('//*[@id="content-main"]/div[1]/div[2]/ul')
data =[]
rank =[]
rankname =[]
college_list =[]
for u in ul:
data = u.text.splitlines()
for d in data:
if d.startswith('At'):
data.remove(d)
for part in data:
rankname.append('US News - ' + str(part.split('in', 1)[1]).strip(' \n#'))
rank.append(str(part.split('in', 1)[0]).strip(' \n#'))
college_list.append(college_name)
print(college_name + '- Data captured successfully')
driver.quit()
df = pd.DataFrame(list(zip(rank, rankname, college_list)))
df.columns = ['Rank', 'Ranking Name', 'School']
#print(df.head())
#append the metadata to the df
df = DEMO_append_metadata(df, ranking_year, publication_date, ranking_scope, primary_ranking)
## export data to a csv
DEMO_export_to_csv(df, ranking, ranking_year)
if ranking == "US_News_Online_Programs":
##UNCOMMENT THIS SECTION TO LOGIN TO US NEWS COMPASS
#driver.get("https://secure.usnews.com/member/login")
#username = driver.find_element_by_xpath('//*[@id="username"]')
#password = driver.find_element_by_xpath('//*[@id="password"]')
# ENTER US NEWS COMPASS LOGIN DETAILS
#username.send_keys('') #USERNAME
#password.send_keys('') #PASSWORD
#driver.find_element_by_xpath('//*[@id="login_form"]/input[3]').click()
data =[]
Ranks =[]
RankingName =[]
college_list = []
driver.get(url)
li = driver.find_elements_by_xpath('//*[@id="content-main"]/div[1]/div[2]')
for l in li:
data = l.text.splitlines()
data = [x for x in data if not x.startswith('Online Programs Rankings')]
data = [item.replace('#', '') for item in data]
Rank = [d.split(' in ')[:1] for d in data]
RN = [d.split(' in ')[1:] for d in data]
college_name1 = driver.find_element_by_xpath('/html/body/div[2]/div/div/div[1]/div[2]/div/div[1]/div[2]/h1').text
college_name2 = driver.find_element_by_xpath('/html/body/div[2]/div/div/div[1]/div[2]/div/div[1]/div[2]/h1/span/a').text
college_name = college_name1.replace(college_name2, '').rstrip()
college_rep = [college_name] * len(Rank)
for item in Rank:
Ranks.append(str(item).strip("[").strip("]").strip("'"))
for R in RN:
RankingName.append('US News Online Programs - ' + str(R).strip("[").strip("]").strip("'").strip("\""))
for college in college_rep:
college_list.append(college)
print(college_name, ' - Data captured successfully')
driver.quit()
df = pd.DataFrame(list(zip(Ranks, RankingName, college_list)))
df.columns = ['Rank', 'Ranking Name', 'School']
#print(df)
#append the metadata to the df
df = DEMO_append_metadata(df, ranking_year, publication_date, ranking_scope, primary_ranking)
## export data to a csv
DEMO_export_to_csv(df, ranking, ranking_year)
if ranking == "US_News_Graduate_Schools":
##UNCOMMENT THIS SECTION TO LOGIN TO US NEWS COMPASS
#driver.get("https://secure.usnews.com/member/login")
#username = driver.find_element_by_xpath('//*[@id="username"]')
#password = driver.find_element_by_xpath('//*[@id="password"]')
# ENTER US NEWS COMPASS LOGIN DETAILS
#username.send_keys('') #USERNAME
#password.send_keys('') #PASSWORD
#driver.find_element_by_xpath('//*[@id="login_form"]/input[3]').click()
data =[]
Ranks =[]
RankingName =[]
college_list = []
driver.get(url)
li = driver.find_elements_by_xpath('//*[@id="content"]/div[3]/div[2]/div/ul')
for l in li:
data = l.text.splitlines()
data = [num for num in data if num != 'Tie']
data = [item.replace('#', '') for item in data]
Rank = data[::2]
RN = data[1::2]
college_name = driver.find_element_by_xpath('//*[@id="content"]/h1').text.rstrip()
college_rep = [college_name] * len(Rank)
for item in Rank:
Ranks.append(item)
for R in RN:
RankingName.append('US News Graduate Schools - ' + str(R))
for college in college_rep:
college_list.append(college)
print(college_name, '- Data captured successfully')
driver.quit()
df = pd.DataFrame(list(zip(Ranks, RankingName, college_list)))
df.columns = ['Rank', 'Ranking Name', 'School']
#print(df)
#append the metadata to the df
df = DEMO_append_metadata(df, ranking_year, publication_date, ranking_scope, primary_ranking)
## export data to a csv
DEMO_export_to_csv(df, ranking, ranking_year)
if ranking == 'PayScale_College_Salary_Report':
# get arg for # of schools, or get top 25 schools by default
limit = kwargs.get('limit', 25)
driver.get('https://www.payscale.com/college-salary-report/bachelors')
wait = WebDriverWait(driver, 20)
element = driver.find_element_by_xpath('//*[@id="collegeSalaryReportContent"]/div/div/div[2]/div/div/div/div[2]/a')
driver.execute_script("return arguments[0].scrollIntoView(0, document.documentElement.scrollHeight-10);", element)
driver.find_element_by_xpath('//*[@id="collegeSalaryReportContent"]/div/div/div[2]/div/div/div/div[2]/a').click()
wait
print('Full List Loaded')
data1 =[]
trs = driver.find_elements_by_xpath('//*[@id="collegeSalaryReportContent"]/div/div/div[2]/div/div/table/tbody/tr')
# get the visible rows
for tr in islice(trs, limit):
tds = tr.find_elements_by_tag_name('td')
data = (list(filter(None, (td.text for td in tds))))
data1.insert(len(data1),data)
print(data[1] + ' - Data captured successfully')
driver.quit()
df = pd.DataFrame(data1)
# drop duplicate column of school names
df.drop([2], axis=1,inplace = True)
#set the columns to the table header
df.columns = ['Rank', 'School', 'School Type', 'Early Career Pay','Mid-Career Pay', '% High Meaning','% STEM Degrees']
df.insert(1,'Ranking Name', 'PayScale - College Salary Report')
#print(df)
#append the metadata to the df
df = DEMO_append_metadata(df, ranking_year, publication_date, ranking_scope, primary_ranking)
## export data to a csv
DEMO_export_to_csv(df, ranking, ranking_year)
if ranking == 'Forbes':
## ENTER COLLEGE NAMES HERE
colleges = ['Illinois Institute of Technology',
'Colorado School of Mines',
'Case Western Reserve University',
'Northeastern University',
'New Jersey Institute of Technology',
'University of Texas at Dallas',
'University of Maryland, Baltimore County',
'Missouri University of Science and Technology',
'Michigan Technological University',
'New Mexico Institute of Mining and Technology',
'University of Massachusetts Lowell',
'Louisiana Tech University',
'Massachusetts Institute of Technology',
'California Institute of Technology',
'Carnegie Mellon University',
'Rensselaer Polytechnic Institute',
'Georgia Institute of Technology',
'Virginia Tech',
'Texas Tech University',
'Princeton University',
'The College of New Jersey',
'Rutgers University-New Brunswick',
'Stevens Institute of Technology',
'Montclair State University',
'Seton Hall University',
'Rowan University'
]
## ENTER URLS TO CAPTURE HERE
urls =['https://www.forbes.com/colleges/illinois-institute-of-technology/',
'https://www.forbes.com/colleges/colorado-school-of-mines/',
'https://www.forbes.com/colleges/case-western-reserve-university/',
'https://www.forbes.com/colleges/northeastern-university/',
'https://www.forbes.com/colleges/new-jersey-institute-of-technology/',
'https://www.forbes.com/colleges/the-university-of-texas-at-dallas/',
'https://www.forbes.com/colleges/university-of-maryland-baltimore-county/',
'https://www.forbes.com/colleges/missouri-university-of-science-and-technology/',
'https://www.forbes.com/colleges/michigan-technological-university/',
'https://www.forbes.com/colleges/new-mexico-institute-of-mining-and-technology/',
'https://www.forbes.com/colleges/university-of-massachusetts-lowell/',
'https://www.forbes.com/colleges/louisiana-tech-university/',
'https://www.forbes.com/colleges/massachusetts-institute-of-technology/',
'https://www.forbes.com/colleges/california-institute-of-technology/',
'https://www.forbes.com/colleges/carnegie-mellon-university/',
'https://www.forbes.com/colleges/rensselaer-polytechnic-institute/',
'https://www.forbes.com/colleges/georgia-institute-of-technology-main-campus/',
'https://www.forbes.com/colleges/virginia-polytechnic-institute-and-state-university/',
'https://www.forbes.com/colleges/texas-tech-university/',
'https://www.forbes.com/colleges/princeton-university/',
'https://www.forbes.com/colleges/the-college-of-new-jersey/',
'https://www.forbes.com/colleges/rutgers-university-new-brunswick/',
'https://www.forbes.com/colleges/stevens-institute-of-technology/',
'https://www.forbes.com/colleges/montclair-state-university/',
'https://www.forbes.com/colleges/seton-hall-university/',
'https://www.forbes.com/colleges/rowan-university/'
]
rank =[]
rankname =[]
college_list =[]
for idx, college in enumerate(colleges):
page = urllib2.urlopen(urls[idx])
tidy = BeautifulSoup(page, 'lxml')
div = tidy.find('div', {'class':'forbeslists fright'})
li = div.findAll('li')
div1 = tidy.findAll('div', {'class':'rankonlist'})
for d in div1:
rankname.append('Forbes - ' + str(d.text.split(' ', 1)[1]).strip('\" \n'))
rank.append(str(d.text.split(' ', 1)[0]).strip (' \n#'))
college_list.append(colleges[idx])
for l in li:
if ':' not in l.text:
rank.append(str(l.text.split('in')[0]).strip(' \n#'))
rankname.append('Forbes - ' + 'Top Colleges - ' + str(l.text.split('in')[1]).strip(' \n#'))
college_list.append(colleges[idx])
else:
rankname.append('Forbes - ' + str(l.text.split(':')[0]).strip('\" \n'))
rank.append(str(l.text.split(':')[1]).strip(' \n'))
college_list.append(colleges[idx])
print(colleges[idx] + ' - Data captured successfully')
df = pd.DataFrame(list(zip(rank, rankname, college_list)))
df.columns = ['Rank', 'Ranking Name', 'School']
#print(df)
#append the metadata to the df
df = DEMO_append_metadata(df, ranking_year, publication_date, ranking_scope, primary_ranking)
## export data to a csv
DEMO_export_to_csv(df, ranking, ranking_year)
    return df
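# Illustrative invocation (hypothetical values; requires Chrome/chromedriver and
# the DemoFunctions_GitHub helpers to be importable). All keyword arguments map
# to the kwargs read at the top of GetRankings:
#
#     GetRankings('PayScale_College_Salary_Report',
#                 limit=10,
#                 ranking_year=2018,
#                 publication_month=8,
#                 publication_day=29,
#                 ranking_scope='National',
#                 primary_ranking=1)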
| 43.307692
| 128
| 0.576718
|
7a416fadc03b039aeeb53260ace412577993da89
| 1,393
|
py
|
Python
|
test.py
|
MenesesGHZ/KNN-python
|
db9cc4eb4e11a068ddb639664a9c8d1df548c308
|
[
"MIT"
] | null | null | null |
test.py
|
MenesesGHZ/KNN-python
|
db9cc4eb4e11a068ddb639664a9c8d1df548c308
|
[
"MIT"
] | null | null | null |
test.py
|
MenesesGHZ/KNN-python
|
db9cc4eb4e11a068ddb639664a9c8d1df548c308
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
from knn import KNN
from graphs import display_knn_test_graph
def test():
# Dataset -> Labeled Examples
X = np.array([
[1, 3.6], #blue
[2.2, 6.4], #blue
[3.3, 0.8], #blue
[3.8, 7.4], #orange
[4.3, 5.1], #blue
[5.6, 9.2], #orange
[5.6, 2.8], #orange
[6.8, 8.2], #orange
[6.8, 1.8], #orange
[7.9, 8.2], #blue
[7.9, 3.7], #orange
[9.1, 7.3] #blue
])
Y = np.array([[1,1,1,0,1,0,0,0,0,1,0,1]]).T
# Loading Model
euclidean_norm = lambda v1,v2: math.sqrt(sum((v1-v2)**2))
knn_model = KNN(X,Y,norm=euclidean_norm,k=5)
# Test Data
interval = (0,10)
random_points = np.array([
np.random.uniform(*interval,2),
np.random.uniform(*interval,2),
np.random.uniform(*interval,2)
])
# Predictions
predictions = knn_model.predict(random_points)
# Display Results with Plotly
category_names = ["orange","blue"]
category_colors = {"orange":"rgba(255,165,0,0.8)",
"blue":"rgba(0, 255, 255,0.9)",
"blue_pred":"rgba(0,0,255,0.75)",
"orange_pred":"rgba(255,70,0,0.8)"}
display_knn_test_graph(X,Y,random_points,predictions,category_names,category_colors)
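# Possible variation (not in the original file): since KNN accepts any callable
# norm, a Manhattan distance can be swapped in exactly like the euclidean norm
# above, e.g. inside test():
#
#     manhattan_norm = lambda v1, v2: float(sum(abs(v1 - v2)))
#     knn_model = KNN(X, Y, norm=manhattan_norm, k=5)
#
# Note the module never calls test() itself; run it explicitly, for example via
# an `if __name__ == "__main__": test()` guard.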
| 29.020833
| 88
| 0.511127
|
241c62827da27889eecf1131b9959b6f886d28fa
| 1,851
|
py
|
Python
|
ooobuild/cssdyn/xml/__init__.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/cssdyn/xml/__init__.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/cssdyn/xml/__init__.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ...dyn.xml.attribute import Attribute as Attribute
from ...dyn.xml.attribute_container import AttributeContainer as AttributeContainer
from ...dyn.xml.attribute_data import AttributeData as AttributeData
from ...dyn.xml.export_filter import ExportFilter as ExportFilter
from ...dyn.xml.fast_attribute import FastAttribute as FastAttribute
from ...dyn.xml.import_filter import ImportFilter as ImportFilter
from ...dyn.xml.namespace_container import NamespaceContainer as NamespaceContainer
from ...dyn.xml.para_user_defined_attributes_supplier import ParaUserDefinedAttributesSupplier as ParaUserDefinedAttributesSupplier
from ...dyn.xml.text_user_defined_attributes_supplier import TextUserDefinedAttributesSupplier as TextUserDefinedAttributesSupplier
from ...dyn.xml.user_defined_attributes_supplier import UserDefinedAttributesSupplier as UserDefinedAttributesSupplier
from ...dyn.xml.x_export_filter import XExportFilter as XExportFilter
from ...dyn.xml.x_import_filter import XImportFilter as XImportFilter
from ...dyn.xml.x_import_filter2 import XImportFilter2 as XImportFilter2
from ...dyn.xml.xml_export_filter import XMLExportFilter as XMLExportFilter
from ...dyn.xml.xml_import_filter import XMLImportFilter as XMLImportFilter
| 57.84375
| 131
| 0.829281
|
0d09936106bf5b756aed5db0fb6884f9f6b195bb
| 285
|
py
|
Python
|
Util.py
|
CvvT/ElfParse
|
20f190a940ff2a48d80b05a282cfa0086c63ea75
|
[
"Apache-2.0"
] | 8
|
2015-11-19T01:51:51.000Z
|
2019-10-12T09:57:37.000Z
|
Util.py
|
CvvT/ElfParse
|
20f190a940ff2a48d80b05a282cfa0086c63ea75
|
[
"Apache-2.0"
] | null | null | null |
Util.py
|
CvvT/ElfParse
|
20f190a940ff2a48d80b05a282cfa0086c63ea75
|
[
"Apache-2.0"
] | 15
|
2015-09-19T19:56:41.000Z
|
2019-10-12T09:57:38.000Z
|
__author__ = 'CwT'
import struct
def getStrbyfd(file, addr):
file.seek(addr)
# print hex(addr)
    chars = []
    byte = struct.unpack("B", file.read(1))[0]
    while byte:
        chars.append(chr(byte))
        byte = struct.unpack("B", file.read(1))[0]
    return ''.join(chars)
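# Usage sketch (hypothetical file name and offset): read a NUL-terminated
# string, e.g. a symbol name from an ELF string table, starting at `addr`:
#
#     with open("libexample.so", "rb") as elf:
#         name = getStrbyfd(elf, 0x1a4)
#         print(name)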
| 21.923077
| 50
| 0.578947
|
caab6b4215b9980ad964992aa39b17c02d20ec70
| 3,076
|
py
|
Python
|
SQM_discreteness/utils.py
|
Ohyeon5/SQM_basis
|
a04662f1a4520128dd347b1e84d14717feb0655a
|
[
"Apache-2.0"
] | 1
|
2020-09-22T10:32:20.000Z
|
2020-09-22T10:32:20.000Z
|
utils.py
|
Ohyeon5/SQM_discreteness
|
4f44d4f3e15e834ed544df35c065b715f1f7ce92
|
[
"Apache-2.0"
] | 1
|
2021-08-23T06:59:46.000Z
|
2021-08-23T06:59:46.000Z
|
utils.py
|
Ohyeon5/SQM_discreteness
|
4f44d4f3e15e834ed544df35c065b715f1f7ce92
|
[
"Apache-2.0"
] | null | null | null |
# utilization functions
import numpy as np
import pandas as pd
import argparse
import configparser
import matplotlib.pyplot as plt
# read configuration from the config.ini files
def get_configs():
parser = argparse.ArgumentParser()
parser.add_argument("--config", dest="config", help="Configuration file used to run the script", required=True)
parser.add_argument("--model_name", type=str, default=None, help='network type')
parser.add_argument("-e", "--epochs", type=int, default=None, help='number of epochs')
args = parser.parse_args()
config = configparser.RawConfigParser(allow_no_value=True)
config.read(args.config)
# initialize parameter
param = dict()
param['device_name'] = config.get('Device','name',fallback='test_device')
# path
param['data_path'] = config.get('Path','data_path')
param['img_path'] = param['data_path'] + config.get('Path','img_path', fallback='20bn-jester-v1')
param['csv_labels'] = param['data_path'] + config.get('Path','csv_labels',fallback='jester-v1-labels.csv')
param['csv_train'] = param['data_path'] + config.get('Path','csv_train', fallback='jester-v1-train.csv')
param['csv_val'] = param['data_path'] + config.get('Path','csv_val', fallback='jester-v1-validation.csv')
param['csv_test'] = param['data_path'] + config.get('Path','csv_test', fallback='jester-v1-test.csv')
# model
param['model_name'] = config.get('Model','model_name', fallback='test_model')
param['model_path'] = config.get('Model','model_path', fallback='./saved_models/')
param['batch_size'] = config.getint('Model','batch_size',fallback=20)
param['epochs'] = config.getint('Model','epochs', fallback=300)
# Data
    param['labels'] = config.get('Data','labels').split(',')  # TODO: handle an empty value by using all labels
# if param['labels'] is 'all': use all the labels from csv_labels file
if 'all' in param['labels']:
param['labels'] = pd.read_csv(param['csv_labels'], index_col=False).values.squeeze().tolist()
param['skip'] = config.getint('Data','skip', fallback=2)
param['im_size'] = config.getint('Data','im_size',fallback=50)
# mode
param['train'] = config.getboolean('Mode','train', fallback=True)
param['test'] = config.getboolean('Mode','test' , fallback=False)
# overwrite experiment specific parameters
if args.model_name is not None:
param['model_name'] = args.model_name
if args.epochs is not None:
param['epochs'] = args.epochs
return param
def plot_grad_flow(named_parameters):
ave_grads = []
layers = []
for n, p in named_parameters:
if(p.requires_grad) and ("bias" not in n):
layers.append(n)
ave_grads.append(p.grad.abs().mean())
plt.plot(ave_grads, alpha=0.3, color="b")
plt.hlines(0, 0, len(ave_grads)+1, linewidth=1, color="k" )
plt.xticks(range(0,len(ave_grads), 1), layers, rotation="vertical")
plt.xlim(xmin=0, xmax=len(ave_grads))
plt.xlabel("Layers")
plt.ylabel("average gradient")
plt.title("Gradient flow")
plt.grid(True)
    plt.savefig('grad_flow.png')
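# Sketch of a config.ini that get_configs() expects. Section and key names are
# taken from the lookups above; paths and values are placeholders. Only
# Path/data_path and Data/labels are strictly required, the rest fall back to
# the defaults shown in the code.
#
#     [Device]
#     name = my_gpu_box
#     [Path]
#     data_path = /data/jester/
#     [Model]
#     model_name = conv_lstm
#     batch_size = 20
#     epochs = 300
#     [Data]
#     labels = all
#     skip = 2
#     im_size = 50
#     [Mode]
#     train = True
#     test = False
#
# Invoked from a training script (name is hypothetical), e.g.:
#     python train.py --config config.ini --model_name conv_lstm -e 100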
| 42.136986
| 112
| 0.68498
|
a0c9c5b42624f567a0a7e9328de03ae28ae55223
| 7,952
|
py
|
Python
|
spark_auto_mapper_fhir/value_sets/observation_interpretation_codes.py
|
imranq2/SparkAutoMapper.FHIR
|
dd23b218fb0097d1edc2f3e688e8d6d4d7278bd2
|
[
"Apache-2.0"
] | 1
|
2020-10-31T23:25:07.000Z
|
2020-10-31T23:25:07.000Z
|
spark_auto_mapper_fhir/value_sets/observation_interpretation_codes.py
|
icanbwell/SparkAutoMapper.FHIR
|
98f368e781b46523142c7cb513c670d659a93c9b
|
[
"Apache-2.0"
] | null | null | null |
spark_auto_mapper_fhir/value_sets/observation_interpretation_codes.py
|
icanbwell/SparkAutoMapper.FHIR
|
98f368e781b46523142c7cb513c670d659a93c9b
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class ObservationInterpretationCodesCode(GenericTypeCode):
"""
ObservationInterpretationCodes
From: http://hl7.org/fhir/ValueSet/observation-interpretation in valuesets.xml
A categorical assessment, providing a rough qualitative interpretation of the
observation value, such as “normal”/ “abnormal”,”low” / “high”, “better” /
“worse”, “susceptible” / “resistant”, “expected”/ “not expected”. The value
set is intended to be for ANY use where coded representation of an
interpretation is needed.
Notes:
This is being communicated in v2.x in OBX-8 (Observation Interpretation),
in v3 in ObservationInterpretation (CWE) in R1 (Representative Realm) and in
FHIR in Observation.interpretation. Historically these values come from the
laboratory domain, and these codes are extensively used. The value set
binding is extensible, so codes outside the value set that are needed for
interpretation concepts (i.e. particular meanings) that are not included in
the value set can be used, and these new codes may also be added to the
value set and published in a future version.
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://terminology.hl7.org/CodeSystem/v3-ObservationInterpretation
"""
codeset: FhirUri = (
"http://terminology.hl7.org/CodeSystem/v3-ObservationInterpretation"
)
class ObservationInterpretationCodesCodeValues:
"""
Codes that specify interpretation of genetic analysis, such as "positive",
"negative", "carrier", "responsive", etc.
From: http://terminology.hl7.org/CodeSystem/v3-ObservationInterpretation in v3-codesystems.xml
"""
GeneticObservationInterpretation = ObservationInterpretationCodesCode(
"_GeneticObservationInterpretation"
)
"""
Interpretations of change of quantity and/or severity. At most one of B or W
and one of U or D allowed.
From: http://terminology.hl7.org/CodeSystem/v3-ObservationInterpretation in v3-codesystems.xml
"""
ObservationInterpretationChange = ObservationInterpretationCodesCode(
"_ObservationInterpretationChange"
)
"""
Technical exceptions resulting in the inability to provide an interpretation.
At most one allowed. Does not imply normality or severity.
From: http://terminology.hl7.org/CodeSystem/v3-ObservationInterpretation in v3-codesystems.xml
"""
ObservationInterpretationExceptions = ObservationInterpretationCodesCode(
"_ObservationInterpretationExceptions"
)
"""
Interpretation of normality or degree of abnormality (including critical or
"alert" level). Concepts in this category are mutually exclusive, i.e., at
most one is allowed.
From: http://terminology.hl7.org/CodeSystem/v3-ObservationInterpretation in v3-codesystems.xml
"""
ObservationInterpretationNormality = ObservationInterpretationCodesCode(
"_ObservationInterpretationNormality"
)
"""
Interpretations of anti-microbial susceptibility testing results
(microbiology). At most one allowed.
From: http://terminology.hl7.org/CodeSystem/v3-ObservationInterpretation in v3-codesystems.xml
"""
ObservationInterpretationSusceptibility = ObservationInterpretationCodesCode(
"_ObservationInterpretationSusceptibility"
)
"""
The observation/test result is interpreted as being outside the inclusion
range for a particular protocol within which the result is being reported.
Example: A positive result on a Hepatitis screening
test.
Open Issue: EX, HX, LX: These three concepts do not
seem to meet a clear need in the vocabulary, and their use in observation
interpretation appears likely to be covered by other existing concepts (e.g.,
A, H, L). The only apparent significant difference is their reference to use
in protocols for exclusion of study subjects.
These concepts/codes were proposed by RCRIM for use in the CTLaboratory
message. They were submitted and approved in the November 2005 Harmonization
cycle in proposal "030103C_VOCAB_RCRIM_l_quade_RCRIM Obs
Interp_20051028154455". However, this proposal was not fully implemented in
the vocabulary. The proposal recommended creation of the
x_ClinicalResearchExclusion domain in ObservationInterpretation with a value
set including those three concepts/codes, but there is no subdomain of that
name or equivalent with a binding to either of the value sets that contain
these concepts/codes.
Members of the OO WG have recently attempted to contact members of RCRIM
regarding these concepts, both by email and at the recent WGM in Atlanta,
without response. It is felt by OO that the best course of action to take at
this time is to add this comprehensive Open Issue rather than deprecate these
three concepts at this time, until further discussion is held.
From: http://terminology.hl7.org/CodeSystem/v3-ObservationInterpretation in v3-codesystems.xml
"""
OutsideThreshold = ObservationInterpretationCodesCode("EX")
"""
Hold for Medical Review
Usage Note: This code is not intended for use in V3
artifacts. It is included in the code system to maintain alignment with the
V2 Table 0078 "Interpretation Codes."
From: http://terminology.hl7.org/CodeSystem/v3-ObservationInterpretation in v3-codesystems.xml
"""
HoldForMedicalReview = ObservationInterpretationCodesCode("HM")
"""
Interpretations of the presence or absence of a component / analyte or
organism in a test or of a sign in a clinical observation. In keeping with
laboratory data processing practice, these concepts provide a categorical
interpretation of the "meaning" of the quantitative value for the same
observation.
From: http://terminology.hl7.org/CodeSystem/v3-ObservationInterpretation in v3-codesystems.xml
"""
ObservationInterpretationDetection = ObservationInterpretationCodesCode(
"ObservationInterpretationDetection"
)
"""
Interpretation of the observed result taking into account additional
information (contraindicators) about the patient's situation. Concepts in this
category are mutually exclusive, i.e., at most one is allowed.
From: http://terminology.hl7.org/CodeSystem/v3-ObservationInterpretation in v3-codesystems.xml
"""
ObservationInterpretationExpectation = ObservationInterpretationCodesCode(
"ObservationInterpretationExpectation"
)
"""
Interpretation qualifiers in separate OBX segments
Usage Note: This code is not intended for use in V3
artifacts. It is included in the code system to maintain alignment with the
V2 Table 0078 "Interpretation Codes."
From: http://terminology.hl7.org/CodeSystem/v3-ObservationInterpretation in v3-codesystems.xml
"""
InterpretationQualifiersInSeparateOBXSegments = ObservationInterpretationCodesCode(
"OBX"
)
"""
Interpretations of the presence and level of reactivity of the specified
component / analyte with the reagent in the performed laboratory test.
From: http://terminology.hl7.org/CodeSystem/v3-ObservationInterpretation in v3-codesystems.xml
"""
ReactivityObservationInterpretation = ObservationInterpretationCodesCode(
"ReactivityObservationInterpretation"
)
| 47.903614
| 98
| 0.743587
|
c42b9176df1cc76071d99aff4b2a7e191dd7bb76
| 347
|
py
|
Python
|
tests/test_result.py
|
efremov-dma/elasticmagic
|
90a39153289a545bd14084b9ffb17b87121da405
|
[
"Apache-2.0"
] | 25
|
2015-04-27T12:43:12.000Z
|
2021-04-06T07:11:48.000Z
|
tests/test_result.py
|
efremov-dma/elasticmagic
|
90a39153289a545bd14084b9ffb17b87121da405
|
[
"Apache-2.0"
] | 41
|
2015-09-28T10:51:59.000Z
|
2021-07-23T16:09:19.000Z
|
tests/test_result.py
|
efremov-dma/elasticmagic
|
90a39153289a545bd14084b9ffb17b87121da405
|
[
"Apache-2.0"
] | 12
|
2015-08-28T17:30:02.000Z
|
2021-12-01T15:36:23.000Z
|
from elasticmagic import agg, types
from elasticmagic.result import SearchResult
def test_search_result_with_error_and_aggregations():
raw_result = {'error': True}
res = SearchResult(
raw_result,
aggregations={'types': agg.Terms(field='type', type=types.Integer)}
)
assert res.aggregations['types'].buckets == []
| 28.916667
| 75
| 0.708934
|
5c08b7981a94fbb7cee01547e252be61efd8b4b3
| 1,863
|
py
|
Python
|
2015/solutions/24.py
|
adtok/advent-of-code
|
df1f61759bd8f3bfd7995b7e2a124d7f6e97ba01
|
[
"MIT"
] | null | null | null |
2015/solutions/24.py
|
adtok/advent-of-code
|
df1f61759bd8f3bfd7995b7e2a124d7f6e97ba01
|
[
"MIT"
] | null | null | null |
2015/solutions/24.py
|
adtok/advent-of-code
|
df1f61759bd8f3bfd7995b7e2a124d7f6e97ba01
|
[
"MIT"
] | null | null | null |
"""Advent of Code 2015: Day 24"""
import itertools
import functools
from typing import Callable, List
def parse_input(input_file: str) -> List[int]:
"""Parses a file of line-separated integers into a list"""
with open(input_file, "r") as file:
data = list(map(int, file))
return data
def product(weights: List[int]) -> int:
"""Returns the product of a list of integers"""
return functools.reduce(lambda a, b: a * b, weights, 1)
def target_sum(weights: List[int], num_groups: int) -> int:
"""Determines the weight a group of presents needs to be"""
return sum(weights) // num_groups
def minimum_quantum_entanglement(weights: List[int], num_groups: int):
"""Determines the minimum quantum entanglement"""
num_presents = len(weights)
target = target_sum(weights, num_groups)
min_qe = float("inf")
for group_size in itertools.count(1):
found = False
for combination in itertools.combinations(weights, group_size):
if sum(combination) != target:
continue
found = True
quantum_entanglement = product(combination)
min_qe = min(min_qe, quantum_entanglement)
if found or group_size == num_presents:
break
result = min_qe
return result
def part_one(input_file: str) -> int:
weights = parse_input(input_file)
result = minimum_quantum_entanglement(weights, 3)
return result
def part_two(input_file: str) -> int:
weights = parse_input(input_file)
result = minimum_quantum_entanglement(weights, 4)
return result
def solve(func: Callable[[str], int]):
input_file = "data/24.solution"
result = func(input_file)
print(f"The solution for {func.__name__!r} is {result}")
def main():
solve(part_one)
solve(part_two)
if __name__ == "__main__":
main()
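# Worked example from the 2015 day-24 puzzle statement: packages 1-5 and 7-11
# split into three groups of weight 20 each; the smallest front group is
# {11, 9}, giving a quantum entanglement of 9 * 11 = 99.
#
#     >>> minimum_quantum_entanglement([1, 2, 3, 4, 5, 7, 8, 9, 10, 11], 3)
#     99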
| 26.239437
| 71
| 0.66452
|
dcb746f824588e134e0f944fa3ca64479a088790
| 7,537
|
py
|
Python
|
python/sqlflow_submitter/xgboost/explain.py
|
Kelang-Tian/sqlflow
|
36eab7802cff31d862c1983d23407647339fd18a
|
[
"Apache-2.0"
] | 2
|
2020-07-02T09:21:24.000Z
|
2021-07-20T03:29:12.000Z
|
python/sqlflow_submitter/xgboost/explain.py
|
Anxuy/sqlflow
|
400eff5f9d20dcf4af8ee5e940b078bfeca98197
|
[
"Apache-2.0"
] | null | null | null |
python/sqlflow_submitter/xgboost/explain.py
|
Anxuy/sqlflow
|
400eff5f9d20dcf4af8ee5e940b078bfeca98197
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import shap
import six
import xgboost as xgb
from sqlflow_submitter import db, explainer
def xgb_shap_dataset(datasource,
select,
feature_column_names,
label_spec,
feature_specs,
is_pai,
pai_explain_table,
transform_fn=None,
feature_column_code=""):
label_column_name = label_spec["feature_name"]
if is_pai:
pai_table_parts = pai_explain_table.split(".")
formatted_pai_table = "odps://%s/tables/%s" % (pai_table_parts[0],
pai_table_parts[1])
stream = db.pai_maxcompute_db_generator(formatted_pai_table,
feature_column_names,
label_column_name,
feature_specs)
selected_cols = db.pai_selected_cols(formatted_pai_table)
else:
conn = db.connect_with_data_source(datasource)
stream = db.db_generator(conn.driver, conn, select,
feature_column_names, label_spec,
feature_specs)
selected_cols = db.selected_cols(conn.driver, conn, select)
if transform_fn:
column_names = transform_fn.get_column_names()
else:
column_names = feature_column_names
# NOTE(sneaxiy): pandas.DataFrame does not support Tensor whose rank is larger than 2.
# But `INDICATOR` would generate one hot vector for each element, and pandas.DataFrame
# would not accept `INDICATOR` results as its input. In a word, we do not support
# `TO EXPLAIN` when using `INDICATOR`.
xs = pd.DataFrame(columns=column_names)
dtypes = []
i = 0
for row, label in stream():
features = db.read_features_from_row(row, selected_cols,
feature_column_names,
feature_specs)
if transform_fn:
features = transform_fn(features)
# TODO(sneaxiy): support sparse features in `TO EXPLAIN`
features = [item[0] for item in features]
xs.loc[i] = features
if i == 0:
for f in features:
if isinstance(f, np.ndarray):
if f.dtype == np.float32 or f.dtype == np.float64:
dtypes.append('float32')
elif f.dtype == np.int32 or f.dtype == np.int64:
dtypes.append('int64')
else:
raise ValueError('Not supported data type {}'.format(
f.dtype))
elif isinstance(f, (np.float32, np.float64, float)):
dtypes.append('float32')
elif isinstance(f, (np.int32, np.int64, six.integer_types)):
dtypes.append('int64')
else:
raise ValueError('Not supported data type {}'.format(
type(f)))
i += 1
# NOTE(typhoonzero): set dtype to the feature's actual type, or the dtype
# may be "object". Use below code to reproduce:
# import pandas as pd
# feature_column_names=["a", "b"]
# xs = pd.DataFrame(columns=feature_column_names)
# for i in range(10):
# xs.loc[i] = [int(j) for j in range(2)]
# print(xs.dtypes)
for dtype, name in zip(dtypes, column_names):
xs[name] = xs[name].astype(dtype)
return xs
def xgb_shap_values(x):
bst = xgb.Booster()
bst.load_model("my_model")
explainer = shap.TreeExplainer(bst)
return explainer.shap_values(x), explainer.shap_interaction_values(
x), explainer.expected_value
def explain(datasource,
select,
feature_field_meta,
feature_column_names,
label_spec,
summary_params,
result_table="",
is_pai=False,
pai_explain_table="",
hdfs_namenode_addr="",
hive_location="",
hdfs_user="",
hdfs_pass="",
oss_dest=None,
oss_ak=None,
oss_sk=None,
oss_endpoint=None,
oss_bucket_name=None,
transform_fn=None,
feature_column_code=""):
x = xgb_shap_dataset(datasource,
select,
feature_column_names,
label_spec,
feature_field_meta,
is_pai,
pai_explain_table,
transform_fn=transform_fn,
feature_column_code=feature_column_code)
shap_values, shap_interaction_values, expected_value = xgb_shap_values(x)
if result_table != "":
if is_pai:
# TODO(typhoonzero): the shape of shap_values is (3, num_samples, num_features)
# use the first dimension here, should find out how to use the other two.
write_shap_values(shap_values[0], "pai_maxcompute", None,
result_table, feature_column_names,
hdfs_namenode_addr, hive_location, hdfs_user,
hdfs_pass)
else:
            conn = db.connect_with_data_source(datasource)
write_shap_values(shap_values[0], conn.driver, conn, result_table,
feature_column_names, hdfs_namenode_addr,
hive_location, hdfs_user, hdfs_pass)
return
if summary_params.get("plot_type") == "decision":
explainer.plot_and_save(
lambda: shap.decision_plot(expected_value,
shap_interaction_values,
x,
show=False,
feature_display_range=slice(
None, -40, -1),
alpha=1), is_pai, oss_dest, oss_ak,
oss_sk, oss_endpoint, oss_bucket_name)
else:
explainer.plot_and_save(
lambda: shap.summary_plot(
shap_values, x, show=False, **summary_params), is_pai,
oss_dest, oss_ak, oss_sk, oss_endpoint, oss_bucket_name)
def write_shap_values(shap_values, driver, conn, result_table,
feature_column_names, hdfs_namenode_addr, hive_location,
hdfs_user, hdfs_pass):
with db.buffered_db_writer(driver, conn, result_table,
feature_column_names, 100, hdfs_namenode_addr,
hive_location, hdfs_user, hdfs_pass) as w:
for row in shap_values:
w.write(list(row))
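# Standalone sketch (assumptions: a trained booster saved as "my_model" in the
# working directory, and a feature DataFrame `x` such as the one produced by
# xgb_shap_dataset):
#
#     shap_values, shap_interaction_values, expected_value = xgb_shap_values(x)
#     # shap_values holds one row of per-feature contributions per sample in x.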
| 40.740541
| 91
| 0.551546
|
2cea00cdafc40a96424e3b0f137007f7cc06e2a2
| 1,871
|
py
|
Python
|
app/core/admin.py
|
tusharkhatiwada/swim-app-api
|
23050f86275569ab032a287c50268b6c641fbe0e
|
[
"MIT"
] | null | null | null |
app/core/admin.py
|
tusharkhatiwada/swim-app-api
|
23050f86275569ab032a287c50268b6c641fbe0e
|
[
"MIT"
] | 1
|
2019-08-20T17:40:23.000Z
|
2019-08-20T17:40:23.000Z
|
app/core/admin.py
|
tusharkhatiwada/swim-app-api
|
23050f86275569ab032a287c50268b6c641fbe0e
|
[
"MIT"
] | 2
|
2019-10-24T07:31:22.000Z
|
2019-10-25T08:39:36.000Z
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import Group
from rest_framework.authtoken.models import Token
from django.utils.translation import gettext as _
from core import models
@admin.register(models.User)
class UserAdmin(BaseUserAdmin):
ordering = ['id']
list_display = ['email', 'name']
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal Info'), {'fields': ('name',)}),
(
_('Permissions'),
{
'fields': (
'is_active',
'is_staff',
'is_superuser',
)
}
),
(_('Important dates'), {'fields': ('last_login',)}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2')
}),
)
class InlineCompetition(admin.TabularInline):
model = models.Competition
extra = 3
@admin.register(models.Swimmer)
class SwimmerAdmin(admin.ModelAdmin):
inlines = [InlineCompetition]
ordering = ['id']
list_display = (
'thumbnail',
'user', 'age', 'city_of_birth', 'fathers_name', 'mothers_name',
'country', 'city', 'state', 'max_heart_rate', 'rest_heart_rate',
'height_in_cm', 'weight_in_pound', 'distance', 'stroke_rate',
'main_stroke', 'school', 'phone_no')
list_filter = ('city', 'state')
search_fields = ['city', 'school', 'fathers_name']
list_display_links = ['user']
@admin.register(models.Game)
class GamesAdmin(admin.ModelAdmin):
list_display = (
'name', 'start_date', 'end_date',
)
admin.site.site_header = 'Swim App'
admin.site.site_title = 'Swim App'
admin.site.unregister(Group)
admin.site.unregister(Token)
| 26.728571
| 72
| 0.590593
|
afd58db5d410a62f537d6b673687eec09ed7ac61
| 5,346
|
py
|
Python
|
modin/engines/dask/pandas_on_dask/frame/partition_manager.py
|
ienkovich/modin
|
b7e1188f7ba01e3a313996e092a65b63f6b76fa0
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-05-19T04:01:17.000Z
|
2021-05-19T04:01:17.000Z
|
modin/engines/dask/pandas_on_dask/frame/partition_manager.py
|
ienkovich/modin
|
b7e1188f7ba01e3a313996e092a65b63f6b76fa0
|
[
"ECL-2.0",
"Apache-2.0"
] | 57
|
2021-01-22T15:52:03.000Z
|
2021-06-12T18:22:04.000Z
|
modin/engines/dask/pandas_on_dask/frame/partition_manager.py
|
ienkovich/modin
|
b7e1188f7ba01e3a313996e092a65b63f6b76fa0
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-01-29T12:12:42.000Z
|
2022-01-29T12:12:42.000Z
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import numpy as np
from modin.engines.base.frame.partition_manager import BaseFrameManager
from .axis_partition import (
PandasOnDaskFrameColumnPartition,
PandasOnDaskFrameRowPartition,
)
from .partition import PandasOnDaskFramePartition
from modin.error_message import ErrorMessage
import pandas
from distributed.client import _get_global_client
import cloudpickle as pkl
def deploy_func(df, apply_func, call_queue_df=None, call_queues_other=None, *others):
if call_queue_df is not None and len(call_queue_df) > 0:
for call, kwargs in call_queue_df:
if isinstance(call, bytes):
call = pkl.loads(call)
if isinstance(kwargs, bytes):
kwargs = pkl.loads(kwargs)
df = call(df, **kwargs)
new_others = np.empty(shape=len(others), dtype=object)
for i, call_queue_other in enumerate(call_queues_other):
other = others[i]
if call_queue_other is not None and len(call_queue_other) > 0:
for call, kwargs in call_queue_other:
if isinstance(call, bytes):
call = pkl.loads(call)
if isinstance(kwargs, bytes):
kwargs = pkl.loads(kwargs)
other = call(other, **kwargs)
new_others[i] = other
if isinstance(apply_func, bytes):
apply_func = pkl.loads(apply_func)
return apply_func(df, new_others)
class DaskFrameManager(BaseFrameManager):
"""This class implements the interface in `BaseFrameManager`."""
# This object uses RayRemotePartition objects as the underlying store.
_partition_class = PandasOnDaskFramePartition
_column_partitions_class = PandasOnDaskFrameColumnPartition
_row_partition_class = PandasOnDaskFrameRowPartition
@classmethod
def get_indices(cls, axis, partitions, index_func):
"""
This gets the internal indices stored in the partitions.
Parameters
----------
axis : 0 or 1
This axis to extract the labels (0 - index, 1 - columns).
partitions : NumPy array
The array of partitions from which need to extract the labels.
index_func : callable
            The function to be used to extract the labels.
Returns
-------
Index
A Pandas Index object.
Notes
-----
These are the global indices of the object. This is mostly useful
when you have deleted rows/columns internally, but do not know
which ones were deleted.
"""
client = _get_global_client()
ErrorMessage.catch_bugs_and_request_email(not callable(index_func))
func = cls.preprocess_func(index_func)
if axis == 0:
# We grab the first column of blocks and extract the indices
new_idx = (
[idx.apply(func).future for idx in partitions.T[0]]
if len(partitions.T)
else []
)
else:
new_idx = (
[idx.apply(func).future for idx in partitions[0]]
if len(partitions)
else []
)
new_idx = client.gather(new_idx)
return new_idx[0].append(new_idx[1:]) if len(new_idx) else new_idx
@classmethod
def broadcast_apply(cls, axis, apply_func, left, right, other_name="r"):
def mapper(df, others):
other = pandas.concat(others, axis=axis ^ 1)
return apply_func(df, **{other_name: other})
client = _get_global_client()
return np.array(
[
[
PandasOnDaskFramePartition(
client.submit(
deploy_func,
part.future,
mapper,
part.call_queue,
[obj[col_idx].call_queue for obj in right]
if axis
else [obj.call_queue for obj in right[row_idx]],
*(
[obj[col_idx].future for obj in right]
if axis
else [obj.future for obj in right[row_idx]]
),
pure=False,
)
)
for col_idx, part in enumerate(left[row_idx])
]
for row_idx in range(len(left))
]
)
| 39.021898
| 87
| 0.589974
|
c6f72e4d7a4123c914394056c2c541f68d3c22c7
| 3,537
|
py
|
Python
|
components/google-cloud/tests/experimental/custom_job/integration/test_custom_job_complie.py
|
ryansteakley/pipelines
|
98677b2190fb327be68e4bb0d00c520593707f21
|
[
"Apache-2.0"
] | 1
|
2020-05-29T06:45:46.000Z
|
2020-05-29T06:45:46.000Z
|
components/google-cloud/tests/experimental/custom_job/integration/test_custom_job_complie.py
|
ryansteakley/pipelines
|
98677b2190fb327be68e4bb0d00c520593707f21
|
[
"Apache-2.0"
] | null | null | null |
components/google-cloud/tests/experimental/custom_job/integration/test_custom_job_complie.py
|
ryansteakley/pipelines
|
98677b2190fb327be68e4bb0d00c520593707f21
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test google-cloud-pipeline-Components to ensure the compile without error."""
import json
import os
import unittest
import kfp
from kfp import components
from kfp.v2 import compiler
from google_cloud_pipeline_components.experimental.custom_job import custom_job
class CustomJobCompileTest(unittest.TestCase):
def setUp(self):
super(CustomJobCompileTest, self).setUp()
self._project = "test_project"
self._location = "us-central1"
self._test_input_string = "test_input_string"
self._package_path = "pipeline.json"
self._container_component = components.load_component_from_text(
"name: Producer\n"
"inputs:\n"
"- {name: input_text, type: String, description: 'Represents an input parameter.'}\n"
"outputs:\n"
"- {name: output_value, type: String, description: 'Represents an output paramter.'}\n"
"implementation:\n"
" container:\n"
" image: google/cloud-sdk:latest\n"
" command:\n"
" - sh\n"
" - -c\n"
" - |\n"
" set -e -x\n"
" echo '$0, this is an output parameter' | gsutil cp - '$1'\n"
" - {inputValue: input_text}\n"
" - {outputPath: output_value}\n")
self._python_componeont = self._create_a_pytnon_based_component()
def tearDown(self):
if os.path.exists(self._package_path):
os.remove(self._package_path)
def _create_a_pytnon_based_component(self) -> callable:
"""Creates a test python based component factory."""
@kfp.v2.dsl.component
def sum_numbers(a: int, b: int) -> int:
return a + b
return sum_numbers
def test_container_based_custom_job_op_compile(self):
custom_job_op = custom_job.custom_training_job_op(
self._container_component)
@kfp.dsl.pipeline(name="training-test")
def pipeline():
custom_job_task = custom_job_op(
self._test_input_string,
project=self._project,
location=self._location)
compiler.Compiler().compile(
pipeline_func=pipeline, package_path=self._package_path)
with open(self._package_path) as f:
executor_output_json = json.load(f, strict=False)
with open(
os.path.join(
os.path.dirname(__file__),
'../testdata/custom_job_container_component_pipeline.json')
) as ef:
expected_executor_output_json = json.load(ef, strict=False)
        # Ignore the kfp SDK & schema version during comparison
del executor_output_json['pipelineSpec']['sdkVersion']
del executor_output_json['pipelineSpec']['schemaVersion']
self.assertEqual(executor_output_json, expected_executor_output_json)
| 37.62766
| 99
| 0.642635
|
f7319d88946b78f26293dd74a23f8a820010f76c
| 4,775
|
py
|
Python
|
.environment/lib/python3.8/site-packages/docplex/mp/engine_factory.py
|
LuisMi1245/QPath-and-Snakes
|
48f784da67d9720c955890a28543c9863e02a455
|
[
"MIT"
] | null | null | null |
.environment/lib/python3.8/site-packages/docplex/mp/engine_factory.py
|
LuisMi1245/QPath-and-Snakes
|
48f784da67d9720c955890a28543c9863e02a455
|
[
"MIT"
] | null | null | null |
.environment/lib/python3.8/site-packages/docplex/mp/engine_factory.py
|
LuisMi1245/QPath-and-Snakes
|
48f784da67d9720c955890a28543c9863e02a455
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2015, 2016
# --------------------------------------------------------------------------
# gendoc: ignore
from docplex.mp.engine import NoSolveEngine, ZeroSolveEngine
from docplex.mp.utils import is_string
from docplex.mp.error_handler import docplex_fatal
class EngineFactory(object):
""" A factory class that manages creation of solver instances.
"""
_default_engine_map = {"nosolve": NoSolveEngine,
"zero": ZeroSolveEngine
}
def __init__(self, env=None):
self._engine_types_by_agent = self._default_engine_map.copy()
# no cplex engine type yet?
if env is not None:
self._resolve_cplex(env)
def _get_engine_type_from_agent(self, agent, default_engine, default_engine_name):
if agent is None:
return default_engine
elif is_string(agent):
agent_key = agent.lower()
engine_type = self._engine_types_by_agent.get(agent_key)
if engine_type:
return engine_type
elif 'cplex' == agent_key:
print('* warning: CPLEX runtime not found in path, using {0} instead'.format(default_engine_name))
return self._engine_types_by_agent.get(default_engine_name)
elif '.' in agent:
# assuming a qualified name, e.g. com.ibm.docplex.quantum.QuantumEngine
from docplex.mp.internal.mloader import import_class
try:
agent_class = import_class(agent)
return agent_class
except ValueError as ve:
print(
"Cannot load agent class {0}, expecting 'cplex' or valid class path, error: {1}".format(
agent, str(ve)))
raise ve
else:
docplex_fatal("Unexpected agent name: {0}, expecting 'cplex' or valid class path", agent)
else:
# try a class type
try:
# noinspection PyUnresolvedReferences
from inspect import isclass
if isclass(agent):
return agent
except ImportError:
if type(agent) == type:
return agent
# agent cannot be mapped to any class.
docplex_fatal("* unexpected agent: {0!r} -expecting 'cplex', class or class name", agent)
def _is_cplex_resolved(self):
return hasattr(self, "_cplex_engine_type")
def _resolve_cplex(self, env):
# INTERNAL
if env is None:
docplex_fatal("need an environment to resolve cplex, got None")
if not self._is_cplex_resolved():
if env.has_cplex:
env.check_cplex_version()
from docplex.mp.cplex_engine import CplexEngine
self._cplex_engine_type = CplexEngine
# noinspection PyTypeChecker
self._engine_types_by_agent["cplex"] = CplexEngine
else:
self._cplex_engine_type = None
def _ensure_cplex_resolved(self, env):
if not self._is_cplex_resolved():
self._resolve_cplex(env)
assert self._is_cplex_resolved()
def new_engine(self, agent, env, model, context=None):
self._ensure_cplex_resolved(env)
# compute a default engine and kwargs to use..
kwargs = {}
if self._cplex_engine_type:
# default is CPLEX if we have it
default_engine_type = self._cplex_engine_type
default_engine_name = 'cplex'
else:
default_engine_type = NoSolveEngine
default_engine_name = 'nosolve'
if context is not None:
kwargs['context'] = context
engine_type = self._get_engine_type_from_agent(agent=agent,
default_engine=default_engine_type,
default_engine_name=default_engine_name)
assert engine_type is not None
try:
return engine_type(model, **kwargs)
except TypeError:
docplex_fatal("agent: {0!s} failed to create instance from model, kwargs.", agent)
def extend(self, new_agent, new_engine):
# INTERNAL
assert new_engine is not None
self._engine_types_by_agent[new_agent] = new_engine
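# --- Usage sketch (not part of the original module) ---------------------------
# A minimal illustration of the factory: register an extra agent name and ask
# for an engine under that name. It assumes docplex.mp.environment.Environment
# and docplex.mp.model.Model are importable; they are not shown in this file.
if __name__ == '__main__':
    from docplex.mp.environment import Environment
    from docplex.mp.model import Model
    env = Environment()
    factory = EngineFactory(env=env)
    factory.extend('myzero', ZeroSolveEngine)        # register a custom agent name
    engine = factory.new_engine('myzero', env, Model(name='demo'))
    print(type(engine).__name__)                     # expected: ZeroSolveEngine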
| 39.46281
| 115
| 0.553927
|
146aa1e4089e245131e6204d9835b10f12c57304
| 347
|
py
|
Python
|
XiuxiuService/AliSDK/top/api/rest/AlibabaAliqinFcIotCardofferRequest.py
|
nightHearter/XiuxiuService
|
281c2d5eef85936edcd0d9ec97c8d165078f444c
|
[
"MIT"
] | null | null | null |
XiuxiuService/AliSDK/top/api/rest/AlibabaAliqinFcIotCardofferRequest.py
|
nightHearter/XiuxiuService
|
281c2d5eef85936edcd0d9ec97c8d165078f444c
|
[
"MIT"
] | null | null | null |
XiuxiuService/AliSDK/top/api/rest/AlibabaAliqinFcIotCardofferRequest.py
|
nightHearter/XiuxiuService
|
281c2d5eef85936edcd0d9ec97c8d165078f444c
|
[
"MIT"
] | null | null | null |
'''
Created by auto_sdk on 2017.04.21
'''
from top.api.base import RestApi
class AlibabaAliqinFcIotCardofferRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.billreal = None
self.billsource = None
def getapiname(self):
return 'alibaba.aliqin.fc.iot.cardoffer'
| 26.692308
| 56
| 0.731988
|
243832ce8da6f53e18b0f5f7c2b15bd3ceb02ebc
| 1,268
|
py
|
Python
|
numba/tests/test_unsigned_arith.py
|
liuzhenhai/numba
|
855a2b262ae3d82bd6ac1c3e1c0acb36ee2e2acf
|
[
"BSD-2-Clause"
] | 1
|
2015-01-29T06:52:36.000Z
|
2015-01-29T06:52:36.000Z
|
numba/tests/test_unsigned_arith.py
|
shiquanwang/numba
|
a41c85fdd7d6abf8ea1ebe9116939ddc2217193b
|
[
"BSD-2-Clause"
] | null | null | null |
numba/tests/test_unsigned_arith.py
|
shiquanwang/numba
|
a41c85fdd7d6abf8ea1ebe9116939ddc2217193b
|
[
"BSD-2-Clause"
] | null | null | null |
import numpy as np
import unittest
from numba import void, int32, uint32, jit, int64
@jit(void(uint32[:], uint32, uint32))
def prng(X, A, C):
for i in range(X.shape[0]):
for j in range(100):
v = (A * X[i] + C)
X[i] = v & 0xffffffff
@jit(uint32())
def unsigned_literal():
return abs(0xFFFFFFFF)
@jit(int64())
def unsigned_literal_64():
return 0x100000000
@jit(int64(int32))
def constant_int_add(a):
return 0xffffffff + a
class Test(unittest.TestCase):
def test_prng(self):
N = 100
A = 1664525
C = 1013904223
X0 = np.arange(N, dtype=np.uint32)
X1 = X0.copy()
prng.py_func(X0, A, C)
prng(X1, A, C)
self.assertTrue(np.all(X1 >= 0))
self.assertTrue(np.all(X0 == X1))
def test_unsigned_literal(self):
got = unsigned_literal()
expect = abs(0xFFFFFFFF)
self.assertEqual(expect, got)
def test_unsigned_literal_64(self):
got = unsigned_literal_64()
expect = 0x100000000
self.assertEqual(expect, got)
def test_constant_int_add(self):
got = constant_int_add(1)
expect = 0xffffffff + 1
self.assertEqual(expect, got)
if __name__ == '__main__':
unittest.main()
| 23.481481
| 49
| 0.601735
|
6d2c44d8c0e7d6cdfbfa8ab703f917b2a8871b7f
| 337
|
py
|
Python
|
coursework2/task1/mapper.py
|
foundnet/UOE_EP_coursework1
|
25ab8ff2beaa4fbde5a0d4519abb84d8e43cf9d8
|
[
"Apache-2.0"
] | null | null | null |
coursework2/task1/mapper.py
|
foundnet/UOE_EP_coursework1
|
25ab8ff2beaa4fbde5a0d4519abb84d8e43cf9d8
|
[
"Apache-2.0"
] | null | null | null |
coursework2/task1/mapper.py
|
foundnet/UOE_EP_coursework1
|
25ab8ff2beaa4fbde5a0d4519abb84d8e43cf9d8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import sys
import os
file_name = os.environ["mapreduce_map_input_file"]
file_name = file_name[file_name.rfind("/")+1:]
for line in sys.stdin:
line = line.strip()
token_list = line.split(" ")
for item in token_list:
if item.strip() != "":
print item.strip() + "\t" + file_name + "\t1"
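# --- Usage sketch (not part of the original mapper) ----------------------------
# Hadoop Streaming sets mapreduce_map_input_file and pipes each input split to
# stdin. An illustrative invocation (jar name and paths are placeholders):
#   hadoop jar hadoop-streaming.jar -input /data/in -output /data/out \
#       -mapper mapper.py -reducer reducer.py -file mapper.py
# A quick local check can fake the environment variable:
#   echo "to be or not to be" | \
#       mapreduce_map_input_file=/data/hamlet.txt python mapper.py
# which emits one "<token>\thamlet.txt\t1" line per token.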
| 22.466667
| 57
| 0.620178
|
4b3645c2bd6c51656ff5aa50a95b177164be0c94
| 47
|
py
|
Python
|
run.py
|
mdaue/sanic_scaffolding
|
70a5f3423f92d2377cddea04836f5107d0503e77
|
[
"MIT"
] | 3
|
2017-04-06T14:06:49.000Z
|
2020-02-23T21:10:57.000Z
|
run.py
|
mdaue/sanic_scaffolding
|
70a5f3423f92d2377cddea04836f5107d0503e77
|
[
"MIT"
] | null | null | null |
run.py
|
mdaue/sanic_scaffolding
|
70a5f3423f92d2377cddea04836f5107d0503e77
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sanic_scaffolding
| 15.666667
| 24
| 0.808511
|
efe54d25a0348dae43c095647fa90b313a6c8c60
| 4,921
|
py
|
Python
|
third_party/nucleus/io/fastq.py
|
ruif2009/deepvariant
|
c7fd07016577c253f81ef253aed65c416e4c0ef7
|
[
"BSD-3-Clause"
] | null | null | null |
third_party/nucleus/io/fastq.py
|
ruif2009/deepvariant
|
c7fd07016577c253f81ef253aed65c416e4c0ef7
|
[
"BSD-3-Clause"
] | null | null | null |
third_party/nucleus/io/fastq.py
|
ruif2009/deepvariant
|
c7fd07016577c253f81ef253aed65c416e4c0ef7
|
[
"BSD-3-Clause"
] | 1
|
2022-02-03T21:54:57.000Z
|
2022-02-03T21:54:57.000Z
|
# Copyright 2018 Google Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Class for reading FASTQ files.
API for reading:
with FastqReader(input_path) as reader:
for record in reader:
print(record)
API for writing:
with FastqWriter(output_path) as writer:
for record in records:
writer.write(record)
where `record` is a nucleus.genomics.v1.FastqRecord protocol buffer.
If the path contains '.tfrecord' as an extension, a TFRecord file is
assumed. Otherwise, it is treated as a true FASTQ file. In either case,
an extension of '.gz' will cause the file to be treated as compressed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from third_party.nucleus.io import genomics_reader
from third_party.nucleus.io import genomics_writer
from third_party.nucleus.io.python import fastq_reader
from third_party.nucleus.io.python import fastq_writer
from third_party.nucleus.protos import fastq_pb2
class NativeFastqReader(genomics_reader.GenomicsReader):
"""Class for reading from native FASTQ files.
Most users will want to use FastqReader instead, because it dynamically
dispatches between reading native FASTQ files and TFRecord files based on the
filename's extension.
"""
def __init__(self, input_path):
"""Initializes a NativeFastqReader.
Args:
input_path: string. A path to a resource containing FASTQ records.
"""
super(NativeFastqReader, self).__init__()
fastq_path = input_path.encode('utf8')
if fastq_path.endswith('.gz'):
options = fastq_pb2.FastqReaderOptions(
compression_type=fastq_pb2.FastqReaderOptions.GZIP)
else:
options = fastq_pb2.FastqReaderOptions()
self._reader = fastq_reader.FastqReader.from_file(fastq_path, options)
self.header = None
def query(self):
raise NotImplementedError('Can not query a FASTQ file')
def iterate(self):
"""Returns an iterable of FastqRecord protos in the file."""
return self._reader.iterate()
def __exit__(self, exit_type, exit_value, exit_traceback):
self._reader.__exit__(exit_type, exit_value, exit_traceback)
class FastqReader(genomics_reader.DispatchingGenomicsReader):
"""Class for reading FastqRecord protos from FASTQ or TFRecord files."""
def _native_reader(self, input_path, **kwargs):
return NativeFastqReader(input_path, **kwargs)
def _record_proto(self):
return fastq_pb2.FastqRecord
class NativeFastqWriter(genomics_writer.GenomicsWriter):
"""Class for writing to native FASTQ files.
Most users will want FastqWriter, which will write to either native FASTQ
files or TFRecord files, based on the output filename's extension.
"""
def __init__(self, output_path):
"""Initializer for NativeFastqWriter.
Args:
output_path: str. The path to which to write the FASTQ file.
"""
super(NativeFastqWriter, self).__init__()
writer_options = fastq_pb2.FastqWriterOptions()
self._writer = fastq_writer.FastqWriter.to_file(output_path, writer_options)
def write(self, proto):
self._writer.write(proto)
def __exit__(self, exit_type, exit_value, exit_traceback):
self._writer.__exit__(exit_type, exit_value, exit_traceback)
class FastqWriter(genomics_writer.DispatchingGenomicsWriter):
"""Class for writing FastqRecord protos to FASTQ or TFRecord files."""
def _native_writer(self, output_path):
return NativeFastqWriter(output_path)
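# --- Usage sketch (not part of the original module) ---------------------------
# A minimal round trip following the pattern shown in the module docstring; the
# file paths below are illustrative only.
if __name__ == '__main__':
  with FastqReader('/tmp/reads.fastq') as reader:
    records = [record for record in reader]
  with FastqWriter('/tmp/reads_copy.fastq') as writer:
    for record in records:
      writer.write(record)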
| 36.451852
| 80
| 0.766917
|
357b7489b9159212c268319ba5d1ecadbc52ffa7
| 836
|
py
|
Python
|
1980.py
|
ShawonBarman/URI-Online-judge-Ad-Hoc-level-problem-solution-in-python
|
9a0f0ad5efd4a9e73589c357ab4b34b7c73a11da
|
[
"MIT"
] | 1
|
2022-01-14T08:45:32.000Z
|
2022-01-14T08:45:32.000Z
|
1980.py
|
ShawonBarman/URI-Online-judge-Ad-Hoc-level-problem-solution-in-python
|
9a0f0ad5efd4a9e73589c357ab4b34b7c73a11da
|
[
"MIT"
] | null | null | null |
1980.py
|
ShawonBarman/URI-Online-judge-Ad-Hoc-level-problem-solution-in-python
|
9a0f0ad5efd4a9e73589c357ab4b34b7c73a11da
|
[
"MIT"
] | null | null | null |
def factorial(num):
if num == 0 or num == 1:
return 1
elif num == 2:
return 2
elif num == 3:
return 6
elif num == 4:
return 24
elif num == 5:
return 120
elif num == 6:
return 720
elif num == 7:
return 5040
elif num == 8:
return 40320
elif num == 9:
return 362880
elif num == 10:
return 3628800
elif num == 11:
return 39916800
elif num == 12:
return 479001600
elif num == 13:
return 6227020800
elif num == 14:
return 87178291200
elif num == 15:
return 1307674368000
s = input()
while s != "0":
length = len(s)
z = factorial(length)
print(z)
try:
s = input()
except EOFError:
break
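# Post-hoc check (a sketch, not part of the submitted solution): the lookup
# table above matches math.factorial for every length it handles.
import math
for n in range(16):
    assert factorial(n) == math.factorial(n)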
| 20.390244
| 29
| 0.4689
|
0c550b8eec4852984083ef098b534c0aa9c89eae
| 5,860
|
py
|
Python
|
single_stage_detector/ssd/model/anchor_utils.py
|
matthew-frank/training
|
dc72f48cc5279f80e8e1736d0756c83f9e3cf116
|
[
"Apache-2.0"
] | 567
|
2018-09-13T05:07:49.000Z
|
2020-11-23T11:52:11.000Z
|
single_stage_detector/ssd/model/anchor_utils.py
|
matthew-frank/training
|
dc72f48cc5279f80e8e1736d0756c83f9e3cf116
|
[
"Apache-2.0"
] | 222
|
2018-09-14T10:15:39.000Z
|
2020-11-20T22:21:09.000Z
|
single_stage_detector/ssd/model/anchor_utils.py
|
matthew-frank/training
|
dc72f48cc5279f80e8e1736d0756c83f9e3cf116
|
[
"Apache-2.0"
] | 279
|
2018-09-16T12:40:29.000Z
|
2020-11-17T14:22:52.000Z
|
import math
import torch
from torch import nn, Tensor
from typing import List, Optional
from model.image_list import ImageList
class AnchorGenerator(nn.Module):
"""
Module that generates anchors for a set of feature maps and
image sizes.
The module supports computing anchors at multiple sizes and aspect ratios
per feature map. This module assumes aspect ratio = height / width for
each anchor.
sizes and aspect_ratios should have the same number of elements, and they should
correspond to the number of feature maps.
sizes[i] and aspect_ratios[i] can have an arbitrary number of elements,
and AnchorGenerator will output a set of sizes[i] * aspect_ratios[i] anchors
per spatial location for feature map i.
Args:
sizes (Tuple[Tuple[int]]):
aspect_ratios (Tuple[Tuple[float]]):
"""
__annotations__ = {
"cell_anchors": List[torch.Tensor],
}
def __init__(
self,
sizes=((128, 256, 512),),
aspect_ratios=((0.5, 1.0, 2.0),),
):
super(AnchorGenerator, self).__init__()
if not isinstance(sizes[0], (list, tuple)):
# TODO change this
sizes = tuple((s,) for s in sizes)
if not isinstance(aspect_ratios[0], (list, tuple)):
aspect_ratios = (aspect_ratios,) * len(sizes)
assert len(sizes) == len(aspect_ratios)
self.sizes = sizes
self.aspect_ratios = aspect_ratios
self.cell_anchors = [self.generate_anchors(size, aspect_ratio)
for size, aspect_ratio in zip(sizes, aspect_ratios)]
# TODO: https://github.com/pytorch/pytorch/issues/26792
# For every (aspect_ratios, scales) combination, output a zero-centered anchor with those values.
# (scales, aspect_ratios) are usually an element of zip(self.scales, self.aspect_ratios)
# This method assumes aspect ratio = height / width for an anchor.
def generate_anchors(self, scales: List[int], aspect_ratios: List[float], dtype: torch.dtype = torch.float32,
device: torch.device = torch.device("cpu")):
scales = torch.as_tensor(scales, dtype=dtype, device=device)
aspect_ratios = torch.as_tensor(aspect_ratios, dtype=dtype, device=device)
h_ratios = torch.sqrt(aspect_ratios)
w_ratios = 1 / h_ratios
ws = (w_ratios[:, None] * scales[None, :]).view(-1)
hs = (h_ratios[:, None] * scales[None, :]).view(-1)
base_anchors = torch.stack([-ws, -hs, ws, hs], dim=1) / 2
return base_anchors.round()
def set_cell_anchors(self, dtype: torch.dtype, device: torch.device):
self.cell_anchors = [cell_anchor.to(dtype=dtype, device=device)
for cell_anchor in self.cell_anchors]
def num_anchors_per_location(self):
return [len(s) * len(a) for s, a in zip(self.sizes, self.aspect_ratios)]
# For every combination of (a, (g, s), i) in (self.cell_anchors, zip(grid_sizes, strides), 0:2),
# output g[i] anchors that are s[i] distance apart in direction i, with the same dimensions as a.
def grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[Tensor]]) -> List[Tensor]:
anchors = []
cell_anchors = self.cell_anchors
assert cell_anchors is not None
if not (len(grid_sizes) == len(strides) == len(cell_anchors)):
raise ValueError("Anchors should be Tuple[Tuple[int]] because each feature "
"map could potentially have different sizes and aspect ratios. "
"There needs to be a match between the number of "
"feature maps passed and the number of sizes / aspect ratios specified.")
for size, stride, base_anchors in zip(
grid_sizes, strides, cell_anchors
):
grid_height, grid_width = size
stride_height, stride_width = stride
device = base_anchors.device
# For output anchor, compute [x_center, y_center, x_center, y_center]
shifts_x = torch.arange(
0, grid_width, dtype=torch.float32, device=device
) * stride_width
shifts_y = torch.arange(
0, grid_height, dtype=torch.float32, device=device
) * stride_height
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
shift_x = shift_x.reshape(-1)
shift_y = shift_y.reshape(-1)
shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
# For every (base anchor, output anchor) pair,
# offset each zero-centered base anchor by the center of the output anchor.
anchors.append(
(shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)
)
return anchors
def forward(self, image_list: ImageList, feature_maps: List[Tensor]) -> List[Tensor]:
grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
image_size = image_list.tensors.shape[-2:]
dtype, device = feature_maps[0].dtype, feature_maps[0].device
strides = [[torch.tensor(image_size[0] // g[0], dtype=torch.int64, device=device),
torch.tensor(image_size[1] // g[1], dtype=torch.int64, device=device)] for g in grid_sizes]
self.set_cell_anchors(dtype, device)
anchors_over_all_feature_maps = self.grid_anchors(grid_sizes, strides)
anchors: List[List[torch.Tensor]] = []
for _ in range(len(image_list.image_sizes)):
anchors_in_image = [anchors_per_feature_map for anchors_per_feature_map in anchors_over_all_feature_maps]
anchors.append(anchors_in_image)
anchors = [torch.cat(anchors_per_image) for anchors_per_image in anchors]
return anchors
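# --- Usage sketch (not part of the original module) ---------------------------
# A minimal forward pass. It assumes ImageList(tensors, image_sizes) mirrors the
# torchvision constructor; the sizes, ratios and tensor shapes are illustrative.
if __name__ == '__main__':
    gen = AnchorGenerator(sizes=((32, 64),), aspect_ratios=((0.5, 1.0, 2.0),))
    images = ImageList(torch.zeros(1, 3, 256, 256), [(256, 256)])
    feature_maps = [torch.zeros(1, 16, 32, 32)]      # one level, stride 8
    anchors = gen(images, feature_maps)              # one (N, 4) tensor per image
    print(anchors[0].shape)                          # 2 sizes * 3 ratios * 32 * 32 = 6144 anchors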
| 44.393939
| 117
| 0.636519
|
763ab4ee80df1b0a20c7881ccd062ebbae8175e8
| 250
|
py
|
Python
|
custom/up_nrhm/urls.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2020-05-05T13:10:01.000Z
|
2020-05-05T13:10:01.000Z
|
custom/up_nrhm/urls.py
|
kkrampa/commcare-hq
|
d64d7cad98b240325ad669ccc7effb07721b4d44
|
[
"BSD-3-Clause"
] | 1
|
2019-12-09T14:00:14.000Z
|
2019-12-09T14:00:14.000Z
|
custom/up_nrhm/urls.py
|
MaciejChoromanski/commcare-hq
|
fd7f65362d56d73b75a2c20d2afeabbc70876867
|
[
"BSD-3-Clause"
] | 5
|
2015-11-30T13:12:45.000Z
|
2019-07-01T19:27:07.000Z
|
from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import url
from custom.up_nrhm.views import asha_af_report
urlpatterns = [
url(r'^asha_af_report/$', asha_af_report, name='asha_af_report'),
]
| 25
| 69
| 0.804
|
26c2b6ded4fa12dcb8e5522435547b7263d92309
| 179
|
py
|
Python
|
configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py
|
YuxinZou/mmclassification
|
2037260ea6c98a3b115e97727e1151a1c2c32f7a
|
[
"Apache-2.0"
] | 1
|
2022-03-15T07:36:04.000Z
|
2022-03-15T07:36:04.000Z
|
configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py
|
YuxinZou/mmclassification
|
2037260ea6c98a3b115e97727e1151a1c2c32f7a
|
[
"Apache-2.0"
] | 5
|
2022-03-02T02:58:56.000Z
|
2022-03-23T05:51:53.000Z
|
configs/mobilenet_v2/mobilenet_v2_b32x8_imagenet.py
|
YuxinZou/mmclassification
|
2037260ea6c98a3b115e97727e1151a1c2c32f7a
|
[
"Apache-2.0"
] | 1
|
2021-12-28T18:30:40.000Z
|
2021-12-28T18:30:40.000Z
|
_base_ = 'mobilenet-v2_8xb32_in1k.py'
_deprecation_ = dict(
expected='mobilenet-v2_8xb32_in1k.py',
reference='https://github.com/open-mmlab/mmclassification/pull/508',
)
| 25.571429
| 72
| 0.748603
|
aff762a7edcf62c15bcc1704da5489fd1a6c3cc7
| 2,535
|
py
|
Python
|
day8/day8.py
|
BLannoo/Advent-of-Code-2020
|
a4efa637e5885a3f849b8bad13c7115c82bdec97
|
[
"MIT"
] | null | null | null |
day8/day8.py
|
BLannoo/Advent-of-Code-2020
|
a4efa637e5885a3f849b8bad13c7115c82bdec97
|
[
"MIT"
] | null | null | null |
day8/day8.py
|
BLannoo/Advent-of-Code-2020
|
a4efa637e5885a3f849b8bad13c7115c82bdec97
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
def test_silver_example():
assert 5 == run_till_loop(read_instructions("example.txt"))[0]
def test_silver():
assert 1548 == run_till_loop(read_instructions("input.txt"))[0]
def test_gold_example():
assert 8 == run_till_loop(read_instructions("example.txt"), True)[0]
def test_gold():
assert 1375 == run_till_loop(read_instructions("input.txt"), True)[0]
@dataclass(frozen=True)
class Instruction:
description: str
def execute(self):
if self.operation() == "nop":
return 1, 0
if self.operation() == "acc":
return 1, self.argument()
assert self.operation() == "jmp"
return self.argument(), 0
def operation(self):
return self.description[:3]
def argument(self):
return int(self.description[3:])
def read_instructions(file_name: str):
with open(file_name) as file:
instructions = [
Instruction(line)
for line in file.read().split("\n")
]
return instructions
def run_till_loop(instructions, branching=False, next_pointer=0):
pointers = [next_pointer]
accumulator = 0
while pointers[-1] not in pointers[:-2]:
pointer_increment, accumulator_increment = instructions[pointers[-1]].execute()
next_pointer = pointers[-1] + pointer_increment
pointers.append(next_pointer)
accumulator += accumulator_increment
if next_pointer == len(instructions):
return accumulator, next_pointer
next_instruction = instructions[next_pointer]
if branching and "acc" != next_instruction.operation():
corrected_instructions = correct_instructions(instructions, next_pointer)
branch_accumulator, final_pointer = run_till_loop(corrected_instructions, False, next_pointer)
if final_pointer == len(instructions):
return branch_accumulator + accumulator, final_pointer
return accumulator, next_pointer
def correct_instructions(instructions, next_pointer):
next_instruction = instructions[next_pointer]
corrected_instructions = instructions.copy()
if "nop" == next_instruction.operation():
correction = "jmp"
elif "jmp" == next_instruction.operation():
correction = "nop"
else:
raise Exception(f"Invalid operation {next_instruction.operation()}")
corrected_instructions[next_pointer] = Instruction(f"{correction} {next_instruction.argument()}")
return corrected_instructions
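# A tiny inline check (a sketch, not part of the original solution): a program
# with no jumps terminates immediately, returning the accumulator and the
# pointer one past the last instruction.
assert run_till_loop([Instruction("acc +3"), Instruction("nop +0"), Instruction("acc +4")]) == (7, 3)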
| 31.296296
| 106
| 0.67929
|
71d0832275c7552dd377158c32acab0a02ee0a0a
| 104,399
|
py
|
Python
|
src/com/dtmilano/android/viewclient.py
|
Fuzion24/AndroidViewClient
|
b1dad1f059cb564aca8e55247dc16d55a3248ee9
|
[
"Apache-2.0"
] | 3
|
2016-06-22T17:24:57.000Z
|
2020-05-29T20:08:18.000Z
|
src/com/dtmilano/android/viewclient.py
|
Fuzion24/AndroidViewClient
|
b1dad1f059cb564aca8e55247dc16d55a3248ee9
|
[
"Apache-2.0"
] | null | null | null |
src/com/dtmilano/android/viewclient.py
|
Fuzion24/AndroidViewClient
|
b1dad1f059cb564aca8e55247dc16d55a3248ee9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Copyright (C) 2012-2014 Diego Torres Milano
Created on Feb 2, 2012
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: Diego Torres Milano
'''
__version__ = '8.6.0'
import sys
import warnings
if sys.executable:
if 'monkeyrunner' in sys.executable:
warnings.warn(
'''
You should use a 'python' interpreter, not 'monkeyrunner' for this module
''', RuntimeWarning)
import subprocess
import re
import socket
import os
import types
import time
import signal
import copy
import pickle
import platform
import xml.parsers.expat
import unittest
from com.dtmilano.android.adb import adbclient
DEBUG = False
DEBUG_DEVICE = DEBUG and False
DEBUG_RECEIVED = DEBUG and False
DEBUG_TREE = DEBUG and False
DEBUG_GETATTR = DEBUG and False
DEBUG_CALL = DEBUG and False
DEBUG_COORDS = DEBUG and False
DEBUG_TOUCH = DEBUG and False
DEBUG_STATUSBAR = DEBUG and False
DEBUG_WINDOWS = DEBUG and False
DEBUG_BOUNDS = DEBUG and False
DEBUG_DISTANCE = DEBUG and False
WARNINGS = False
VIEW_SERVER_HOST = 'localhost'
VIEW_SERVER_PORT = 4939
ADB_DEFAULT_PORT = 5555
OFFSET = 25
''' This assumes the smallest touchable view on the screen is approximately 50px x 50px
and touches it at M{(x+OFFSET, y+OFFSET)} '''
USE_ADB_CLIENT_TO_GET_BUILD_PROPERTIES = True
''' Use C{AdbClient} to obtain the needed properties. If this is
C{False} then C{adb shell getprop} is used '''
SKIP_CERTAIN_CLASSES_IN_GET_XY_ENABLED = False
''' Skips some classes related to the Action Bar and the PhoneWindow$DecorView in the
coordinates calculation
@see: L{View.getXY()} '''
VIEW_CLIENT_TOUCH_WORKAROUND_ENABLED = False
''' Under some conditions the touch event should be longer [t(DOWN) << t(UP)]. C{True} enables a
workaround to delay the events.'''
# some device properties
VERSION_SDK_PROPERTY = 'ro.build.version.sdk'
VERSION_RELEASE_PROPERTY = 'ro.build.version.release'
# some constants for the attributes
ID_PROPERTY = 'mID'
ID_PROPERTY_UI_AUTOMATOR = 'uniqueId'
TEXT_PROPERTY = 'text:mText'
TEXT_PROPERTY_API_10 = 'mText'
TEXT_PROPERTY_UI_AUTOMATOR = 'text'
WS = u"\xfe" # the whitespace replacement char for TEXT_PROPERTY
LEFT_PROPERTY = 'layout:mLeft'
LEFT_PROPERTY_API_8 = 'mLeft'
TOP_PROPERTY = 'layout:mTop'
TOP_PROPERTY_API_8 = 'mTop'
WIDTH_PROPERTY = 'layout:getWidth()'
WIDTH_PROPERTY_API_8 = 'getWidth()'
HEIGHT_PROPERTY = 'layout:getHeight()'
HEIGHT_PROPERTY_API_8 = 'getHeight()'
GET_VISIBILITY_PROPERTY = 'getVisibility()'
LAYOUT_TOP_MARGIN_PROPERTY = 'layout:layout_topMargin'
IS_FOCUSED_PROPERTY_UI_AUTOMATOR = 'focused'
IS_FOCUSED_PROPERTY = 'focus:isFocused()'
# visibility
VISIBLE = 0x0
INVISIBLE = 0x4
GONE = 0x8
RegexType = type(re.compile(''))
IP_RE = re.compile('^(\d{1,3}\.){3}\d{1,3}$')
ID_RE = re.compile('id/([^/]*)(/(\d+))?')
def _nd(name):
'''
@return: Returns a named decimal regex
'''
return '(?P<%s>\d+)' % name
def _nh(name):
'''
@return: Returns a named hex regex
'''
return '(?P<%s>[0-9a-f]+)' % name
def _ns(name, greedy=False):
'''
NOTICE: this is using a non-greedy (or minimal) regex
@type name: str
@param name: the name used to tag the expression
@type greedy: bool
@param greedy: Whether the regex is greedy or not
@return: Returns a named string regex (only non-whitespace characters allowed)
'''
return '(?P<%s>\S+%s)' % (name, '' if greedy else '?')
class Window:
'''
Window class
'''
def __init__(self, num, winId, activity, wvx, wvy, wvw, wvh, px, py, visibility):
'''
Constructor
@type num: int
@param num: Ordering number in Window Manager
@type winId: str
@param winId: the window ID
@type activity: str
@param activity: the activity (or sometimes other component) owning the window
@type wvx: int
@param wvx: window's virtual X
@type wvy: int
@param wvy: window's virtual Y
@type wvw: int
@param wvw: window's virtual width
@type wvh: int
@param wvh: window's virtual height
@type px: int
@param px: parent's X
@type py: int
@param py: parent's Y
@type visibility: int
@param visibility: visibility of the window
'''
if DEBUG_COORDS: print >> sys.stderr, "Window(%d, %s, %s, %d, %d, %d, %d, %d, %d, %d)" % \
(num, winId, activity, wvx, wvy, wvw, wvh, px, py, visibility)
self.num = num
self.winId = winId
self.activity = activity
self.wvx = wvx
self.wvy = wvy
self.wvw = wvw
self.wvh = wvh
self.px = px
self.py = py
self.visibility = visibility
def __str__(self):
return "Window(%d, wid=%s, a=%s, x=%d, y=%d, w=%d, h=%d, px=%d, py=%d, v=%d)" % \
(self.num, self.winId, self.activity, self.wvx, self.wvy, self.wvw, self.wvh, self.px, self.py, self.visibility)
class ViewNotFoundException(Exception):
'''
ViewNotFoundException is raised when a View is not found.
'''
def __init__(self, attr, value, root):
if isinstance(value, RegexType):
msg = "Couldn't find View with %s that matches '%s' in tree with root=%s" % (attr, value.pattern, root)
else:
msg = "Couldn't find View with %s='%s' in tree with root=%s" % (attr, value, root)
super(Exception, self).__init__(msg)
class View:
'''
View class
'''
@staticmethod
def factory(arg1, arg2, version=-1, forceviewserveruse=False):
'''
View factory
@type arg1: ClassType or dict
@type arg2: View instance or AdbClient
'''
if type(arg1) == types.ClassType:
cls = arg1
attrs = None
else:
cls = None
attrs = arg1
if isinstance(arg2, View):
view = arg2
device = None
else:
device = arg2
view = None
if attrs and attrs.has_key('class'):
clazz = attrs['class']
if clazz == 'android.widget.TextView':
return TextView(attrs, device, version, forceviewserveruse)
elif clazz == 'android.widget.EditText':
return EditText(attrs, device, version, forceviewserveruse)
else:
return View(attrs, device, version, forceviewserveruse)
elif cls:
if view:
return cls.__copy(view)
else:
return cls(attrs, device, version, forceviewserveruse)
elif view:
return copy.copy(view)
else:
return View(attrs, device, version, forceviewserveruse)
@classmethod
def __copy(cls, view):
'''
Copy constructor
'''
return cls(view.map, view.device, view.version, view.forceviewserveruse)
def __init__(self, map, device, version=-1, forceviewserveruse=False):
'''
Constructor
@type map: map
@param map: the map containing the (attribute, value) pairs
@type device: MonkeyDevice
@param device: the device containing this View
@type version: int
@param version: the Android SDK version number of the platform where this View belongs. If
this is C{-1} then the Android SDK version will be obtained in this
constructor.
@type forceviewserveruse: boolean
@param forceviewserveruse: Force the use of C{ViewServer} even if the conditions were given
to use C{UiAutomator}.
'''
self.map = map
''' The map that contains the C{attr},C{value} pairs '''
self.device = device
''' The MonkeyDevice '''
self.children = []
''' The children of this View '''
self.parent = None
''' The parent of this View '''
self.windows = {}
self.currentFocus = None
''' The current focus '''
self.build = {}
''' Build properties '''
self.version = version
''' API version number '''
self.forceviewserveruse = forceviewserveruse
''' Force ViewServer use '''
if version != -1:
self.build[VERSION_SDK_PROPERTY] = version
else:
try:
if USE_ADB_CLIENT_TO_GET_BUILD_PROPERTIES:
self.build[VERSION_SDK_PROPERTY] = int(device.getProperty(VERSION_SDK_PROPERTY))
else:
self.build[VERSION_SDK_PROPERTY] = int(device.shell('getprop ' + VERSION_SDK_PROPERTY)[:-2])
except:
self.build[VERSION_SDK_PROPERTY] = -1
version = self.build[VERSION_SDK_PROPERTY]
self.useUiAutomator = (version >= 16) and not forceviewserveruse
''' Whether to use UIAutomator or ViewServer '''
self.idProperty = None
''' The id property depending on the View attribute format '''
self.textProperty = None
''' The text property depending on the View attribute format '''
self.leftProperty = None
''' The left property depending on the View attribute format '''
self.topProperty = None
''' The top property depending on the View attribute format '''
self.widthProperty = None
''' The width property depending on the View attribute format '''
self.heightProperty = None
''' The height property depending on the View attribute format '''
self.isFocusedProperty = None
''' The focused property depending on the View attribute format '''
if version >= 16 and self.useUiAutomator:
self.idProperty = ID_PROPERTY_UI_AUTOMATOR
self.textProperty = TEXT_PROPERTY_UI_AUTOMATOR
self.leftProperty = LEFT_PROPERTY
self.topProperty = TOP_PROPERTY
self.widthProperty = WIDTH_PROPERTY
self.heightProperty = HEIGHT_PROPERTY
self.isFocusedProperty = IS_FOCUSED_PROPERTY_UI_AUTOMATOR
elif version > 10 and (version < 16 or self.useUiAutomator):
self.idProperty = ID_PROPERTY
self.textProperty = TEXT_PROPERTY
self.leftProperty = LEFT_PROPERTY
self.topProperty = TOP_PROPERTY
self.widthProperty = WIDTH_PROPERTY
self.heightProperty = HEIGHT_PROPERTY
self.isFocusedProperty = IS_FOCUSED_PROPERTY
elif version == 10:
self.idProperty = ID_PROPERTY
self.textProperty = TEXT_PROPERTY_API_10
self.leftProperty = LEFT_PROPERTY
self.topProperty = TOP_PROPERTY
self.widthProperty = WIDTH_PROPERTY
self.heightProperty = HEIGHT_PROPERTY
self.isFocusedProperty = IS_FOCUSED_PROPERTY
elif version >= 7 and version < 10:
self.idProperty = ID_PROPERTY
self.textProperty = TEXT_PROPERTY_API_10
self.leftProperty = LEFT_PROPERTY_API_8
self.topProperty = TOP_PROPERTY_API_8
self.widthProperty = WIDTH_PROPERTY_API_8
self.heightProperty = HEIGHT_PROPERTY_API_8
self.isFocusedProperty = IS_FOCUSED_PROPERTY
elif version > 0 and version < 7:
self.idProperty = ID_PROPERTY
self.textProperty = TEXT_PROPERTY_API_10
self.leftProperty = LEFT_PROPERTY
self.topProperty = TOP_PROPERTY
self.widthProperty = WIDTH_PROPERTY
self.heightProperty = HEIGHT_PROPERTY
self.isFocusedProperty = IS_FOCUSED_PROPERTY
elif version == -1:
self.idProperty = ID_PROPERTY
self.textProperty = TEXT_PROPERTY
self.leftProperty = LEFT_PROPERTY
self.topProperty = TOP_PROPERTY
self.widthProperty = WIDTH_PROPERTY
self.heightProperty = HEIGHT_PROPERTY
self.isFocusedProperty = IS_FOCUSED_PROPERTY
else:
self.idProperty = ID_PROPERTY
self.textProperty = TEXT_PROPERTY
self.leftProperty = LEFT_PROPERTY
self.topProperty = TOP_PROPERTY
self.widthProperty = WIDTH_PROPERTY
self.heightProperty = HEIGHT_PROPERTY
self.isFocusedProperty = IS_FOCUSED_PROPERTY
def __getitem__(self, key):
return self.map[key]
def __getattr__(self, name):
if DEBUG_GETATTR:
print >>sys.stderr, "__getattr__(%s) version: %d" % (name, self.build[VERSION_SDK_PROPERTY])
# NOTE:
# I should try to see if 'name' is a defined method
# but it seems that if I call locals() here an infinite loop is entered
if self.map.has_key(name):
r = self.map[name]
elif self.map.has_key(name + '()'):
# the method names are stored in the map with their trailing '()'
r = self.map[name + '()']
elif name.count("_") > 0:
mangledList = self.allPossibleNamesWithColon(name)
mangledName = self.intersection(mangledList, self.map.keys())
if len(mangledName) > 0 and self.map.has_key(mangledName[0]):
r = self.map[mangledName[0]]
else:
# Default behavior
raise AttributeError, name
else:
# try removing 'is' prefix
if DEBUG_GETATTR:
print >> sys.stderr, " __getattr__: trying without 'is' prefix"
suffix = name[2:].lower()
if self.map.has_key(suffix):
r = self.map[suffix]
else:
# Default behavior
raise AttributeError, name
# if the method name starts with 'is' let's assume its return value is boolean
# if name[:2] == 'is':
# r = True if r == 'true' else False
if r == 'true':
r = True
elif r == 'false':
r = False
# this should not be cached in some way
def innerMethod():
if DEBUG_GETATTR:
print >>sys.stderr, "innerMethod: %s returning %s" % (innerMethod.__name__, r)
return r
innerMethod.__name__ = name
# this should work, but then there are problems with the arguments of innerMethod
# even if innerMethod(self) is added
#setattr(View, innerMethod.__name__, innerMethod)
#setattr(self, innerMethod.__name__, innerMethod)
return innerMethod
def __call__(self, *args, **kwargs):
if DEBUG_CALL:
print >>sys.stderr, "__call__(%s)" % (args if args else None)
def getClass(self):
'''
Gets the L{View} class
@return: the L{View} class or C{None} if not defined
'''
try:
return self.map['class']
except:
return None
def getId(self):
'''
Gets the L{View} Id
@return: the L{View} C{Id} or C{None} if not defined
@see: L{getUniqueId()}
'''
try:
return self.map['resource-id']
except:
pass
try:
return self.map[self.idProperty]
except:
return None
def getContentDescription(self):
'''
Gets the content description.
'''
try:
return self.map['content-desc']
except:
return None
def getParent(self):
'''
Gets the parent.
'''
return self.parent
def getText(self):
'''
Gets the text attribute.
@return: the text attribute or C{None} if not defined
'''
try:
return self.map[self.textProperty]
except Exception:
return None
def getHeight(self):
'''
Gets the height.
'''
if self.useUiAutomator:
return self.map['bounds'][1][1] - self.map['bounds'][0][1]
else:
try:
return int(self.map[self.heightProperty])
except:
return 0
def getWidth(self):
'''
Gets the width.
'''
if self.useUiAutomator:
return self.map['bounds'][1][0] - self.map['bounds'][0][0]
else:
try:
return int(self.map[self.widthProperty])
except:
return 0
def getUniqueId(self):
'''
Gets the unique Id of this View.
@see: L{ViewClient.__splitAttrs()} for a discussion on B{Unique Ids}
'''
try:
return self.map['uniqueId']
except:
return None
def getVisibility(self):
'''
Gets the View visibility
'''
try:
if self.map[GET_VISIBILITY_PROPERTY] == 'VISIBLE':
return VISIBLE
elif self.map[GET_VISIBILITY_PROPERTY] == 'INVISIBLE':
return INVISIBLE
elif self.map[GET_VISIBILITY_PROPERTY] == 'GONE':
return GONE
else:
return -2
except:
return -1
def getX(self):
'''
Gets the View X coordinate
'''
if DEBUG_COORDS:
print >>sys.stderr, "getX(%s %s ## %s)" % (self.getClass(), self.getId(), self.getUniqueId())
x = 0
if self.useUiAutomator:
x = self.map['bounds'][0][0]
else:
try:
if GET_VISIBILITY_PROPERTY in self.map and self.map[GET_VISIBILITY_PROPERTY] == 'VISIBLE':
_x = int(self.map[self.leftProperty])
if DEBUG_COORDS: print >>sys.stderr, " getX: VISIBLE adding %d" % _x
x += _x
except:
warnings.warn("View %s has no '%s' property" % (self.getId(), self.leftProperty))
if DEBUG_COORDS: print >>sys.stderr, " getX: returning %d" % (x)
return x
def getY(self):
'''
Gets the View Y coordinate
'''
if DEBUG_COORDS:
print >>sys.stderr, "getY(%s %s ## %s)" % (self.getClass(), self.getId(), self.getUniqueId())
y = 0
if self.useUiAutomator:
y = self.map['bounds'][0][1]
else:
try:
if GET_VISIBILITY_PROPERTY in self.map and self.map[GET_VISIBILITY_PROPERTY] == 'VISIBLE':
_y = int(self.map[self.topProperty])
if DEBUG_COORDS: print >>sys.stderr, " getY: VISIBLE adding %d" % _y
y += _y
except:
warnings.warn("View %s has no '%s' property" % (self.getId(), self.topProperty))
if DEBUG_COORDS: print >>sys.stderr, " getY: returning %d" % (y)
return y
def getXY(self, debug=False):
'''
Returns the I{screen} coordinates of this C{View}.
@return: The I{screen} coordinates of this C{View}
'''
if DEBUG_COORDS or debug:
try:
id = self.getId()
except:
id = "NO_ID"
print >> sys.stderr, "getXY(%s %s ## %s)" % (self.getClass(), id, self.getUniqueId())
x = self.getX()
y = self.getY()
if self.useUiAutomator:
return (x, y)
parent = self.parent
if DEBUG_COORDS: print >> sys.stderr, " getXY: x=%s y=%s parent=%s" % (x, y, parent.getUniqueId() if parent else "None")
hx = 0
''' Hierarchy accumulated X '''
hy = 0
''' Hierarchy accumulated Y '''
if DEBUG_COORDS: print >> sys.stderr, " getXY: not using UiAutomator, calculating parent coordinates"
while parent != None:
if DEBUG_COORDS: print >> sys.stderr, " getXY: parent: %s %s <<<<" % (parent.getClass(), parent.getId())
if SKIP_CERTAIN_CLASSES_IN_GET_XY_ENABLED:
if parent.getClass() in [ 'com.android.internal.widget.ActionBarView',
'com.android.internal.widget.ActionBarContextView',
'com.android.internal.view.menu.ActionMenuView',
'com.android.internal.policy.impl.PhoneWindow$DecorView' ]:
if DEBUG_COORDS: print >> sys.stderr, " getXY: skipping %s %s (%d,%d)" % (parent.getClass(), parent.getId(), parent.getX(), parent.getY())
parent = parent.parent
continue
if DEBUG_COORDS: print >> sys.stderr, " getXY: parent=%s x=%d hx=%d y=%d hy=%d" % (parent.getId(), x, hx, y, hy)
hx += parent.getX()
hy += parent.getY()
parent = parent.parent
(wvx, wvy) = self.__dumpWindowsInformation(debug=debug)
if DEBUG_COORDS or debug:
print >>sys.stderr, " getXY: wv=(%d, %d) (windows information)" % (wvx, wvy)
try:
fw = self.windows[self.currentFocus]
if DEBUG_STATUSBAR:
print >> sys.stderr, " getXY: focused window=", fw
print >> sys.stderr, " getXY: deciding whether to consider statusbar offset because current focused windows is at", (fw.wvx, fw.wvy), "parent", (fw.px, fw.py)
except KeyError:
fw = None
(sbw, sbh) = self.__obtainStatusBarDimensionsIfVisible()
if DEBUG_COORDS or debug:
print >>sys.stderr, " getXY: sb=(%d, %d) (statusbar dimensions)" % (sbw, sbh)
statusBarOffset = 0
pwx = 0
pwy = 0
if fw:
if DEBUG_COORDS:
print >>sys.stderr, " getXY: focused window=", fw, "sb=", (sbw, sbh)
if fw.wvy <= sbh: # it's very unlikely that fw.wvy < sbh, that is a window over the statusbar
if DEBUG_STATUSBAR: print >>sys.stderr, " getXY: yes, considering offset=", sbh
statusBarOffset = sbh
else:
if DEBUG_STATUSBAR: print >>sys.stderr, " getXY: no, ignoring statusbar offset fw.wvy=", fw.wvy, ">", sbh
if fw.py == fw.wvy:
if DEBUG_STATUSBAR: print >>sys.stderr, " getXY: but wait, fw.py == fw.wvy so we are adjusting by ", (fw.px, fw.py)
pwx = fw.px
pwy = fw.py
else:
if DEBUG_STATUSBAR: print >>sys.stderr, " getXY: fw.py=%d <= fw.wvy=%d, no adjustment" % (fw.py, fw.wvy)
if DEBUG_COORDS or DEBUG_STATUSBAR or debug:
print >>sys.stderr, " getXY: returning (%d, %d) ***" % (x+hx+wvx+pwx, y+hy+wvy-statusBarOffset+pwy)
print >>sys.stderr, " x=%d+%d+%d+%d" % (x,hx,wvx,pwx)
print >>sys.stderr, " y=%d+%d+%d-%d+%d" % (y,hy,wvy,statusBarOffset,pwy)
return (x+hx+wvx+pwx, y+hy+wvy-statusBarOffset+pwy)
def getCoords(self):
'''
Gets the coords of the View
@return: A tuple containing the View's coordinates ((L, T), (R, B))
'''
if DEBUG_COORDS:
print >>sys.stderr, "getCoords(%s %s ## %s)" % (self.getClass(), self.getId(), self.getUniqueId())
(x, y) = self.getXY();
w = self.getWidth()
h = self.getHeight()
return ((x, y), (x+w, y+h))
def getPositionAndSize(self):
'''
Gets the position and size (X,Y, W, H)
@return: A tuple containing the View's coordinates (X, Y, W, H)
'''
(x, y) = self.getXY();
w = self.getWidth()
h = self.getHeight()
return (x, y, w, h)
def getCenter(self):
'''
Gets the center coords of the View
@author: U{Dean Morin <https://github.com/deanmorin>}
'''
(left, top), (right, bottom) = self.getCoords()
x = left + (right - left) / 2
y = top + (bottom - top) / 2
return (x, y)
def __obtainStatusBarDimensionsIfVisible(self):
sbw = 0
sbh = 0
for winId in self.windows:
w = self.windows[winId]
if DEBUG_COORDS: print >> sys.stderr, " __obtainStatusBarDimensionsIfVisible: w=", w, " w.activity=", w.activity, "%%%"
if w.activity == 'StatusBar':
if w.wvy == 0 and w.visibility == 0:
if DEBUG_COORDS: print >> sys.stderr, " __obtainStatusBarDimensionsIfVisible: statusBar=", (w.wvw, w.wvh)
sbw = w.wvw
sbh = w.wvh
break
return (sbw, sbh)
def __obtainVxVy(self, m):
wvx = int(m.group('vx'))
wvy = int(m.group('vy'))
return wvx, wvy
def __obtainVwVh(self, m):
(wvx, wvy) = self.__obtainVxVy(m)
wvx1 = int(m.group('vx1'))
wvy1 = int(m.group('vy1'))
return (wvx1-wvx, wvy1-wvy)
def __obtainPxPy(self, m):
px = int(m.group('px'))
py = int(m.group('py'))
return (px, py)
def __dumpWindowsInformation(self, debug=False):
self.windows = {}
self.currentFocus = None
dww = self.device.shell('dumpsys window windows')
if DEBUG_WINDOWS or debug: print >> sys.stderr, dww
lines = dww.split('\n')
widRE = re.compile('^ *Window #%s Window{%s (u\d+ )?%s?.*}:' %
(_nd('num'), _nh('winId'), _ns('activity', greedy=True)))
currentFocusRE = re.compile('^ mCurrentFocus=Window{%s .*' % _nh('winId'))
viewVisibilityRE = re.compile(' mViewVisibility=0x%s ' % _nh('visibility'))
# This is for 4.0.4 API-15
containingFrameRE = re.compile('^ *mContainingFrame=\[%s,%s\]\[%s,%s\] mParentFrame=\[%s,%s\]\[%s,%s\]' %
(_nd('cx'), _nd('cy'), _nd('cw'), _nd('ch'), _nd('px'), _nd('py'), _nd('pw'), _nd('ph')))
contentFrameRE = re.compile('^ *mContentFrame=\[%s,%s\]\[%s,%s\] mVisibleFrame=\[%s,%s\]\[%s,%s\]' %
(_nd('x'), _nd('y'), _nd('w'), _nd('h'), _nd('vx'), _nd('vy'), _nd('vx1'), _nd('vy1')))
# This is for 4.1 API-16
framesRE = re.compile('^ *Frames: containing=\[%s,%s\]\[%s,%s\] parent=\[%s,%s\]\[%s,%s\]' %
(_nd('cx'), _nd('cy'), _nd('cw'), _nd('ch'), _nd('px'), _nd('py'), _nd('pw'), _nd('ph')))
contentRE = re.compile('^ *content=\[%s,%s\]\[%s,%s\] visible=\[%s,%s\]\[%s,%s\]' %
(_nd('x'), _nd('y'), _nd('w'), _nd('h'), _nd('vx'), _nd('vy'), _nd('vx1'), _nd('vy1')))
policyVisibilityRE = re.compile('mPolicyVisibility=%s ' % _ns('policyVisibility', greedy=True))
for l in range(len(lines)):
m = widRE.search(lines[l])
if m:
num = int(m.group('num'))
winId = m.group('winId')
activity = m.group('activity')
wvx = 0
wvy = 0
wvw = 0
wvh = 0
px = 0
py = 0
visibility = -1
policyVisibility = 0x0
for l2 in range(l+1, len(lines)):
m = widRE.search(lines[l2])
if m:
l += (l2-1)
break
m = viewVisibilityRE.search(lines[l2])
if m:
visibility = int(m.group('visibility'))
if DEBUG_COORDS: print >> sys.stderr, "__dumpWindowsInformation: visibility=", visibility
if self.build[VERSION_SDK_PROPERTY] >= 17:
wvx, wvy = (0, 0)
wvw, wvh = (0, 0)
if self.build[VERSION_SDK_PROPERTY] >= 16:
m = framesRE.search(lines[l2])
if m:
px, py = self.__obtainPxPy(m)
m = contentRE.search(lines[l2+1])
if m:
# FIXME: the information provided by 'dumpsys window windows' in 4.2.1 (API 16)
# when there's a system dialog may not be correct and causes the View coordinates
# be offset by this amount, see
# https://github.com/dtmilano/AndroidViewClient/issues/29
wvx, wvy = self.__obtainVxVy(m)
wvw, wvh = self.__obtainVwVh(m)
elif self.build[VERSION_SDK_PROPERTY] == 15:
m = containingFrameRE.search(lines[l2])
if m:
px, py = self.__obtainPxPy(m)
m = contentFrameRE.search(lines[l2+1])
if m:
wvx, wvy = self.__obtainVxVy(m)
wvw, wvh = self.__obtainVwVh(m)
elif self.build[VERSION_SDK_PROPERTY] == 10:
m = containingFrameRE.search(lines[l2])
if m:
px, py = self.__obtainPxPy(m)
m = contentFrameRE.search(lines[l2+1])
if m:
wvx, wvy = self.__obtainVxVy(m)
wvw, wvh = self.__obtainVwVh(m)
else:
warnings.warn("Unsupported Android version %d" % self.build[VERSION_SDK_PROPERTY])
#print >> sys.stderr, "Searching policyVisibility in", lines[l2]
m = policyVisibilityRE.search(lines[l2])
if m:
policyVisibility = 0x0 if m.group('policyVisibility') == 'true' else 0x8
self.windows[winId] = Window(num, winId, activity, wvx, wvy, wvw, wvh, px, py, visibility + policyVisibility)
else:
m = currentFocusRE.search(lines[l])
if m:
self.currentFocus = m.group('winId')
if self.currentFocus in self.windows and self.windows[self.currentFocus].visibility == 0:
if DEBUG_COORDS or debug:
print >> sys.stderr, "__dumpWindowsInformation: focus=", self.currentFocus
print >> sys.stderr, "__dumpWindowsInformation:", self.windows[self.currentFocus]
w = self.windows[self.currentFocus]
return (w.wvx, w.wvy)
else:
if DEBUG_COORDS: print >> sys.stderr, "__dumpWindowsInformation: (0,0)"
return (0,0)
def touch(self, type=adbclient.DOWN_AND_UP):
'''
Touches the center of this C{View}
'''
(x, y) = self.getCenter()
if DEBUG_TOUCH:
print >>sys.stderr, "should touch @ (%d, %d)" % (x, y)
if VIEW_CLIENT_TOUCH_WORKAROUND_ENABLED and type == adbclient.DOWN_AND_UP:
if WARNINGS:
print >> sys.stderr, "ViewClient: touch workaround enabled"
self.device.touch(x, y, adbclient.DOWN)
time.sleep(50/1000.0)
self.device.touch(x+10, y+10, adbclient.UP)
else:
self.device.touch(x, y, type)
def longTouch(self, duration=2000):
'''
Long touches this C{View}
@param duration: duration in ms
'''
c = self.getCenter()
self.device.longTouch(c, c, duration, 1)
def allPossibleNamesWithColon(self, name):
l = []
for i in range(name.count("_")):
name = name.replace("_", ":", 1)
l.append(name)
return l
def intersection(self, l1, l2):
return list(set(l1) & set(l2))
def containsPoint(self, (x, y)):
(X, Y, W, H) = self.getPositionAndSize()
return (((x >= X) and (x <= (X+W)) and ((y >= Y) and (y <= (Y+H)))))
def add(self, child):
'''
Adds a child
@type child: View
@param child: The child to add
'''
child.parent = self
self.children.append(child)
def isClickable(self):
return self.__getattr__('isClickable')()
def isFocused(self):
'''
Gets the focused value
@return: the focused value. If the property cannot be found returns C{False}
'''
try:
return True if self.map[self.isFocusedProperty].lower() == 'true' else False
except Exception:
return False
def variableNameFromId(self):
_id = self.getId()
if _id:
var = _id.replace('.', '_').replace(':', '___').replace('/', '_')
else:
_id = self.getUniqueId()
m = ID_RE.match(_id)
if m:
var = m.group(1)
if m.group(3):
var += m.group(3)
if re.match('^\d', var):
var = 'id_' + var
return var
def writeImageToFile(self, filename, format="PNG"):
'''
Write the View image to the specified filename in the specified format.
@type filename: str
@param filename: Absolute path and optional filename receiving the image. If this points to
a directory, then the filename is determined by this View's unique ID and
format extension.
@type format: str
@param format: Image format (default format is PNG)
'''
if not os.path.isabs(filename):
raise ValueError("writeImageToFile expects an absolute path")
if os.path.isdir(filename):
filename = os.path.join(filename, self.variableNameFromId() + '.' + format.lower())
if DEBUG:
print >> sys.stderr, "writeImageToFile: saving image to '%s' in %s format" % (filename, format)
#self.device.takeSnapshot().getSubImage(self.getPositionAndSize()).writeToFile(filename, format)
# crop:
# im.crop(box) ⇒ image
# Returns a copy of a rectangular region from the current image.
# The box is a 4-tuple defining the left, upper, right, and lower pixel coordinate.
((l, t), (r, b)) = self.getCoords()
box = (l, t, r, b)
if DEBUG:
print >> sys.stderr, "writeImageToFile: cropping", box, " reconnect=", self.device.reconnect
self.device.takeSnapshot(reconnect=self.device.reconnect).crop(box).save(filename, format)
def __smallStr__(self):
__str = unicode("View[", 'utf-8', 'replace')
if "class" in self.map:
__str += " class=" + self.map['class']
__str += " id=%s" % self.getId()
__str += " ] parent="
if self.parent and "class" in self.parent.map:
__str += "%s" % self.parent.map["class"]
else:
__str += "None"
return __str
def __tinyStr__(self):
__str = unicode("View[", 'utf-8', 'replace')
if "class" in self.map:
__str += " class=" + re.sub('.*\.', '', self.map['class'])
__str += " id=%s" % self.getId()
__str += " ]"
return __str
def __microStr__(self):
__str = unicode('', 'utf-8', 'replace')
if "class" in self.map:
__str += re.sub('.*\.', '', self.map['class'])
id = self.getId().replace('id/no_id/', '-')
__str += id
((L, T), (R, B)) = self.getCoords()
__str += '@%04d%04d%04d%04d' % (L, T, R, B)
__str += ''
return __str
def __str__(self):
__str = unicode("View[", 'utf-8', 'replace')
if "class" in self.map:
__str += " class=" + self.map["class"].__str__() + " "
for a in self.map:
__str += a + "="
# decode() works only on python's 8-bit strings
if isinstance(self.map[a], unicode):
__str += self.map[a]
else:
__str += unicode(str(self.map[a]), 'utf-8', errors='replace')
__str += " "
__str += "] parent="
if self.parent:
if "class" in self.parent.map:
__str += "%s" % self.parent.map["class"]
else:
__str += self.parent.getId().__str__()
else:
__str += "None"
return __str
class TextView(View):
'''
TextView class.
'''
pass
class EditText(TextView):
'''
EditText class.
'''
def type(self, text, alreadyTouch = False):
if not alreadyTouch:
self.touch()
time.sleep(0.5)
escaped = text.replace('%s', '\\%s')
encoded = escaped.replace(' ', '%s')
self.device.type(encoded)
time.sleep(0.5)
def setText(self, text):
"""
This function makes sure that any previously entered text is deleted before
setting the value of the field.
"""
if self.text() == text:
return
self.touch()
guardrail = 0
maxSize = len(self.text()) + 1
while maxSize > guardrail:
guardrail += 1
self.device.press('KEYCODE_DEL', adbclient.DOWN_AND_UP)
self.device.press('KEYCODE_FORWARD_DEL', adbclient.DOWN_AND_UP)
self.type(text,alreadyTouch=True)
def backspace(self):
self.touch()
time.sleep(1)
self.device.press('KEYCODE_DEL', adbclient.DOWN_AND_UP)
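# --- Illustration (not part of the original source) ----------------------------
# A small sketch of how View exposes its attribute map through __getattr__; the
# attribute values below are made up and version=16 selects the UiAutomator
# property names. The function is defined only as an example and never called.
def _view_usage_sketch():
    v = View({'class': 'android.widget.Button', 'text': 'OK',
              'bounds': ((10, 20), (110, 70)), 'clickable': 'true'},
             device=None, version=16)
    print(v.getClass())      # 'android.widget.Button'
    print(v.getText())       # 'OK'
    print(v.getWidth())      # 100, i.e. right - left taken from 'bounds'
    print(v.isClickable())   # True, the string 'true' is mapped to a boolean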
class UiAutomator2AndroidViewClient():
'''
UiAutomator XML to AndroidViewClient
'''
def __init__(self, device, version):
self.device = device
self.version = version
self.root = None
self.nodeStack = []
self.parent = None
self.views = []
self.idCount = 1
def StartElement(self, name, attributes):
'''
Expat start element event handler
'''
if name == 'hierarchy':
pass
elif name == 'node':
# Instantiate an Element object
attributes['uniqueId'] = 'id/no_id/%d' % self.idCount
bounds = re.split('[\][,]', attributes['bounds'])
attributes['bounds'] = ((int(bounds[1]), int(bounds[2])), (int(bounds[4]), int(bounds[5])))
if DEBUG_BOUNDS:
print >> sys.stderr, "bounds=", attributes['bounds']
self.idCount += 1
child = View.factory(attributes, self.device, self.version)
self.views.append(child)
# Push element onto the stack and make it a child of parent
if not self.nodeStack:
self.root = child
else:
self.parent = self.nodeStack[-1]
self.parent.add(child)
self.nodeStack.append(child)
def EndElement(self, name):
'''
Expat end element event handler
'''
if name == 'hierarchy':
pass
elif name == 'node':
self.nodeStack.pop()
def CharacterData(self, data):
'''
Expat character data event handler
'''
if data.strip():
data = data.encode()
element = self.nodeStack[-1]
element.cdata += data
def Parse(self, uiautomatorxml):
# Create an Expat parser
parser = xml.parsers.expat.ParserCreate()
# Set the Expat event handlers to our methods
parser.StartElementHandler = self.StartElement
parser.EndElementHandler = self.EndElement
parser.CharacterDataHandler = self.CharacterData
# Parse the XML File
try:
parserStatus = parser.Parse(uiautomatorxml.encode(encoding='utf-8', errors='replace'), True)
except xml.parsers.expat.ExpatError, ex:
print >>sys.stderr, "ERROR: Offending XML:\n", repr(uiautomatorxml)
raise RuntimeError(ex)
return self.root
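# --- Illustration (not part of the original source) ----------------------------
# A tiny parse sketch for the handler above; the XML is a made-up, minimal
# uiautomator dump with a single node. The function is defined only as an
# example and never called by this module.
def _uiautomator_parse_sketch():
    dump = ('<?xml version="1.0" encoding="UTF-8"?>'
            '<hierarchy rotation="0">'
            '<node class="android.widget.FrameLayout" text="" '
            'bounds="[0,0][720,1280]" clickable="false"/>'
            '</hierarchy>')
    root = UiAutomator2AndroidViewClient(device=None, version=17).Parse(dump)
    print(root.getClass())   # 'android.widget.FrameLayout'
    print(root.getWidth())   # 720, taken from the parsed 'bounds'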
class Excerpt2Code():
''' Excerpt XML to code '''
def __init__(self):
self.data = None
def StartElement(self, name, attributes):
'''
Expat start element event handler
'''
if name == 'excerpt':
pass
else:
warnings.warn("Unexpected element: '%s'" % name)
def EndElement(self, name):
'''
Expat end element event handler
'''
if name == 'excerpt':
pass
def CharacterData(self, data):
'''
Expat character data event handler
'''
if data.strip():
data = data.encode()
if not self.data:
self.data = data
else:
self.data += data
def Parse(self, excerpt):
# Create an Expat parser
parser = xml.parsers.expat.ParserCreate()
# Set the Expat event handlers to our methods
parser.StartElementHandler = self.StartElement
parser.EndElementHandler = self.EndElement
parser.CharacterDataHandler = self.CharacterData
# Parse the XML
parserStatus = parser.Parse(excerpt, 1)
return self.data
class ViewClient:
'''
ViewClient is a I{ViewServer} client.
ViewServer backend
==================
If not already running, the ViewServer is started on the target device or emulator and then the port
mapping is created.
UiAutomator backend
===================
No service is started.
'''
def __init__(self, device, serialno, adb=None, autodump=True, forceviewserveruse=False, localport=VIEW_SERVER_PORT, remoteport=VIEW_SERVER_PORT, startviewserver=True, ignoreuiautomatorkilled=False):
'''
Constructor
@type device: MonkeyDevice
@param device: The device running the C{View server} to which this client will connect
@type serialno: str
@param serialno: the serial number of the device or emulator to connect to
@type adb: str
@param adb: the path of the C{adb} executable or None and C{ViewClient} will try to find it
@type autodump: boolean
@param autodump: whether an automatic dump is performed at the end of this constructor
@type forceviewserveruse: boolean
@param forceviewserveruse: Force the use of C{ViewServer} even if the conditions to use
C{UiAutomator} are satisfied
@type localport: int
@param localport: the local port used in the redirection
@type remoteport: int
@param remoteport: the remote port used to start the C{ViewServer} in the device or
emulator
@type startviewserver: boolean
@param startviewserver: Whether to start the B{global} ViewServer
@type ignoreuiautomatorkilled: boolean
@param ignoreuiautomatorkilled: Ignores received B{Killed} message from C{uiautomator}
'''
if not device:
raise Exception('Device is not connected')
self.device = device
''' The C{MonkeyDevice} device instance '''
if not serialno:
raise ValueError("Serialno cannot be None")
self.serialno = self.__mapSerialNo(serialno)
''' The serial number of the device '''
if DEBUG_DEVICE: print >> sys.stderr, "ViewClient: using device with serialno", self.serialno
if adb:
if not os.access(adb, os.X_OK):
raise Exception('adb="%s" is not executable' % adb)
else:
# Using adbclient we don't need adb executable yet (maybe it's needed if we want to
# start adb if not running)
adb = ViewClient.__obtainAdbPath()
self.adb = adb
''' The adb command '''
self.root = None
''' The root node '''
self.viewsById = {}
''' The map containing all the L{View}s indexed by their L{View.getUniqueId()} '''
self.display = {}
''' The map containing the device's display properties: width, height and density '''
for prop in [ 'width', 'height', 'density' ]:
self.display[prop] = -1
if USE_ADB_CLIENT_TO_GET_BUILD_PROPERTIES:
try:
self.display[prop] = int(device.getProperty('display.' + prop))
except:
if WARNINGS:
warnings.warn("Couldn't determine display %s" % prop)
else:
# these values are usually not defined as properties, so we stick to the -1 set
# before
pass
self.build = {}
''' The map containing the device's build properties: version.sdk, version.release '''
for prop in [VERSION_SDK_PROPERTY, VERSION_RELEASE_PROPERTY]:
self.build[prop] = -1
try:
if USE_ADB_CLIENT_TO_GET_BUILD_PROPERTIES:
self.build[prop] = device.getProperty(prop)
else:
self.build[prop] = device.shell('getprop ro.build.' + prop)[:-2]
except:
if WARNINGS:
warnings.warn("Couldn't determine build %s" % prop)
if prop == VERSION_SDK_PROPERTY:
# we expect it to be an int
self.build[prop] = int(self.build[prop] if self.build[prop] else -1)
self.ro = {}
''' The map containing the device's ro properties: secure, debuggable '''
for prop in ['secure', 'debuggable']:
try:
self.ro[prop] = device.shell('getprop ro.' + prop)[:-2]
except:
if WARNINGS:
warnings.warn("Couldn't determine ro %s" % prop)
self.ro[prop] = 'UNKNOWN'
self.forceViewServerUse = forceviewserveruse
''' Force the use of ViewServer even if the conditions to use UiAutomator are satisfied '''
self.useUiAutomator = (self.build[VERSION_SDK_PROPERTY] >= 16) and not forceviewserveruse # jelly bean 4.1 & 4.2
if DEBUG:
print >> sys.stderr, " ViewClient.__init__: useUiAutomator=", self.useUiAutomator, "sdk=", self.build[VERSION_SDK_PROPERTY], "forceviewserveruse=", forceviewserveruse
''' If UIAutomator is supported by the device it will be used '''
self.ignoreUiAutomatorKilled = ignoreuiautomatorkilled
''' On some devices (i.e. Nexus 7 running 4.2.2) uiautomator is killed just after generating
the dump file. In many cases the file is already complete so we can ask to ignore the 'Killed'
message by setting L{ignoreuiautomatorkilled} to C{True}.
        The change in v2.3.21 that uses C{/dev/tty} instead of a file may have made this variable
        unnecessary; however, it has been kept for backward compatibility.
'''
if self.useUiAutomator:
self.textProperty = TEXT_PROPERTY_UI_AUTOMATOR
else:
if self.build[VERSION_SDK_PROPERTY] <= 10:
self.textProperty = TEXT_PROPERTY_API_10
else:
self.textProperty = TEXT_PROPERTY
if startviewserver:
if not self.serviceResponse(device.shell('service call window 3')):
try:
self.assertServiceResponse(device.shell('service call window 1 i32 %d' %
remoteport))
except:
msg = 'Cannot start View server.\n' \
'This only works on emulator and devices running developer versions.\n' \
'Does hierarchyviewer work on your device?\n' \
'See https://github.com/dtmilano/AndroidViewClient/wiki/Secure-mode\n\n' \
'Device properties:\n' \
' ro.secure=%s\n' \
' ro.debuggable=%s\n' % (self.ro['secure'], self.ro['debuggable'])
raise Exception(msg)
self.localPort = localport
self.remotePort = remoteport
# FIXME: it seems there's no way of obtaining the serialno from the MonkeyDevice
subprocess.check_call([self.adb, '-s', self.serialno, 'forward', 'tcp:%d' % self.localPort,
'tcp:%d' % self.remotePort])
self.windows = None
''' The list of windows as obtained by L{ViewClient.list()} '''
if autodump:
self.dump()
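    # Illustrative constructor usage (a sketch only; assumes 'device' and 'serialno'
    # were obtained beforehand, for instance via ViewClient.connectToDeviceOrExit()):
    #
    #   vc = ViewClient(device, serialno)                           # pick the back-end automatically
    #   vc = ViewClient(device, serialno, forceviewserveruse=True)  # force the ViewServer back-end
    #   vc = ViewClient(device, serialno, autodump=False)           # call vc.dump() later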
def __del__(self):
# should clean up some things
pass
@staticmethod
def __obtainAdbPath():
'''
        Obtains the ADB path by attempting known locations for different OSs
'''
osName = platform.system()
isWindows = False
if osName.startswith('Windows'):
adb = 'adb.exe'
isWindows = True
else:
adb = 'adb'
ANDROID_HOME = os.environ['ANDROID_HOME'] if os.environ.has_key('ANDROID_HOME') else '/opt/android-sdk'
HOME = os.environ['HOME'] if os.environ.has_key('HOME') else ''
possibleChoices = [ os.path.join(ANDROID_HOME, 'platform-tools', adb),
os.path.join(HOME, "android", 'platform-tools', adb),
os.path.join(HOME, "android-sdk", 'platform-tools', adb),
adb,
]
if osName.startswith('Windows'):
possibleChoices.append(os.path.join("""C:\Program Files\Android\android-sdk\platform-tools""", adb))
possibleChoices.append(os.path.join("""C:\Program Files (x86)\Android\android-sdk\platform-tools""", adb))
elif osName.startswith('Linux'):
possibleChoices.append(os.path.join("opt", "android-sdk-linux", 'platform-tools', adb))
possibleChoices.append(os.path.join(HOME, "opt", "android-sdk-linux", 'platform-tools', adb))
possibleChoices.append(os.path.join(HOME, "android-sdk-linux", 'platform-tools', adb))
elif osName.startswith('Mac'):
possibleChoices.append(os.path.join("opt", "android-sdk-mac_x86", 'platform-tools', adb))
possibleChoices.append(os.path.join(HOME, "opt", "android-sdk-mac", 'platform-tools', adb))
possibleChoices.append(os.path.join(HOME, "android-sdk-mac", 'platform-tools', adb))
possibleChoices.append(os.path.join(HOME, "opt", "android-sdk-mac_x86", 'platform-tools', adb))
possibleChoices.append(os.path.join(HOME, "android-sdk-mac_x86", 'platform-tools', adb))
else:
# Unsupported OS
pass
for exeFile in possibleChoices:
if os.access(exeFile, os.X_OK):
return exeFile
for path in os.environ["PATH"].split(os.pathsep):
exeFile = os.path.join(path, adb)
if exeFile != None and os.access(exeFile, os.X_OK if not isWindows else os.F_OK):
return exeFile
raise Exception('adb="%s" is not executable. Did you forget to set ANDROID_HOME in the environment?' % adb)
@staticmethod
def __mapSerialNo(serialno):
serialno = serialno.strip()
#ipRE = re.compile('^\d+\.\d+.\d+.\d+$')
if IP_RE.match(serialno):
if DEBUG_DEVICE: print >>sys.stderr, "ViewClient: adding default port to serialno", serialno, ADB_DEFAULT_PORT
return serialno + ':%d' % ADB_DEFAULT_PORT
        ipPortRE = re.compile('^\d+\.\d+\.\d+\.\d+:\d+$')
if ipPortRE.match(serialno):
# nothing to map
return serialno
if re.search("[.*()+]", serialno):
raise ValueError("Regular expression not supported as serialno in ViewClient")
return serialno
@staticmethod
def __obtainDeviceSerialNumber(device):
if DEBUG_DEVICE: print >>sys.stderr, "ViewClient: obtaining serial number for connected device"
serialno = device.getProperty('ro.serialno')
if not serialno:
serialno = device.shell('getprop ro.serialno')
if serialno:
serialno = serialno[:-2]
if not serialno:
qemu = device.shell('getprop ro.kernel.qemu')
if qemu:
qemu = qemu[:-2]
if qemu and int(qemu) == 1:
# FIXME !!!!!
# this must be calculated from somewhere, though using a fixed serialno for now
warnings.warn("Running on emulator but no serial number was specified then 'emulator-5554' is used")
serialno = 'emulator-5554'
if not serialno:
# If there's only one device connected get its serialno
adb = ViewClient.__obtainAdbPath()
if DEBUG_DEVICE: print >>sys.stderr, " using adb=%s" % adb
s = subprocess.Popen([adb, 'get-serialno'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, env={}).communicate()[0][:-1]
if s != 'unknown':
serialno = s
if DEBUG_DEVICE: print >>sys.stderr, " serialno=%s" % serialno
if not serialno:
warnings.warn("Couldn't obtain the serialno of the connected device")
return serialno
@staticmethod
def setAlarm(timeout):
osName = platform.system()
if osName.startswith('Windows'): # alarm is not implemented in Windows
return
signal.alarm(timeout)
@staticmethod
def connectToDeviceOrExit(timeout=60, verbose=False, ignoresecuredevice=False, ignoreversioncheck=False, serialno=None):
'''
        Connects to a device whose serial number is obtained from the script arguments if available
or using the default regex C{.*}.
If the connection is not successful the script exits.
L{MonkeyRunner.waitForConnection()} returns a L{MonkeyDevice} even if the connection failed.
Then, to detect this situation, C{device.wake()} is attempted and if it fails then it is
assumed the previous connection failed.
@type timeout: int
@param timeout: timeout for the connection
@type verbose: bool
@param verbose: Verbose output
@type ignoresecuredevice: bool
@param ignoresecuredevice: Ignores the check for a secure device
@type ignoreversioncheck: bool
@param ignoreversioncheck: Ignores the check for a supported ADB version
@type serialno: str
@param serialno: The device or emulator serial number
@return: the device and serialno used for the connection
'''
progname = os.path.basename(sys.argv[0])
if serialno is None:
# eat all the extra options the invoking script may have added
args = sys.argv
while len(args) > 1 and args[1][0] == '-':
args.pop(1)
serialno = args[1] if len(args) > 1 else \
os.environ['ANDROID_SERIAL'] if os.environ.has_key('ANDROID_SERIAL') \
else '.*'
if IP_RE.match(serialno):
# If matches an IP address format and port was not specified add the default
serialno += ':%d' % ADB_DEFAULT_PORT
if verbose:
print >> sys.stderr, 'Connecting to a device with serialno=%s with a timeout of %d secs...' % \
(serialno, timeout)
ViewClient.setAlarm(timeout+5)
device = adbclient.AdbClient(serialno, ignoreversioncheck=ignoreversioncheck)
ViewClient.setAlarm(0)
if verbose:
print >> sys.stderr, 'Connected to device with serialno=%s' % serialno
secure = device.getSystemProperty('ro.secure')
debuggable = device.getSystemProperty('ro.debuggable')
versionProperty = device.getProperty(VERSION_SDK_PROPERTY)
if versionProperty:
version = int(versionProperty)
else:
if verbose:
print "Couldn't obtain device SDK version"
version = -1
# we are going to use UiAutomator for versions >= 16 that's why we ignore if the device
# is secure if this is true
if secure == '1' and debuggable == '0' and not ignoresecuredevice and version < 16:
print >> sys.stderr, "%s: ERROR: Device is secure, AndroidViewClient won't work." % progname
if verbose:
print >> sys.stderr, " secure=%s debuggable=%s version=%d ignoresecuredevice=%s" % \
(secure, debuggable, version, ignoresecuredevice)
sys.exit(2)
if re.search("[.*()+]", serialno) and not re.search("(\d{1,3}\.){3}\d{1,3}", serialno):
# if a regex was used we have to determine the serialno used
serialno = ViewClient.__obtainDeviceSerialNumber(device)
if verbose:
print >> sys.stderr, 'Actual device serialno=%s' % serialno
return device, serialno
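    # Illustrative usage (a sketch): the serial number is taken from the script
    # arguments, the ANDROID_SERIAL environment variable or the default regex '.*':
    #
    #   device, serialno = ViewClient.connectToDeviceOrExit(timeout=30, verbose=True)
    #   vc = ViewClient(device, serialno)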
@staticmethod
def traverseShowClassIdAndText(view, extraInfo=None, noextrainfo=None):
'''
Shows the View class, id and text if available.
This function can be used as a transform function to L{ViewClient.traverse()}
@type view: I{View}
@param view: the View
@type extraInfo: method
@param extraInfo: the View method to add extra info
@type noextrainfo: bool
@param noextrainfo: Don't add extra info
@return: the string containing class, id, and text if available
'''
try:
eis = ''
if extraInfo:
eis = extraInfo(view).__str__()
if not eis and noextrainfo:
eis = noextrainfo
if eis:
eis = ' ' + eis
return u'%s %s %s%s' % (view.getClass(), view.getId(), view.getText(), eis)
except Exception, e:
return u'Exception in view=%s: %s' % (view.__smallStr__(), e)
@staticmethod
def traverseShowClassIdTextAndUniqueId(view):
'''
Shows the View class, id, text if available and unique id.
This function can be used as a transform function to L{ViewClient.traverse()}
@type view: I{View}
@param view: the View
@return: the string containing class, id, and text if available and unique Id
'''
return ViewClient.traverseShowClassIdAndText(view, View.getUniqueId)
@staticmethod
def traverseShowClassIdTextAndContentDescription(view):
'''
        Shows the View class, id, text if available and content description.
This function can be used as a transform function to L{ViewClient.traverse()}
@type view: I{View}
@param view: the View
@return: the string containing class, id, and text if available and the content description
'''
return ViewClient.traverseShowClassIdAndText(view, View.getContentDescription, 'NAF')
@staticmethod
def traverseShowClassIdTextAndCenter(view):
'''
        Shows the View class, id, text if available and center.
This function can be used as a transform function to L{ViewClient.traverse()}
@type view: I{View}
@param view: the View
        @return: the string containing class, id, text if available and the center
'''
return ViewClient.traverseShowClassIdAndText(view, View.getCenter)
@staticmethod
def traverseShowClassIdTextPositionAndSize(view):
'''
        Shows the View class, id, text if available, position and size.
This function can be used as a transform function to L{ViewClient.traverse()}
@type view: I{View}
@param view: the View
        @return: the string containing class, id, text if available, and the position and size
'''
return ViewClient.traverseShowClassIdAndText(view, View.getPositionAndSize)
# methods that can be used to transform ViewClient.traverse output
TRAVERSE_CIT = traverseShowClassIdAndText
''' An alias for L{traverseShowClassIdAndText(view)} '''
TRAVERSE_CITUI = traverseShowClassIdTextAndUniqueId
''' An alias for L{traverseShowClassIdTextAndUniqueId(view)} '''
TRAVERSE_CITCD = traverseShowClassIdTextAndContentDescription
''' An alias for L{traverseShowClassIdTextAndContentDescription(view)} '''
TRAVERSE_CITC = traverseShowClassIdTextAndCenter
''' An alias for L{traverseShowClassIdTextAndCenter(view)} '''
TRAVERSE_CITPS = traverseShowClassIdTextPositionAndSize
''' An alias for L{traverseShowClassIdTextPositionAndSize(view)} '''
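    # Illustrative use of the traverse transforms above (a sketch, assuming an
    # already dumped ViewClient instance 'vc'):
    #
    #   vc.traverse(transform=ViewClient.TRAVERSE_CIT)    # class, id and text
    #   vc.traverse(transform=ViewClient.TRAVERSE_CITPS)  # adds position and size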
@staticmethod
def sleep(secs=1.0):
'''
Sleeps for the specified number of seconds.
@type secs: float
@param secs: number of seconds
'''
time.sleep(secs)
def assertServiceResponse(self, response):
'''
        Checks whether the response received from the server is correct or raises an Exception.
@type response: str
@param response: Response received from the server
@raise Exception: If the response received from the server is invalid
'''
if not self.serviceResponse(response):
raise Exception('Invalid response received from service.')
def serviceResponse(self, response):
'''
Checks the response received from the I{ViewServer}.
@return: C{True} if the response received matches L{PARCEL_TRUE}, C{False} otherwise
'''
PARCEL_TRUE = "Result: Parcel(00000000 00000001 '........')\r\n"
''' The TRUE response parcel '''
if DEBUG:
print >>sys.stderr, "serviceResponse: comparing '%s' vs Parcel(%s)" % (response, PARCEL_TRUE)
return response == PARCEL_TRUE
def setViews(self, received):
'''
Sets L{self.views} to the received value splitting it into lines.
@type received: str
@param received: the string received from the I{View Server}
'''
if not received or received == "":
raise ValueError("received is empty")
self.views = []
''' The list of Views represented as C{str} obtained after splitting it into lines after being received from the server. Done by L{self.setViews()}. '''
self.__parseTree(received.split("\n"))
if DEBUG:
print >>sys.stderr, "there are %d views in this dump" % len(self.views)
def setViewsFromUiAutomatorDump(self, received):
'''
Sets L{self.views} to the received value parsing the received XML.
@type received: str
@param received: the string received from the I{UI Automator}
'''
if not received or received == "":
raise ValueError("received is empty")
self.views = []
        ''' The list of L{View}s obtained after parsing the XML dump received from I{UI Automator}. Done by L{self.setViewsFromUiAutomatorDump()}. '''
self.__parseTreeFromUiAutomatorDump(received)
if DEBUG:
print >>sys.stderr, "there are %d views in this dump" % len(self.views)
def __splitAttrs(self, strArgs):
'''
Splits the C{View} attributes in C{strArgs} and optionally adds the view id to the C{viewsById} list.
Unique Ids
==========
        It is very common to find C{View}s having B{NO_ID} as the Id. This makes it very difficult to
        use L{self.findViewById()}. To help in this situation this method assigns B{unique Ids}.
The B{unique Ids} are generated using the pattern C{id/no_id/<number>} with C{<number>} starting
at 1.
@type strArgs: str
@param strArgs: the string containing the raw list of attributes and values
@return: Returns the attributes map.
'''
if self.useUiAutomator:
raise RuntimeError("This method is not compatible with UIAutomator")
# replace the spaces in text:mText to preserve them in later split
# they are translated back after the attribute matches
textRE = re.compile('%s=%s,' % (self.textProperty, _nd('len')))
m = textRE.search(strArgs)
if m:
__textStart = m.end()
__textLen = int(m.group('len'))
__textEnd = m.end() + __textLen
s1 = strArgs[__textStart:__textEnd]
s2 = s1.replace(' ', WS)
strArgs = strArgs.replace(s1, s2, 1)
idRE = re.compile("(?P<viewId>id/\S+)")
attrRE = re.compile('%s(?P<parens>\(\))?=%s,(?P<val>[^ ]*)' % (_ns('attr'), _nd('len')), flags=re.DOTALL)
hashRE = re.compile('%s@%s' % (_ns('class'), _nh('oid')))
attrs = {}
viewId = None
m = idRE.search(strArgs)
if m:
viewId = m.group('viewId')
if DEBUG:
print >>sys.stderr, "found view with id=%s" % viewId
for attr in strArgs.split():
m = attrRE.match(attr)
if m:
__attr = m.group('attr')
__parens = '()' if m.group('parens') else ''
__len = int(m.group('len'))
__val = m.group('val')
if WARNINGS and __len != len(__val):
warnings.warn("Invalid len: expected: %d found: %d s=%s e=%s" % (__len, len(__val), __val[:50], __val[-50:]))
if __attr == self.textProperty:
# restore spaces that have been replaced
__val = __val.replace(WS, ' ')
attrs[__attr + __parens] = __val
else:
m = hashRE.match(attr)
if m:
attrs['class'] = m.group('class')
attrs['oid'] = m.group('oid')
else:
if DEBUG:
print >>sys.stderr, attr, "doesn't match"
if True: # was assignViewById
if not viewId:
# If the view has NO_ID we are assigning a default id here (id/no_id) which is
# immediately incremented if another view with no id was found before to generate
# a unique id
viewId = "id/no_id/1"
if viewId in self.viewsById:
# sometimes the view ids are not unique, so let's generate a unique id here
i = 1
while True:
newId = re.sub('/\d+$', '', viewId) + '/%d' % i
if not newId in self.viewsById:
break
i += 1
viewId = newId
if DEBUG:
print >>sys.stderr, "adding viewById %s" % viewId
# We are assigning a new attribute to keep the original id preserved, which could have
# been NO_ID repeated multiple times
attrs['uniqueId'] = viewId
return attrs
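    # Illustrative (hypothetical) fragment of a ViewServer dump line handled by
    # __splitAttrs(), shown only to clarify the attr=len,value parsing:
    #
    #   android.widget.TextView@41d0b8a8 mID=8,id/title getVisibility()=7,VISIBLE
    #
    # would populate attrs with 'class', 'oid', 'mID' and 'getVisibility()' entries
    # and assign 'uniqueId' from the id (or 'id/no_id/<n>' when the view has NO_ID).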
def __parseTree(self, receivedLines):
'''
Parses the View tree contained in L{receivedLines}. The tree is created and the root node assigned to L{self.root}.
This method also assigns L{self.viewsById} values using L{View.getUniqueId} as the key.
@type receivedLines: str
@param receivedLines: the string received from B{View Server}
'''
self.root = None
self.viewsById = {}
self.views = []
parent = None
parents = []
treeLevel = -1
newLevel = -1
lastView = None
for v in receivedLines:
if v == '' or v == 'DONE' or v == 'DONE.':
break
attrs = self.__splitAttrs(v)
if not self.root:
if v[0] == ' ':
raise Exception("Unexpected root element starting with ' '.")
self.root = View.factory(attrs, self.device, self.build[VERSION_SDK_PROPERTY], self.forceViewServerUse)
if DEBUG: self.root.raw = v
treeLevel = 0
newLevel = 0
lastView = self.root
parent = self.root
parents.append(parent)
else:
newLevel = (len(v) - len(v.lstrip()))
if newLevel == 0:
raise Exception("newLevel==0 treeLevel=%d but tree can have only one root, v=%s" % (treeLevel, v))
child = View.factory(attrs, self.device, self.build[VERSION_SDK_PROPERTY], self.forceViewServerUse)
if DEBUG: child.raw = v
if newLevel == treeLevel:
parent.add(child)
lastView = child
elif newLevel > treeLevel:
if (newLevel - treeLevel) != 1:
raise Exception("newLevel jumps %d levels, v=%s" % ((newLevel-treeLevel), v))
parent = lastView
parents.append(parent)
parent.add(child)
lastView = child
treeLevel = newLevel
else: # newLevel < treeLevel
for i in range(treeLevel - newLevel):
parents.pop()
parent = parents.pop()
parents.append(parent)
parent.add(child)
treeLevel = newLevel
lastView = child
self.views.append(lastView)
self.viewsById[lastView.getUniqueId()] = lastView
def __parseTreeFromUiAutomatorDump(self, receivedXml):
parser = UiAutomator2AndroidViewClient(self.device, self.build[VERSION_SDK_PROPERTY])
start_xml_index = receivedXml.index("<")
self.root = parser.Parse(receivedXml[start_xml_index:])
self.views = parser.views
self.viewsById = {}
for v in self.views:
self.viewsById[v.getUniqueId()] = v
def getRoot(self):
'''
Gets the root node of the C{View} tree
@return: the root node of the C{View} tree
'''
return self.root
def traverse(self, root="ROOT", indent="", transform=View.__str__, stream=sys.stdout):
'''
Traverses the C{View} tree and prints its nodes.
The nodes are printed converting them to string but other transformations can be specified
by providing a method name as the C{transform} parameter.
@type root: L{View}
@param root: the root node from where the traverse starts
@type indent: str
@param indent: the indentation string to use to print the nodes
@type transform: method
@param transform: a method to use to transform the node before is printed
'''
if type(root) == types.StringType and root == "ROOT":
root = self.root
return ViewClient.__traverse(root, indent, transform, stream)
# if not root:
# return
#
# s = transform(root)
# if s:
# print >>stream, "%s%s" % (indent, s)
#
# for ch in root.children:
# self.traverse(ch, indent=indent+" ", transform=transform, stream=stream)
@staticmethod
def __traverse(root, indent="", transform=View.__str__, stream=sys.stdout):
if not root:
return
s = transform(root)
if s:
ius = "%s%s" % (indent, s if isinstance(s, unicode) else unicode(s, 'utf-8', 'replace'))
print >>stream, ius.encode('utf-8', 'replace')
for ch in root.children:
ViewClient.__traverse(ch, indent=indent+" ", transform=transform, stream=stream)
def dump(self, window=-1, sleep=1):
'''
Dumps the window content.
Sleep is useful to wait some time before obtaining the new content when something in the
window has changed.
@type window: int or str
@param window: the window id or name of the window to dump.
The B{name} is the package name or the window name (i.e. StatusBar) for
system windows.
The window id can be provided as C{int} or C{str}. The C{str} should represent
and C{int} in either base 10 or 16.
Use -1 to dump all windows.
This parameter only is used when the backend is B{ViewServer} and it's
ignored for B{UiAutomator}.
@type sleep: int
@param sleep: sleep in seconds before proceeding to dump the content
@return: the list of Views as C{str} received from the server after being split into lines
'''
if sleep > 0:
time.sleep(sleep)
if self.useUiAutomator:
# NOTICE:
# Using /dev/tty this works even on devices with no sdcard
received = unicode(self.device.shell('uiautomator dump /dev/tty >/dev/null'), encoding='utf-8', errors='replace')
if not received:
raise RuntimeError('ERROR: Empty UiAutomator dump was received')
if DEBUG:
self.received = received
if DEBUG_RECEIVED:
print >>sys.stderr, "received %d chars" % len(received)
print >>sys.stderr
print >>sys.stderr, repr(received)
print >>sys.stderr
onlyKilledRE = re.compile('[\n\S]*Killed[\n\r\S]*', re.MULTILINE)
if onlyKilledRE.search(received):
MONKEY = 'com.android.commands.monkey'
extraInfo = ''
if self.device.shell('ps | grep "%s"' % MONKEY):
extraInfo = "\nIt is know that '%s' conflicts with 'uiautomator'. Please kill it and try again." % MONKEY
raise RuntimeError('''ERROR: UiAutomator output contains no valid information. UiAutomator was killed, no reason given.''' + extraInfo)
if self.ignoreUiAutomatorKilled:
if DEBUG_RECEIVED:
print >>sys.stderr, "ignoring UiAutomator Killed"
killedRE = re.compile('</hierarchy>[\n\S]*Killed', re.MULTILINE)
if killedRE.search(received):
received = re.sub(killedRE, '</hierarchy>', received)
elif DEBUG_RECEIVED:
print "UiAutomator Killed: NOT FOUND!"
# It seems that API18 uiautomator spits this message to stdout
dumpedToDevTtyRE = re.compile('</hierarchy>[\n\S]*UI hierchary dumped to: /dev/tty.*', re.MULTILINE)
if dumpedToDevTtyRE.search(received):
received = re.sub(dumpedToDevTtyRE, '</hierarchy>', received)
if DEBUG_RECEIVED:
print >>sys.stderr, "received=", received
# API19 seems to send this warning as part of the XML.
# Let's remove it if present
received = received.replace('WARNING: linker: libdvm.so has text relocations. This is wasting memory and is a security risk. Please fix.\r\n', '')
if re.search('\[: not found', received):
                raise RuntimeError('''ERROR: Some emulator images (i.e. android 4.1.2 API 16 generic_x86) do not include the '[' command.
While the UiAutomator back-end might be supported, the 'uiautomator' command fails.
You should force the ViewServer back-end.''')
if re.search('could not get idle state', received):
raise RuntimeError('''The view is being refreshed too frequently to dump''')
self.setViewsFromUiAutomatorDump(received)
else:
if isinstance(window, str):
if window != '-1':
self.list(sleep=0)
found = False
for wId in self.windows:
try:
if window == self.windows[wId]:
window = wId
found = True
break
except:
pass
try:
if int(window) == wId:
window = wId
found = True
break
except:
pass
try:
if int(window, 16) == wId:
window = wId
found = True
break
except:
pass
if not found:
raise RuntimeError("ERROR: Cannot find window '%s' in %s" % (window, self.windows))
else:
window = -1
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((VIEW_SERVER_HOST, self.localPort))
except socket.error, ex:
raise RuntimeError("ERROR: Connecting to %s:%d: %s" % (VIEW_SERVER_HOST, self.localPort, ex))
cmd = 'dump %x\r\n' % window
if DEBUG:
print >>sys.stderr, "executing: '%s'" % cmd
s.send(cmd)
received = ""
doneRE = re.compile("DONE")
ViewClient.setAlarm(120)
while True:
if DEBUG_RECEIVED:
print >>sys.stderr, " reading from socket..."
received += s.recv(1024)
if doneRE.search(received[-7:]):
break
s.close()
ViewClient.setAlarm(0)
if DEBUG:
self.received = received
if DEBUG_RECEIVED:
print >>sys.stderr, "received %d chars" % len(received)
print >>sys.stderr
print >>sys.stderr, received
print >>sys.stderr
if received:
for c in received:
if ord(c) > 127:
received = unicode(received, encoding='utf-8', errors='replace')
break
self.setViews(received)
if DEBUG_TREE:
self.traverse(self.root)
return self.views
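    # Illustrative dump() calls (a sketch, assuming a ViewClient instance 'vc'):
    #
    #   views = vc.dump()                    # all windows (-1), default 1 second sleep
    #   views = vc.dump(window='StatusBar')  # named window, ViewServer back-end only
    #   views = vc.dump(window=-1, sleep=0)  # no sleep before dumping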
def list(self, sleep=1):
'''
List the windows.
Sleep is useful to wait some time before obtaining the new content when something in the
window has changed.
This also sets L{self.windows} as the list of windows.
@type sleep: int
@param sleep: sleep in seconds before proceeding to dump the content
@return: the list of windows
'''
if sleep > 0:
time.sleep(sleep)
if self.useUiAutomator:
raise Exception("Not implemented yet: listing windows with UiAutomator")
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((VIEW_SERVER_HOST, self.localPort))
except socket.error, ex:
raise RuntimeError("ERROR: Connecting to %s:%d: %s" % (VIEW_SERVER_HOST, self.localPort, ex))
s.send('list\r\n')
received = ""
doneRE = re.compile("DONE")
while True:
received += s.recv(1024)
if doneRE.search(received[-7:]):
break
s.close()
if DEBUG:
self.received = received
if DEBUG_RECEIVED:
print >>sys.stderr, "received %d chars" % len(received)
print >>sys.stderr
print >>sys.stderr, received
print >>sys.stderr
self.windows = {}
for line in received.split('\n'):
if not line:
break
if doneRE.search(line):
break
values = line.split()
if len(values) > 1:
package = values[1]
else:
package = "UNKNOWN"
if len(values) > 0:
wid = values[0]
else:
wid = '00000000'
self.windows[int('0x' + wid, 16)] = package
return self.windows
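    # Illustrative list() usage (a sketch; ViewServer back-end only):
    #
    #   windows = vc.list()
    #   for wId in windows:
    #       print '0x%08x %s' % (wId, windows[wId])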
def findViewById(self, viewId, root="ROOT", viewFilter=None):
'''
Finds the View with the specified viewId.
@type viewId: str
@param viewId: the ID of the view to find
@type root: str
@type root: View
@param root: the root node of the tree where the View will be searched
        @type viewFilter: function
@param viewFilter: a function that will be invoked providing the candidate View as a parameter
and depending on the return value (C{True} or C{False}) the View will be
selected and returned as the result of C{findViewById()} or ignored.
This can be C{None} and no extra filtering is applied.
@return: the C{View} found or C{None}
'''
if not root:
return None
if type(root) == types.StringType and root == "ROOT":
return self.findViewById(viewId, self.root, viewFilter)
if root.getId() == viewId:
if viewFilter:
if viewFilter(root):
return root
else:
return root
if re.match('^id/no_id', viewId) or re.match('^id/.+/.+', viewId):
if root.getUniqueId() == viewId:
if viewFilter:
if viewFilter(root):
                        return root
else:
return root
for ch in root.children:
foundView = self.findViewById(viewId, ch, viewFilter)
if foundView:
if viewFilter:
if viewFilter(foundView):
return foundView
else:
return foundView
def findViewByIdOrRaise(self, viewId, root="ROOT", viewFilter=None):
'''
Finds the View or raise a ViewNotFoundException.
@type viewId: str
@param viewId: the ID of the view to find
@type root: str
@type root: View
@param root: the root node of the tree where the View will be searched
        @type viewFilter: function
@param viewFilter: a function that will be invoked providing the candidate View as a parameter
and depending on the return value (C{True} or C{False}) the View will be
selected and returned as the result of C{findViewById()} or ignored.
This can be C{None} and no extra filtering is applied.
@return: the View found
@raise ViewNotFoundException: raise the exception if View not found
'''
view = self.findViewById(viewId, root, viewFilter)
if view:
return view
else:
raise ViewNotFoundException("ID", viewId, root)
def findViewByTag(self, tag, root="ROOT"):
'''
Finds the View with the specified tag
'''
return self.findViewWithAttribute('getTag()', tag, root)
def findViewByTagOrRaise(self, tag, root="ROOT"):
'''
Finds the View with the specified tag or raise a ViewNotFoundException
'''
view = self.findViewWithAttribute('getTag()', tag, root)
if view:
return view
else:
raise ViewNotFoundException("tag", tag, root)
def __findViewsWithAttributeInTree(self, attr, val, root):
matchingViews = []
if not self.root:
print >>sys.stderr, "ERROR: no root, did you forget to call dump()?"
            return matchingViews
if type(root) == types.StringType and root == "ROOT":
root = self.root
if DEBUG: print >>sys.stderr, "__findViewWithAttributeInTree: type val=", type(val)
if DEBUG: print >>sys.stderr, "__findViewWithAttributeInTree: checking if root=%s has attr=%s == %s" % (root.__smallStr__(), attr, val)
if root and attr in root.map and root.map[attr] == val:
if DEBUG: print >>sys.stderr, "__findViewWithAttributeInTree: FOUND: %s" % root.__smallStr__()
matchingViews.append(root)
else:
for ch in root.children:
matchingViews += self.__findViewsWithAttributeInTree(attr, val, ch)
return matchingViews
def __findViewWithAttributeInTree(self, attr, val, root):
if not self.root:
print >>sys.stderr, "ERROR: no root, did you forget to call dump()?"
return None
if type(root) == types.StringType and root == "ROOT":
root = self.root
if DEBUG: print >>sys.stderr, "__findViewWithAttributeInTree: type val=", type(val)
if DEBUG: print >>sys.stderr, "__findViewWithAttributeInTree: checking if root=%s has attr=%s == %s" % (root.__smallStr__(), attr, val)
if isinstance(val, RegexType):
return self.__findViewWithAttributeInTreeThatMatches(attr, val, root)
else:
if root and attr in root.map and root.map[attr] == val:
if DEBUG: print >>sys.stderr, "__findViewWithAttributeInTree: FOUND: %s" % root.__smallStr__()
return root
else:
for ch in root.children:
v = self.__findViewWithAttributeInTree(attr, val, ch)
if v:
return v
return None
def __findViewWithAttributeInTreeOrRaise(self, attr, val, root):
view = self.__findViewWithAttributeInTree(attr, val, root)
if view:
return view
else:
raise ViewNotFoundException(attr, val, root)
def __findViewWithAttributeInTreeThatMatches(self, attr, regex, root, rlist=[]):
if not self.root:
print >>sys.stderr, "ERROR: no root, did you forget to call dump()?"
return None
if type(root) == types.StringType and root == "ROOT":
root = self.root
if DEBUG: print >>sys.stderr, "__findViewWithAttributeInTreeThatMatches: checking if root=%s attr=%s matches %s" % (root.__smallStr__(), attr, regex)
if root and attr in root.map and regex.match(root.map[attr]):
if DEBUG: print >>sys.stderr, "__findViewWithAttributeInTreeThatMatches: FOUND: %s" % root.__smallStr__()
return root
#print >>sys.stderr, "appending root=%s to rlist=%s" % (root.__smallStr__(), rlist)
#return rlist.append(root)
else:
for ch in root.children:
v = self.__findViewWithAttributeInTreeThatMatches(attr, regex, ch, rlist)
if v:
return v
#print >>sys.stderr, "appending v=%s to rlist=%s" % (v.__smallStr__(), rlist)
#return rlist.append(v)
return None
#return rlist
def findViewWithAttribute(self, attr, val, root="ROOT"):
'''
Finds the View with the specified attribute and value
'''
return self.__findViewWithAttributeInTree(attr, val, root)
def findViewsWithAttribute(self, attr, val, root="ROOT"):
'''
Finds the Views with the specified attribute and value.
This allows you to see all items that match your criteria in the view hierarchy
Usage:
buttons = v.findViewsWithAttribute("class", "android.widget.Button")
'''
return self.__findViewsWithAttributeInTree(attr, val, root)
def findViewWithAttributeOrRaise(self, attr, val, root="ROOT"):
'''
Finds the View or raise a ViewNotFoundException.
@return: the View found
@raise ViewNotFoundException: raise the exception if View not found
'''
view = self.findViewWithAttribute(attr, val, root)
if view:
return view
else:
raise ViewNotFoundException(attr, val, root)
def findViewWithAttributeThatMatches(self, attr, regex, root="ROOT"):
'''
Finds the list of Views with the specified attribute matching
regex
'''
return self.__findViewWithAttributeInTreeThatMatches(attr, regex, root)
def findViewWithText(self, text, root="ROOT"):
if DEBUG:
print >>sys.stderr, "findViewWithText(%s, %s)" % (text, root)
if isinstance(text, RegexType):
return self.findViewWithAttributeThatMatches(self.textProperty, text, root)
#l = self.findViewWithAttributeThatMatches(TEXT_PROPERTY, text)
#ll = len(l)
#if ll == 0:
# return None
#elif ll == 1:
# return l[0]
#else:
# print >>sys.stderr, "WARNING: findViewWithAttributeThatMatches invoked by findViewWithText returns %d items." % ll
# return l
else:
return self.findViewWithAttribute(self.textProperty, text, root)
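    # Illustrative find* usage (a sketch, assuming the tree was already dumped;
    # 'id/button1' is a hypothetical view id used only for the example):
    #
    #   ok = vc.findViewWithText('OK')
    #   ok = vc.findViewWithText(re.compile('O[Kk]'))   # regexes are also accepted
    #   button = vc.findViewByIdOrRaise('id/button1')   # raises ViewNotFoundException if missing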
def findViewWithTextOrRaise(self, text, root="ROOT"):
'''
Finds the View or raise a ViewNotFoundException.
@return: the View found
@raise ViewNotFoundException: raise the exception if View not found
'''
if DEBUG:
print >>sys.stderr, "findViewWithTextOrRaise(%s, %s)" % (text, root)
view = self.findViewWithText(text, root)
if view:
return view
else:
raise ViewNotFoundException("text", text, root)
def findViewWithContentDescription(self, contentdescription, root="ROOT"):
'''
Finds the View with the specified content description
'''
return self.__findViewWithAttributeInTree('content-desc', contentdescription, root)
def findViewWithContentDescriptionOrRaise(self, contentdescription, root="ROOT"):
'''
Finds the View with the specified content description
'''
return self.__findViewWithAttributeInTreeOrRaise('content-desc', contentdescription, root)
def findViewsContainingPoint(self, (x, y), _filter=None):
'''
Finds the list of Views that contain the point (x, y).
'''
if not _filter:
_filter = lambda v: True
return [v for v in self.views if (v.containsPoint((x,y)) and _filter(v))]
def getViewIds(self):
'''
@deprecated: Use L{getViewsById} instead.
Returns the Views map.
'''
return self.viewsById
def getViewsById(self):
'''
Returns the Views map. The keys are C{uniqueIds} and the values are C{View}s.
'''
return self.viewsById
def __getFocusedWindowPosition(self):
return self.__getFocusedWindowId()
def getSdkVersion(self):
'''
Gets the SDK version.
'''
return self.build[VERSION_SDK_PROPERTY]
def isKeyboardShown(self):
'''
Whether the keyboard is displayed.
'''
return self.device.isKeyboardShown()
def writeImageToFile(self, filename, _format="PNG"):
'''
Write the View image to the specified filename in the specified _format.
@type filename: str
@param filename: Absolute path and optional filename receiving the image. If this points to
a directory, then the filename is determined by the serialno of the device and
_format extension.
@type _format: str
@param _format: Image _format (default _format is PNG)
'''
if not os.path.isabs(filename):
raise ValueError("writeImageToFile expects an absolute path")
if os.path.isdir(filename):
filename = os.path.join(filename, self.serialno + '.' + _format.lower())
if DEBUG:
print >> sys.stderr, "writeImageToFile: saving image to '%s' in %s _format" % (filename, _format)
self.device.takeSnapshot().save(filename, _format)
@staticmethod
def __pickleable(tree):
'''
Makes the tree pickleable.
'''
def removeDeviceReference(view):
'''
Removes the reference to a L{MonkeyDevice}.
'''
view.device = None
###########################################################################################
        # FIXME: Unfortunately deepcopy does not work with MonkeyDevice objects, which is
        # sadly the reason why we cannot pickle the tree and we need to remove the MonkeyDevice
        # references.
        # We wanted to copy the tree to preserve the original and make the copy pickleable.
#treeCopy = copy.deepcopy(tree)
treeCopy = tree
# IMPORTANT:
# This assumes that the first element in the list is the tree root
ViewClient.__traverse(treeCopy[0], transform=removeDeviceReference)
###########################################################################################
return treeCopy
def distance(self, tree):
'''
Calculates the distance between this tree and the tree passed as argument.
@type tree: list of Views
@param tree: Tree of Views
@return: the distance
'''
################################################################
#FIXME: this should copy the entire tree and then transform it #
################################################################
pickleableViews = ViewClient.__pickleable(self.views)
pickleableTree = ViewClient.__pickleable(tree)
s1 = pickle.dumps(pickleableViews)
s2 = pickle.dumps(pickleableTree)
if DEBUG_DISTANCE:
print >>sys.stderr, "distance: calculating distance between", s1[:20], "and", s2[:20]
l1 = len(s1)
l2 = len(s2)
t = float(max(l1, l2))
if l1 == l2:
if DEBUG_DISTANCE:
print >>sys.stderr, "distance: trees have same length, using Hamming distance"
return ViewClient.__hammingDistance(s1, s2)/t
else:
if DEBUG_DISTANCE:
print >>sys.stderr, "distance: trees have different length, using Levenshtein distance"
return ViewClient.__levenshteinDistance(s1, s2)/t
@staticmethod
def __hammingDistance(s1, s2):
'''
Finds the Hamming distance between two strings.
@param s1: string
@param s2: string
@return: the distance
        @raise ValueError: if the lengths of the strings differ
'''
l1 = len(s1)
l2 = len(s2)
if l1 != l2:
raise ValueError("Hamming distance requires strings of same size.")
return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))
def hammingDistance(self, tree):
'''
Finds the Hamming distance between this tree and the one passed as argument.
'''
s1 = ' '.join(map(View.__str__, self.views))
s2 = ' '.join(map(View.__str__, tree))
return ViewClient.__hammingDistance(s1, s2)
@staticmethod
def __levenshteinDistance(s, t):
'''
Find the Levenshtein distance between two Strings.
Python version of Levenshtein distance method implemented in Java at
U{http://www.java2s.com/Code/Java/Data-Type/FindtheLevenshteindistancebetweentwoStrings.htm}.
This is the number of changes needed to change one String into
another, where each change is a single character modification (deletion,
insertion or substitution).
The previous implementation of the Levenshtein distance algorithm
was from U{http://www.merriampark.com/ld.htm}
Chas Emerick has written an implementation in Java, which avoids an OutOfMemoryError
which can occur when my Java implementation is used with very large strings.
This implementation of the Levenshtein distance algorithm
is from U{http://www.merriampark.com/ldjava.htm}::
StringUtils.getLevenshteinDistance(null, *) = IllegalArgumentException
StringUtils.getLevenshteinDistance(*, null) = IllegalArgumentException
StringUtils.getLevenshteinDistance("","") = 0
StringUtils.getLevenshteinDistance("","a") = 1
StringUtils.getLevenshteinDistance("aaapppp", "") = 7
StringUtils.getLevenshteinDistance("frog", "fog") = 1
StringUtils.getLevenshteinDistance("fly", "ant") = 3
StringUtils.getLevenshteinDistance("elephant", "hippo") = 7
StringUtils.getLevenshteinDistance("hippo", "elephant") = 7
StringUtils.getLevenshteinDistance("hippo", "zzzzzzzz") = 8
StringUtils.getLevenshteinDistance("hello", "hallo") = 1
@param s: the first String, must not be null
@param t: the second String, must not be null
@return: result distance
@raise ValueError: if either String input C{null}
'''
if s is None or t is None:
raise ValueError("Strings must not be null")
n = len(s)
m = len(t)
if n == 0:
return m
elif m == 0:
return n
if n > m:
tmp = s
s = t
t = tmp
            n = m
m = len(t)
p = [None]*(n+1)
d = [None]*(n+1)
for i in range(0, n+1):
p[i] = i
for j in range(1, m+1):
if DEBUG_DISTANCE:
if j % 100 == 0:
print >>sys.stderr, "DEBUG:", int(j/(m+1.0)*100),"%\r",
t_j = t[j-1]
d[0] = j
for i in range(1, n+1):
cost = 0 if s[i-1] == t_j else 1
# minimum of cell to the left+1, to the top+1, diagonally left and up +cost
d[i] = min(min(d[i-1]+1, p[i]+1), p[i-1]+cost)
_d = p
p = d
d = _d
if DEBUG_DISTANCE:
print >> sys.stderr, "\n"
return p[n]
def levenshteinDistance(self, tree):
'''
Finds the Levenshtein distance between this tree and the one passed as argument.
'''
s1 = ' '.join(map(View.__microStr__, self.views))
s2 = ' '.join(map(View.__microStr__, tree))
return ViewClient.__levenshteinDistance(s1, s2)
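    # Illustrative distance usage (a sketch): comparing the current dump against a
    # previously saved tree to quantify how much the UI changed:
    #
    #   tree0 = vc.dump()
    #   # ... interact with the device, then dump again ...
    #   vc.dump()
    #   d = vc.distance(tree0)   # 0.0 means the serialized trees are identical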
@staticmethod
def excerpt(str, execute=False):
code = Excerpt2Code().Parse(str)
if execute:
exec code
else:
return code
class CulebraOptions:
'''
Culebra options helper class
'''
HELP = 'help'
VERBOSE = 'verbose'
VERSION = 'version'
IGNORE_SECURE_DEVICE = 'ignore-secure-device'
IGNORE_VERSION_CHECK = 'ignore-version-check'
FORCE_VIEW_SERVER_USE = 'force-view-server-use'
DO_NOT_START_VIEW_SERVER = 'do-not-start-view-server'
DO_NOT_IGNORE_UIAUTOMATOR_KILLED = 'do-not-ignore-uiautomator-killed'
FIND_VIEWS_BY_ID = 'find-views-by-id'
FIND_VIEWS_WITH_TEXT = 'find-views-with-text'
FIND_VIEWS_WITH_CONTENT_DESCRIPTION = 'find-views-with-content-description'
USE_REGEXPS = 'use-regexps'
VERBOSE_COMMENTS = 'verbose-comments'
UNIT_TEST_CLASS = 'unit-test-class'
UNIT_TEST_METHOD = 'unit-test-method'
USE_JAR = 'use-jar'
USE_DICTIONARY = 'use-dictionary'
DICTIONARY_KEYS_FROM = 'dictionary-keys-from'
AUTO_REGEXPS = 'auto-regexps'
START_ACTIVITY = 'start-activity'
OUTPUT = 'output'
INTERACTIVE = 'interactive'
WINDOW = 'window'
APPEND_TO_SYS_PATH = 'append-to-sys-path'
SAVE_SCREENSHOT = 'save-screenshot'
SAVE_VIEW_SCREENSHOTS = 'save-view-screenshots'
GUI = 'gui'
SCALE = 'scale'
DO_NOT_VERIFY_INITIAL_SCREEN_DUMP = 'do-not-verify-initial-screen-dump'
SHORT_OPTS = 'HVvIEFSkw:i:t:d:rCUM:j:D:K:R:a:o:Aps:W:GuP:'
LONG_OPTS = [HELP, VERBOSE, VERSION, IGNORE_SECURE_DEVICE, IGNORE_VERSION_CHECK, FORCE_VIEW_SERVER_USE,
DO_NOT_START_VIEW_SERVER,
DO_NOT_IGNORE_UIAUTOMATOR_KILLED,
WINDOW + '=',
FIND_VIEWS_BY_ID + '=', FIND_VIEWS_WITH_TEXT + '=', FIND_VIEWS_WITH_CONTENT_DESCRIPTION + '=',
USE_REGEXPS, VERBOSE_COMMENTS, UNIT_TEST_CLASS, UNIT_TEST_METHOD + '=',
USE_JAR + '=', USE_DICTIONARY + '=', DICTIONARY_KEYS_FROM + '=', AUTO_REGEXPS + '=',
START_ACTIVITY + '=',
OUTPUT + '=', INTERACTIVE, APPEND_TO_SYS_PATH,
SAVE_SCREENSHOT + '=', SAVE_VIEW_SCREENSHOTS + '=',
GUI,
DO_NOT_VERIFY_INITIAL_SCREEN_DUMP,
SCALE + '=',
]
LONG_OPTS_ARG = {WINDOW: 'WINDOW',
FIND_VIEWS_BY_ID: 'BOOL', FIND_VIEWS_WITH_TEXT: 'BOOL', FIND_VIEWS_WITH_CONTENT_DESCRIPTION: 'BOOL',
USE_JAR: 'BOOL', USE_DICTIONARY: 'BOOL', DICTIONARY_KEYS_FROM: 'VALUE', AUTO_REGEXPS: 'LIST',
START_ACTIVITY: 'COMPONENT',
OUTPUT: 'FILENAME',
SAVE_SCREENSHOT: 'FILENAME', SAVE_VIEW_SCREENSHOTS: 'DIR',
UNIT_TEST_METHOD: 'NAME',
SCALE: 'FLOAT'}
OPTS_HELP = {
'H': 'prints this help',
'V': 'verbose comments',
'k': 'don\'t ignore UiAutomator killed',
'w': 'use WINDOW content (default: -1, all windows)',
'i': 'whether to use findViewById() in script',
't': 'whether to use findViewWithText() in script',
'd': 'whether to use findViewWithContentDescription',
'r': 'use regexps in matches',
'U': 'generates unit test class and script',
'M': 'generates unit test method. Can be used with or without -U',
'j': 'use jar and appropriate shebang to run script (deprecated)',
'D': 'use a dictionary to store the Views found',
'K': 'dictionary keys from: id, text, content-description',
'R': 'auto regexps (i.e. clock), implies -r. help list options',
'a': 'starts Activity before dump',
'o': 'output filename',
'A': 'interactive',
'p': 'append environment variables values to sys.path',
's': 'save screenshot to file',
'W': 'save View screenshots to files in directory',
'E': 'ignores ADB version check',
'G': 'presents the GUI (EXPERIMENTAL)',
'P': 'scale percentage (i.e. 0.5)',
'u': 'do not verify initial screen dump state',
}
class CulebraTestCase(unittest.TestCase):
kwargs1 = None
kwargs2 = None
serialno = None
options = {}
@classmethod
def setUpClass(cls):
cls.kwargs1 = {'ignoreversioncheck': False, 'verbose': False, 'ignoresecuredevice': False}
cls.kwargs2 = {'startviewserver': True, 'forceviewserveruse': False, 'autodump': False, 'ignoreuiautomatorkilled': True}
def setUp(self):
self.device, self.serialno = ViewClient.connectToDeviceOrExit(serialno=self.serialno, **self.kwargs1)
if self.options[CulebraOptions.START_ACTIVITY]:
self.device.startActivity(component=self.options[CulebraOptions.START_ACTIVITY])
self.vc = ViewClient(self.device, self.serialno, **self.kwargs2)
def tearDown(self):
pass
@staticmethod
def main():
# if you want to specify tests classes and methods in the command line you will be forced
# to include -s or --serialno and the serial number of the device (could be a regexp)
# as ViewClient would have no way of determine what it is
ser = ['-s', '--serialno']
old = '%(failfast)'
new = ' %s s The serial number to connect to\n%s' % (', '.join(ser), old)
unittest.TestProgram.USAGE = unittest.TestProgram.USAGE.replace(old, new)
if len(sys.argv) >= 2 and sys.argv[1] in ser:
sys.argv.pop(1)
CulebraTestCase.serialno = sys.argv.pop(1)
unittest.main()
if __name__ == "__main__":
try:
vc = ViewClient(None)
except:
print "%s: Don't expect this to do anything" % __file__
| 38.325624
| 202
| 0.562371
|
389185dbd3902154e239648b04013c0b63f6333d
| 11,003
|
py
|
Python
|
tests/optimize/test_lie_algebra.py
|
jsmz97/pennylane
|
de7b7c0b452c8d59867d11f84b9c332a36e08ab1
|
[
"Apache-2.0"
] | 3
|
2022-01-19T13:41:52.000Z
|
2022-01-22T03:38:58.000Z
|
tests/optimize/test_lie_algebra.py
|
jsmz97/pennylane
|
de7b7c0b452c8d59867d11f84b9c332a36e08ab1
|
[
"Apache-2.0"
] | 1
|
2021-05-18T07:38:21.000Z
|
2021-05-18T07:38:21.000Z
|
tests/optimize/test_lie_algebra.py
|
jsmz97/pennylane
|
de7b7c0b452c8d59867d11f84b9c332a36e08ab1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the ``LieAlgebraOptimizer``.
"""
import pytest
from scipy.sparse.linalg import expm
import numpy as np
import pennylane as qml
from pennylane.optimize import LieAlgebraOptimizer
def circuit_1():
"""Simple circuit."""
qml.Hadamard(wires=[0])
qml.Hadamard(wires=[1])
def circuit_2():
"""Simply parameterized circuit."""
qml.RX(0.1, wires=[0])
qml.RY(0.5, wires=[1])
qml.CNOT(wires=[0, 1])
qml.RY(0.6, wires=[0])
def circuit_3():
"""Three-qubit circuit."""
qml.RY(0.5, wires=[0])
qml.RY(0.6, wires=[1])
qml.RY(0.7, wires=[2])
qml.CNOT(wires=[0, 1])
qml.CNOT(wires=[1, 2])
qml.RX(-0.6, wires=[0])
qml.RX(-0.3, wires=[1])
qml.RX(-0.2, wires=[2])
hamiltonian_1 = qml.Hamiltonian(
coeffs=[-1.0] * 3,
observables=[qml.PauliX(0), qml.PauliZ(1), qml.PauliY(0) @ qml.PauliX(1)],
)
hamiltonian_2 = qml.Hamiltonian(
coeffs=[-0.2, 0.3, -0.15],
observables=[
qml.PauliY(1),
qml.PauliZ(0) @ qml.PauliZ(1),
qml.PauliX(0) @ qml.PauliX(1),
],
)
hamiltonian_3 = qml.Hamiltonian(
coeffs=[-2.0], observables=[qml.PauliY(0) @ qml.PauliY(1) @ qml.PauliY(2)]
)
@pytest.mark.parametrize(
"circuit,hamiltonian",
[
(circuit_1, hamiltonian_1),
(circuit_1, hamiltonian_2),
(circuit_2, hamiltonian_1),
(circuit_2, hamiltonian_2),
(circuit_3, hamiltonian_3),
],
)
def test_lie_algebra_omegas(circuit, hamiltonian):
"""Test that we calculate the Riemannian gradient coefficients Tr{[rho, H] P_j} correctly."""
# pylint: disable=no-member
nqubits = max([max(ps.wires) for ps in hamiltonian.ops]) + 1
wires = range(nqubits)
dev = qml.device("default.qubit", wires=nqubits)
@qml.qnode(dev)
def get_state():
circuit()
return qml.state()
@qml.qnode(dev)
def lie_circuit():
circuit()
return qml.expval(hamiltonian)
phi = get_state()
rho = np.outer(phi, phi.conj())
hamiltonian_np = qml.utils.sparse_hamiltonian(hamiltonian, wires).toarray()
lie_algebra_np = hamiltonian_np @ rho - rho @ hamiltonian_np
opt = LieAlgebraOptimizer(circuit=lie_circuit)
ops = opt.get_su_n_operators(None)[0]
omegas_np = []
for op in ops:
op = qml.utils.expand(op.matrix, op.wires, wires)
omegas_np.append(-np.trace(lie_algebra_np @ op).imag / 2)
omegas = opt.get_omegas()
assert np.allclose(omegas, omegas_np)
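# Note (illustrative restatement of the check above): for a state rho and Hamiltonian H,
# the coefficient compared against the optimizer is omega_j = -Im Tr((H rho - rho H) P_j) / 2,
# i.e. the component of the Riemannian gradient along the Pauli word P_j.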
@pytest.mark.parametrize(
"circuit,hamiltonian",
[
(circuit_1, hamiltonian_1),
(circuit_1, hamiltonian_2),
(circuit_2, hamiltonian_1),
(circuit_2, hamiltonian_2),
(circuit_3, hamiltonian_3),
],
)
def test_lie_algebra_omegas_restricted(circuit, hamiltonian):
"""Test that we calculate the (restricted) Riemannian gradient coefficients correctly."""
# pylint: disable=no-member
nqubits = max([max(ps.wires) for ps in hamiltonian.ops]) + 1
wires = range(nqubits)
dev = qml.device("default.qubit", wires=nqubits)
@qml.qnode(dev)
def get_state():
circuit()
return qml.state()
@qml.qnode(dev)
def lie_circuit():
circuit()
return qml.expval(hamiltonian)
phi = get_state()
rho = np.outer(phi, phi.conj())
hamiltonian_np = qml.utils.sparse_hamiltonian(hamiltonian, wires).toarray()
lie_algebra_np = hamiltonian_np @ rho - rho @ hamiltonian_np
restriction = qml.Hamiltonian(
coeffs=[1.0] * 3,
observables=[qml.PauliX(0), qml.PauliY(1), qml.PauliY(0) @ qml.PauliY(1)],
)
opt = LieAlgebraOptimizer(circuit=lie_circuit, restriction=restriction)
ops = opt.get_su_n_operators(restriction)[0]
omegas_np = []
for op in ops:
op = qml.utils.expand(op.matrix, op.wires, wires)
omegas_np.append(-np.trace(lie_algebra_np @ op).imag / 2)
omegas = opt.get_omegas()
assert np.allclose(omegas, omegas_np)
@pytest.mark.parametrize(
"circuit,hamiltonian",
[
(circuit_1, hamiltonian_1),
(circuit_1, hamiltonian_2),
(circuit_2, hamiltonian_1),
(circuit_2, hamiltonian_2),
],
)
def test_lie_algebra_evolution(circuit, hamiltonian):
"""Test that the optimizer produces the correct unitary to append."""
# pylint: disable=no-member
nqubits = max([max(ps.wires) for ps in hamiltonian.ops]) + 1
wires = range(nqubits)
dev = qml.device("default.qubit", wires=nqubits)
@qml.qnode(dev)
def get_state():
circuit()
return qml.state()
@qml.qnode(dev)
def lie_circuit():
circuit()
return qml.expval(hamiltonian)
phi = get_state()
rho = np.outer(phi, phi.conj())
hamiltonian_np = qml.utils.sparse_hamiltonian(hamiltonian, wires).toarray()
lie_algebra_np = hamiltonian_np @ rho - rho @ hamiltonian_np
phi_exact = expm(-0.001 * lie_algebra_np) @ phi
rho_exact = np.outer(phi_exact, phi_exact.conj())
opt = LieAlgebraOptimizer(circuit=lie_circuit, stepsize=0.001, exact=True)
opt.step()
cost_pl = opt.circuit()
cost_exact = np.trace(rho_exact @ hamiltonian_np)
assert np.allclose(cost_pl, cost_exact, atol=1e-2)
@pytest.mark.parametrize(
"circuit,hamiltonian",
[
(circuit_1, hamiltonian_1),
(circuit_1, hamiltonian_2),
(circuit_2, hamiltonian_1),
(circuit_2, hamiltonian_2),
(circuit_3, hamiltonian_3),
],
)
def test_lie_algebra_step(circuit, hamiltonian):
"""Test that we can take subsequent steps with the optimizer."""
nqubits = max([max(ps.wires) for ps in hamiltonian.ops]) + 1
dev = qml.device("default.qubit", wires=nqubits)
@qml.qnode(dev)
def lie_circuit():
circuit()
return qml.expval(hamiltonian)
opt = LieAlgebraOptimizer(circuit=lie_circuit)
opt.step()
opt.step()
@pytest.mark.parametrize(
"circuit,hamiltonian",
[
(circuit_1, hamiltonian_1),
(circuit_1, hamiltonian_2),
(circuit_2, hamiltonian_1),
(circuit_2, hamiltonian_2),
(circuit_3, hamiltonian_3),
],
)
def test_lie_algebra_step_trotterstep(circuit, hamiltonian):
"""Test that we can take subsequent steps with the optimizer."""
nqubits = max([max(ps.wires) for ps in hamiltonian.ops]) + 1
dev = qml.device("default.qubit", wires=nqubits)
@qml.qnode(dev)
def lie_circuit():
circuit()
return qml.expval(hamiltonian)
opt = LieAlgebraOptimizer(circuit=lie_circuit, trottersteps=3)
opt.step()
opt.step()
def test_lie_algebra_circuit_input_1_check():
"""Test that a type error is raise for non-QNode circuits."""
def circuit():
qml.RY(0.5, wires=0)
with pytest.raises(TypeError, match="circuit must be a QNode"):
LieAlgebraOptimizer(circuit=circuit, stepsize=0.001)
def test_lie_algebra_hamiltonian_input_1_check():
"""Test that a type error is raise for non-QNode circuits."""
@qml.qnode(qml.device("default.qubit", wires=3))
def circuit():
qml.RY(0.5, wires=0)
return qml.state()
with pytest.raises(
TypeError,
match="circuit must return the expectation value of a Hamiltonian",
):
LieAlgebraOptimizer(circuit=circuit, stepsize=0.001)
def test_lie_algebra_nqubits_check():
"""Test that we warn if the system is too big."""
@qml.qnode(qml.device("default.qubit", wires=5))
def circuit():
qml.RY(0.5, wires=0)
return qml.expval(qml.Hamiltonian(coeffs=[-1.0], observables=[qml.PauliX(0)]))
with pytest.warns(UserWarning, match="The exact Riemannian gradient is exponentially"):
LieAlgebraOptimizer(circuit=circuit, stepsize=0.001)
def test_lie_algebra_restriction_check():
"""Test that a type error is raise for non-QNode circuits."""
@qml.qnode(qml.device("default.qubit", wires=3))
def circuit():
qml.RY(0.5, wires=0)
return qml.expval(qml.Hamiltonian(coeffs=[-1.0], observables=[qml.PauliX(0)]))
restriction = "not_a_hamiltonian"
with pytest.raises(
TypeError,
match="restriction must be a Hamiltonian",
):
LieAlgebraOptimizer(circuit=circuit, restriction=restriction, stepsize=0.001)
def test_docstring_example():
"""Test the docstring example with Trotterized evolution."""
hamiltonian = qml.Hamiltonian(
coeffs=[-1.0] * 3,
observables=[qml.PauliX(0), qml.PauliZ(1), qml.PauliY(0) @ qml.PauliX(1)],
)
@qml.qnode(qml.device("default.qubit", wires=2))
def quant_fun():
qml.RX(0.1, wires=[0])
qml.RY(0.5, wires=[1])
qml.CNOT(wires=[0, 1])
qml.RY(0.6, wires=[0])
return qml.expval(hamiltonian)
opt = LieAlgebraOptimizer(circuit=quant_fun, stepsize=0.1)
for step in range(6):
circuit, cost = opt.step_and_cost()
print(f"Step {step} - cost {cost}")
print(circuit())
assert np.isclose(cost, -2.23, atol=1e-2)
def test_docstring_example_exact():
"""Test that the optimizer works with matrix exponential."""
hamiltonian = qml.Hamiltonian(
coeffs=[-1.0] * 3,
observables=[qml.PauliX(0), qml.PauliZ(1), qml.PauliY(0) @ qml.PauliX(1)],
)
@qml.qnode(qml.device("default.qubit", wires=2))
def quant_fun():
qml.RX(0.1, wires=[0])
qml.RY(0.5, wires=[1])
qml.CNOT(wires=[0, 1])
qml.RY(0.6, wires=[0])
return qml.expval(hamiltonian)
opt = LieAlgebraOptimizer(circuit=quant_fun, stepsize=0.1, exact=True)
for step in range(6):
_, cost = opt.step_and_cost()
print(f"Step {step} - cost {cost}")
assert np.isclose(cost, -2.23, atol=1e-2)
def test_example_shots():
"""Test that the optimizer works with finite shots."""
hamiltonian = qml.Hamiltonian(
coeffs=[-1.0] * 3,
observables=[qml.PauliX(0), qml.PauliZ(1), qml.PauliY(0) @ qml.PauliX(1)],
)
@qml.qnode(qml.device("default.qubit", wires=2, shots=1000))
def quant_fun():
qml.RX(0.1, wires=[0])
qml.RY(0.5, wires=[1])
qml.CNOT(wires=[0, 1])
qml.RY(0.6, wires=[0])
return qml.expval(hamiltonian)
opt = LieAlgebraOptimizer(circuit=quant_fun, stepsize=0.1, exact=False)
for step in range(3):
_, cost = opt.step_and_cost()
print(f"Step {step} - cost {cost}")
| 29.263298
| 97
| 0.644097
|
af0dc418e06e950ec43c98ccb0d1391a44d5d83f
| 9,680
|
py
|
Python
|
pyquil/api/_compiler.py
|
notmgsk/pyquil
|
db42246e4cf0a7801a58250ab16629c27815997a
|
[
"Apache-2.0"
] | 1
|
2019-03-11T13:37:20.000Z
|
2019-03-11T13:37:20.000Z
|
pyquil/api/_compiler.py
|
notmgsk/pyquil
|
db42246e4cf0a7801a58250ab16629c27815997a
|
[
"Apache-2.0"
] | null | null | null |
pyquil/api/_compiler.py
|
notmgsk/pyquil
|
db42246e4cf0a7801a58250ab16629c27815997a
|
[
"Apache-2.0"
] | null | null | null |
##############################################################################
# Copyright 2016-2018 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import logging
import warnings
from typing import Dict, Any, List, Optional, Tuple
from collections import Counter
from rpcq import Client
from rpcq.messages import (BinaryExecutableRequest, BinaryExecutableResponse,
NativeQuilRequest, TargetDevice,
PyQuilExecutableResponse, ParameterSpec,
RewriteArithmeticRequest)
from pyquil.api._base_connection import ForestConnection
from pyquil.api._qac import AbstractCompiler
from pyquil.api._error_reporting import _record_call
from pyquil.device import AbstractDevice
from pyquil.parser import parse_program
from pyquil.quil import Program, Measurement, Declare
_log = logging.getLogger(__name__)
PYQUIL_PROGRAM_PROPERTIES = ["native_quil_metadata", "num_shots"]
def _extract_attribute_dictionary_from_program(program: Program) -> Dict[str, Any]:
"""
Collects the attributes from PYQUIL_PROGRAM_PROPERTIES on the Program object program
into a dictionary.
:param program: Program to collect attributes from.
:return: Dictionary of attributes, keyed on the string attribute name.
"""
attrs = {}
for prop in PYQUIL_PROGRAM_PROPERTIES:
attrs[prop] = getattr(program, prop)
return attrs
def _extract_program_from_pyquil_executable_response(response: PyQuilExecutableResponse) -> Program:
"""
Unpacks a rpcq PyQuilExecutableResponse object into a pyQuil Program object.
:param response: PyQuilExecutableResponse object to be unpacked.
:return: Resulting pyQuil Program object.
"""
p = Program(response.program)
for attr, val in response.attributes.items():
setattr(p, attr, val)
return p
def _collect_classical_memory_write_locations(program: Program) -> List[Optional[Tuple[int, int]]]:
"""Collect classical memory locations that are the destination of MEASURE instructions
These locations are important for munging output buffers returned from the QPU
server to the shape expected by the user.
This is secretly stored on BinaryExecutableResponse. We're careful to make sure
these objects are json serializable.
:return: list whose value `(q, m)` at index `addr` records that the `m`-th measurement of
qubit `q` was measured into `ro` address `addr`. A value of `None` means nothing was
measured into `ro` address `addr`.
"""
ro_size = None
for instr in program:
if isinstance(instr, Declare) and instr.name == "ro":
if ro_size is not None:
raise ValueError("I found multiple places where a register named `ro` is declared! "
"Please only declare one register named `ro`.")
ro_size = instr.memory_size
measures_by_qubit: Dict[int, int] = Counter()
ro_sources: Dict[int, Tuple[int, int]] = {}
for instr in program:
if isinstance(instr, Measurement):
q = instr.qubit.index
if instr.classical_reg:
offset = instr.classical_reg.offset
assert instr.classical_reg.name == "ro", instr.classical_reg.name
if offset in ro_sources:
_log.warning(f"Overwriting the measured result in register "
f"{instr.classical_reg} from qubit {ro_sources[offset]} "
f"to qubit {q}")
# we track how often each qubit is measured (per shot) and into which register it is
# measured in its n-th measurement.
ro_sources[offset] = (q, measures_by_qubit[q])
measures_by_qubit[q] += 1
if ro_size:
return [ro_sources.get(i) for i in range(ro_size)]
elif ro_sources:
raise ValueError("Found MEASURE instructions, but no 'ro' or 'ro_table' "
"region was declared.")
else:
return []
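# Worked example of the mapping built above (a hedged sketch, not part of pyquil itself):
# for the hypothetical Quil program
#     DECLARE ro BIT[3]
#     MEASURE 0 ro[0]
#     MEASURE 1 ro[1]
#     MEASURE 0 ro[2]
# the function returns [(0, 0), (1, 0), (0, 1)]: address ro[2] holds the second
# (index 1) measurement of qubit 0, which is exactly what the output munging needs.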
def _collect_memory_descriptors(program: Program) -> Dict[str, ParameterSpec]:
"""Collect Declare instructions that are important for building the patch table.
This is secretly stored on BinaryExecutableResponse. We're careful to make sure
these objects are json serializable.
:return: A dictionary of variable names to specs about the declared region.
"""
return {
instr.name: ParameterSpec(type=instr.memory_type, length=instr.memory_size)
for instr in program if isinstance(instr, Declare)
}
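# Hedged illustration of the descriptor dict above (hypothetical declarations, not actual
# pyquil output): a program containing
#     DECLARE ro BIT[2]
#     DECLARE theta REAL[1]
# maps to {"ro": ParameterSpec(type="BIT", length=2), "theta": ParameterSpec(type="REAL", length=1)},
# giving the patch table the name, type, and length of every declared memory region.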
class QPUCompiler(AbstractCompiler):
@_record_call
def __init__(self,
endpoint: str,
device: AbstractDevice,
timeout: int = 10,
name: Optional[str] = None) -> None:
"""
Client to communicate with the Compiler Server.
:param endpoint: TCP or IPC endpoint of the Compiler Server
:param device: PyQuil Device object to use as compilation target
:param timeout: Number of seconds to wait for a response from the client.
:param name: Name of the lattice being targeted
"""
self.client = Client(endpoint, timeout=timeout)
self.target_device = TargetDevice(isa=device.get_isa().to_dict(),
specs=device.get_specs().to_dict())
self.name = name
def get_version_info(self) -> dict:
return self.client.call('get_version_info')
@_record_call
def quil_to_native_quil(self, program: Program) -> Program:
request = NativeQuilRequest(quil=program.out(), target_device=self.target_device)
response = self.client.call('quil_to_native_quil', request).asdict() # type: Dict
nq_program = parse_program(response['quil'])
nq_program.native_quil_metadata = response['metadata']
nq_program.num_shots = program.num_shots
return nq_program
@_record_call
def native_quil_to_executable(self, nq_program: Program) -> BinaryExecutableResponse:
if nq_program.native_quil_metadata is None:
warnings.warn("It looks like you're trying to call `native_quil_to_binary` on a "
"Program that hasn't been compiled via `quil_to_native_quil`. This is "
"ok if you've hand-compiled your program to our native gateset, "
"but be careful!")
if self.name is not None:
targeted_lattice = self.client.call('get_config_info')['lattice_name']
if targeted_lattice and targeted_lattice != self.name:
warnings.warn(f'You requested compilation for device {self.name}, '
f'but you are engaged on device {targeted_lattice}.')
arithmetic_request = RewriteArithmeticRequest(quil=nq_program.out())
arithmetic_response = self.client.call('resolve_gate_parameter_arithmetic', arithmetic_request)
request = BinaryExecutableRequest(quil=arithmetic_response.quil, num_shots=nq_program.num_shots)
response = self.client.call('native_quil_to_binary', request)
# hack! we're storing a little extra info in the executable binary that we don't want to
# expose to anyone outside of our own private lives: not the user, not the Forest server,
# not anyone.
response.recalculation_table = arithmetic_response.recalculation_table
response.memory_descriptors = _collect_memory_descriptors(nq_program)
response.ro_sources = _collect_classical_memory_write_locations(nq_program)
return response
class QVMCompiler(AbstractCompiler):
@_record_call
    def __init__(self, endpoint: str, device: AbstractDevice, timeout: Optional[float] = None) -> None:
        """
        Client to communicate with the Compiler Server.
        :param endpoint: TCP or IPC endpoint of the Compiler Server
        :param device: PyQuil Device object to use as compilation target
        :param timeout: Number of seconds to wait for a response from the client.
        """
self.client = Client(endpoint, timeout=timeout)
self.target_device = TargetDevice(isa=device.get_isa().to_dict(),
specs=device.get_specs().to_dict())
def get_version_info(self) -> dict:
return self.client.call('get_version_info')
@_record_call
def quil_to_native_quil(self, program: Program) -> Program:
request = NativeQuilRequest(quil=program.out(), target_device=self.target_device)
response = self.client.call('quil_to_native_quil', request).asdict() # type: Dict
nq_program = parse_program(response['quil'])
nq_program.native_quil_metadata = response['metadata']
nq_program.num_shots = program.num_shots
return nq_program
@_record_call
def native_quil_to_executable(self, nq_program: Program) -> PyQuilExecutableResponse:
return PyQuilExecutableResponse(
program=nq_program.out(),
attributes=_extract_attribute_dictionary_from_program(nq_program))
| 44
| 104
| 0.665702
|
66d5524755827697fe10230599637df076846136
| 585
|
py
|
Python
|
turnero/turnero_app/migrations/0004_auto_20190618_1259.py
|
Juannauta/Turnero
|
4af8186cf95508713ec9bcc076740ffbc40b2bb8
|
[
"MIT"
] | null | null | null |
turnero/turnero_app/migrations/0004_auto_20190618_1259.py
|
Juannauta/Turnero
|
4af8186cf95508713ec9bcc076740ffbc40b2bb8
|
[
"MIT"
] | null | null | null |
turnero/turnero_app/migrations/0004_auto_20190618_1259.py
|
Juannauta/Turnero
|
4af8186cf95508713ec9bcc076740ffbc40b2bb8
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.2 on 2019-06-18 12:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('turnero_app', '0003_auto_20190618_1256'),
]
operations = [
migrations.AddField(
model_name='serviciosusuarios',
name='fecha_creacion',
field=models.DateTimeField(auto_now=True),
),
migrations.AddField(
model_name='serviciosusuarios',
name='fecha_finalización',
field=models.DateTimeField(null=True),
),
]
| 24.375
| 54
| 0.606838
|
8d0d3b5fabca091a8eb2d15cd070993902d29727
| 1,009
|
py
|
Python
|
12 Preprocessing pipeline/train.py
|
aishifugi/generating-sound-with-neural-networks
|
e0f0b875510178f7e31764a080ca7e463af46e38
|
[
"MIT"
] | 81
|
2020-11-16T12:26:55.000Z
|
2022-03-26T18:04:21.000Z
|
12 Preprocessing pipeline/train.py
|
aishifugi/generating-sound-with-neural-networks
|
e0f0b875510178f7e31764a080ca7e463af46e38
|
[
"MIT"
] | 3
|
2021-06-22T14:40:50.000Z
|
2022-01-16T17:35:49.000Z
|
11 Implementing VAE/code/train.py
|
musikalkemist/generating-sound-with-neural-networks
|
4e71d22683edb9bd56aa46de3f022f4e1dec1cf1
|
[
"MIT"
] | 27
|
2021-01-15T16:00:18.000Z
|
2022-03-16T09:11:41.000Z
|
from tensorflow.keras.datasets import mnist
from autoencoder import VAE
LEARNING_RATE = 0.0005
BATCH_SIZE = 32
EPOCHS = 100
def load_mnist():
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype("float32") / 255
x_train = x_train.reshape(x_train.shape + (1,))
x_test = x_test.astype("float32") / 255
x_test = x_test.reshape(x_test.shape + (1,))
return x_train, y_train, x_test, y_test
def train(x_train, learning_rate, batch_size, epochs):
autoencoder = VAE(
input_shape=(28, 28, 1),
conv_filters=(32, 64, 64, 64),
conv_kernels=(3, 3, 3, 3),
conv_strides=(1, 2, 2, 1),
latent_space_dim=2
)
autoencoder.summary()
autoencoder.compile(learning_rate)
autoencoder.train(x_train, batch_size, epochs)
return autoencoder
if __name__ == "__main__":
x_train, _, _, _ = load_mnist()
autoencoder = train(x_train[:10000], LEARNING_RATE, BATCH_SIZE, EPOCHS)
autoencoder.save("model")
| 25.871795
| 75
| 0.666006
|
8394a39db3f52ccba924ececb588c9535ef09a14
| 3,173
|
py
|
Python
|
kubeseal.py
|
p3lim/sublime-kubeseal
|
e91f5f771e5731e1c512c487788472ef1bfe9c02
|
[
"MIT"
] | null | null | null |
kubeseal.py
|
p3lim/sublime-kubeseal
|
e91f5f771e5731e1c512c487788472ef1bfe9c02
|
[
"MIT"
] | 2
|
2021-05-12T20:28:03.000Z
|
2021-05-15T13:23:55.000Z
|
kubeseal.py
|
p3lim/sublime-kubeseal
|
e91f5f771e5731e1c512c487788472ef1bfe9c02
|
[
"MIT"
] | null | null | null |
import re
import subprocess
import sublime
import sublime_plugin
ANSI_ESCAPE = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
settings = None
def plugin_loaded():
'''
Called when the plugin is loaded, used to load the settings for the package.
'''
global settings
    settings = sublime.load_settings('kubeseal.sublime-settings')
class ConvertSecret(object):
'''
Wrapper for the process used to convert a Secret to a SealedSecret using kubeseal.
:param sublime.View view: The view of the file to be converted.
'''
def __init__(self, view):
self.view = view
self.window = view.window()
self.encoding = view.encoding()
if self.encoding == 'Undefined':
self.encoding = 'utf-8'
        self.cmd = settings.get('cmd', ['kubeseal', '-o', 'yaml'])  # command whose stdin receives the contents of the current view
def convert(self, region):
'''
Attempts to convert the contents of the current view from a Secret manifest to a SealedSecret manifest.
:param sublime.Region region: The region for the file to convert
:returns: str: Returns the SealedSecret manifest if no errors arose, else the original content.
'''
contents = self.view.substr(region)
# run the kubeseal command
output, error = self._exec(contents)
if error:
# there was an error, display it
self._show_errors(error)
# return the original content
return contents
# hide any existing errors
self._hide_errors()
# return the converted output
return output
def _exec(self, stdin):
'''
Runs kubeseal with the given input.
:param str stdin: Stdin for the process, the file content to convert
:returns: stdout, stderr: Returns the SealedSecret manifest if successful, an empty stdout
and error if unsuccessful.
'''
proc = subprocess.Popen(
self.cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = proc.communicate(stdin.encode())
if stderr or proc.returncode != 0:
return '', stderr.decode('utf-8')
return stdout.decode(self.encoding), None
def _show_errors(self, errors):
'''
Show the stderr of a failed process in an output panel.
        :param str errors: Stderr output of a failed process.
'''
panel = self.window.create_output_panel('kubeseal')
panel.set_scratch(True)
panel.run_command('select_all')
panel.run_command('right_delete')
panel.run_command('insert', {'characters': ANSI_ESCAPE.sub('', errors)})
self.window.run_command('show_panel', {'panel': 'output.kubeseal'})
def _hide_errors(self):
'''
Hide any previously displayed error panel.
'''
self.window.run_command('hide_panel', {'panel': 'output.kubeseal'})
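# A minimal sketch of the same pipe-through-kubeseal pattern used by ConvertSecret._exec,
# usable outside Sublime (a hedged example: it assumes a `kubeseal` binary on PATH and is
# not part of the plugin's API).
def seal_manifest_sketch(manifest_yaml, cmd=('kubeseal', '-o', 'yaml')):
    proc = subprocess.Popen(
        list(cmd),
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    stdout, stderr = proc.communicate(manifest_yaml.encode())
    if stderr or proc.returncode != 0:
        # mirror _exec: report the error and leave the original manifest untouched
        return manifest_yaml, stderr.decode('utf-8')
    return stdout.decode('utf-8'), None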
class KubesealCommand(sublime_plugin.TextCommand):
'''
The `kubeseal` command, invoked by the command palette.
'''
def run(self, edit):
'''
Converts the current file viewed, replacing its contents.
'''
converter = ConvertSecret(self.view)
# get the entire view region
region = sublime.Region(0, self.view.size())
# run the formatter with the given region
replacement = converter.convert(region)
# replace the region if the content has changes
if self.view.substr(region) != replacement:
self.view.replace(edit, region, replacement)
| 27.353448
| 105
| 0.714151
|
ce5587d9c8c65a6544e904ea0549dbe6c2e6b9c3
| 329
|
py
|
Python
|
aetherguild/framework/utils/sentry.py
|
katajakasa/aetherguild3
|
4dc5485398d39cc2be47820d1194d91fe73a0435
|
[
"MIT"
] | null | null | null |
aetherguild/framework/utils/sentry.py
|
katajakasa/aetherguild3
|
4dc5485398d39cc2be47820d1194d91fe73a0435
|
[
"MIT"
] | null | null | null |
aetherguild/framework/utils/sentry.py
|
katajakasa/aetherguild3
|
4dc5485398d39cc2be47820d1194d91fe73a0435
|
[
"MIT"
] | null | null | null |
from raven.handlers.logging import SentryHandler
from raven_aiohttp import AioHttpTransport
from raven import Client
class AsyncSentryHandler(SentryHandler):
def __init__(self, *args, **kwargs):
kwargs['client'] = Client(transport=AioHttpTransport)
super(AsyncSentryHandler, self).__init__(*args, **kwargs)
| 32.9
| 65
| 0.765957
|
4da255bf08eec678cb6fe113f3829e1bfec4d259
| 3,471
|
py
|
Python
|
src/test/data_structure/list_exercise_test.py
|
EduHubICT/python-basic
|
43765320e7ff4d5d13a11684f8320513f47ea1ff
|
[
"MIT"
] | null | null | null |
src/test/data_structure/list_exercise_test.py
|
EduHubICT/python-basic
|
43765320e7ff4d5d13a11684f8320513f47ea1ff
|
[
"MIT"
] | 3
|
2020-06-13T14:48:36.000Z
|
2020-07-02T15:51:13.000Z
|
src/test/data_structure/list_exercise_test.py
|
Big0one/python-basic
|
43765320e7ff4d5d13a11684f8320513f47ea1ff
|
[
"MIT"
] | 1
|
2020-06-06T22:51:50.000Z
|
2020-06-06T22:51:50.000Z
|
import unittest
from src.main.data_structure.list_exercise import *
class TestList(unittest.TestCase):
def test_find_average(self):
self.assertEqual(find_average([1, 2, 3]), 2)
self.assertEqual(find_average([1, 2, 3, 4, 5, 6, 7, -1]), 3.375)
def test_sort_list(self):
self.assertEqual(list_sort([3, 5, 1, 9]), [1, 3, 5, 9])
def test_find_max(self):
self.assertEqual(find_max([0, 3, 2, 4, 5, 6, 100]), 100)
def test_find_min(self):
self.assertEqual(find_min([2, 4, 3, 5, -1, -1000]), -1000)
def test_find_sum(self):
self.assertEqual(find_sum([1, 2, 3, 4]), 10)
def test_sort_list_bubble(self):
self.assertEqual(
sort_list_bubble_asc([10, 9, 8, 7, 5, 1, 2, 4, 3, 6]),
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
)
def test_sort_list_bubble_dsc(self):
self.assertEqual(
sort_list_bubble_dsc([10, 9, 8, 7, 5, 1, 2, 4, 3, 6]),
[10, 9, 8, 7, 6, 5, 4, 3, 2, 1],
)
def test_binary_search(self):
self.assertEqual(binary_search([1, 5, 15, 35, 100, 305, 390], 100), "Yes")
self.assertEqual(binary_search([1, 5, 15, 35, 100, 305, 390], 1000), "No")
self.assertEqual(binary_search([1, 5, 15, 35, 100, 305, 390], 390), "Yes")
self.assertEqual(binary_search([1, 5], 100), "No")
def test_recursive_binary_search(self):
self.assertEqual(
binary_search_recursive([1, 2, 3, 4, 5, 6, 7, 8], 3, 0, 7), "Yes"
)
def test_binary_search_index(self):
self.assertEqual(
binary_search_target_index([1, 5, 15, 100, 100, 305, 390], 100, True), 3
)
def test_binary_search_similar_target_count(self):
self.assertEqual(binary_search_count([1, 5, 15, 100, 100, 305, 390], 100), 2)
def test_binary_search_circular_sorted(self):
self.assertEqual(
binary_search_circular_sorted([12, 13, 14, 1, 2, 3, 4], 14), "Yes"
)
def test_find_square(self):
self.assertEqual(
find_square([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
[1, 4, 9, 16, 25, 36, 49, 64, 81, 100],
)
self.assertEqual(find_square([20, 12, 11]), [400, 144, 121])
def test_find_prime(self):
self.assertEqual(
find_prime_number([11, 12, 33, 84, 95, 101]),
[True, False, False, False, False, True],
)
def test_insertion_sort_asc(self):
self.assertEqual(insertion_sort_asc([5, 4, 3, 2, 1]), [1, 2, 3, 4, 5])
self.assertEqual(
insertion_sort_asc([5, 10, 3, 22, 23, 24, 25]), [3, 5, 10, 22, 23, 24, 25]
)
self.assertEqual(
insertion_sort_asc([10, 9, 8, 7, 5, 1, 2, 4, 3, 6]),
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
)
def test_insertion_sort_dsc(self):
self.assertEqual(insertion_sort_dsc([1, 2, 3, 4, 5]), [5, 4, 3, 2, 1])
self.assertEqual(
insertion_sort_dsc([5, 10, 3, 22, 23, 24, 25]), [25, 24, 23, 22, 10, 5, 3]
)
self.assertEqual(
insertion_sort_dsc([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
[10, 9, 8, 7, 6, 5, 4, 3, 2, 1],
)
def test_specified_type_item(self):
self.assertEqual(specified_type_item([1, 2, "abc", 2.2], int), 2)
def test_remove_duplicates(self):
self.assertEqual(remove_duplicates([1, 2, 2, 3, 3, 4, 4]), [1, 2, 3, 4])
if __name__ == "__main__":
unittest.main()
| 34.366337
| 86
| 0.553155
|
2d812d5fa0739929ba357062a212fedbb4c78de3
| 857
|
py
|
Python
|
tests/test_multivariate/test_sample_analysis.py
|
anukaal/probnum-evaluation
|
5074229cd6fa33aa1884eded792be5c78ba9fb4c
|
[
"MIT"
] | 5
|
2021-05-01T17:20:08.000Z
|
2021-05-03T09:13:28.000Z
|
tests/test_multivariate/test_sample_analysis.py
|
anukaal/probnum-evaluation
|
5074229cd6fa33aa1884eded792be5c78ba9fb4c
|
[
"MIT"
] | 9
|
2021-02-14T11:45:44.000Z
|
2021-06-07T09:37:33.000Z
|
tests/test_multivariate/test_sample_analysis.py
|
anukaal/probnum-evaluation
|
5074229cd6fa33aa1884eded792be5c78ba9fb4c
|
[
"MIT"
] | 2
|
2021-02-14T10:31:21.000Z
|
2022-03-29T12:37:52.000Z
|
"""Tests for sample analysis functions."""
import numpy as np
import pytest
from probnumeval import multivariate
@pytest.fixture
def fake_samples():
return np.random.rand(100, 3)
@pytest.fixture
def fake_reference():
return np.random.rand(3)
@pytest.mark.parametrize("p", [1, 2, np.inf])
def test_sample_sample_distance(fake_samples, p):
ssdist = multivariate.sample_sample_distance(fake_samples, p=p)
np.testing.assert_allclose(ssdist.shape, (100,))
@pytest.mark.parametrize("p", [1, 2, np.inf])
def test_sample_reference_distance(fake_samples, fake_reference, p):
srdist = multivariate.sample_reference_distance(fake_samples, fake_reference, p=p)
np.testing.assert_allclose(srdist.shape, (100,))
def test_gaussianity_p_value():
with pytest.raises(NotImplementedError):
multivariate.gaussianity_p_value(None)
| 25.969697
| 86
| 0.757293
|
fbfcc818b154bd8f75bc8cc3cc661ff72a26ce82
| 5,124
|
py
|
Python
|
aoc2021/d20.py
|
jbudynek/advent-of-code
|
16ab71b110e9766b445bce3d3172b11d421b2f75
|
[
"CC0-1.0"
] | null | null | null |
aoc2021/d20.py
|
jbudynek/advent-of-code
|
16ab71b110e9766b445bce3d3172b11d421b2f75
|
[
"CC0-1.0"
] | null | null | null |
aoc2021/d20.py
|
jbudynek/advent-of-code
|
16ab71b110e9766b445bce3d3172b11d421b2f75
|
[
"CC0-1.0"
] | null | null | null |
# coding: utf-8
from collections import Counter
from timeit import default_timer as timer
import numpy as np
# Helpers
##########
def print_field(xyids, DBG=True):
coords = xyids.keys()
if(DBG):
print(xyids)
x_min = min(coords, key=lambda t: t[0])[0]-1
x_max = max(coords, key=lambda t: t[0])[0]+1
y_min = min(coords, key=lambda t: t[1])[1]-1
y_max = max(coords, key=lambda t: t[1])[1]+1
if(DBG):
print(x_min, x_max, y_min, y_max)
for yy in range(y_min, y_max+1):
ss = ""
for xx in range(x_min, x_max+1):
if (xx, yy) in xyids:
ss += str(xyids[(xx, yy)])
else:
ss += " "
print(ss)
def create_world(ccc, DBG=True):
field = {}
x = -1
y = -1
v_id = 0
for line in ccc:
y += 1
x = -1
for c in line:
x += 1
if c == '#':
field[(x, y)] = 1
else:
field[(x, y)] = 0
if DBG:
print(field)
return field
def get_bounds(tracks, DBG):
coords = tracks.keys()
x_min = min(coords, key=lambda t: t[0])[0]
x_max = max(coords, key=lambda t: t[0])[0]
y_min = min(coords, key=lambda t: t[1])[1]
y_max = max(coords, key=lambda t: t[1])[1]
return (x_min, x_max, y_min, y_max)
# Main function
##########
def parse_algo(line):
algo = np.zeros(512, dtype=int)
idx = 0
for cc in line:
if cc == '#':
algo[idx] = 1
idx += 1
return algo
def process(world, xy, algo, fill):
dirs = [(-1, -1), (0, -1), (1, -1),
(-1, 0), (0, 0), (1, 0),
(-1, 1), (0, 1), (1, 1)]
bin_string = ''
for d in dirs:
nxy = (xy[0]+d[0], xy[1]+d[1])
if nxy not in world:
bin_string += str(fill)
elif world[nxy] == 1:
bin_string += '1'
else:
bin_string += '0'
return int(bin_string, 2)
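# Worked example of the encoding done by process() above (illustrative window, not puzzle
# data): reading the 3x3 neighbourhood row by row as a 9-bit binary number,
#     . . .
#     # . .
#     . # .
# becomes "000100010", i.e. index 34 into the enhancement algorithm.
assert int('000100010', 2) == 34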
def enhance(input_val, nb_steps, DBG=True):
algo = parse_algo(input_val[0])
world = create_world(input_val[2:], DBG)
if DBG:
print_field(world)
for step in range(nb_steps):
        # when algo[0] is '#', the infinite background flips between dark and lit on
        # every step, so out-of-range cells are filled with 0 on even steps and 1 on odd
        fill = 0
        if algo[0] == 1:
            fill = step % 2
(x_min, x_max, y_min, y_max) = get_bounds(world, DBG)
new_world = world.copy()
for x in range(x_min-1, x_max+2):
for y in range(y_min-1, y_max+2):
xy = (x, y)
nxy_val = process(world, xy, algo, fill)
new_world[xy] = algo[nxy_val]
if DBG:
print_field(new_world)
world = new_world
return Counter(new_world.values())[1]
def boom_part1(input_val, DBG=True):
return enhance(input_val, 2, DBG)
def boom_part2(input_val, DBG=True):
return enhance(input_val, 50, DBG)
# Testing and timing
##########
def print_time(t_start, t_end):
s = t_end-t_start
print(int(s*1000), "ms = ", int(s), "s = ", int(s/60), "min")
RED_FG = '\x1b[91m'
GREEN_FG = '\x1b[92m'
YELLOW_FG = '\x1b[93m'
DEFAULT_FG = '\x1b[39m'
def output_test(cc, t_start, t_end, result, expected):
result = str(result)
expected = str(expected)
flag = (result == expected)
sflag = ""
if flag == True:
sflag = GREEN_FG+str(flag)+DEFAULT_FG
else:
sflag = RED_FG+str(flag)+DEFAULT_FG
if(expected == "None"):
print("*** "+str(cc) + " *** -> Result = "+str(result))
else:
print("*** "+str(cc) + " *** -> Result = "+str(result) +
" -> success = " + sflag + " -> expected " + expected)
print_time(t_start, t_end)
return flag
def test_part1(cc=None, expected=None, DBG=False):
t_start = timer()
result = boom_part1(cc, DBG)
t_end = timer()
return output_test(cc, t_start, t_end, result, expected)
def test_part2(cc=None, expected=None, DBG=False):
t_start = timer()
result = boom_part2(cc, DBG)
t_end = timer()
return output_test(cc, t_start, t_end, result, expected)
# Test cases
##########
tt1 = """..#.#..#####.#.#.#.###.##.....###.##.#..###.####..#####..#....#..#..##..###..######.###...####..#..#####..##..#.#####...##.#.#..#.##..#.#......#.###.######.###.####...#.##.##..#..#..#####.....#.#....###..#.##......#.....#..#..#..##..#...##.######.####.####.#.#...#.......#..#.#.#...####.##.#......#..#...##.#.##..#...##.#.##..###.#......#.#.......#.#.#.####.###.##...#.....####.#..#..#.##.#....##..#.####....##...##..#...#......#.#.......#.......##..####..#...#.#.#...##..#.#..###..#####........#..####......#..#
#..#.
#....
##..#
..#..
..###"""
tt1 = tt1.splitlines()
test_part1(tt1, 35, True)
test_part2(tt1, 3351, False)
# Real data
##########
INPUT_FILE = "input-d20.txt"
f = open(INPUT_FILE, "r")
contents = f.read()
puzzle_input = contents.splitlines()
f.close()
# part 1
t_start = timer()
ret = boom_part1(puzzle_input, DBG=False)
t_end = timer()
print_time(t_start, t_end)
print(ret)
# part 2
t_start = timer()
ret = boom_part2(puzzle_input, DBG=False)
t_end = timer()
print_time(t_start, t_end)
print(ret)
# PART 1 OK = 5663
# PART 2 OK = 19638
| 23.18552
| 521
| 0.484192
|
cb28b7beeb920baca8daaf50418b4bc6b6bb5333
| 98,805
|
py
|
Python
|
test/test_c10d.py
|
wxwoods/mctorch
|
7cd6eb51fdd01fa75ed9245039a4f145ba342de2
|
[
"BSD-3-Clause"
] | 1
|
2019-07-23T11:20:58.000Z
|
2019-07-23T11:20:58.000Z
|
test/test_c10d.py
|
wxwoods/mctorch
|
7cd6eb51fdd01fa75ed9245039a4f145ba342de2
|
[
"BSD-3-Clause"
] | null | null | null |
test/test_c10d.py
|
wxwoods/mctorch
|
7cd6eb51fdd01fa75ed9245039a4f145ba342de2
|
[
"BSD-3-Clause"
] | null | null | null |
import copy
import math
import multiprocessing
import os
import random
import sys
import tempfile
import threading
import time
import unittest
from datetime import timedelta
from itertools import groupby
from functools import wraps
from collections import namedtuple
import torch
import common_utils as common
from torch import nn
import torch.nn.functional as F
import torch.distributed as c10d
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from common_utils import TestCase, load_tests, run_tests
from common_utils import retry_on_address_already_in_use_error
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if not c10d.is_available():
print('c10d not available, skipping tests')
sys.exit(0)
TIMEOUT_DEFAULT = 30
TIMEOUT_OVERRIDE = {}
TestSkip = namedtuple('TestSkip', 'exit_code, message')
TEST_SKIPS = {
"multi-gpu": TestSkip(75, "Need at least 2 CUDA devices"),
"nccl": TestSkip(76, "c10d not compiled with NCCL support"),
"known_issues": TestSkip(77, "Test skipped due to known issues")
}
def skip_if_not_multigpu(func):
"""Multi-GPU tests requires at least 2 GPUS. Skip if this is not met."""
@wraps(func)
def wrapper(*args, **kwargs):
if torch.cuda.is_available() and torch.cuda.device_count() >= 2:
return func(*args, **kwargs)
sys.exit(TEST_SKIPS['multi-gpu'].exit_code)
return wrapper
def skip_if_lt_x_gpu(x):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if torch.cuda.is_available() and torch.cuda.device_count() >= x:
return func(*args, **kwargs)
sys.exit(TEST_SKIPS['multi-gpu'].exit_code)
return wrapper
return decorator
def skip_if_not_nccl(func):
"""Skips a test if NCCL is not available (for c10d)."""
@wraps(func)
def wrapper(*args, **kwargs):
if hasattr(c10d, "ProcessGroupNCCL"):
return func(*args, **kwargs)
sys.exit(TEST_SKIPS['nccl'].exit_code)
return wrapper
def skip_for_known_issues(func):
"""Skips a test due to known issues (for c10d)."""
@wraps(func)
def wrapper(*args, **kwargs):
sys.exit(TEST_SKIPS['known_issues'].exit_code)
return wrapper
def get_timeout(test_id):
return TIMEOUT_OVERRIDE.get(test_id.split('.')[-1], TIMEOUT_DEFAULT)
def gpus_for_rank(world_size):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. Nccl backend requires equal #GPUs in each process.
On a single node, all visible GPUs are evenly
divided to subsets, each process only uses a subset.
"""
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(visible_devices[rank * gpus_per_process: (rank + 1) * gpus_per_process])
return gpus_for_rank
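# A hedged sketch of the partitioning described in the docstring above, using hypothetical
# counts so it runs without CUDA: 4 visible devices split across a world size of 2 give
# each rank a contiguous slice of 2 GPUs.
def _gpus_for_rank_sketch(num_devices, world_size):
    per_process = num_devices // world_size
    return [list(range(num_devices))[rank * per_process:(rank + 1) * per_process]
            for rank in range(world_size)]
assert _gpus_for_rank_sketch(4, 2) == [[0, 1], [2, 3]]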
def simple_reduce_tests(rank, world_size):
return [
(
c10d.ReduceOp.SUM,
torch.Tensor([rank + 1.0]),
torch.Tensor([float(world_size * (world_size + 1) / 2)]),
),
(
c10d.ReduceOp.PRODUCT,
torch.Tensor([rank + 1.0]),
torch.Tensor([float(math.factorial(world_size))]),
),
(
c10d.ReduceOp.MIN,
torch.Tensor([rank + 1.0]),
torch.Tensor([1.0]),
),
(
c10d.ReduceOp.MAX,
torch.Tensor([rank + 1.0]),
torch.Tensor([world_size]),
),
]
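# Worked check of the expected values above for a hypothetical world_size of 4:
# SUM of (rank + 1) over ranks is 1 + 2 + 3 + 4 = 10 = 4 * (4 + 1) / 2, and the
# PRODUCT is 1 * 2 * 3 * 4 = 24 = 4!.
assert 4 * (4 + 1) / 2 == 10 and math.factorial(4) == 24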
def simple_multi_input_reduce_tests(rank, world_size):
return [
(
c10d.ReduceOp.SUM,
[torch.Tensor([2 * rank + 0.0]), torch.Tensor([2 * rank + 1.0])],
torch.Tensor([float(world_size * (2 * world_size - 1))]),
),
(
c10d.ReduceOp.PRODUCT,
[torch.Tensor([2 * rank + 1.0]), torch.Tensor([2 * rank + 2.0])],
torch.Tensor([float(math.factorial(2 * world_size))]),
),
(
c10d.ReduceOp.MIN,
[torch.Tensor([2 * rank + 1.0]), torch.Tensor([2 * rank + 2.0])],
torch.Tensor([1.0]),
),
(
c10d.ReduceOp.MAX,
[torch.Tensor([2 * rank + 1.0]), torch.Tensor([2 * rank + 2.0])],
torch.Tensor([2 * world_size]),
),
]
class StoreTestBase(object):
    def _create_store(self):
        raise RuntimeError("not implemented")
def _test_set_get(self, fs):
fs.add("key", 1)
fs.add("key", 2)
fs.add("key", 3)
fs.set("key0", "value0")
fs.add("key3", 1)
fs.set("key1", "value1")
fs.add("key3", 2)
fs.set("key2", "value2")
fs.add("key3", 3)
fs.add("key3", 4)
fs.add("key3", 5)
fs.add("key3", 6)
self.assertEqual(b"6", fs.get("key"))
self.assertEqual(b"value0", fs.get("key0"))
self.assertEqual(b"value1", fs.get("key1"))
self.assertEqual(b"value2", fs.get("key2"))
self.assertEqual(b"21", fs.get("key3"))
def test_set_get(self):
self._test_set_get(self._create_store())
class FileStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(FileStoreTest, self).setUp()
self.file = tempfile.NamedTemporaryFile(delete=False)
def _create_store(self):
store = c10d.FileStore(self.file.name, 1)
store.set_timeout(timedelta(seconds=300))
return store
class PrefixFileStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(PrefixFileStoreTest, self).setUp()
self.file = tempfile.NamedTemporaryFile(delete=False)
self.filestore = c10d.FileStore(self.file.name, 1)
self.prefix = "test_prefix"
self.filestore.set_timeout(timedelta(seconds=300))
def _create_store(self):
return c10d.PrefixStore(self.prefix, self.filestore)
def create_tcp_store(addr):
"""
Creates a TCP store. Retries if the chosen port is already in use.
"""
ports = []
for _ in range(10):
try:
port = common.find_free_port()
ports.append(port)
return c10d.TCPStore(addr, port, 1, True)
except RuntimeError as error:
if str(error) == "Address already in use":
continue
raise
raise RuntimeError("Unable to find free port (tried %s)" % ", ".join(ports))
class TCPStoreTest(TestCase, StoreTestBase):
def _create_store(self):
store = create_tcp_store('localhost')
store.set_timeout(timedelta(seconds=300))
return store
def test_address_already_in_use(self):
with self.assertRaisesRegex(RuntimeError, "^Address already in use$"):
addr = 'localhost'
port = common.find_free_port()
# Use noqa to silence flake8.
# Need to store in an unused variable here to ensure the first
# object is not destroyed before the second object is created.
store1 = c10d.TCPStore(addr, port, 1, True) # noqa: F841
store2 = c10d.TCPStore(addr, port, 1, True) # noqa: F841
class PrefixTCPStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(PrefixTCPStoreTest, self).setUp()
self.tcpstore = create_tcp_store('localhost')
self.prefix = "test_prefix"
self.tcpstore.set_timeout(timedelta(seconds=300))
def _create_store(self):
return c10d.PrefixStore(self.prefix, self.tcpstore)
class RendezvousTest(TestCase):
def test_unknown_handler(self):
with self.assertRaisesRegex(RuntimeError, "^No rendezvous handler"):
c10d.rendezvous('invalid://')
class RendezvousEnvTest(TestCase):
@retry_on_address_already_in_use_error
def test_common_errors(self):
# TODO remove this hack
if not hasattr(c10d, "ProcessGroupNCCL"):
raise unittest.SkipTest("C10D is not built with NCCL process group,"
" skipping test")
vars = {
"WORLD_SIZE": "1",
"RANK": "0",
"MASTER_ADDR": "127.0.0.1",
"MASTER_PORT": common.find_free_port(),
}
class Env(object):
def __init__(self, vars):
self.vars = vars
def __enter__(self):
for key, value in self.vars.items():
os.environ[key] = str(value)
def __exit__(self, type, value, traceback):
for key in self.vars.keys():
del os.environ[key]
def without(d, key):
d = d.copy()
d.pop(key)
return d
def withouts(d, keys):
d = d.copy()
for key in keys:
d.pop(key)
return d
with Env(without(vars, 'WORLD_SIZE')):
with self.assertRaisesRegex(ValueError, 'WORLD_SIZE expected'):
gen = c10d.rendezvous('env://')
next(gen)
c10d.init_process_group(backend='nccl', world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, 'RANK')):
with self.assertRaisesRegex(ValueError, 'RANK expected'):
gen = c10d.rendezvous('env://')
next(gen)
c10d.init_process_group(backend='nccl', rank=0)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(withouts(vars, ['RANK', 'WORLD_SIZE'])):
c10d.init_process_group(backend='nccl', rank=0, world_size=1)
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(vars):
c10d.init_process_group(backend='nccl')
self.assertEqual(c10d.get_rank(), 0)
self.assertEqual(c10d.get_world_size(), 1)
c10d.destroy_process_group()
with Env(without(vars, 'MASTER_ADDR')):
with self.assertRaisesRegex(ValueError, 'MASTER_ADDR expected'):
gen = c10d.rendezvous('env://')
next(gen)
with Env(without(vars, 'MASTER_PORT')):
with self.assertRaisesRegex(ValueError, 'MASTER_PORT expected'):
gen = c10d.rendezvous('env://')
next(gen)
with Env(without(vars, 'WORLD_SIZE')):
gen = c10d.rendezvous('env://?world_size={}'.format(1))
_, _, size = next(gen)
self.assertEqual(size, 1)
with Env(without(vars, 'RANK')):
gen = c10d.rendezvous('env://?rank={}'.format(0))
_, rank, _ = next(gen)
self.assertEqual(rank, 0)
with Env(withouts(vars, ['RANK', 'WORLD_SIZE'])):
gen = c10d.rendezvous('env://?rank={}&world_size={}'.format(0, 1))
_, rank, size = next(gen)
self.assertEqual(rank, 0)
self.assertEqual(size, 1)
@retry_on_address_already_in_use_error
def test_nominal(self):
os.environ['WORLD_SIZE'] = '1'
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = str(common.find_free_port())
# Single rank
os.environ['RANK'] = '0'
gen0 = c10d.rendezvous('env://')
store0, rank0, size0 = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(1, size0)
store0.set("key0", "value0")
# check with get
self.assertEqual(b"value0", store0.get("key0"))
class RendezvousFileTest(TestCase):
def test_common_errors(self):
with self.assertRaisesRegex(ValueError, 'path missing'):
gen = c10d.rendezvous('file://?rank=0&world_size=1')
next(gen)
with self.assertRaisesRegex(ValueError, 'rank parameter missing'):
gen = c10d.rendezvous('file:///tmp/foo?world_size=1')
next(gen)
with self.assertRaisesRegex(ValueError, 'size parameter missing'):
gen = c10d.rendezvous('file:///tmp/foo?rank=0')
next(gen)
def test_nominal(self):
with tempfile.NamedTemporaryFile(delete=False) as file:
url = 'file://%s?world_size=%d' % (file.name, 2)
gen0 = c10d.rendezvous(url + "&rank=0")
store0, rank0, size0 = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(2, size0)
gen1 = c10d.rendezvous(url + "&rank=1")
store1, rank1, size1 = next(gen1)
self.assertEqual(1, rank1)
self.assertEqual(2, size1)
# Set value on both stores
store0.set("key0", "value0")
store1.set("key1", "value1")
# Cross check with get
self.assertEqual(b"value0", store1.get("key0"))
self.assertEqual(b"value1", store0.get("key1"))
class RendezvousTCPTest(TestCase):
def test_common_errors(self):
with self.assertRaisesRegex(ValueError, 'port number missing'):
gen = c10d.rendezvous('tcp://127.0.0.1?rank=0&world_size=1')
next(gen)
with self.assertRaisesRegex(ValueError, 'rank parameter missing'):
gen = c10d.rendezvous('tcp://127.0.0.1:23456?world_size=1')
next(gen)
with self.assertRaisesRegex(ValueError, 'size parameter missing'):
gen = c10d.rendezvous('tcp://127.0.0.1:23456?rank=0')
next(gen)
@retry_on_address_already_in_use_error
def test_nominal(self):
addr = 'localhost'
port = common.find_free_port()
url = 'tcp://%s:%d?world_size=%d' % (addr, port, 1)
gen0 = c10d.rendezvous(url + "&rank=0")
store0, rank0, size0 = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(1, size0)
# Set value on the single store
store0.set("key0", "value0")
# check with get
self.assertEqual(b"value0", store0.get("key0"))
class MultiProcessTestCase(TestCase):
MAIN_PROCESS_RANK = -1
@property
def world_size(self):
return 4
@staticmethod
def join_or_run(fn):
@wraps(fn)
def wrapper(self):
if self.rank == self.MAIN_PROCESS_RANK:
self._join_processes(fn)
else:
fn(self)
return wrapper
    # The main process spawns N subprocesses that run the test.
    # setUpClass overwrites every test function so that it either assumes the
    # role of the main process and joins its subprocesses, or runs the
    # underlying test function.
@classmethod
def setUpClass(cls):
for attr in dir(cls):
if attr.startswith('test'):
fn = getattr(cls, attr)
setattr(cls, attr, cls.join_or_run(fn))
def setUp(self):
super(MultiProcessTestCase, self).setUp()
self.rank = self.MAIN_PROCESS_RANK
self.file = tempfile.NamedTemporaryFile(delete=False)
self.processes = [self._spawn_process(rank) for rank in range(int(self.world_size))]
def tearDown(self):
super(MultiProcessTestCase, self).tearDown()
for p in self.processes:
p.terminate()
def _spawn_process(self, rank):
name = 'process ' + str(rank)
process = multiprocessing.Process(target=self._run, name=name, args=(rank,))
process.start()
return process
def _run(self, rank):
self.rank = rank
# self.id() == e.g. '__main__.TestDistributed.test_get_rank'
        # We're retrieving a corresponding test and executing it.
getattr(self, self.id().split(".")[2])()
sys.exit(0)
def _join_processes(self, fn):
timeout = get_timeout(self.id())
start_time = time.time()
for p in self.processes:
p.join(timeout)
elapsed_time = time.time() - start_time
self._check_return_codes(elapsed_time)
def _check_return_codes(self, elapsed_time):
"""
Checks that the return codes of all spawned processes match, and skips
tests if they returned a return code indicating a skipping condition.
"""
first_process = self.processes[0]
for i, p in enumerate(self.processes):
if p.exitcode is None:
raise RuntimeError('Process {} terminated or timed out after {} seconds'.format(i, elapsed_time))
self.assertEqual(p.exitcode, first_process.exitcode)
for skip in TEST_SKIPS.values():
if first_process.exitcode == skip.exit_code:
raise unittest.SkipTest(skip.message)
self.assertEqual(first_process.exitcode, 0)
@property
def is_master(self):
return self.rank == 0
class TimeoutTest(TestCase):
def _test_store_timeout(self, backend, init_method, c2p):
try:
c10d.distributed_c10d.init_process_group(
backend=backend, init_method=init_method, world_size=1, rank=0,
timeout=timedelta(seconds=1))
default_store = c10d.distributed_c10d._get_default_store()
tik = time.time()
with self.assertRaisesRegex(RuntimeError, "Timeout"):
default_store.get("nonexistent key")
tok = time.time()
c10d.destroy_process_group()
c2p.append(float(tok - tik))
except RuntimeError as e:
# catch "Address already in use" error and report it to the main
# thread
c2p.append(e)
def _init_methods(self):
f = tempfile.NamedTemporaryFile(delete=False)
yield "file://%s" % f.name
f.close()
yield "tcp://127.0.0.1:%d" % common.find_free_port()
def _test_default_store_timeout(self, backend):
for init_method in self._init_methods():
c2p = []
t = threading.Thread(
target=self._test_store_timeout,
args=(backend, init_method, c2p))
t.daemon = True
t.start()
t.join(5)
self.assertEqual(1, len(c2p))
if isinstance(c2p[0], float):
# waiting time should be 1s, use 3s to rule out false alarm
self.assertGreater(3, c2p[0])
elif isinstance(c2p[0], RuntimeError):
# let @retry_on_address_already_in_use_error handle the error
raise c2p[0]
else:
raise RuntimeError("Unexpected type {}".format(type(c2p[0])))
@retry_on_address_already_in_use_error
def test_default_store_timeout_nccl(self):
# TODO remove this hack
if not hasattr(c10d, "ProcessGroupNCCL"):
raise unittest.SkipTest("C10D is not built with NCCL process group,"
" skipping test")
self._test_default_store_timeout('nccl')
@retry_on_address_already_in_use_error
def test_default_store_timeout_gloo(self):
self._test_default_store_timeout('gloo')
class ProcessGroupGlooTest(MultiProcessTestCase):
def opts(self, threads=2):
opts = c10d.ProcessGroupGloo.Options()
opts.devices = [c10d.ProcessGroupGloo.create_tcp_device(interface="lo")]
opts.timeout = 5.0
opts.threads = threads
return opts
def test_broadcast_checks(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.BroadcastOptions()
opts.rootRank = -1
opts.rootTensor = 0
pg.broadcast([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.world_size
opts.rootTensor = 0
pg.broadcast([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root tensor"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = -1
pg.broadcast([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root tensor"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 1
pg.broadcast([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root tensor"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.broadcast([], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.broadcast([t1, t2], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
opts = c10d.BroadcastOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.broadcast([t1, t3], opts)
def _test_broadcast_basics(self, fn):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
work = pg.broadcast(xs, opts)
work.wait()
# Every rank is root once
for i in range(self.world_size):
# Run with 1 input tensor
x = fn(torch.Tensor([self.rank]))
broadcast([x], i, 0)
self.assertEqual(torch.Tensor([i]), x)
# Run with 2 input tensors
num = 2
for j in range(num):
xs = [
fn(torch.Tensor([self.rank * num + 0.0])),
fn(torch.Tensor([self.rank * num + 1.0])),
]
broadcast(xs, i, j)
self.assertEqual(torch.Tensor([i * num + j]), xs[0])
self.assertEqual(torch.Tensor([i * num + j]), xs[1])
# Test overloaded convenience function
x = torch.Tensor([self.rank + 1.0])
work = pg.broadcast(x, root=0)
work.wait()
self.assertEqual(torch.Tensor([1.0]), x)
def test_broadcast_basics(self):
self._test_broadcast_basics(lambda t: t.clone())
@skip_if_not_multigpu
def test_broadcast_basics_cuda(self):
self._test_broadcast_basics(lambda t: t.clone().cuda())
def _test_broadcast_stress(self, inputs):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts(threads=8))
work_handles = [
pg.broadcast(inputs[i], root=(i % self.world_size))
for i in range(len(inputs))
]
for i, work_handle in enumerate(work_handles):
work_handle.wait()
self.assertEqual(
torch.Tensor([
(i * self.world_size) + (i % self.world_size)
]),
inputs[i],
"Mismatch in iteration %d" % i,
)
def test_broadcast_stress(self):
inputs = [torch.Tensor([i * self.world_size + self.rank]) for i in range(1000)]
self._test_broadcast_stress(inputs)
@skip_if_not_multigpu
def test_broadcast_stress_cuda(self):
inputs = [torch.Tensor([i * self.world_size + self.rank]).cuda() for i in range(1000)]
self._test_broadcast_stress(inputs)
def test_allreduce_checks(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "requires non-empty tensor list"):
opts = c10d.AllreduceOptions()
pg.allreduce([], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
opts = c10d.AllreduceOptions()
pg.allreduce([t1, t2], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
opts = c10d.AllreduceOptions()
pg.allreduce([t1, t3], opts)
def _test_allreduce_basics(self, fn):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Single input tests
tests = simple_reduce_tests(self.rank, self.world_size)
for (op, input, output) in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensor = fn(input)
work = pg.allreduce([tensor], opts)
work.wait()
self.assertEqual(output, tensor)
# Multi input tests
tests = simple_multi_input_reduce_tests(self.rank, self.world_size)
for (op, inputs, output) in tests:
opts = c10d.AllreduceOptions()
opts.reduceOp = op
tensors = [fn(input) for input in inputs]
work = pg.allreduce(tensors, opts)
work.wait()
for tensor in tensors:
self.assertEqual(output, tensor)
# Test overloaded convenience function (defaults to using sum)
x = fn(torch.Tensor([self.rank + 1.0]))
work = pg.allreduce(x)
work.wait()
self.assertEqual(torch.Tensor([float(self.world_size * (self.world_size + 1) / 2)]), x)
def test_allreduce_basics(self):
self._test_allreduce_basics(lambda t: t.clone())
@skip_if_not_multigpu
def test_allreduce_basics_cuda(self):
self._test_allreduce_basics(lambda t: t.clone().cuda())
def _test_allreduce_stress(self, inputs):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts(threads=8))
work_handles = [pg.allreduce(inputs[i]) for i in range(len(inputs))]
for i, work_handle in enumerate(work_handles):
work_handle.wait()
self.assertEqual(
torch.Tensor([
(i * self.world_size) +
(self.world_size * (self.world_size - 1) / 2)
]),
inputs[i],
"Mismatch in iteration %d" % i,
)
def test_allreduce_stress(self):
inputs = [torch.Tensor([i + self.rank]) for i in range(1000)]
self._test_allreduce_stress(inputs)
@skip_if_not_multigpu
def test_allreduce_stress_cuda(self):
inputs = [torch.Tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_allreduce_stress(inputs)
def test_scatter_checks(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.ScatterOptions()
opts.rootRank = -1
pg.scatter([t1], [], opts)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.ScatterOptions()
opts.rootRank = self.world_size
pg.scatter([t1], [], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element output tensor list"):
opts = c10d.ScatterOptions()
opts.rootRank = 0
pg.scatter([], [], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element output tensor list"):
opts = c10d.ScatterOptions()
opts.rootRank = 0
pg.scatter([t1, t1], [], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element input list"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element input list"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * self.world_size, [t1] * self.world_size], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element input list"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * (self.world_size - 1)], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element input list"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * (self.world_size + 1)], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element input list"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t1] * (self.world_size + 1)], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t2] * self.world_size], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
opts = c10d.ScatterOptions()
opts.rootRank = self.rank
pg.scatter([t1], [[t3] * self.world_size], opts)
with self.assertRaisesRegex(ValueError, "requires empty input on non-root"):
opts = c10d.ScatterOptions()
opts.rootRank = (self.rank + 1) % self.world_size
pg.scatter([t1], [[t1] * self.world_size], opts)
def _test_scatter_basics(self, fn):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Preallocate tensors for input/output
input = [fn(torch.Tensor([self.rank])) for _ in range(self.world_size)]
outputs = [fn(torch.Tensor([-1])) for _ in range(self.world_size)]
# Take turns being the scatter root and accumulate work items
work = []
for i in range(self.world_size):
opts = c10d.ScatterOptions()
opts.rootRank = i
if i == self.rank:
work.append(pg.scatter([outputs[i]], [input], opts))
else:
work.append(pg.scatter([outputs[i]], [], opts))
# Wait for work to complete
for i in range(self.world_size):
work[i].wait()
self.assertEqual(torch.Tensor([i]), outputs[i])
def test_scatter_basics(self):
self._test_scatter_basics(lambda t: t.clone())
@skip_if_not_multigpu
def test_scatter_basics_cuda(self):
self._test_scatter_basics(lambda t: t.clone().cuda())
def _test_scatter_stress(self, inputs, fn):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts(threads=8))
outputs = [
[fn(torch.Tensor([-1])) for _ in range(self.world_size)]
for _ in range(len(inputs))
]
work_handles = []
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.ScatterOptions()
opts.rootRank = root
if root == self.rank:
work = pg.scatter([outputs[i][root]], [[fn(e) for e in inputs[i]]], opts)
else:
work = pg.scatter([outputs[i][root]], [], opts)
work_handles.append(work)
for i, work_handle in enumerate(work_handles):
work_handle.wait()
iter = i // self.world_size
root = i % self.world_size
self.assertEqual(
torch.Tensor([iter + root]),
outputs[iter][root],
"Mismatch in iteration %d for rank %d" % (iter, root)
)
def test_scatter_stress(self):
inputs = [
[torch.Tensor([i + self.rank]) for _ in range(self.world_size)]
for i in range(1000)
]
self._test_scatter_stress(inputs, lambda t: t.clone())
@unittest.skip("Test is flaky, see https://github.com/pytorch/pytorch/issues/15963")
@skip_if_not_multigpu
def test_scatter_stress_cuda(self):
inputs = [
[torch.Tensor([i + self.rank]) for _ in range(self.world_size)]
for i in range(1000)
]
self._test_scatter_stress(inputs, lambda t: t.clone().cuda())
def test_gather_checks(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.GatherOptions()
opts.rootRank = -1
pg.gather([], [t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.GatherOptions()
opts.rootRank = self.world_size
pg.gather([], [t1], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element input tensor list"):
opts = c10d.GatherOptions()
opts.rootRank = 0
pg.gather([], [], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element input tensor list"):
opts = c10d.GatherOptions()
opts.rootRank = 0
pg.gather([], [t1, t1], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element output list"):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([], [t1], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element output list"):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t1] * self.world_size, [t1] * self.world_size], [t1], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element output list"):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t1] * (self.world_size - 1)], [t1], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element output list"):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t1] * (self.world_size + 1)], [t1], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t2] * self.world_size], [t1], opts)
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
opts = c10d.GatherOptions()
opts.rootRank = self.rank
pg.gather([[t3] * self.world_size], [t1], opts)
with self.assertRaisesRegex(ValueError, "requires empty output on non-root"):
opts = c10d.GatherOptions()
opts.rootRank = (self.rank + 1) % self.world_size
pg.gather([[t1] * self.world_size], [t1], opts)
def _test_gather_basics(self, fn):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Preallocate tensors for input/output
input = [fn(torch.Tensor([self.rank]))]
outputs = [fn(torch.Tensor([-1])) for _ in range(self.world_size)]
# Take turns being the gather root and accumulate work items
work = []
for i in range(self.world_size):
opts = c10d.GatherOptions()
opts.rootRank = i
if i == self.rank:
work.append(pg.gather([outputs], input, opts))
else:
work.append(pg.gather([], input, opts))
# Wait for work to complete
expected = [torch.Tensor([rank]) for rank in range(self.world_size)]
for i in range(self.world_size):
work[i].wait()
if i == self.rank:
self.assertEqual(expected, outputs)
def test_gather_basics(self):
self._test_gather_basics(lambda t: t.clone())
@skip_if_not_multigpu
def test_gather_basics_cuda(self):
self._test_gather_basics(lambda t: t.clone().cuda())
def _test_gather_stress(self, inputs, fn):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts(threads=8))
work_handles = []
outputs = [
[
[fn(torch.Tensor([-1])) for _ in range(self.world_size)]
] for _ in range(len(inputs))
]
expected_outputs = [
[
[torch.Tensor([i + j]) for j in range(self.world_size)]
] for i in range(len(inputs))
]
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.GatherOptions()
opts.rootRank = root
if root == self.rank:
work = pg.gather(outputs[i], [fn(inputs[i])], opts)
else:
work = pg.gather([], [fn(inputs[i])], opts)
work_handles.append(work)
for i, work_handle in enumerate(work_handles):
work_handle.wait()
iter = i // self.world_size
root = i % self.world_size
if root == self.rank:
self.assertEqual(
expected_outputs[iter],
outputs[iter],
"Mismatch in iteration %d for root %d" % (iter, root)
)
def test_gather_stress(self):
inputs = [torch.Tensor([i + self.rank]) for i in range(1000)]
self._test_gather_stress(inputs, lambda t: t.clone())
@skip_if_not_multigpu
def test_gather_stress_cuda(self):
inputs = [torch.Tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_gather_stress(inputs, lambda t: t.clone().cuda())
def test_allgather_checks(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
t2 = torch.zeros([1], dtype=torch.float64)
t3 = torch.zeros([2], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "requires non-empty input tensor list"):
pg.allgather([], [])
with self.assertRaisesRegex(ValueError, "requires input/output tensor lists to have the same length"):
pg.allgather([], [t1])
with self.assertRaisesRegex(ValueError, "requires input/output tensor lists to have the same length"):
pg.allgather([[t1] * self.world_size, [t1] * self.world_size], [t1])
with self.assertRaisesRegex(ValueError, "invalid output tensor list"):
pg.allgather([[t1] * (self.world_size - 1)], [t1])
with self.assertRaisesRegex(ValueError, "invalid output tensor list"):
pg.allgather([[t1] * (self.world_size + 1)], [t1])
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
pg.allgather([[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t2])
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
pg.allgather([[t1, t1] * (self.world_size), [t1, t1] * (self.world_size)], [t1, t3])
with self.assertRaisesRegex(ValueError, "invalid tensor type"):
pg.allgather([([t1, t2] * (self.world_size))[:self.world_size]], [t1])
with self.assertRaisesRegex(ValueError, "invalid tensor size"):
pg.allgather([([t1, t3] * (self.world_size))[:self.world_size]], [t1])
def _test_allgather_basics(self, fn):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Run with N input tensor per rank
for n in [1, 2, 3]:
input = [
fn(torch.Tensor([n * self.rank + i])) for i in range(n)
]
output = [
[
fn(torch.Tensor([-1])) for _ in range(n * self.world_size)
] for _ in range(n)
]
expected_output = [
[
torch.Tensor([i]) for i in range(n * self.world_size)
] for _ in range(n)
]
work = pg.allgather(output, input)
work.wait()
self.assertEqual(expected_output, output)
def test_allgather_basics(self):
self._test_allgather_basics(lambda t: t.clone())
@skip_if_not_multigpu
def test_allgather_basics_cuda(self):
self._test_allgather_basics(lambda t: t.clone().cuda())
def _test_allgather_stress(self, inputs, fn):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts(threads=8))
work_handles = []
outputs = [
[
[fn(torch.Tensor([-1])) for _ in range(self.world_size)]
] for _ in range(len(inputs))
]
expected_outputs = [
[
[torch.Tensor([i + j]) for j in range(self.world_size)]
] for i in range(len(inputs))
]
for i in range(len(inputs)):
work = pg.allgather(outputs[i], [fn(inputs[i])])
work_handles.append(work)
for i, work_handle in enumerate(work_handles):
work_handle.wait()
self.assertEqual(
expected_outputs[i],
outputs[i],
"Mismatch in iteration %d" % i
)
def test_allgather_stress(self):
inputs = [torch.Tensor([i + self.rank]) for i in range(1000)]
self._test_allgather_stress(inputs, lambda t: t.clone())
@skip_if_not_multigpu
def test_allgather_stress_cuda(self):
inputs = [torch.Tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_allgather_stress(inputs, lambda t: t.clone().cuda())
def test_reduce_checks(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
t1 = torch.zeros([1], dtype=torch.float32)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.ReduceOptions()
opts.rootRank = -1
opts.rootTensor = 0
pg.reduce([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root rank"):
opts = c10d.ReduceOptions()
opts.rootRank = self.world_size
opts.rootTensor = 0
pg.reduce([t1], opts)
with self.assertRaisesRegex(ValueError, "invalid root tensor"):
opts = c10d.ReduceOptions()
opts.rootRank = self.rank
opts.rootTensor = 1
pg.reduce([t1], opts)
with self.assertRaisesRegex(ValueError, "requires a single-element tensor list"):
opts = c10d.ReduceOptions()
opts.rootRank = self.rank
opts.rootTensor = 0
pg.reduce([t1, t1], opts)
def _test_reduce_basics(self, fn):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
for (op, input, output) in simple_reduce_tests(self.rank, self.world_size):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.reduceOp = op
opts.rootRank = root
tmp = fn(input)
work = pg.reduce([tmp], opts)
work.wait()
if root == self.rank:
self.assertEqual(output, tmp)
def test_reduce_basics(self):
self._test_reduce_basics(lambda t: t.clone())
@skip_if_not_multigpu
def test_reduce_basics_cuda(self):
self._test_reduce_basics(lambda t: t.clone().cuda())
def _test_reduce_stress(self, inputs):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts(threads=8))
work_handles = []
outputs = []
for i in range(len(inputs)):
for root in range(self.world_size):
opts = c10d.ReduceOptions()
opts.rootRank = root
tmp = inputs[i].clone()
outputs.append(tmp)
work = pg.reduce([tmp], opts)
work_handles.append(work)
for i, work_handle in enumerate(work_handles):
work_handle.wait()
iter = i // self.world_size
root = i % self.world_size
if root == self.rank:
self.assertEqual(
torch.Tensor([
(iter * self.world_size) +
(self.world_size * (self.world_size - 1) / 2)
]),
outputs[i],
"Mismatch in iteration %d with root rank %d" % (iter, root),
)
def test_reduce_stress(self):
inputs = [torch.Tensor([i + self.rank]) for i in range(1000)]
self._test_reduce_stress(inputs)
@skip_if_not_multigpu
def test_reduce_stress_cuda(self):
inputs = [torch.Tensor([i + self.rank]).cuda() for i in range(1000)]
self._test_reduce_stress(inputs)
def test_send_recv_all_to_all(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size, self.opts())
# Preallocate tensors for input/output
inputs = [torch.Tensor([self.rank]) for _ in range(self.world_size)]
outputs = [torch.Tensor([-1]) for _ in range(self.world_size)]
# Issue sends
send_work = []
for i in range(self.world_size):
if i == self.rank:
continue
send_work.append(pg.send([inputs[i]], i, 0))
# Issue recvs
recv_work = []
for i in range(self.world_size):
if i == self.rank:
continue
recv_work.append(pg.recv([outputs[i]], i, 0))
# Wait for sends to complete
for work in send_work:
work.wait()
self.assertTrue(work.is_completed())
# Wait for recvs to complete
for work in recv_work:
work.wait()
self.assertTrue(work.is_completed())
# Test that every output other than our own contains the respective rank
for i in range(self.world_size):
if i == self.rank:
continue
self.assertEqual(torch.Tensor([i]), outputs[i])
def test_timeout_kwarg(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(
store,
self.rank,
self.world_size,
timeout=timedelta(seconds=0.5))
# Wait on barrier
pg.barrier().wait()
# Sleep on one of the processes to trigger barrier timeout
if self.rank == 0:
time.sleep(1.0)
# The barrier will now time out
with self.assertRaisesRegex(RuntimeError, " (Timed out|closed) "):
pg.barrier().wait()
def test_barrier_implies_wait(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupGloo(store, self.rank, self.world_size)
# Kick off allreduce operations
size = (100, 100)
num = 16
tensors = [torch.full(size, float(i)) for i in range(num)]
for tensor in tensors:
# Note: leak the returned work handle
pg.allreduce(tensor)
# Barrier should ensure all previous work has completed
pg.barrier().wait()
for i, tensor in enumerate(tensors):
self.assertEqual(torch.full(size, float(i * self.world_size)), tensor)
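# (With the default SUM op every rank contributes full(size, i) for tensor i,
# so after the allreduce each entry equals i * world_size, as asserted above.)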
class ProcessGroupNCCLTest(TestCase):
MAIN_PROCESS_RANK = 0
def setUp(self):
if not hasattr(c10d, "ProcessGroupNCCL"):
raise unittest.SkipTest("C10D is not built with NCCL process group,"
" skipping test")
self.rank = self.MAIN_PROCESS_RANK
self.world_size = 1
self.file = tempfile.NamedTemporaryFile(delete=False)
self.num_gpus = torch.cuda.device_count()
if self.num_gpus < 2:
raise unittest.SkipTest("NCCL test requires 2+ GPUs")
def tearDown(self):
pass
def test_broadcast_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def broadcast(xs, rootRank, rootTensor):
opts = c10d.BroadcastOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
work = pg.broadcast(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.num_gpus):
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.Tensor([i]).cuda(i))
broadcast(tensors, self.rank, rt)
for i in range(self.num_gpus):
self.assertEqual(tensors[i], tensors[rt])
def test_allreduce_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce(tensors, op):
opts = c10d.AllreduceOptions()
opts.reduceOp = op
work = pg.allreduce(tensors, opts)
work.wait()
# Sum
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.Tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.SUM)
for i in range(self.num_gpus):
self.assertEqual(
torch.Tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
tensors[i])
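# (GPU i starts with the value i + 1, so the SUM allreduce yields
# 1 + 2 + ... + num_gpus = num_gpus * (num_gpus + 1) / 2 on every GPU.)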
# Product
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.Tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.PRODUCT)
for i in range(self.num_gpus):
self.assertEqual(
torch.Tensor([float(math.factorial(self.num_gpus))]),
tensors[i])
# Min
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.Tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MIN)
for i in range(self.num_gpus):
self.assertEqual(torch.Tensor([1.0]), tensors[i])
# Max
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.Tensor([i + 1]).cuda(i))
allreduce(tensors, c10d.ReduceOp.MAX)
for i in range(self.num_gpus):
self.assertEqual(torch.Tensor([self.num_gpus]), tensors[i])
def test_reduce_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce(xs, rootRank, rootTensor):
opts = c10d.ReduceOptions()
opts.rootRank = rootRank
opts.rootTensor = rootTensor
work = pg.reduce(xs, opts)
work.wait()
# for every root tensor
for rt in range(self.num_gpus):
tensors = []
for i in range(self.num_gpus):
tensors.append(torch.Tensor([i + 1]).cuda(i))
reduce(tensors, self.rank, rt)
self.assertEqual(
torch.Tensor([float(self.num_gpus * (self.num_gpus + 1) / 2)]),
tensors[rt])
def test_allgather_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allgather(output_ts, input_ts):
work = pg.allgather(output_ts, input_ts)
work.wait()
tensors = []
output_ts = [[] for _ in range(self.num_gpus)]
for idx, ls in enumerate(output_ts):
for _ in range(self.world_size * self.num_gpus):
ls.append(torch.Tensor([0]).cuda(idx))
for i in range(self.num_gpus):
tensors.append(torch.Tensor([i]).cuda(i))
allgather(output_ts, tensors)
# Verification
for device_ts in output_ts:
for s_idx, t in enumerate(device_ts):
self.assertEqual(torch.Tensor([s_idx]), t)
def test_reduce_scatter_ops(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def reduce_scatter(outputs, input_lists, op):
opts = c10d.ReduceScatterOptions()
opts.reduceOp = op
work = pg.reduce_scatter(outputs, input_lists, opts)
work.wait()
virtual_rank = self.rank * self.world_size
virtual_world_size = self.num_gpus * self.world_size
output = [
torch.Tensor([0]).cuda(i)
for i in range(self.num_gpus)
]
# Input value ranges per (rank, GPU), e.g.:
#           GPU 0      GPU 1      GPU 2
# rank 0    [0..11]    [1..12]
# rank 1    [3..14]
# rank 2
# rank 3
# Sum
tensor_lists = [
[
torch.Tensor([self.rank * self.num_gpus + i + j]).cuda(i)
for j in range(virtual_world_size)
]
for i in range(self.num_gpus)
]
reduce_scatter(output, tensor_lists, c10d.ReduceOp.SUM)
for i in range(self.num_gpus):
expected = torch.Tensor([
float(self.num_gpus * (self.num_gpus - 1) / 2) +
(virtual_rank + i) * virtual_world_size
])
self.assertEqual(expected, output[i])
# Min
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MIN)
for i in range(self.num_gpus):
expected = torch.Tensor([self.rank * self.world_size + i])
self.assertEqual(expected, output[i])
# Max
reduce_scatter(output, tensor_lists, c10d.ReduceOp.MAX)
for i in range(self.num_gpus):
expected = torch.Tensor(
[self.rank * self.world_size + i + virtual_world_size - 1]
)
self.assertEqual(expected, output[i])
# Product
tensor_lists = [
[
torch.Tensor([
(self.rank * self.num_gpus + i + j) % virtual_world_size + 1
]).cuda(i)
for j in range(virtual_world_size)
]
for i in range(self.num_gpus)
]
reduce_scatter(output, tensor_lists, c10d.ReduceOp.PRODUCT)
for i in range(self.num_gpus):
expected = torch.Tensor([float(math.factorial(virtual_world_size))])
self.assertEqual(expected, output[i])
def test_barrier(self):
store = c10d.FileStore(self.file.name, self.world_size)
pg = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
def allreduce(tensors):
opts = c10d.AllreduceOptions()
work = pg.allreduce(tensors, opts)
return work
# Make the collectives operate on 1, 2, 3, ..., self.num_gpus GPUs
tensors_list = [[] for _ in range(2, self.num_gpus + 1)]
for i in range(2, self.num_gpus + 1):
for j in range(i):
tensors_list[i - 2].append(torch.Tensor([j + 1]).cuda(j))
works = []
for tensors in tensors_list:
work = allreduce(tensors)
works.append(work)
# Barrier will ensure that all previous work is completed
pg.barrier().wait()
for i in range(2, self.num_gpus + 1):
for j in range(i):
self.assertEqual(
torch.Tensor([float(i * (i + 1) / 2)]),
tensors_list[i - 2][j])
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
class DoubleGpuNet(nn.Module):
def __init__(self, gpus):
super(DoubleGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[1])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(torch.Tensor([2, 2]).long(),
requires_grad=False).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.fc3(x)
return F.softmax(x, dim=1).to(dev0)
class QuadraGpuNet(nn.Module):
def __init__(self, gpus):
super(QuadraGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[2])
self.fc4 = nn.Linear(4, 4, bias=False).to(gpus[3])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(torch.Tensor([2, 2]).long(),
requires_grad=False).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
dev2 = self.fc3.weight.device
dev3 = self.fc4.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.relu(self.fc3(x.to(dev2)))
x = self.fc4(x.to(dev3))
return F.softmax(x, dim=1).to(dev0)
class DistributedDataParallelTest(MultiProcessTestCase):
def tearDown(self):
# DistributedDataParallel test doesn't seem to call FileStore destructor
# TODO: investigate this test; it is known to have issues
# Use this hack to remove files for that test
try:
os.remove(self.file.name)
except OSError:
pass
@property
def world_size(self):
return 2
def _prepare_single_device_module(self, process_group, gpus, global_batch_size):
model = Net()
ddp_model = DistributedDataParallel(
copy.deepcopy(model).cuda(gpus[0]),
device_ids=gpus,
process_group=process_group,
bucket_cap_mb=0.001)
model.cuda(gpus[0])
input = torch.randn(global_batch_size, 2).cuda(gpus[0])
target = torch.randn(global_batch_size, 4).cuda(gpus[0])
return model, ddp_model, input, target
def _prepare_multi_device_module(self, process_group, gpus, global_batch_size):
self.assertTrue(
len(gpus) == 2 or len(gpus) == 4,
"unexpected devices for ddp tests {}".format(gpus))
if len(gpus) == 2:
model = DoubleGpuNet(gpus)
elif len(gpus) == 4:
model = QuadraGpuNet(gpus)
ddp_model = DistributedDataParallel(
copy.deepcopy(model),
process_group=process_group,
bucket_cap_mb=0.001)
input = torch.randn(global_batch_size, 2).to(gpus[0])
target = torch.randn(global_batch_size, 4)
return model, ddp_model, input, target
def _test_ddp_with_process_group(self, process_group, gpus, multi_gpu=False):
local_batch_size = len(gpus)
global_batch_size = self.world_size * local_batch_size
if multi_gpu:
model, ddp_model, input, target = \
self._prepare_multi_device_module(
process_group, gpus, global_batch_size)
else:
model, ddp_model, input, target = \
self._prepare_single_device_module(
process_group, gpus, global_batch_size)
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
def update_parameters(model):
for param in model.parameters():
param.data -= param.grad
param.grad = None
# check two model parameters over 2 iterations
for iteration in range(2):
# single cpu/gpu training
step_model(model, input, target)
# DDP training, DDP scatters subsets of input_cpu to nodes/GPUs
step_model(ddp_model,
input[self.rank * local_batch_size: (self.rank + 1) * local_batch_size],
target[self.rank * local_batch_size: (self.rank + 1) * local_batch_size])
# Update weights and run a second iteration to shake out errors
update_parameters(model)
update_parameters(ddp_model)
self.assertEqual(len(list(model.parameters())), len(list(ddp_model.parameters())))
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertEqual(i, j)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
def _test_gloo_backend(self, gpus, multi_gpu=False, use_str=False):
if use_str:
gpus = list(map(lambda i: torch.device('cuda:' + str(i)), gpus))
store = c10d.FileStore(self.file.name, self.world_size)
options = c10d.ProcessGroupGloo.Options()
options.devices = [c10d.ProcessGroupGloo.create_tcp_device(interface="lo")]
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size, options)
self._test_ddp_with_process_group(process_group, gpus, multi_gpu)
@skip_if_not_multigpu
def test_gloo_backend(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self._test_gloo_backend(gpus)
@skip_if_not_multigpu
def test_gloo_backend_str(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self._test_gloo_backend(gpus, use_str=True)
@skip_if_lt_x_gpu(4)
def test_gloo_backend_2gpu_module(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self._test_gloo_backend(gpus[:2], multi_gpu=True)
@skip_if_lt_x_gpu(4)
def test_gloo_backend_2gpu_module_str(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self._test_gloo_backend(gpus[:2], multi_gpu=True, use_str=True)
@skip_if_lt_x_gpu(8)
def test_gloo_backend_4gpu_module(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self._test_gloo_backend(gpus[:4], multi_gpu=True)
@skip_if_lt_x_gpu(8)
def test_gloo_backend_4gpu_module_str(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self._test_gloo_backend(gpus[:4], multi_gpu=True, use_str=True)
def _test_nccl_backend(self, gpus, multi_gpu=False, use_str=False):
if use_str:
gpus = list(map(lambda i: torch.device('cuda:' + str(i)), gpus))
store = c10d.FileStore(self.file.name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
self._test_ddp_with_process_group(process_group, gpus, multi_gpu)
@skip_if_not_multigpu
@skip_if_not_nccl
def test_nccl_backend(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self._test_nccl_backend(gpus)
@skip_if_not_multigpu
@skip_if_not_nccl
def test_nccl_backend_str(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self._test_nccl_backend(gpus, use_str=True)
@skip_if_lt_x_gpu(4)
@skip_if_not_nccl
def test_nccl_backend_2gpu_module(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self._test_nccl_backend(gpus[:2], multi_gpu=True)
@skip_if_lt_x_gpu(4)
@skip_if_not_nccl
def test_nccl_backend_2gpu_module_str(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self._test_nccl_backend(gpus[:2], multi_gpu=True, use_str=True)
@skip_if_lt_x_gpu(8)
@skip_if_not_nccl
def test_nccl_backend_4gpu_module(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self._test_nccl_backend(gpus[:4], multi_gpu=True)
@skip_if_lt_x_gpu(8)
@skip_if_not_nccl
def test_nccl_backend_4gpu_module_str(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self._test_nccl_backend(gpus[:4], multi_gpu=True, use_str=True)
@skip_if_lt_x_gpu(4)
@skip_if_not_nccl
def test_ddp_multi_device_module_config(self):
gpus = gpus_for_rank(self.world_size)[self.rank]
self.assertTrue(len(gpus) >= 2, "expecting at least 2 gpus per process")
store = c10d.FileStore(self.file.name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus[:2]
model = DoubleGpuNet(gpus)
with self.assertRaisesRegex(AssertionError, "output_device .* single-device CUDA"):
ddp_model = DistributedDataParallel(
model, output_device=gpus[1], process_group=process_group)
with self.assertRaisesRegex(AssertionError, "device_ids .* single-device CUDA"):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group)
with self.assertRaisesRegex(AssertionError, "only works with CUDA devices"):
model.fc1 = model.fc1.cpu()
ddp_model = DistributedDataParallel(model, process_group=process_group)
model = model.cpu()
with self.assertRaisesRegex(AssertionError, "device_ids .* single-device CUDA"):
ddp_model = DistributedDataParallel(
model, device_ids=gpus, process_group=process_group)
@skip_if_not_multigpu
@skip_if_not_nccl
@skip_for_known_issues
def test_dist_broadcast_coalesced_nccl(self):
store = c10d.FileStore(self.file.name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
device = torch.device('cuda')
for fine_grained in [False, True]:
target = torch.arange(60, dtype=torch.float16, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float16, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float16, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
if self.is_master:
# All processes should have these tensors in the end.
tensors = target
else:
# Non-master processes start with empty tensors and should be
# filled with the tensors from the master.
tensors = torch.zeros(60, dtype=torch.float16, device=device).chunk(5)
tensors += torch.zeros(60, dtype=torch.float32, device=device).chunk(5)
tensors += torch.zeros(60, dtype=torch.float16, device=device).chunk(5)
tensors += torch.zeros(60, dtype=torch.float64, device=device).chunk(5)
tensors += torch.zeros(60, dtype=torch.float16, device=device).chunk(5)
tensors += torch.zeros(60, dtype=torch.float32, device=device).chunk(5)
c10d._dist_broadcast_coalesced(
process_group,
tensors,
buffer_size=256,
fine_grained=fine_grained)
self.assertEqual(tensors, target)
@skip_if_not_multigpu
def test_dist_broadcast_coalesced_gloo(self):
store = c10d.FileStore(self.file.name, self.world_size)
options = c10d.ProcessGroupGloo.Options()
options.devices = [c10d.ProcessGroupGloo.create_tcp_device(interface="lo")]
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size, options)
device = torch.device('cuda')
for fine_grained in [False, True]:
target = torch.arange(60, dtype=torch.float16, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float16, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float64, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float16, device=device).chunk(5)
target += torch.arange(60, dtype=torch.float32, device=device).chunk(5)
if self.is_master:
# All processes should have these tensors in the end.
tensors = target
else:
# Non-master processes start with empty tensors and should be
# filled with the tensors from the master.
tensors = torch.zeros(60, dtype=torch.float16, device=device).chunk(5)
tensors += torch.zeros(60, dtype=torch.float32, device=device).chunk(5)
tensors += torch.zeros(60, dtype=torch.float16, device=device).chunk(5)
tensors += torch.zeros(60, dtype=torch.float64, device=device).chunk(5)
tensors += torch.zeros(60, dtype=torch.float16, device=device).chunk(5)
tensors += torch.zeros(60, dtype=torch.float32, device=device).chunk(5)
c10d._dist_broadcast_coalesced(
process_group,
tensors,
buffer_size=128,
fine_grained=fine_grained)
self.assertEqual(tensors, target)
@skip_if_not_multigpu
def test_sync_params_no_buffers(self):
store = c10d.FileStore(self.file.name, self.world_size)
options = c10d.ProcessGroupGloo.Options()
options.devices = [c10d.ProcessGroupGloo.create_tcp_device(interface="lo")]
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size, options)
# Use all available devices on every process here (data is small, so should be fine).
devices = gpus_for_rank(self.world_size)[self.rank]
target = torch.arange(10, dtype=torch.float64, device='cuda:{}'.format(devices[0])).chunk(5)
parameter_data = [target]
parameter_data += [torch.zeros(10, device=torch.device('cuda', d)).chunk(5) for d in devices[1:]]
buffer_data = [[]] * len(parameter_data)
c10d._sync_params(
process_group,
parameter_data=parameter_data,
buffer_data=buffer_data,
devices=devices,
broadcast_bucket_size=10,
broadcast_buffers=False)
for device_data in parameter_data:
for i, parameter in enumerate(device_data):
self.assertEqual(parameter, target[i])
@skip_if_not_multigpu
def test_sync_params_with_buffers(self):
store = c10d.FileStore(self.file.name, self.world_size)
options = c10d.ProcessGroupGloo.Options()
options.devices = [c10d.ProcessGroupGloo.create_tcp_device(interface="lo")]
process_group = c10d.ProcessGroupGloo(store, self.rank, self.world_size, options)
devices = gpus_for_rank(self.world_size)[self.rank]
target = torch.arange(10, dtype=torch.float64, device='cuda:{}'.format(devices[0])).chunk(5)
parameter_data = [target]
parameter_data += [torch.zeros(10, device=torch.device('cuda', d)).chunk(5) for d in devices[1:]]
# sync_params should do a dist_broadcast for buffers, so we only populate the master buffers and
# then check that other processes' tensors end up matching.
if self.is_master:
buffer_data = [target]
buffer_data += [torch.zeros(10, device=torch.device('cuda', d)).chunk(5) for d in devices[1:]]
else:
buffer_data = [torch.zeros(10, device=torch.device('cuda', d)).chunk(5) for d in devices]
c10d._sync_params(
process_group,
parameter_data=parameter_data,
buffer_data=buffer_data,
devices=devices,
broadcast_bucket_size=10,
broadcast_buffers=True)
for device_data in parameter_data:
for i, parameter in enumerate(device_data):
self.assertEqual(parameter, target[i])
for device_data in buffer_data:
for i, buffer in enumerate(device_data):
self.assertEqual(buffer, target[i])
@skip_if_not_multigpu
@skip_if_not_nccl
def test_fp16(self):
store = c10d.FileStore(self.file.name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
gpus = gpus_for_rank(self.world_size)[self.rank]
model = nn.Linear(1, 1, bias=False).cuda(gpus[0]).half()
nn.init.constant_(model.weight, 1)
ddp_model = DistributedDataParallel(
model,
device_ids=[gpus[0]],
process_group=process_group,
bucket_cap_mb=0.001,
)
# Input 2**15, so that the gradients will overflow with a
# world_size of 2, unless we normalize the gradient by the
# world_size before the reduction
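# (Worked numbers for world_size == 2: float16 tops out around 65504, and
# 2**15 + 2**15 = 65536 would overflow, while averaging before the reduction
# keeps each contribution at 2**15 / 2 = 2**14 and the reduced value at 2**15.)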
input = torch.Tensor([[2**15]]).cuda(gpus[0]).half()
# Step model
ddp_model.train()
output = ddp_model(input)
loss = output.sum()
loss.backward()
self.assertFalse(
any(torch.isinf(p.grad).any() for p in ddp_model.parameters())
)
@skip_if_not_nccl
@skip_if_not_multigpu
def test_queue_reduction(self):
# Set up process group.
store = c10d.FileStore(self.file.name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get this process' split of devices.
devices = gpus_for_rank(self.world_size)[self.rank]
grads_batch = [(torch.ones(10, device=torch.device('cuda', d)) *
(self.rank + 1)).chunk(5)
for d in devices]
work, local_grad_sum = c10d._queue_reduction(process_group,
grads_batch,
devices)
# The first return value should be the allreduce work item.
self.assertTrue(isinstance(work, c10d.Work))
# The second return value will be the finished allreduced gradients.
self.assertTrue(isinstance(local_grad_sum, torch.Tensor))
# Wait for the allreduce to finish.
work.wait()
# The expected result of the allreduce should be the average
self.assertEqual(local_grad_sum,
torch.ones(10) * (self.world_size + 1) * len(devices) / 2.0)
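# (Derivation, following the "average" comment above: rank r holds
# ones * (r + 1) on each of its len(devices) GPUs, so its local sum is
# (r + 1) * len(devices); summing over ranks gives
# len(devices) * world_size * (world_size + 1) / 2, and dividing by
# world_size leaves the (world_size + 1) * len(devices) / 2 asserted here.)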
@skip_if_not_nccl
@skip_if_not_multigpu
def test_sync_reduction(self):
# Set up process group.
store = c10d.FileStore(self.file.name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
# Get this process' split of devices.
devices = gpus_for_rank(self.world_size)[self.rank]
grads_batch = [(torch.ones(10, device=torch.device('cuda', d)) *
(self.rank + 1)).chunk(5)
for d in devices]
work, local_grad_sum = c10d._queue_reduction(process_group,
grads_batch,
devices)
c10d._sync_reduction(work, grads_batch[0], local_grad_sum)
# The expected result of the allreduce should be the average
self.assertEqual(grads_batch[0], (torch.ones(10) * (self.world_size + 1) * len(devices) / 2.0).chunk(5))
@skip_if_not_nccl
@skip_if_not_multigpu
def test_arbitrary_forward_return_value(self):
"""
Note: this test can be sped up by running it on a CPU module
once DistributedDataParallel supports CPU modules.
"""
store = c10d.FileStore(self.file.name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class ForwardReturnValueModule(nn.Module):
def __init__(self):
super(ForwardReturnValueModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, fn):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# The first softmax does NOT include fc3 in its autograd graph
# whereas the second softmax DOES. If we pass only the first
# tensor we see in the output to the reducer, it marks the
# gradient for fc3 as ready (because it doesn't show up). If
# downstream uses of this return value choose to differentiate
# against the second output tensor, it would still receive a
# gradient and a callback for this tensor, resulting in a crash.
return fn(
F.softmax(x, dim=1),
F.softmax(self.fc3(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
ForwardReturnValueModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(device_id)
# Always run "backward" to ensure the reducer is called by autograd.
# If we don't correctly capture the output tensors from the return value,
# the reducer won't see a hook for the unused parameter, and throw an error.
# The correct capture is what we're testing in this function.
def test(box, unbox):
output = model(input, fn=box)
loss = criterion(unbox(output), target)
loss.backward()
# Test with identity return value
test(
box=lambda x, y: (x, y),
unbox=lambda obj: obj[1],
)
# Test with list return value
test(
box=lambda x, y: ["foo", x, "bar", y],
unbox=lambda obj: obj[3],
)
# Test with tuple return value
test(
box=lambda x, y: ("foo", x, "bar", y),
unbox=lambda obj: obj[3],
)
# Test with dict return value
test(
box=lambda x, y: {"foo": "bar", "a": x, "b": y},
unbox=lambda obj: obj["b"],
)
# Test with list with dict return value
test(
box=lambda x, y: ["foo", "bar", {"a": x, "b": y}],
unbox=lambda obj: obj[2]["b"],
)
# Test with dict with list return value
test(
box=lambda x, y: {"foo": "bar", "list": [0, x, 1, y]},
unbox=lambda obj: obj["list"][3],
)
@skip_if_not_nccl
@skip_if_not_multigpu
def test_find_unused_parameters_kwarg(self):
"""
Note: this test can be sped up by running it on a CPU module
once DistributedDataParallel supports CPU modules.
"""
store = c10d.FileStore(self.file.name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class FindUnusedParametersModule(nn.Module):
def __init__(self):
super(FindUnusedParametersModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
# Return the fc3 module so that the caller can invoke it
# outside of the forward function. While this is bad practice,
# we can use it to trigger a reducer error.
return (F.softmax(x, dim=1), self.fc3)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(device_id)
def test_find_unused_parameters(find_unused_parameters, test_default=False):
if test_default:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
else:
model = DistributedDataParallel(
FindUnusedParametersModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=find_unused_parameters,
)
output, fc3 = model(input)
output = fc3(output)
loss = criterion(output, target)
loss.backward()
# First, test that finding unused params under these conditions triggers
# an error when `backward` is called (because fc3 is an unused parameter
# and will therefore be marked ready twice).
try:
test_find_unused_parameters(True)
except Exception as ex:
self.assertTrue(
str(ex).startswith("Expected to mark a variable ready only once."))
else:
self.fail("Expected exception")
# Then test that no error is raised when `find_unused_parameters=False`
# is set explicitly.
try:
test_find_unused_parameters(False)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
# Test find_unused_parameters defaults to False
try:
test_find_unused_parameters(True, test_default=True)
except Exception as ex:
self.fail("Unexpected exception: %s" % ex)
@skip_if_not_nccl
@skip_if_not_multigpu
def test_multiple_outputs_multiple_backward(self):
"""
Note: this test can be sped up by running it on a CPU module
once DistributedDataParallel supports CPU modules.
"""
store = c10d.FileStore(self.file.name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class MultipleOutputModule(nn.Module):
def __init__(self):
super(MultipleOutputModule, self).__init__()
def define_module():
return nn.Sequential(
nn.Linear(2, 10, bias=False),
nn.ReLU(),
nn.Linear(10, 4, bias=False),
nn.ReLU(),
)
self.module0 = define_module()
self.module1 = define_module()
def forward(self, x):
return (
F.softmax(self.module0(x), dim=1),
F.softmax(self.module1(x), dim=1),
)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
MultipleOutputModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(device_id)
# Compute loss and gradients for both outputs
output1, output2 = model(input)
loss1 = criterion(output1, target)
loss1.backward()
loss2 = criterion(output2, target)
loss2.backward()
@skip_if_not_nccl
@skip_if_not_multigpu
def test_no_used_parameters(self):
"""
Note: this test can be sped up by running it on a CPU module
once DistributedDataParallel supports CPU modules.
"""
store = c10d.FileStore(self.file.name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class NoUsedParameters(nn.Module):
def __init__(self):
super(NoUsedParameters, self).__init__()
# Make sure this module has some parameters, only to then decide
# to never use them from the `forward` function.
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
return x * 0.0
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
NoUsedParameters().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
find_unused_parameters=True,
)
batch_size = 4
input = torch.rand([batch_size, 2], dtype=torch.float)
# After initialization, no parameter has its gradient set.
for p in model.parameters():
self.assertTrue(p.requires_grad)
self.assertIsNone(p.grad)
# Run `forward` function.
model(input)
# Because none of the parameters were used, we expect the reduction for
# all parameters to be executed right when the reducer is initialized.
# Once `forward` returns, every parameter's gradient must be set.
for p in model.parameters():
self.assertTrue(p.requires_grad)
self.assertIsNotNone(p.grad)
self.assertTrue(torch.is_tensor(p.grad))
self.assertEqual(p.size(), p.grad.size())
@skip_if_not_nccl
@skip_if_not_multigpu
def test_no_grad(self):
"""
Note: this test can be sped up by running it on a CPU module
once DistributedDataParallel supports CPU modules.
"""
store = c10d.FileStore(self.file.name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class NoGradModule(nn.Module):
def __init__(self):
super(NoGradModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
NoGradModule().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
input = torch.rand([batch_size, 2], dtype=torch.float)
def check_no_grads():
for p in model.parameters():
self.assertTrue(p.requires_grad)
self.assertIsNone(p.grad)
# After initialization, no parameter has its gradient set.
check_no_grads()
# Run `forward` function with torch.no_grad()
with torch.no_grad():
output = model(input)
self.assertTrue(torch.is_tensor(output))
# No parameter should have its gradient set.
check_no_grads()
@skip_if_not_nccl
@skip_if_not_multigpu
def test_ignored_output(self):
"""
Note: this test can be sped up by running it on a CPU module
once DistributedDataParallel supports CPU modules.
"""
store = c10d.FileStore(self.file.name, self.world_size)
process_group = c10d.ProcessGroupNCCL(store, self.rank, self.world_size)
class IgnoredOutput(nn.Module):
def __init__(self):
super(IgnoredOutput, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
return F.softmax(x, dim=1)
device_id = gpus_for_rank(self.world_size)[self.rank][0]
model = DistributedDataParallel(
IgnoredOutput().float().to(device_id),
device_ids=[device_id],
process_group=process_group,
)
batch_size = 4
criterion = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.float)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)]).to(device_id)
# Run a few iterations where we ignore the output.
for _ in range(4):
output = model(input)
del output
# Run a few iterations where we use the output.
for _ in range(4):
output = model(input)
loss = criterion(output, target)
loss.backward()
class ReducerModule(nn.Module):
def __init__(self):
super(ReducerModule, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 4, bias=False)
self.fc3 = nn.Linear(4, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x, use_fc3=True):
x = self.relu(self.fc1(x)).float()
x = self.relu(self.fc2(x)).float()
if use_fc3:
x = self.fc3(x).float()
return F.softmax(x, dim=1)
class ReducerTest(TestCase):
def setUp(self):
self.store = c10d.FileStore("/dev/null", 1)
self.process_group = c10d.ProcessGroupGloo(self.store, 0, 1)
def test_single_dtype_single_bucket(self):
model = ReducerModule()
parameters = list(model.parameters())
buckets = [list(range(len(parameters)))]
dist.Reducer([parameters], buckets, self.process_group)
def _create_mixed_precision_model(self):
model = ReducerModule()
model.float()
model.fc1.double()
return model
def test_multi_dtype_single_bucket(self):
model = self._create_mixed_precision_model()
# Raise if there are multiple types per bucket.
# In this case we create one bucket for all parameters.
with self.assertRaises(RuntimeError):
parameters = [list(model.parameters())]
buckets = [list(range(len(parameters[0])))]
dist.Reducer(parameters, buckets, self.process_group)
def test_multi_dtype_multi_bucket(self):
model = self._create_mixed_precision_model()
parameters = [list(model.parameters())]
group_by_type = groupby(
range(len(parameters[0])),
key=lambda i: parameters[0][i].type())
buckets = [list(indices) for _, indices in group_by_type]
dist.Reducer(parameters, buckets, self.process_group)
def _create_reducer_for_models(self, models):
parameters = [list(model.parameters()) for model in models]
group_by_type = groupby(
range(len(parameters[0])),
key=lambda i: parameters[0][i].type())
buckets = [list(indices) for _, indices in group_by_type]
return dist.Reducer(parameters, buckets, self.process_group)
def test_forward_backward_single_replica(self):
batch_size = 10
model = self._create_mixed_precision_model()
reducer = self._create_reducer_for_models([model])
loss = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.double)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
output = loss(model(input), target)
reducer.prepare_for_backward(output)
output.backward()
def test_forward_backward_multi_replica(self):
batch_size = 10
num_replicas = 2
models = [self._create_mixed_precision_model() for _ in range(num_replicas)]
reducer = self._create_reducer_for_models(models)
loss = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.double).chunk(num_replicas)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
outputs = [models[i](input[i]) for i in range(num_replicas)]
output = loss(torch.cat(outputs), target)
reducer.prepare_for_backward(output)
output.backward()
# The reducer will have reduced the gradients for all model replicas.
# Verify that they are equal across model replicas.
for parameters in zip(*[model.parameters() for model in models]):
for parameter in parameters:
self.assertEqual(parameters[0].grad, parameter.grad)
def test_forward_backward_unused_parameters(self):
batch_size = 10
model = self._create_mixed_precision_model()
reducer = self._create_reducer_for_models([model])
loss = nn.CrossEntropyLoss()
input = torch.rand([batch_size, 2], dtype=torch.double)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
output = loss(model(input, use_fc3=False), target)
# Check that the grad of fc3 is not set.
self.assertEqual(None, model.fc3.weight.grad)
# Compute and accumulate gradients.
reducer.prepare_for_backward(output)
output.backward()
# The reducer will have marked the grad of fc3 as ready, because
# it doesn't show up in the autograd graph of `output`.
# This should result in its contents being equal to zero.
self.assertEqual(torch.zeros(model.fc3.weight.size()), model.fc3.weight.grad)
def test_forward_backward_optimizer(self):
batch_size = 10
model = self._create_mixed_precision_model()
reducer = self._create_reducer_for_models([model])
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters())
for i in range(3):
input = torch.rand([batch_size, 2], dtype=torch.double)
target = torch.LongTensor([random.randrange(4) for _ in range(batch_size)])
# The `zero_grad` function calls `detach_` and `zero_` on the grad
# tensors of model parameters. If we tried to set the grad tensors
# to a view of the reducer's bucket tensors, this would blow up.
optimizer.zero_grad()
# Unused parameter only in the first iteration.
output = loss(model(input, use_fc3=(i > 0)), target)
reducer.prepare_for_backward(output)
output.backward()
optimizer.step()
class ComputeBucketAssignmentTest(TestCase):
def test_single_limit_single_dtype(self):
tensors = [
torch.empty([100], dtype=torch.float),
torch.empty([200], dtype=torch.float),
torch.empty([100], dtype=torch.float),
torch.empty([50], dtype=torch.float),
]
result = dist._compute_bucket_assignment_by_size(tensors, [400])
self.assertEqual([[0], [1], [2], [3]], result)
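# (Size check: 100/200/100/50 float32 elements are 400/800/400/200 bytes, so
# with a 400-byte cap no tensor can share a bucket with its neighbor, which
# matches the per-tensor buckets expected above.)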
def test_single_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result = dist._compute_bucket_assignment_by_size(tensors, [400])
self.assertEqual([[0, 2], [1, 3], [4], [5]], result)
def test_multi_limit_single_dtype(self):
tensors = [
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
]
result = dist._compute_bucket_assignment_by_size(tensors, [40, 80])
self.assertEqual([[0], [1, 2], [3]], result)
def test_multi_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result = dist._compute_bucket_assignment_by_size(tensors, [200, 400])
self.assertEqual([[0], [1], [2, 4], [3, 5]], result)
if __name__ == '__main__':
assert not torch.cuda._initialized, "test_distributed must not have initialized CUDA context on main process"
run_tests()
| 37.972713
| 113
| 0.593361
|
96b6ce0fa76a7f37b1fa3abdc8caa4a635ae05c4
| 586
|
py
|
Python
|
velkozz_web_api/apps/social_media_api/migrations/0002_auto_20210216_0106.py
|
velkoz-data-ingestion/velkozz_web_api
|
519a6a90e5fdf5bab8ba2daf637768c5fd424a12
|
[
"MIT"
] | null | null | null |
velkozz_web_api/apps/social_media_api/migrations/0002_auto_20210216_0106.py
|
velkoz-data-ingestion/velkozz_web_api
|
519a6a90e5fdf5bab8ba2daf637768c5fd424a12
|
[
"MIT"
] | null | null | null |
velkozz_web_api/apps/social_media_api/migrations/0002_auto_20210216_0106.py
|
velkoz-data-ingestion/velkozz_web_api
|
519a6a90e5fdf5bab8ba2daf637768c5fd424a12
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.5 on 2021-02-16 01:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('social_media_api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='scienceposts',
name='permalink',
field=models.CharField(max_length=300, null=True),
),
migrations.AlterField(
model_name='wallstreetbetsposts',
name='permalink',
field=models.CharField(max_length=300, null=True),
),
]
| 24.416667
| 62
| 0.59727
|
2bba1321f2937a721b5004173eb96266ad21b4ee
| 13,658
|
py
|
Python
|
src/rogerthat/bizz/friend_helper.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/rogerthat/bizz/friend_helper.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
src/rogerthat/bizz/friend_helper.py
|
goubertbrent/oca-backend
|
b9f59cc02568aecb55d4b54aec05245790ea25fd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import StringIO
import collections
from contextlib import closing
import json
import logging
from google.appengine.api import memcache
from google.appengine.ext import db
import cloudstorage
from mcfw.consts import MISSING
from mcfw.properties import azzert
from mcfw.serialization import s_any, ds_any
from rogerthat.consts import FRIEND_HELPER_BUCKET
from rogerthat.dal.profile import get_service_profile
from rogerthat.dal.roles import list_service_roles, get_service_role_by_id
from rogerthat.dal.service import get_service_menu_items, get_service_identity
from rogerthat.models import ServiceTranslation, ServiceInteractionDef, UserProfile, Branding
from rogerthat.to.friends import FriendTO
from rogerthat.utils import guid, today
from rogerthat.utils.service import get_service_user_from_service_identity_user, add_slash_default
from rogerthat.utils.transactions import run_in_transaction
class FriendHelper(object):
"""Helper class for creating a FriendTO. There are 2 types.
- One which gets the data from the datastore
- One which gets the data from cloud storage cache
"""
def __repr__(self):
return '%(class)s(user=\'%(user)s\', friend_type=%(friend_type)s)' % {
'class': self.__class__.__name__,
'user': self.user,
'friend_type': self.friend_type
}
__str__ = __repr__
def __init__(self, user, friend_type):
self.user = user
self.friend_type = friend_type
self.brandings = {}
self._translator = None
@staticmethod
def from_data_store(user, friend_type):
return _FriendDatastoreHelper(user, friend_type)
@staticmethod
def from_cloud_storage(user, friend_type, cloud_storage_path):
return FriendCloudStorageHelper(user, friend_type, cloud_storage_path)
def add_to_memcache(self, cloud_storage_path, value):
if len(value) < memcache.MAX_VALUE_SIZE:
memcache.set(cloud_storage_path, value, time=3600) # @UndefinedVariable
@classmethod
def serialize(cls, user, friend_type):
# type: (users.User, int) -> FriendCloudStorageHelper
if friend_type == FriendTO.TYPE_SERVICE:
assert '/' in user.email()
datastore_helper = FriendHelper.from_data_store(user, friend_type)
def trans():
data = {}
flow_keys = []
for method in ('get_service_profile', 'get_profile_info', 'list_service_menu_items', 'list_roles',
'get_share_sid', '_get_all_translations', 'get_service_data', 'get_brandings'):
logging.debug('Serializing result of %s', method)
f = getattr(datastore_helper, method)
obj = f()
if obj:
if isinstance(obj, dict) or not isinstance(obj, collections.Iterable):
data[method] = obj
else:
is_menu_items_method = method == 'list_service_menu_items'
for i, model in enumerate(obj):
data['%s-%s' % (method, i)] = model
if is_menu_items_method and model.staticFlowKey:
flow_keys.append(model.staticFlowKey)
if flow_keys:
flows = db.get(flow_keys)
for flow in flows:
data['flow-%s' % flow.key()] = flow
return data
# TODO: Change xg to False once ServiceTranslationSet uses same parent key as other service data
data = run_in_transaction(trans, xg=True)
with closing(StringIO.StringIO()) as stream:
s_any(stream, data)
serialized_value = stream.getvalue()
serialized_length = len(serialized_value)
logging.info('Size of serialized FriendHelper for %s: %d', user, serialized_length)
cloud_storage_path = FriendCloudStorageHelper.create_cloudstorage_path(user.email())
with cloudstorage.open(cloud_storage_path, 'w') as f:
f.write(serialized_value)
datastore_helper.add_to_memcache(cloud_storage_path, serialized_value)
return FriendCloudStorageHelper.from_cloud_storage(user, friend_type, cloud_storage_path)
@property
def is_service(self):
# type: () -> bool
return self.friend_type == FriendTO.TYPE_SERVICE
@property
def service_user(self):
# type: () -> users.User
return get_service_user_from_service_identity_user(self.user)
@property
def service_identity_user(self):
# type: () -> users.User
return add_slash_default(self.user)
@property
def profile_info_user(self):
# type: () -> users.User
return self.service_identity_user if self.is_service else self.user
def get_service_profile(self):
# type: () -> ServiceProfile
raise NotImplementedError()
def get_service_data(self):
# type: () -> dict
raise NotImplementedError()
def get_profile_info(self):
raise NotImplementedError()
def list_service_menu_items(self):
raise NotImplementedError()
def list_roles(self):
# type: () -> list[ServiceRole]
raise NotImplementedError()
def get_role(self, role_id):
raise NotImplementedError()
def get_message_flow(self, key):
raise NotImplementedError()
def get_share_sid(self):
raise NotImplementedError()
def set_service_identity_user(self):
raise NotImplementedError()
def get_brandings(self):
raise NotImplementedError()
def get_branding(self, branding_hash):
raise NotImplementedError()
def _get_all_translations(self):
raise NotImplementedError()
def get_translator(self):
from rogerthat.bizz.i18n import DummyTranslator, Translator
if self.is_service:
if not self._translator:
translations = self._get_all_translations()
service_profile = self.get_service_profile()
if translations:
self._translator = Translator(translations, service_profile.supportedLanguages)
else:
self._translator = DummyTranslator(service_profile.defaultLanguage)
return self._translator
class _FriendDatastoreHelper(FriendHelper):
def __init__(self, user, friend_type):
super(_FriendDatastoreHelper, self).__init__(user, friend_type)
self.has_data = False
self._service_profile = None
self._profile_info = None
self._brandings = {}
def __getattribute__(self, item):
if item.startswith('get'):
self._ensure_data()
return super(_FriendDatastoreHelper, self).__getattribute__(item)
def _ensure_data(self):
if self.has_data:
return
else:
if self.is_service:
self._service_profile = get_service_profile(self.service_user)
self._profile_info = get_service_identity(self.service_identity_user)
else:
self._profile_info = db.get(UserProfile.createKey(self.user))
self.has_data = True
def get_service_profile(self):
# type: () -> ServiceProfile
if self.is_service:
return self._service_profile
def get_profile_info(self):
# type: () -> ServiceIdentity
azzert(self._profile_info)
return self._profile_info
def get_service_data(self):
if self.is_service:
service_identity = self.get_profile_info()
return service_identity.appData and json.loads(service_identity.appData)
def list_service_menu_items(self):
return get_service_menu_items(self.user) if self.is_service else []
def list_roles(self):
return list_service_roles(self.service_user)
def get_role(self, role_id):
if self.is_service:
return get_service_role_by_id(self.service_user, role_id)
def get_message_flow(self, key):
if self.is_service:
return db.get(key)
def _get_all_translations(self):
if self.is_service:
from rogerthat.bizz.i18n import get_all_translations, get_active_translation_set
s = get_active_translation_set(self.get_service_profile())
if s:
translation_types = ServiceTranslation.HOME_TYPES + ServiceTranslation.IDENTITY_TYPES
translations = get_all_translations(s, translation_types)
return translations
def get_share_sid(self):
if self.is_service:
service_identity = self.get_profile_info()
if service_identity.shareEnabled:
return ServiceInteractionDef.get(service_identity.shareSIDKey)
def set_service_identity_user(self, service_identity_user):
azzert(add_slash_default(self.user) == service_identity_user)
def get_brandings(self):
if not self.is_service:
return {}
if self._brandings:
return self._brandings
brandings_to_get = []
profile_info = self.get_profile_info()
translator = self.get_translator()
for language in translator.supported_languages:
if profile_info.menuBranding and profile_info.menuBranding not in brandings_to_get:
brandings_to_get.append(translator.translate(ServiceTranslation.HOME_BRANDING,
profile_info.menuBranding, language))
keys = [Branding.create_key(b_hash) for b_hash in brandings_to_get]
self._brandings = {branding.hash: branding for branding in db.get(keys)} if keys else {}
return self._brandings
def get_branding(self, branding_hash):
return self.get_brandings()[branding_hash]
class FriendCloudStorageHelper(FriendHelper):
def __init__(self, user, friend_type, cloud_storage_path):
FriendHelper.__init__(self, user, friend_type)
self.cloud_storage_path = cloud_storage_path
@staticmethod
def create_cloudstorage_path(user):
# type: (str) -> str
return '/'.join([FRIEND_HELPER_BUCKET, str(today()), user, guid()])
@property
def _data(self):
if not hasattr(self, '_internal_data'):
data_from_memcache = memcache.get(self.cloud_storage_path) # @UndefinedVariable
if data_from_memcache:
with closing(StringIO.StringIO()) as stream:
stream.write(data_from_memcache)
stream.seek(0)
self._internal_data = ds_any(stream)
else:
with cloudstorage.open(self.cloud_storage_path, 'r') as f:
self.add_to_memcache(self.cloud_storage_path, f.read())
f.seek(0)
self._internal_data = ds_any(f)
return self._internal_data
def _get(self, method):
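# serialize() stores single objects/dicts directly under the method name and
# iterables as enumerated '<method>-0', '<method>-1', ... keys; rebuild the
# list in the latter case.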
model = self._data.get(method, MISSING)
if model is not MISSING:
return model
if '%s-0' % method in self._data:
x = 0
models = []
while True:
model = self._data.get('%s-%s' % (method, x))
if not model:
break
models.append(model)
x += 1
return models
return None
def get_service_profile(self):
# type: () -> ServiceProfile
if self.is_service:
return self._get('get_service_profile')
def get_profile_info(self):
return self._get('get_profile_info')
def get_service_data(self):
return self._get('get_service_data')
def list_service_menu_items(self):
return self._get('list_service_menu_items') or []
def list_roles(self):
# type: () -> list[ServiceRole]
return self._get('list_roles') or []
def get_role(self, role_id):
if self.is_service:
for role in self.list_roles():
if role.role_id == role_id:
return role
def get_message_flow(self, key):
if self.is_service:
return self._data.get('flow-%s' % key)
def _get_all_translations(self):
if self.is_service:
return self._get('_get_all_translations')
def get_share_sid(self):
if self.is_service:
return self._get('get_share_sid')
def set_service_identity_user(self, service_identity_user):
if add_slash_default(self.user) != service_identity_user:
# this can happen when a helper was created by supplying a service user
# instead of a service identity user
self.user = service_identity_user
for m in ('get_profile_info', 'get_share_sid', 'get_service_data'):
self._data[m] = MISSING
def get_brandings(self):
return self._get('get_brandings')
def get_branding(self, branding_hash):
return self.get_brandings()[branding_hash]
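# Hypothetical usage sketch (the `service_identity_user` variable below is an
# assumption, not defined in this module): serialize the datastore state once,
# then rebuild cheap helpers from the cached blob on later requests:
#
#   helper = FriendHelper.serialize(service_identity_user, FriendTO.TYPE_SERVICE)
#   path = helper.cloud_storage_path
#   ...
#   helper = FriendHelper.from_cloud_storage(service_identity_user,
#                                            FriendTO.TYPE_SERVICE, path)
#   service_profile = helper.get_service_profile()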
| 36.324468
| 110
| 0.647825
|
536f80faaadafb43135fd6ff29ef9eaf3df78ce0
| 1,531
|
py
|
Python
|
helper_functions.py
|
SatyadevaNaidu/EdgeDetectionOfAnImage
|
8562677fe02baf7160342970d663e9c0b2b76d16
|
[
"Unlicense"
] | null | null | null |
helper_functions.py
|
SatyadevaNaidu/EdgeDetectionOfAnImage
|
8562677fe02baf7160342970d663e9c0b2b76d16
|
[
"Unlicense"
] | null | null | null |
helper_functions.py
|
SatyadevaNaidu/EdgeDetectionOfAnImage
|
8562677fe02baf7160342970d663e9c0b2b76d16
|
[
"Unlicense"
] | null | null | null |
from pathlib import Path
from skimage import io
from scipy import ndimage
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def read_colorimg(imgpath):
# Read the color image
color_img = mpimg.imread(imgpath)
# Convert the color to grayscale
gray_img = np.dot(color_img[...,:3], [0.33, 0.33, 0.33])
    # Pad the grayscale image with a one-pixel border of zeroes
padded_img = np.pad(gray_img, (1,), 'constant', constant_values=0)
return padded_img[:, :].tolist()
def verify_result(pixel_values, new_pixel_values, mask):
# Creating the original image array from the list
orig_image = np.asarray(pixel_values)[1:-1,1:-1]
edges_image = np.asarray(new_pixel_values)
correct_edges_image = ndimage.convolve(orig_image, np.array(mask)[::-1, ::-1], mode='constant', cval=0)
# Comparing each updated value against the correct answer
comparison = edges_image == correct_edges_image
print(f"{comparison.all()} result")
def view_images(imgpath, new_pixel_values):
orig_image = mpimg.imread(imgpath)
edges_image = np.asarray(new_pixel_values)
plt.rcParams['font.size'] = 30
plt.rcParams['axes.titlepad'] = 20
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
ax = axes.ravel()
ax[0].imshow(orig_image)
ax[0].set_title("Input image")
ax[1].imshow(edges_image, cmap='gray', vmin = 0, vmax = 255)
ax[1].set_title("Edges of the image")
plt.show()
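

# --- Hedged usage sketch (not part of the original module) ---
# The helpers above support a hand-written edge-detection exercise:
# read_colorimg() returns a zero-padded grayscale image, the caller computes
# the convolved values, and verify_result()/view_images() check and display
# them. The image path and the Laplacian mask below are illustrative
# assumptions only.
if __name__ == "__main__":
    imgpath = "input.jpg"  # hypothetical input image
    mask = [[0, 1, 0],
            [1, -4, 1],
            [0, 1, 0]]  # simple Laplacian kernel (assumption)
    pixel_values = read_colorimg(imgpath)
    rows = len(pixel_values) - 2
    cols = len(pixel_values[0]) - 2
    # Hand-rolled correlation over the padded image; verify_result() then
    # checks it against a scipy.ndimage reference.
    new_pixel_values = [[sum(pixel_values[i + k][j + l] * mask[k][l]
                             for k in range(3) for l in range(3))
                         for j in range(cols)]
                        for i in range(rows)]
    verify_result(pixel_values, new_pixel_values, mask)
    view_images(imgpath, new_pixel_values)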
| 38.275
| 108
| 0.677988
|
a4b6162ef9aead54081487d8d8c6298ecd74d06d
| 5,112
|
py
|
Python
|
setup.py
|
carlinmack/tqdm
|
6888abd3dc33582d8437d825e11a7ef45d186397
|
[
"MIT"
] | 1
|
2019-11-12T23:28:21.000Z
|
2019-11-12T23:28:21.000Z
|
setup.py
|
carlinmack/tqdm
|
6888abd3dc33582d8437d825e11a7ef45d186397
|
[
"MIT"
] | 2
|
2019-11-28T23:26:50.000Z
|
2020-10-30T12:45:23.000Z
|
setup.py
|
carlinmack/tqdm
|
6888abd3dc33582d8437d825e11a7ef45d186397
|
[
"MIT"
] | 1
|
2021-08-30T15:26:47.000Z
|
2021-08-30T15:26:47.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
def find_packages(where='.'):
# os.walk -> list[(dirname, list[subdirs], list[files])]
return [folder.replace("/", ".").lstrip(".")
for (folder, _, fils) in os.walk(where)
if "__init__.py" in fils]
import sys
from io import open as io_open
# Get version from tqdm/_version.py
__version__ = None
src_dir = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(src_dir, 'tqdm', '_version.py')
with io_open(version_file, mode='r') as fd:
exec(fd.read())
# Executing makefile commands if specified
if sys.argv[1].lower().strip() == 'make':
import pymake
# Filename of the makefile
fpath = os.path.join(src_dir, 'Makefile')
pymake.main(['-f', fpath] + sys.argv[2:])
# Stop to avoid setup.py raising non-standard command error
sys.exit(0)
extras_require = {}
requirements_dev = os.path.join(src_dir, 'requirements-dev.txt')
with io_open(requirements_dev, mode='r') as fd:
extras_require['dev'] = [i.strip().split('#', 1)[0].strip()
for i in fd.read().strip().split('\n')]
README_rst = ''
fndoc = os.path.join(src_dir, 'README.rst')
with io_open(fndoc, mode='r', encoding='utf-8') as fd:
README_rst = fd.read()
setup(
name='tqdm',
version=__version__,
description='Fast, Extensible Progress Meter',
long_description=README_rst,
license='MPLv2.0, MIT Licences',
url='https://github.com/tqdm/tqdm',
maintainer='tqdm developers',
maintainer_email='python.tqdm@gmail.com',
platforms=['any'],
packages=['tqdm'] + ['tqdm.' + i for i in find_packages('tqdm')],
provides=['tqdm'],
extras_require=extras_require,
entry_points={'console_scripts': ['tqdm=tqdm.cli:main'], },
package_data={'tqdm': ['CONTRIBUTING.md', 'LICENCE', 'examples/*.py',
'tqdm.1', 'requirements-dev.txt']},
python_requires='>=2.6, !=3.0.*, !=3.1.*',
classifiers=[
# Trove classifiers
# (https://pypi.org/pypi?%3Aaction=list_classifiers)
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: MacOS X',
'Environment :: Other Environment',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Framework :: IPython',
'Framework :: Jupyter',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Other Audience',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Operating System :: MacOS',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft',
'Operating System :: Microsoft :: MS-DOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: SunOS/Solaris',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation',
'Programming Language :: Python :: Implementation :: IronPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Programming Language :: Unix Shell',
'Topic :: Desktop Environment',
'Topic :: Education :: Computer Aided Instruction (CAI)',
'Topic :: Education :: Testing',
'Topic :: Office/Business',
'Topic :: Other/Nonlisted Topic',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Pre-processors',
'Topic :: Software Development :: User Interfaces',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Logging',
'Topic :: System :: Monitoring',
'Topic :: System :: Shells',
'Topic :: Terminals',
'Topic :: Utilities'
],
keywords='progressbar progressmeter progress bar meter'
' rate eta console terminal time',
test_suite='nose.collector',
tests_require=['nose', 'flake8', 'coverage'],
)
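
# Hedged usage note (not part of the original file): because the "make" branch
# above forwards the remaining arguments to pymake along with the adjacent
# Makefile, targets can be invoked as, for example,
#     python setup.py make <target>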
| 40.571429
| 74
| 0.603678
|
b8b80bdaa75ee426a8d629b8c7efaf3b1d5c70f8
| 9,893
|
py
|
Python
|
frontera/tests/test_utils_url.py
|
TeamHG-Memex/frontera
|
06ab4002428528a2d8b67c1e82368cc5988b2228
|
[
"BSD-3-Clause"
] | 3
|
2015-11-11T19:37:16.000Z
|
2017-03-15T13:33:54.000Z
|
frontera/tests/test_utils_url.py
|
TeamHG-Memex/frontera
|
06ab4002428528a2d8b67c1e82368cc5988b2228
|
[
"BSD-3-Clause"
] | null | null | null |
frontera/tests/test_utils_url.py
|
TeamHG-Memex/frontera
|
06ab4002428528a2d8b67c1e82368cc5988b2228
|
[
"BSD-3-Clause"
] | 2
|
2016-09-08T08:30:24.000Z
|
2018-10-02T22:00:47.000Z
|
import unittest
from frontera.utils.url import parse_url, parse_domain_from_url, \
parse_domain_from_url_fast, safe_url_string, canonicalize_url
simple_url = 'http://www.example.com'
complete_url = 'http://username:password@www.example.com:80/some/page/do?a=1&b=2&c=3#frag'
class TestParseUrl(unittest.TestCase):
def test_simple_url(self):
self.assertEqual(parse_url(simple_url),
('http', 'www.example.com', '', '', '', ''))
def test_complete_url(self):
self.assertEqual(parse_url(complete_url),
('http', 'username:password@www.example.com:80',
'/some/page/do', '', 'a=1&b=2&c=3', 'frag'))
def test_already_parsed(self):
result = parse_url(simple_url)
self.assertEqual(parse_url(result), result)
class TestParseDomainFromUrl(unittest.TestCase):
def test_simple_url(self):
self.assertEqual(parse_domain_from_url(simple_url),
('www.example.com', 'example.com', 'http', 'example', 'com', 'www'))
def test_complete_url(self):
self.assertEqual(parse_domain_from_url(complete_url),
('www.example.com', 'example.com', 'http', 'example', 'com', 'www'))
def test_missing_tld(self):
self.assertEqual(parse_domain_from_url('http://www.example'),
('www.example', 'example', 'http', 'example', '', 'www'))
def test_missing_subdomain(self):
self.assertEqual(parse_domain_from_url('https://example.com'),
('example.com', 'example.com', 'https', 'example', 'com', ''))
def test_missing_scheme(self):
self.assertEqual(parse_domain_from_url('www.example.com'),
('www.example.com', 'example.com', '', 'example', 'com', 'www'))
class TestParseDomainFromUrlFast(unittest.TestCase):
def test_simple_url(self):
self.assertEqual(parse_domain_from_url_fast(simple_url),
('www.example.com', 'www.example.com', 'http', '', '', ''))
def test_complete_url(self):
self.assertEqual(parse_domain_from_url_fast(complete_url),
('username:password@www.example.com:80', 'www.example.com', 'http', '', '', ''))
class TestSafeUrlString(unittest.TestCase):
def test_safe_url_string(self):
# Motoko Kusanagi (Cyborg from Ghost in the Shell)
motoko = u'\u8349\u8599 \u7d20\u5b50'
self.assertEqual(safe_url_string(motoko), # note the %20 for space
'%E8%8D%89%E8%96%99%20%E7%B4%A0%E5%AD%90')
self.assertEqual(safe_url_string(motoko),
safe_url_string(safe_url_string(motoko)))
self.assertEqual(safe_url_string(u'\xa9'), # copyright symbol
'%C2%A9')
self.assertEqual(safe_url_string(u'\xa9', 'iso-8859-1'),
'%A9')
self.assertEqual(safe_url_string("http://www.scrapy.org/"),
'http://www.scrapy.org/')
alessi = u'/ecommerce/oggetto/Te \xf2/tea-strainer/1273'
self.assertEqual(safe_url_string(alessi),
'/ecommerce/oggetto/Te%20%C3%B2/tea-strainer/1273')
self.assertEqual(safe_url_string("http://www.example.com/test?p(29)url(http://www.another.net/page)"),
"http://www.example.com/test?p(29)url(http://www.another.net/page)")
self.assertEqual(safe_url_string("http://www.example.com/Brochures_&_Paint_Cards&PageSize=200"),
"http://www.example.com/Brochures_&_Paint_Cards&PageSize=200")
safeurl = safe_url_string(u"http://www.example.com/\xa3", encoding='latin-1')
self.assert_(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%A3")
safeurl = safe_url_string(u"http://www.example.com/\xa3", encoding='utf-8')
self.assert_(isinstance(safeurl, str))
self.assertEqual(safeurl, "http://www.example.com/%C2%A3")
class TestCanonicalizeUrl(unittest.TestCase):
def test_simple_case(self):
self.assertEqual(canonicalize_url("http://www.example.com/"),
"http://www.example.com/")
def test_returns_str(self):
assert isinstance(canonicalize_url(u"http://www.example.com"), str)
def test_append_missing_path(self):
self.assertEqual(canonicalize_url("http://www.example.com"),
"http://www.example.com/")
def test_typical_usage(self):
self.assertEqual(canonicalize_url("http://www.example.com/do?a=1&b=2&c=3"),
"http://www.example.com/do?a=1&b=2&c=3")
self.assertEqual(canonicalize_url("http://www.example.com/do?c=1&b=2&a=3"),
"http://www.example.com/do?a=3&b=2&c=1")
self.assertEqual(canonicalize_url("http://www.example.com/do?&a=1"),
"http://www.example.com/do?a=1")
def test_sorting(self):
self.assertEqual(canonicalize_url("http://www.example.com/do?c=3&b=5&b=2&a=50"),
"http://www.example.com/do?a=50&b=2&b=5&c=3")
def test_keep_blank_values(self):
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&a=2", keep_blank_values=False),
"http://www.example.com/do?a=2")
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&a=2"),
"http://www.example.com/do?a=2&b=")
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&c&a=2", keep_blank_values=False),
"http://www.example.com/do?a=2")
self.assertEqual(canonicalize_url("http://www.example.com/do?b=&c&a=2"),
"http://www.example.com/do?a=2&b=&c=")
self.assertEqual(canonicalize_url(u'http://www.example.com/do?1750,4'),
'http://www.example.com/do?1750%2C4=')
def test_spaces(self):
self.assertEqual(canonicalize_url("http://www.example.com/do?q=a space&a=1"),
"http://www.example.com/do?a=1&q=a+space")
self.assertEqual(canonicalize_url("http://www.example.com/do?q=a+space&a=1"),
"http://www.example.com/do?a=1&q=a+space")
self.assertEqual(canonicalize_url("http://www.example.com/do?q=a%20space&a=1"),
"http://www.example.com/do?a=1&q=a+space")
def test_normalize_percent_encoding_in_path(self):
self.assertEqual(canonicalize_url("http://www.example.com/a%a3do"),
"http://www.example.com/a%A3do"),
def test_normalize_percent_encoding_in_query_arguments(self):
self.assertEqual(canonicalize_url("http://www.example.com/do?k=b%a3"),
"http://www.example.com/do?k=b%A3")
def test_non_ascii_percent_encoding_in_path(self):
self.assertEqual(canonicalize_url("http://www.example.com/a do?a=1"),
"http://www.example.com/a%20do?a=1"),
self.assertEqual(canonicalize_url("http://www.example.com/a %20do?a=1"),
"http://www.example.com/a%20%20do?a=1"),
self.assertEqual(canonicalize_url("http://www.example.com/a do\xc2\xa3.html?a=1"),
"http://www.example.com/a%20do%C2%A3.html?a=1")
def test_non_ascii_percent_encoding_in_query_argument(self):
self.assertEqual(canonicalize_url(u"http://www.example.com/do?price=\xa3500&a=5&z=3"),
u"http://www.example.com/do?a=5&price=%C2%A3500&z=3")
self.assertEqual(canonicalize_url("http://www.example.com/do?price=\xc2\xa3500&a=5&z=3"),
"http://www.example.com/do?a=5&price=%C2%A3500&z=3")
self.assertEqual(canonicalize_url("http://www.example.com/do?price(\xc2\xa3)=500&a=1"),
"http://www.example.com/do?a=1&price%28%C2%A3%29=500")
def test_auth_and_ports(self):
self.assertEqual(canonicalize_url(u"http://user:pass@www.example.com:81/do?now=1"),
u"http://user:pass@www.example.com:81/do?now=1")
def test_remove_fragments(self):
self.assertEqual(canonicalize_url(u"http://user:pass@www.example.com/do?a=1#frag"),
u"http://user:pass@www.example.com/do?a=1")
self.assertEqual(canonicalize_url(u"http://user:pass@www.example.com/do?a=1#frag", keep_fragments=True),
u"http://user:pass@www.example.com/do?a=1#frag")
def test_dont_convert_safe_chars(self):
self.assertEqual(canonicalize_url(
"http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html"),
"http://www.simplybedrooms.com/White-Bedroom-Furniture/Bedroom-Mirror:-Josephine-Cheval-Mirror.html")
def test_safe_characters_unicode(self):
# urllib.quote uses a mapping cache of encoded characters. when parsing
# an already percent-encoded url, it will fail if that url was not
# percent-encoded as utf-8, that's why canonicalize_url must always
# convert the urls to string. the following test asserts that
# functionality.
self.assertEqual(canonicalize_url(u'http://www.example.com/caf%E9-con-leche.htm'),
'http://www.example.com/caf%E9-con-leche.htm')
def test_domains_are_case_insensitive(self):
self.assertEqual(canonicalize_url("http://www.EXAMPLE.com/"),
"http://www.example.com/")
def test_quoted_slash_and_question_sign(self):
self.assertEqual(canonicalize_url("http://foo.com/AC%2FDC+rocks%3f/?yeah=1"),
"http://foo.com/AC%2FDC+rocks%3F/?yeah=1")
self.assertEqual(canonicalize_url("http://foo.com/AC%2FDC/"),
"http://foo.com/AC%2FDC/")
| 49.964646
| 114
| 0.610836
|
507097d6242b3861342f22d46deb67628b3095e5
| 3,356
|
py
|
Python
|
suji/kansuji.py
|
jikyo/suji4p
|
00b7f6f0ac088810cbb379f6a1687db7d329a996
|
[
"Apache-2.0"
] | 3
|
2021-02-16T10:53:33.000Z
|
2021-09-21T07:55:09.000Z
|
suji/kansuji.py
|
jikyo/suji4p
|
00b7f6f0ac088810cbb379f6a1687db7d329a996
|
[
"Apache-2.0"
] | 1
|
2021-02-15T11:19:43.000Z
|
2021-02-24T07:29:03.000Z
|
suji/kansuji.py
|
jikyo/suji4p
|
00b7f6f0ac088810cbb379f6a1687db7d329a996
|
[
"Apache-2.0"
] | null | null | null |
""" The entry point to the kansuji methods.
This provides a method to convert mixed Japanese number notation into Kansuji string values,
and a method to convert the number notations found in a string into a Kansuji string.
"""
from suji.converter import values
class Kansuji:
__zero = '零'
__minus = 'マイナス'
__number = {
1: '一',
2: '二',
3: '三',
4: '四',
5: '五',
6: '六',
7: '七',
8: '八',
9: '九',
}
__radic = [
10000000000000000,
1000000000000,
100000000,
10000,
1000,
100,
10,
]
__radic_kanji = [
'京',
'兆',
'億',
'万',
'千',
'百',
'十',
]
# __before_the_decimal_point = '割'
# __radic_adp = [
# 0.1,
# 0.01,
# 0.001,
# ]
# __radic_adp_kanji = [
# '分',
# '厘',
# '毛',
# ]
@staticmethod
def __value(v, index, one):
if v == 0:
return ''
if v < 0:
return Kansuji.__minus + Kansuji.__value(-1 * v, index, one)
if v in Kansuji.__number:
return Kansuji.__number[v]
if len(Kansuji.__radic) <= index:
if v in Kansuji.__number:
return Kansuji.__number[v]
else:
return ''
nd = divmod(v, Kansuji.__radic[index])
if nd[0] == 0:
return Kansuji.__value(v, index + 1, one)
prefix = ''
if nd[0] != 1:
prefix = Kansuji.__value(nd[0], index + 1, one)
elif one:
prefix = Kansuji.__number[1]
return prefix \
+ Kansuji.__radic_kanji[index] \
+ Kansuji.__value(nd[1], index + 1, one)
@staticmethod
def value(v, one=True):
v = int(v)
if v == 0:
return Kansuji.__zero
return Kansuji.__value(v, 0, one)
def kansujis(src, one=True):
""" Convert from mixed Japanese number notations to Knasuji string values.
The return value is a list of Kansuji value objects.
    If the input string has no number notation, `kansujis` returns an empty list.
The result object has three keys: `val`, `beg`, and `end`:
:val: the string value of the Kansuji notation.
        :beg: the start position of the found number notation in the input string.
        :end: the end position of the found number notation.
    :param src: an input string.
    :param one: a boolean flag for displaying `ichi` (一). Default is True.
:return: a list of the Kansuji value objects.
"""
val = values(src)
for v in val:
v['val'] = Kansuji.value(v['val'], one)
return val
def kansuji(src, one=True):
""" Convert from mixed number notations to Kansuji string.
The return value is a converted str.
If the input string has no number notation, `kansuji` returns the input str.
    :param src: an input string.
    :param one: a boolean flag for displaying `ichi` (一). Default is True.
:return: a converted str.
"""
vals = values(src)
if 0 == len(vals):
return src
start = 0
s = ''
for v in vals:
s += src[start:v['beg']]
s += Kansuji.value(v['val'], one)
start = v['end']
s += src[start:len(src)]
return s
__all__ = ['kansujis', 'kansuji']
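

# Hedged usage sketch (not part of the original module); the sample sentence is
# an illustrative assumption -- the docstrings above only say to run this file
# and inspect stdout.
if __name__ == '__main__':
    sample = u'それは5000円です。'
    print(kansujis(sample))            # list of {'val', 'beg', 'end'} objects
    print(kansuji(sample))             # the sentence with the number as Kansuji
    print(kansuji(sample, one=False))  # omit the explicit 一 before units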
| 23.633803
| 98
| 0.532181
|
5ec556c10c387c722a187725bdfd445dd0e002a0
| 1,738
|
py
|
Python
|
python_src/2020_08_august_contest/day_23.py
|
BurnySc2/leetcode-solutions
|
db86cc945f81d2b84891c5446f4887253011890e
|
[
"MIT"
] | null | null | null |
python_src/2020_08_august_contest/day_23.py
|
BurnySc2/leetcode-solutions
|
db86cc945f81d2b84891c5446f4887253011890e
|
[
"MIT"
] | 1
|
2021-08-01T09:56:31.000Z
|
2021-08-01T09:56:31.000Z
|
python_src/2020_08_august_contest/day_23.py
|
BurnySc2/leetcode-solutions
|
db86cc945f81d2b84891c5446f4887253011890e
|
[
"MIT"
] | null | null | null |
from typing import List, Dict
from collections import deque
# TOO SLOW
class StreamChecker:
def __init__(self, words: List[str]):
self.words = words
self.keep_track = deque()
self.word_counter: Dict[str, List[str]] = {word: list(word) for word in words}
self.longest_word = max(len(i) for i in words)
self.words_ending_with_char = {}
for i in words:
char = i[-1]
if char not in self.words_ending_with_char:
self.words_ending_with_char[char] = [i]
else:
self.words_ending_with_char[char].append(i)
def query(self, letter: str) -> bool:
self.keep_track.append(letter)
if len(self.keep_track) > self.longest_word:
self.keep_track.popleft()
for word in self.words_ending_with_char.get(letter, []):
if word[-1] == letter:
word_as_list = self.word_counter[word]
if len(self.keep_track) >= len(word) and all(
i == j for i, j in zip(word_as_list[::-1], reversed(self.keep_track))
):
return True
return False
if __name__ == "__main__":
# fmt: off
s = StreamChecker(["ab", "ba", "aaab", "abab", "baa"])
q = ["a","a","a","a","a","b","a","b","a","b","b","b","a","b","a","b","b","b","b","a","b","a","b","a","a","a","b","a","a","a"]
expected = [False,False,False,False,False,True,True,True,True,True,False,False,True,True,True,True,False,False,False,True,True,True,True,True,True,False,True,True,True,False]
# fmt: on
for query, expect in zip(q, expected):
value = s.query(query)
print(query, value, expect)
assert value is expect
| 39.5
| 178
| 0.567319
|
81fd8d5ff99dac0ea700184d0817a4a885871b75
| 1,832
|
py
|
Python
|
python/src/nnabla/backward_function/clip_grad_by_value.py
|
sdonatti/nnabla
|
ac4a42e62dd358f16bd79c08a9a9f3d83c0100c9
|
[
"Apache-2.0"
] | 1
|
2020-08-03T12:49:19.000Z
|
2020-08-03T12:49:19.000Z
|
python/src/nnabla/backward_function/clip_grad_by_value.py
|
sdonatti/nnabla
|
ac4a42e62dd358f16bd79c08a9a9f3d83c0100c9
|
[
"Apache-2.0"
] | 1
|
2020-11-09T07:33:29.000Z
|
2020-11-09T07:33:29.000Z
|
python/src/nnabla/backward_function/clip_grad_by_value.py
|
sdonatti/nnabla
|
ac4a42e62dd358f16bd79c08a9a9f3d83c0100c9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
from .backward_function import BackwardFunction
class ClipGradByValueBackward(BackwardFunction):
@property
def name(self):
return 'ClipGradByValueBackward'
def _create_forward_inputs_and_outputs(self, inputs, outputs):
# Inputs on the forward graph
inputs_fwd = []
for i in range(self._num_inputs_fwd):
need_grad = self.forward_func.inputs[i].need_grad
v = nn.Variable(inputs[i].shape, need_grad=need_grad)
v.data = inputs[i].data
v.grad = outputs[i].data
inputs_fwd += [v]
# Outputs on the forward graph
outputs_fwd = []
for i in range(self._num_outputs_fwd):
inp = inputs[self._num_inputs_fwd + i]
v = nn.Variable(inp.shape)
v.grad = inp.data
outputs_fwd += [v]
return inputs_fwd, outputs_fwd
def backward_impl(self, inputs, outputs, prop_down, accum):
# inputs: [inputs_fwd_graph] + [inputs_bwd_graph] or
# [inputs_fwd_graph] + [outputs_fwd_graph] + [inputs_bwd_graph]
raise NotImplementedError(
"The backward method of ClipGradByValueBackward class is not implemented.")
| 37.387755
| 87
| 0.679039
|
05b0b553be8e24e6ac5375afb2e581a459beef8a
| 1,372
|
py
|
Python
|
autobahntestsuite/autobahntestsuite/case/case5_2.py
|
rishabh-bector/autobahn-testsuite
|
57030060630c10b22be44774973eaa61987b716c
|
[
"Apache-2.0"
] | 595
|
2015-10-20T09:01:18.000Z
|
2022-03-28T08:48:27.000Z
|
autobahntestsuite/autobahntestsuite/case/case5_2.py
|
rishabh-bector/autobahn-testsuite
|
57030060630c10b22be44774973eaa61987b716c
|
[
"Apache-2.0"
] | 73
|
2015-12-03T14:21:56.000Z
|
2022-02-05T01:53:05.000Z
|
autobahntestsuite/autobahntestsuite/case/case5_2.py
|
rishabh-bector/autobahn-testsuite
|
57030060630c10b22be44774973eaa61987b716c
|
[
"Apache-2.0"
] | 65
|
2015-11-04T15:58:37.000Z
|
2022-02-09T03:49:24.000Z
|
###############################################################################
##
## Copyright (c) Crossbar.io Technologies GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from case import Case
class Case5_2(Case):
DESCRIPTION = """Send Pong fragmented into 2 fragments."""
EXPECTATION = """Connection is failed immediately, since control message MUST NOT be fragmented."""
def onOpen(self):
self.expected[Case.OK] = []
self.expectedClose = {"closedByMe":False,"closeCode":[self.p.CLOSE_STATUS_CODE_PROTOCOL_ERROR],"requireClean":False}
self.p.sendFrame(opcode = 10, fin = False, payload = "fragment1")
self.p.sendFrame(opcode = 0, fin = True, payload = "fragment2")
self.p.killAfter(1)
| 41.575758
| 123
| 0.610787
|
6ce8d9e7e39fcdea894ebc81df888ae15bbc04a3
| 1,298
|
py
|
Python
|
Lib/site-packages/wagtailimportexport/forms.py
|
nateonmission/nateonmission.com
|
c7c5078ca2160fa97cb6cdc0497438547f82ae41
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/wagtailimportexport/forms.py
|
nateonmission/nateonmission.com
|
c7c5078ca2160fa97cb6cdc0497438547f82ae41
|
[
"bzip2-1.0.6"
] | null | null | null |
Lib/site-packages/wagtailimportexport/forms.py
|
nateonmission/nateonmission.com
|
c7c5078ca2160fa97cb6cdc0497438547f82ae41
|
[
"bzip2-1.0.6"
] | null | null | null |
from django import forms
from django.utils.translation import ugettext as _
try:
from wagtail.admin.widgets import AdminPageChooser
from wagtail.core.models import Page
except ImportError: # fallback for Wagtail <2.0
from wagtail.wagtailadmin.widgets import AdminPageChooser
from wagtail.wagtailcore.models import Page
class ImportFromAPIForm(forms.Form):
source_page_id = forms.IntegerField()
source_site_base_url = forms.URLField()
parent_page = forms.ModelChoiceField(
queryset=Page.objects.all(),
widget=AdminPageChooser(can_choose_root=True, user_perms='copy_to'),
label=_("Destination parent page"),
help_text=_("Imported pages will be created as children of this page.")
)
class ImportFromFileForm(forms.Form):
file = forms.FileField(label=_("File to import"))
parent_page = forms.ModelChoiceField(
queryset=Page.objects.all(),
widget=AdminPageChooser(can_choose_root=True, user_perms='copy_to'),
label=_("Destination parent page"),
help_text=_("Imported pages will be created as children of this page.")
)
class ExportForm(forms.Form):
root_page = forms.ModelChoiceField(
queryset=Page.objects.all(),
widget=AdminPageChooser(can_choose_root=True),
)
| 34.157895
| 79
| 0.724191
|
359883fbfbbc5abcbce84f615b78aa3d0b55511d
| 2,246
|
py
|
Python
|
preprocess.py
|
Huy-Ngo/dependency-size
|
791b9a8cc3ae632d702ab0d12f13556698018a44
|
[
"MIT"
] | null | null | null |
preprocess.py
|
Huy-Ngo/dependency-size
|
791b9a8cc3ae632d702ab0d12f13556698018a44
|
[
"MIT"
] | 7
|
2020-12-15T14:05:06.000Z
|
2021-01-13T09:52:52.000Z
|
preprocess.py
|
Huy-Ngo/dependency-size
|
791b9a8cc3ae632d702ab0d12f13556698018a44
|
[
"MIT"
] | null | null | null |
from urllib3 import PoolManager, disable_warnings
disable_warnings()
import json
import pprint
import requirements
def request_pkg_info(pkg_name, pkg_ver = ''):
"""Get json response from pypi
Parameters:
pkg_name : str
pkg_ver : str
Returns:
json/dict
"""
url = ''
if pkg_ver == '':
url = f'https://pypi.org/pypi/{pkg_name}/json'
else:
url = f'https://pypi.org/pypi/{pkg_name}/{pkg_ver}/json'
http = PoolManager()
r = http.request('GET', url)
r_json = json.loads(r.data.decode('utf-8'))
return r_json
def get_basic_info(pkg_info, filename='', requires_format=True):
"""Get basic information such as name, requires_dist, size
from what request_pkg_info() returns
Parameters:
pkg_info : json/dict
filename : str
requires_format : bool
Returns:
json/dict
Notes:
For `requires_dist` key, it is a list of tuples.
Each tuple is a requirement of package.
First element of that tuple is pkg_name
Second element of that tuple is a list of conditions as tuples.
Examples:
Try to run this file and see the output in stdout.
"""
basic_info = {}
basic_info['name'] = pkg_info['info']['name']
if requires_format and pkg_info['info']['requires_dist'] is not None:
req_dist = [ite for ite in pkg_info['info']['requires_dist'] if 'extra ' not in ite]
basic_info['requires_dist'] = [(ite.name, ite.specs) for ite in
requirements.parse("\n".join(req_dist))]
else:
basic_info['requires_dist'] = pkg_info['info']['requires_dist']
if filename == '':
basic_info['size'] = pkg_info['urls'][0]['size']
else:
for url_info in pkg_info['urls']:
if url_info['filename'] == filename:
basic_info['size'] = url_info['size']
return basic_info
def lazy_get_pkg(pkg_name, pkg_ver = '', filename='', requires_format=True):
return get_basic_info(request_pkg_info(pkg_name, pkg_ver),
filename, requires_format)
if __name__ == '__main__':
pkg_info = lazy_get_pkg('requests')
pprint.PrettyPrinter(indent=2).pprint(pkg_info)
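
    # Hedged follow-up sketch (not in the original file): requires_dist is a
    # list of (name, specs) tuples per get_basic_info(), so direct dependencies
    # can be resolved one level deeper. Names and sizes depend on whatever PyPI
    # returns at run time.
    for dep_name, dep_specs in (pkg_info['requires_dist'] or []):
        dep_info = lazy_get_pkg(dep_name)
        print(dep_name, dep_specs, dep_info['size'])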
| 28.43038
| 92
| 0.625557
|
535a243224a9cf101dbd3e2441ccde5a3c2b1349
| 375
|
py
|
Python
|
pythonlatex/__init__.py
|
jordyril/PythonLaTeX
|
547c699b7f01ab90a023893f7e6a9736e78534e5
|
[
"MIT"
] | null | null | null |
pythonlatex/__init__.py
|
jordyril/PythonLaTeX
|
547c699b7f01ab90a023893f7e6a9736e78534e5
|
[
"MIT"
] | null | null | null |
pythonlatex/__init__.py
|
jordyril/PythonLaTeX
|
547c699b7f01ab90a023893f7e6a9736e78534e5
|
[
"MIT"
] | null | null | null |
"""
A library complementing/building upon the package
'pylatex' by Jelte Fennema (see https://github.com/JelteF/PyLaTeX for the original package)
.. :copyright: (c) 2019 by Jordy Rillaerts.
:license: MIT, see License for more details.
"""
from .figure import Figure, SubFigure
from .table import Table
from .saving import LatexSaving
from .value_old import LatexValue
| 31.25
| 91
| 0.762667
|
d3f8337903e468d168279143c8c57c038bb31e29
| 861
|
py
|
Python
|
distribution_procedures/__init__.py
|
Art-Ev/AequilibraE-GUI
|
0c6ea37dcb5079cca499a4e17f0f96586c887be7
|
[
"MIT"
] | 22
|
2018-08-31T13:05:07.000Z
|
2021-11-18T08:50:26.000Z
|
distribution_procedures/__init__.py
|
Art-Ev/AequilibraE-GUI
|
0c6ea37dcb5079cca499a4e17f0f96586c887be7
|
[
"MIT"
] | 65
|
2018-06-30T18:20:02.000Z
|
2022-03-21T04:51:43.000Z
|
distribution_procedures/__init__.py
|
Art-Ev/AequilibraE-GUI
|
0c6ea37dcb5079cca499a4e17f0f96586c887be7
|
[
"MIT"
] | 6
|
2018-10-18T23:16:32.000Z
|
2022-02-10T13:39:18.000Z
|
"""
-----------------------------------------------------------------------------------------------------------
Package: AequilibraE
Name: QGIS distribution dialogs and procedures initializer
Purpose:
Original Author: Pedro Camargo (c@margo.co)
Contributors:
Last edited by: Pedro Camargo
Website: www.AequilibraE.com
Repository: https://github.com/AequilibraE/AequilibraE
Created: 2016-10-30
Updated: 2018-08-08
Copyright: (c) AequilibraE authors
Licence: See LICENSE.TXT
-----------------------------------------------------------------------------------------------------------
"""
from .distribution_models_dialog import DistributionModelsDialog
from .ipf_procedure import IpfProcedure
from .calibrate_gravity_procedure import CalibrateGravityProcedure
from .apply_gravity_procedure import ApplyGravityProcedure
| 34.44
| 108
| 0.586527
|
291cf3015459404140adaf6df24ea49d5a77aa6e
| 169
|
py
|
Python
|
community_library/delivery_management/apps.py
|
kiruthihan10/Community-Library
|
d2f68e27e14586e7a2bafab42024e1e9c57834f1
|
[
"MIT"
] | null | null | null |
community_library/delivery_management/apps.py
|
kiruthihan10/Community-Library
|
d2f68e27e14586e7a2bafab42024e1e9c57834f1
|
[
"MIT"
] | null | null | null |
community_library/delivery_management/apps.py
|
kiruthihan10/Community-Library
|
d2f68e27e14586e7a2bafab42024e1e9c57834f1
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class DeliveryManagementConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'delivery_management'
| 24.142857
| 56
| 0.792899
|
1d3890e348d6a74e2c24fa00e3fe92a19c2bbfdd
| 1,597
|
py
|
Python
|
Golem/golem_run.py
|
shyam-chipsoft/Tools
|
bfc5a15f585da429f7e87968e3401a9934eda13b
|
[
"MIT"
] | 5
|
2021-03-25T09:16:28.000Z
|
2021-06-12T06:48:43.000Z
|
Golem/golem_run.py
|
shyam-chipsoft/Tools
|
bfc5a15f585da429f7e87968e3401a9934eda13b
|
[
"MIT"
] | null | null | null |
Golem/golem_run.py
|
shyam-chipsoft/Tools
|
bfc5a15f585da429f7e87968e3401a9934eda13b
|
[
"MIT"
] | 1
|
2021-07-20T05:37:16.000Z
|
2021-07-20T05:37:16.000Z
|
import os
import re
import time
import json
import collections
import copy
from os import listdir
global_settings = ''
golem_settings = []
golems = []
interval = 0
configFile = r'./config.json'
def runCmd(s):
# print s
os.system(s + ' 2>&1')
def loadCfg():
    jsonfile = open(configFile)
jsonobj = json.load(jsonfile)
global global_settings
global golem_settings
global golems
global interval
global_settings = jsonobj["global_settings"]
interval = jsonobj["global_settings"]["start_interval_sec"]
for i in range(len(jsonobj["golem_settings"])):
golem_settings.append(jsonobj["golem_settings"][i])
for i in range(len(jsonobj["golems"])):
golems.append(jsonobj["golems"][i])
    jsonfile.close()
def runRobots():
strPatternQuot = re.compile('\"')
strPatternSpace = re.compile(' ')
global_settings_text = json.dumps(global_settings, separators=(',',':'))
global_settings_text = strPatternQuot.sub('\\"',global_settings_text)
global_settings_text = strPatternSpace.sub('',global_settings_text)
for i in range(len(golems)):
golem_settings_text = json.dumps(golem_settings[golems[i]], separators=(',',':'))
golem_settings_text = strPatternQuot.sub('\\"',golem_settings_text)
golem_settings_text = strPatternSpace.sub('',golem_settings_text)
print("\n############# ROBOT %d #############" %i)
cmd = './Golem --global_settings %s --golem_settings %s &' % (global_settings_text, golem_settings_text)
runCmd(cmd)
time.sleep(interval)
loadCfg()
runRobots()
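
# Hedged sketch of the config.json shape that loadCfg() expects; only the keys
# read above ("global_settings" with "start_interval_sec", "golem_settings",
# and "golems" as indices into "golem_settings") are required, the rest is an
# illustrative assumption:
#
# {
#   "global_settings": {"start_interval_sec": 5},
#   "golem_settings": [{"name": "golem-a"}, {"name": "golem-b"}],
#   "golems": [0, 1]
# }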
| 28.017544
| 112
| 0.673137
|
2485e70d3a644b2f5d1501898a0247a909633992
| 1,236
|
py
|
Python
|
examples/test_show_file_choosers.py
|
mdmintz/seleniumspot
|
f5c225aa4fcd0b4124fc990e3892c36736290ce8
|
[
"MIT"
] | 1
|
2022-02-26T15:09:58.000Z
|
2022-02-26T15:09:58.000Z
|
examples/test_show_file_choosers.py
|
mdmintz/seleniumspot
|
f5c225aa4fcd0b4124fc990e3892c36736290ce8
|
[
"MIT"
] | null | null | null |
examples/test_show_file_choosers.py
|
mdmintz/seleniumspot
|
f5c225aa4fcd0b4124fc990e3892c36736290ce8
|
[
"MIT"
] | null | null | null |
"""
self.show_file_choosers() is used to show hidden file-upload fields.
Verify that one can choose a file after the hidden input is visible.
"""
import os
from seleniumbase import BaseCase
class FileUpload(BaseCase):
def test_show_file_choosers(self):
self.open("https://imgbb.com/upload")
choose_file_selector = 'input[type="file"]'
uploaded_image = "#anywhere-upload-queue li.queue-item"
self.assert_element_not_visible(choose_file_selector)
self.show_file_choosers()
self.highlight(choose_file_selector)
self.assert_element(choose_file_selector)
self.assert_attribute(choose_file_selector, "value", "")
self.assert_element_not_visible(uploaded_image)
dir_name = os.path.dirname(os.path.abspath(__file__))
my_file = "screenshot.png"
file_path = os.path.join(dir_name, "example_logs/%s" % my_file)
self.choose_file(choose_file_selector, file_path)
if self.browser != "safari":
seen_path = "%s\\%s" % ("C:\\fakepath", my_file)
self.assert_attribute(choose_file_selector, "value", seen_path)
self.demo_mode = True
self.assert_element(uploaded_image)
| 42.62069
| 76
| 0.679612
|
4c4522fbd0bde8cfe854c3c71b7a51160b73f08f
| 10,799
|
py
|
Python
|
nylon/preprocessing/preprocessing.py
|
admariner/nylon
|
a66cc594b83f50bcc38bd199382755ffbc99cfb8
|
[
"MIT"
] | 80
|
2021-06-14T15:30:17.000Z
|
2022-01-27T03:56:43.000Z
|
nylon/preprocessing/preprocessing.py
|
admariner/nylon
|
a66cc594b83f50bcc38bd199382755ffbc99cfb8
|
[
"MIT"
] | 24
|
2021-06-14T17:29:27.000Z
|
2021-08-09T14:43:40.000Z
|
nylon/preprocessing/preprocessing.py
|
admariner/nylon
|
a66cc594b83f50bcc38bd199382755ffbc99cfb8
|
[
"MIT"
] | 10
|
2021-06-21T16:30:44.000Z
|
2021-07-17T12:48:32.000Z
|
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import Pipeline
from prince.ca import CA
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from autocorrect import Speller
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import (OneHotEncoder,
StandardScaler,
FunctionTransformer, LabelEncoder)
from pandas.api.types import is_numeric_dtype
import warnings
warnings.filterwarnings('ignore')
def initial_preprocessor(
data,
json_file):
'''
    initial preprocessing function, called when the user doesn't provide a preprocessing option
:param data: dataframe
:param json_file: is the json file with the specifications for preprocessing
:return initially preprocessed dataframe
'''
object_columns = [
col for col,
col_type in data.dtypes.iteritems() if col_type == 'object']
# Handles dates without timestamps
for col in object_columns:
try:
data[col] = pd.to_datetime(data[col], infer_datetime_format=True)
except ValueError:
pass
# get target column
target = json_file['data']['target']
y = data[target]
# remove rows where target is NaN
data = data[y.notna()]
y = y[y.notna()]
del data[target]
df_pre_transform = data.copy()
X_train, X_test, y_train, y_test = train_test_split(
data, y, test_size=0.2, shuffle = json_file['shuffle'])
data = {
'train': pd.concat([X_train], axis=1),
'test': pd.concat([X_test], axis=1)
}
# preprocess the dataset
data, full_pipeline = structured_preprocessor(data)
y_split = {'train' : y_train, 'test' : y_test}
target_transformers = preprocess_y_data(y, y_split, target)
return data, y_split, df_pre_transform, target_transformers
def preprocess_y_data(y_full, y_split, target):
target_transformers = None
if not is_numeric_dtype(y_full):
enc = LabelEncoder()
fitted_enc = enc.fit(y_full)
y_split['train'] = pd.Series(fitted_enc.transform(y_split['train']))
y_split['train'].name = target
y_split['test'] = pd.Series(fitted_enc.transform(y_split['test']))
y_split['test'].name = target
target_transformers = [fitted_enc]
return target_transformers
def structured_preprocessor(data, ca_threshold=0.5, text=[]):
'''
    structured preprocessing function, called after the initial changes when the user doesn't provide a preprocessing option
:param data: dataframe
:param ca_threshold: is correspondence analysis threshold for removing columns
:param text: textual columns that require preprocessing by lemmatization, tokenization
:return preprocessed dataframe
'''
# Preprocessing for datetime columns
process_dates(data)
# This will be used inside process_text once complete
if len(text) > 0:
text_preprocessing(data, text)
# identifies the categorical and numerical columns
categorical_columns = data['train'].select_dtypes(
exclude=["number"]).columns
numeric_columns = data['train'].columns[data['train'].dtypes.apply(
lambda c: np.issubdtype(c, np.number))]
# Removes text columns from categorical columns to use in separate pipeline
categorical_columns = [
cat_cols for cat_cols in categorical_columns if cat_cols not in text]
full_pipeline = ColumnTransformer([], remainder="passthrough")
if len(numeric_columns) != 0:
# pipeline for numeric columns
num_pipeline = Pipeline([
('imputer', SimpleImputer(strategy="median")),
('std_scaler', StandardScaler())
])
full_pipeline.transformers.append(
("num", num_pipeline, numeric_columns))
if len(text) != 0:
# Each text col needs a separate pipeline
for x in range(len(text)):
full_pipeline.transformers.append(
(f"text_{x}",
Pipeline(
[
('test',
FunctionTransformer(
lambda x: np.reshape(
x.to_numpy(),
(-1,
1)))),
('imputer',
SimpleImputer(
strategy="constant",
fill_value="")),
('raveler',
FunctionTransformer(
lambda x: x.ravel(),
accept_sparse=True)),
('vect',
TfidfVectorizer()),
('densifier',
FunctionTransformer(
lambda x: x.todense(),
accept_sparse=True)),
('embedder',
FunctionTransformer(
textembedder,
accept_sparse=True))]),
text[x]))
if len(categorical_columns) != 0:
combined = pd.concat([data['train'], data['test']], axis=0)
ca_threshold = combined.shape[0] * \
.25 if ca_threshold is None else combined.shape[0] * ca_threshold
if too_many_values(combined[categorical_columns], ca_threshold):
cat_pipeline = Pipeline([
('imputer', SimpleImputer(strategy="constant", fill_value="")),
('one_hotencoder', OneHotEncoder(handle_unknown='ignore')),
('transformer', FunctionTransformer(lambda x: x.toarray(), accept_sparse=True)),
('ca', CA(n_components=-1))
])
else:
cat_pipeline = Pipeline([
('imputer', SimpleImputer(strategy="constant", fill_value="")),
('label_enconder', LabelEncoder())
])
full_pipeline.transformers.append(
('cat', cat_pipeline, categorical_columns))
train = full_pipeline.fit_transform(data['train'])
train_cols = generate_column_labels(full_pipeline, numeric_columns, text)
test = full_pipeline.transform(data['test'])
test_cols = generate_column_labels(full_pipeline, numeric_columns, text)
# Ternary clause because when running housing.csv,
# the product of preprocessing is np array, but not when using landslide
# data... not sure why
data['train'] = pd.DataFrame(
(train.toarray() if not isinstance(
train,
np.ndarray) else train),
columns=train_cols)
data['test'] = pd.DataFrame(
(test.toarray() if not isinstance(
test,
np.ndarray) else test),
columns=test_cols)
return data, full_pipeline
def process_dates(data):
'''
function to process dates
:param data: dataframe
:return dataframe with dates preprocessed
'''
for df in data:
df = data[df]
datetime_cols = df.select_dtypes('datetime64')
for col in datetime_cols:
df[f'{col}_DayOfWeek'] = df[col].dt.day_name()
df[f'{col}_Year'] = df[col].dt.year
df[f'{col}_Month'] = df[col].dt.month_name()
df[f'{col}_MonthDay'] = df[col].dt.day
del df[col]
# Preprocesses text for word embedding
def text_preprocessing(data, text_cols):
'''
function to process text
:param data: dataframe
:param text_cols: are the text columns that can be preprocessed
    :return dataframe with text columns preprocessed (updated in place)
'''
lemmatizer = WordNetLemmatizer()
combined = pd.concat([data['train'], data['test']], axis=0)
spell = Speller(fast=True)
for col in text_cols:
combined[col] = combined[col].apply(
lambda x: x.lower() if isinstance(x, str) else x)
stop_words = set(stopwords.words('english'))
for col in text_cols:
preprocessed_text = []
for words in combined[col]:
if words is not np.nan:
words = word_tokenize(words)
words = [word for word in words if word.isalpha()]
words = [word for word in words if word not in stop_words]
words = [spell(word) for word in words]
words = [lemmatizer.lemmatize(word) for word in words]
preprocessed_text.append(' '.join(words))
else:
preprocessed_text.append(np.nan)
combined[col] = preprocessed_text
data['train'] = combined.iloc[:len(data['train'])]
data['test'] = combined.iloc[len(data['train']):]
def textembedder(text):
'''
function to embed textual columns
:param text: the text columns
:return the text in embedded columns
'''
total = list()
for i in text:
total.append(np.sum(i))
return np.reshape(total, (-1, 1))
# Sees if one hot encoding occurred, if not just uses numeric cols
def generate_column_labels(full_pipeline, numeric_cols, text_cols):
# Check if one hot encoding was performed
if 'cat' in full_pipeline.named_transformers_:
# If ca was used
if isinstance(full_pipeline.named_transformers_['cat'][-1], CA):
ca = full_pipeline.named_transformers_['cat'][-1]
encoded_cols = [f'CA_{x}' for x in range(len(ca.eigenvalues_))]
cols = [*list(numeric_cols), *encoded_cols, *text_cols]
else:
try:
encoded_cols = full_pipeline.named_transformers_[
'cat']['one_hotencoder'].get_feature_names()
cols = [*list(numeric_cols), *encoded_cols, *text_cols]
except Exception as error:
# For debugging only
                print(error)
cols = list(numeric_cols)
return cols
else:
return [*list(numeric_cols), *text_cols]
# Method to calculate how many columns the data set will
# have after one hot encoding
# Decides whether CA is needed or not essentially
# mca_threshold is the len of the dataset * .25 to calculate the proportion of
# when to apply CA
def too_many_values(data, ca_threshold):
total_unique = 0
for col in data:
if total_unique > ca_threshold:
return True
# Use value_counts() due to same columns having strings and floats
total_unique += len(data[col].value_counts())
return False
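

# Hedged note (not part of the original module): initial_preprocessor() above
# expects its `json_file` specification shaped roughly like
#     {"data": {"target": "<target column>"}, "shuffle": True}
# since it only reads json_file['data']['target'] and json_file['shuffle'].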
| 34.39172
| 150
| 0.602278
|
6b7425f79dff273142b8fe3b553aa948595ab192
| 4,408
|
py
|
Python
|
test/functional/interface_bitcoin_cli.py
|
brebcoin/brebcoin
|
17829cdd63d79e8950da03d9c4ff16a4dd847875
|
[
"MIT"
] | null | null | null |
test/functional/interface_bitcoin_cli.py
|
brebcoin/brebcoin
|
17829cdd63d79e8950da03d9c4ff16a4dd847875
|
[
"MIT"
] | null | null | null |
test/functional/interface_bitcoin_cli.py
|
brebcoin/brebcoin
|
17829cdd63d79e8950da03d9c4ff16a4dd847875
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test brebcoin-cli"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_process_error, get_auth_cookie
class TestBitcoinCli(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
"""Main test logic"""
cli_response = self.nodes[0].cli("-version").send_cli()
assert("Brebcoin Core RPC client version" in cli_response)
self.log.info("Compare responses from getwalletinfo RPC and `brebcoin-cli getwalletinfo`")
if self.is_wallet_compiled():
cli_response = self.nodes[0].cli.getwalletinfo()
rpc_response = self.nodes[0].getwalletinfo()
assert_equal(cli_response, rpc_response)
self.log.info("Compare responses from getblockchaininfo RPC and `brebcoin-cli getblockchaininfo`")
cli_response = self.nodes[0].cli.getblockchaininfo()
rpc_response = self.nodes[0].getblockchaininfo()
assert_equal(cli_response, rpc_response)
user, password = get_auth_cookie(self.nodes[0].datadir)
self.log.info("Test -stdinrpcpass option")
assert_equal(0, self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input=password).getblockcount())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input="foo").echo)
self.log.info("Test -stdin and -stdinrpcpass")
assert_equal(["foo", "bar"], self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input=password + "\nfoo\nbar").echo())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input="foo").echo)
self.log.info("Test connecting to a non-existing server")
assert_raises_process_error(1, "Could not connect to the server", self.nodes[0].cli('-rpcport=1').echo)
self.log.info("Test connecting with non-existing RPC cookie file")
assert_raises_process_error(1, "Could not locate RPC credentials", self.nodes[0].cli('-rpccookiefile=does-not-exist', '-rpcpassword=').echo)
self.log.info("Make sure that -getinfo with arguments fails")
assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help)
self.log.info("Compare responses from `brebcoin-cli -getinfo` and the RPCs data is retrieved from.")
cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
if self.is_wallet_compiled():
wallet_info = self.nodes[0].getwalletinfo()
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
assert_equal(cli_get_info['version'], network_info['version'])
assert_equal(cli_get_info['protocolversion'], network_info['protocolversion'])
if self.is_wallet_compiled():
assert_equal(cli_get_info['walletversion'], wallet_info['walletversion'])
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
assert_equal(cli_get_info['connections'], network_info['connections'])
assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test")
if self.is_wallet_compiled():
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest'])
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
# unlocked_until is not tested because the wallet is not encrypted
if __name__ == '__main__':
TestBitcoinCli().main()
| 55.1
| 160
| 0.695554
|
97caf7a7584269ae9301e26ea3de8b2ab1f000dd
| 4,354
|
py
|
Python
|
dc_checking/ldgplot.py
|
yuemning/dc-bucket-elim
|
c962b8fc638efbacbabbdf8c6f170c5a11be072d
|
[
"MIT"
] | 2
|
2020-08-24T08:37:56.000Z
|
2020-10-02T02:37:12.000Z
|
dc_checking/ldgplot.py
|
yuemning/dc-bucket-elim
|
c962b8fc638efbacbabbdf8c6f170c5a11be072d
|
[
"MIT"
] | null | null | null |
dc_checking/ldgplot.py
|
yuemning/dc-bucket-elim
|
c962b8fc638efbacbabbdf8c6f170c5a11be072d
|
[
"MIT"
] | 1
|
2020-11-24T20:40:42.000Z
|
2020-11-24T20:40:42.000Z
|
import matplotlib.pyplot as plt
import networkx as nx
import math
def distance(pos1, pos2):
return math.sqrt((pos1[0] - pos2[0])**2 + (pos1[1] - pos2[1])**2)
class LDGPlot():
"""Plotting tool for labeled distance graph.
Given a reference to labeled distance graph LDG, plot the graph.
Assumes LDG is unmodified during initialization.
"""
def __init__(self, g):
self.ldg = g
# Find uncontrollable nodes
self.u_nodes = set()
for e in self.ldg.edges(data=True):
_, _, data = e
if data['labelType'] is not None:
self.u_nodes.add(data['label'])
self.c_nodes = set(self.ldg.nodes()).difference(self.u_nodes)
# Compute layout
# self.pos = nx.layout.spring_layout(self.ldg)
# self.pos = nx.random_layout(self.ldg)
self.pos = nx.planar_layout(self.ldg)
self.xmin = None
self.xmax = None
self.ymin = None
self.ymax = None
# Parameters
self.curve_ratio = 0.2
def plot(self):
"""Plot the current LDG.
Notice that a node can have attributes 'color'.
And an edge can have attributes 'color', 'linewidth' and 'linestyle'.
"""
pos = self.pos
curr_nodes = set(self.ldg.nodes())
c_nodes = list(curr_nodes.intersection(self.c_nodes))
u_nodes = list(curr_nodes.intersection(self.u_nodes))
labels = {}
node2data = {}
for v, data in self.ldg.nodes(data=True):
labels[v] = v
node2data[v] = data
node_color = [node2data[v]['color'] if 'color' in node2data[v] else 'w' for v in c_nodes]
nx.draw_networkx_nodes(self.ldg, pos, nodelist=c_nodes, node_shape = 'o', node_color = node_color, node_size = 250, alpha = 1, linewidths=1, edgecolors= 'black')
node_color = [node2data[v]['color'] if 'color' in node2data[v] else 'w' for v in u_nodes]
nx.draw_networkx_nodes(self.ldg, pos, nodelist=u_nodes, node_shape = 's', node_color = node_color, node_size = 250, alpha = 1, linewidths=1, edgecolors= 'black')
nx.draw_networkx_labels(self.ldg, pos, labels, font_size=10)
ax = plt.gca()
for e in self.ldg.edges(data=True, keys=True):
s, t, key, data = e
linestyle = '-'
if 'linestyle' in data:
linestyle = data['linestyle']
color = 'black'
if 'color' in data:
color = data['color']
linewidth = 1
if 'linewidth' in data:
linewidth = data['linewidth']
ax.annotate("",
xy=pos[t], xycoords='data',
xytext=pos[s], textcoords='data',
arrowprops=dict(arrowstyle="->", color=color,
linestyle=linestyle,
linewidth=linewidth,
shrinkA=8, shrinkB=8,
patchA=None, patchB=None,
connectionstyle="arc3,rad=rrr".replace('rrr',str(self.curve_ratio*key + self.curve_ratio)
),
),
)
weight = data['weight']
label = ""
if data['labelType'] == 'lower':
label = "L:" + data['label'] + ":"
elif data['labelType'] == 'upper':
label = "U:" + data['label'] + ":"
pos_distance = distance(pos[s], pos[t])
pos_delta = pos[t] - pos[s]
pos_label = (pos[s] + pos[t]) /2
sine = pos_delta[1] / pos_distance
cosine = pos_delta[0] / pos_distance
half_distance = (self.curve_ratio * key + self.curve_ratio)/2
pos_offset = [sine * half_distance * pos_distance, -cosine * half_distance * pos_distance]
ax.annotate("{}{}".format(label, weight),
xy=pos_label + pos_offset, xycoords='data')
if self.xmin is not None:
plt.axis('equal')
ax.set(xlim=(self.xmin, self.xmax), ylim=(self.ymin, self.ymax))
else:
self.xmin, self.xmax, self.ymin, self.ymax = plt.axis('equal')
plt.show()
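

# Hedged usage sketch (not part of the original module); the edge attribute
# names ('weight', 'label', 'labelType') follow what __init__/plot() read
# above, while the tiny contingent-link graph itself is an illustrative
# assumption.
if __name__ == '__main__':
    g = nx.MultiDiGraph()
    g.add_edge('A', 'B', weight=5, label='B', labelType='lower')   # lower-case edge
    g.add_edge('B', 'A', weight=-2, label='B', labelType='upper')  # upper-case edge
    g.add_edge('A', 'C', weight=7, label=None, labelType=None)     # ordinary edge
    g.add_edge('C', 'B', weight=3, label=None, labelType=None)
    LDGPlot(g).plot()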
| 39.225225
| 169
| 0.522508
|
2f51b7f1451ae7b2cd94c7303e318f07fedf0a1e
| 17,023
|
py
|
Python
|
registered/routing.py
|
paulswartz/registered
|
3f59f4886d723631d1304503558e0f5ea53a3139
|
[
"MIT"
] | null | null | null |
registered/routing.py
|
paulswartz/registered
|
3f59f4886d723631d1304503558e0f5ea53a3139
|
[
"MIT"
] | null | null | null |
registered/routing.py
|
paulswartz/registered
|
3f59f4886d723631d1304503558e0f5ea53a3139
|
[
"MIT"
] | null | null | null |
"""
Calculate shortest/fastest paths for missing intervals.
"""
from difflib import SequenceMatcher
from itertools import count
import os
import sys
import attr
import folium
import osmnx as ox
import rtree
import shapely
from shapely.geometry import MultiPoint, box
import networkx as nx
from registered.routing_helpers import (
clean_width,
ensure_set,
restrictions_in_polygon,
angle_offset,
cut,
)
DEFAULT_COLORS = ["red", "yellow", "blue", "green"]
USEFUL_NODE_TAGS = []
USEFUL_WAY_TAGS = [
"oneway",
"name",
"highway",
"maxspeed",
"service",
"access",
"width",
"maxheight",
]
def configure_osmnx(**kwargs):
"""
Set configuration for OSMNX.
"""
ox.config(
cache_folder=os.environ.get("OSMNX_CACHE_DIR", "./cache"),
useful_tags_node=USEFUL_NODE_TAGS,
useful_tags_way=USEFUL_WAY_TAGS,
**kwargs,
)
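
# Hedged usage note (not part of the original module): configure_osmnx() is
# meant to be called once before building any graph; extra keyword arguments
# are passed straight through to ox.config() together with the cache folder
# and the tag lists above.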
class EmptyGraph(Exception):
"""
Raised if the graph does not have any data.
"""
class NodesCache:
"""
Cache of the nodes Frame.
Currently only used to generate a known-unique ID for a new node.
"""
# pylint: disable=too-few-public-methods
def __init__(self, gdf):
self.counter = count(gdf.index.max() + 1)
def new_id(self):
"""
Return a new ID to use for a newly-created node.
"""
return next(self.counter)
class EdgesCache:
"""
Cache of the edges GeoDataFrame, with some helpful methods for querying/updating.
"""
def __init__(self, gdf):
self.gdf = gdf
self.counter = count()
capacity = int(len(gdf) * 1.1)
props = rtree.index.Property(
index_capacity=capacity,
dimension=2,
variant=rtree.index.RT_Star,
fill_factor=0.9,
)
self.index = rtree.index.Index(
(
(next(self.counter), t.geometry.bounds, t.Index)
for t in gdf.itertuples()
),
properties=props,
)
def nearest_edges(self, point):
"""
Return the nearest edges to the given point.
If more than one edge is closest (and they aren't service roads),
returns the one where the point is on the right (not left) side.
"""
# get a few nearest edges to test, then get the actual closest one
nearest = self.gdf.loc[self.index.nearest(point.bounds, 4, objects="raw")]
distances = nearest["geometry"].map(point.distance, na_action="ignore")
# bias against starting on a motorway
distances.loc[nearest.highway.str.startswith("motorway")] *= 3
if hasattr(point, "description"):
# bias the distance towards more similar names. this helps put
# the point on the right edge, given a description like
# "Washington St @ Blah St".
name_ratio = nearest["name"].map(
lambda x: SequenceMatcher(None, point.description, x).ratio(),
na_action="ignore",
)
if name_ratio.notna().any():
name_ratio = name_ratio.fillna(name_ratio.mean())
distances = distances / name_ratio
min_distance = distances.min() + 1e-6
within_distance = nearest.loc[distances <= min_distance]
if len(within_distance) < 2:
# only one closest edge, return it
return within_distance
if within_distance["highway"].eq("service").all():
# all edges are service roads, so allow going either direction
return within_distance
# otherwise, find which of the multiple edges has the point on the
# right-hand side.
def calc_angle(row):
            # might need to be updated if we start simplifying the graph. in
# that case, we'd need to find the bearing at the projection of
# point on the given geometry. -ps
(tail_x, tail_y) = row["geometry"].coords[0]
angle_bearing = ox.bearing.get_bearing((tail_y, tail_x), (point.y, point.x))
return angle_offset(row["bearing"], angle_bearing)
offset = within_distance.apply(calc_angle, axis=1)
# offsets >0 are on the right-hand side
idx = offset.idxmax()
return within_distance.loc[[idx]]
def geometry(self, from_node, to_node=None):
"""
Return the geometry for the given from/to edge.
"""
if to_node is None:
edge = from_node
else:
edge = (from_node, to_node, 0)
return self.gdf.loc[edge, "geometry"]
def update(self, graph):
"""
Update the cache with the new edges from the given graph.
"""
gdf = ox.graph_to_gdfs(graph, nodes=False)
gdf = gdf.loc[~gdf.index.isin(self.gdf.index)]
self.gdf = self.gdf.append(gdf)
self.gdf.sort_index(inplace=True)
for edge in gdf.itertuples():
self.index.insert(next(self.counter), edge.geometry.bounds, edge.Index)
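# Illustrative note (not part of the original module): in EdgesCache.nearest_edges,
# a tie between two directed edges sharing the same geometry is broken by bearing.
# For an edge running due north (bearing 0) and a point whose bearing from the edge
# tail is 90 degrees (due east), the point lies on the right-hand side, so that
# direction of travel is kept.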
@attr.s(repr=False)
class RestrictedGraph:
"""
    Model an OSM street graph with turn restrictions.
- `graph`: a `nx.MultiDiGraph` representing the primitive graph
- `restricted_nodes`: a Set of node IDs which have a turn restriction
- `restrictions`: a List of (v, {from_osmids}, {to_osmids}) triples which
represent invalid turns
"""
graph = attr.ib()
restricted_nodes = attr.ib(factory=set)
restrictions = attr.ib(factory=list)
def __attrs_post_init__(self):
# pylint: disable=attribute-defined-outside-init
(nodes, edges) = ox.utils_graph.graph_to_gdfs(self.graph)
self._nodes_cache = NodesCache(nodes)
self._edges_cache = EdgesCache(edges)
self._created_nodes = {}
def shortest_path(self, from_point, to_point, weight="travel_time"):
"""
Calculate the shortest path from/to given lat/lon pairs.
The shortest path is either by travel time (default) or by length (weight="length").
"""
orig = self.closest_node(from_point)
dest = self.closest_node(to_point)
try:
(_length, path) = nx.shortest_path_with_turn_restrictions(
self.graph, orig, dest, self.restricted, weight=weight
)
except nx.NetworkXNoPath:
return None
return path
def compass_direction(self, path):
"""
Return the compass direction the path takes at the end.
North = 0, East = 90, South = 180, West = 270
None if the direction is unknown (path is only a single node).
"""
if len(path) < 2:
return None
(second, last) = path[-2:]
attrs = self.graph.edges[second, last, 0]
return attrs["bearing"]
def closest_node(self, point):
"""
Return the ID of the closest node to the given Point.
If there isn't an existing node that's close, find the nearest edges
and split them at the given point, returning the new node ID.
"""
if point.wkb in self._created_nodes:
return self._created_nodes[point.wkb]
ox.utils.log(f"finding closest edge to {point.wkt}")
nearest_edges = self._edges_cache.nearest_edges(point)
snapped_point = shapely.ops.nearest_points(
nearest_edges.iloc[0].geometry, point
)[0]
ox.utils.log(f"snapping {point.wkt} to {snapped_point.wkt}")
name = self._nodes_cache.new_id()
ox.utils.log(f"creating new node {name}")
for nearest_edge in nearest_edges.index:
self.split_edge_at_point(nearest_edge, name, snapped_point)
self._created_nodes[point.wkb] = name
return name
def split_edge_at_point(self, edge, name, point):
"""
Given an edge, the new node ID, and a Point, split the given edge in two at Point.
"""
# 1. create a new node at point
# 2. delete the old edge
# 3. create two new edges, from head to node, and node to tail
ox.utils.log(f"splitting {edge} at {point.wkt}")
(head, tail) = edge[:2]
edge_attrs = self.graph.edges[edge].copy()
ox.utils.log(f"edge OSM ID(s): {edge_attrs['osmid']}")
length = edge_attrs.pop("length")
del edge_attrs["travel_time"]
# simple edges don't have a geometry in the graph, only in the cache
edge_attrs.pop("geometry", None)
geometry = self._edges_cache.geometry(edge)
head_percent = geometry.project(point, normalized=True)
[head_geometry, tail_geometry] = cut(geometry, head_percent)
subgraph = nx.MultiDiGraph(crs=self.graph.graph["crs"])
subgraph.add_node(name, y=point.y, x=point.x, geometry=point)
subgraph.add_node(head, **self.graph.nodes[head])
subgraph.add_node(tail, **self.graph.nodes[tail])
subgraph.add_edge(
head,
name,
**edge_attrs,
length=length * head_percent,
geometry=head_geometry,
)
subgraph.add_edge(
name,
tail,
**edge_attrs,
length=length * (1 - head_percent),
geometry=tail_geometry,
)
return self.update(subgraph, name)
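    # Illustrative note (not part of the original module): if the original edge is
    # 100 m long and the snapped point projects 30% of the way along its geometry,
    # the two replacement edges get lengths of 30 m and 70 m; their travel_time is
    # recomputed when update() runs add_graph_features() on the subgraph.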
def update(self, subgraph, name):
"""
Updates the current RestrictedGraph with a new node (name) and edges.
Also updates the caches.
"""
subgraph = self.add_graph_features(subgraph)
self.graph.update(subgraph)
self._edges_cache.update(subgraph)
return name
def path_length(self, path):
"""
Returns the length (in meters) of the given path.
"""
return sum(ox.utils_graph.get_route_edge_attributes(self.graph, path, "length"))
def folium_map(self, from_point, to_point, paths, **kwargs):
"""
Create a `folium.Map` with the given from/to points, and optionally some paths.
Returns the map.
"""
route_map = folium.Map(
tiles="https://cdn.mbta.com/osm_tiles/{z}/{x}/{y}.png",
attr="© <a href='http://osm.org/copyright'>OpenStreetMap</a> contributors",
zoom_start=1,
**kwargs,
)
for (path, color) in zip(paths, DEFAULT_COLORS):
locations = [
(row[1], row[0])
for (from_node, to_node) in zip(path, path[1:])
for row in self._edges_cache.geometry(from_node, to_node).coords
]
folium.PolyLine(locations, weight=2, color=color).add_to(route_map)
folium.Marker(
(from_point.y, from_point.x), icon=folium.Icon(icon="play", color="green")
).add_to(route_map)
folium.Marker(
(to_point.y, to_point.x), icon=folium.Icon(icon="stop", color="red")
).add_to(route_map)
        [west, south, east, north] = MultiPoint([from_point, to_point]).bounds
        route_map.fit_bounds([(south, west), (north, east)])
return route_map
QUERIES = {
# This Overpass API query gets private access roads which don't block
# public service vehicles (PSVs) aka buses.
"drive_service": {"network_type": "drive_service"},
"PSV": {"custom_filter": '["highway"]["access"="private"]["psv"!~"no"]'},
"parking": {"custom_filter": '["highway"]["service"~"parking|parking_aisle"]'},
}
@classmethod
def from_points(cls, points):
"""
Create a RestrictedGraph covering a list of (lat, lon) points.
The polygon covering all the points is generated by finding the
bounding box for the points, then querying the OSM API for that box.
"""
# pylint: disable=protected-access
if not points:
raise EmptyGraph("unable to build graph with no points")
polygon = box(*MultiPoint(list(points)).buffer(0.02).bounds)
graph = nx.MultiDiGraph()
for name, kwargs in cls.QUERIES.items():
try:
ox.utils.log(f"fetching {name} graph")
extra_graph = ox.graph_from_polygon(
polygon,
retain_all=True,
simplify=False,
clean_periphery=False,
truncate_by_edge=False,
**kwargs,
)
except ox._errors.EmptyOverpassResponse:
pass
else:
graph.update(extra_graph)
if len(graph) == 0:
raise EmptyGraph(f"unable to build graph from {polygon.wkt}")
ox.utils.log("fetching restrictions")
(restricted_nodes, restrictions) = restrictions_in_polygon(polygon)
# simplification disabled for now; causes a test failure -ps
# graph = ox.simplification.simplify_graph(graph)
ox.utils.log("adding graph features")
graph = cls.add_graph_features(graph)
return cls(
graph=graph, restricted_nodes=restricted_nodes, restrictions=restrictions
)
@classmethod
def add_graph_features(cls, graph):
"""
Update the given graph with important features.
- width in meters
- speeds in km/h
- travel times
- bearings in degrees
"""
graph = cls.add_widths(graph)
# impute speed on all edges missing data
graph = ox.add_edge_speeds(
graph,
hwy_speeds={
"motorway": 90,
"trunk": 90,
"trunk_link": 60,
"primary": 60,
"secondary": 50,
"tertiary": 30,
"private": 16,
"service": 16,
"residential": 16,
},
)
# calculate travel time (seconds) for all edges
graph = ox.add_edge_travel_times(graph)
# penalize some types of edges
graph = cls.add_edge_penalties(graph)
# add edge bearings
graph = ox.add_edge_bearings(graph)
return graph
@staticmethod
def add_widths(graph):
"""
Add "width_m" and "maxheight_m" to each edge with a width value, normalizing it to meters.
"""
widths = nx.get_edge_attributes(graph, "width")
widths_m = {k: clean_width(v) for (k, v) in widths.items()}
nx.set_edge_attributes(graph, values=widths_m, name="width_m")
maxheights = nx.get_edge_attributes(graph, "maxheight")
maxheights_m = {k: clean_width(v) for (k, v) in maxheights.items()}
nx.set_edge_attributes(graph, values=maxheights_m, name="maxheight_m")
return graph
@classmethod
def add_edge_penalties(cls, graph):
"""
Penalize some edges to reduce their use in routing.
"""
edges = ox.utils_graph.graph_to_gdfs(
graph, nodes=False, fill_edge_geometry=False
)
key = ["travel_time", "length"]
# penalize residential streets
residential = edges["highway"].eq("residential")
edges.loc[residential, key] *= 1.5
# penalize narrow streets
if "width_m" in edges.columns:
narrow = edges["width_m"] < 5
edges.loc[narrow, key] *= 1.5
# heavily penalize edges with a height limit
if "maxheight_m" in edges.columns:
bus_height = 3.7 # ~12ft
edges.loc[
edges["maxheight_m"] < bus_height, ["travel_time", "length"]
] = sys.float_info.max
nx.set_edge_attributes(graph, values=edges["travel_time"], name="travel_time")
nx.set_edge_attributes(graph, values=edges["length"], name="length")
return graph
# pylint: disable=too-many-arguments
def restricted(self, origin, turn, dest, from_attrs, to_attrs):
"""
Return a boolean indicating if the given turn is restricted.
A turn is restricted if there is a `via` relation of `type`
`restriction` and a `restriction` starting with `no_` (like
`no_left_turn` or `no_uturn`)
It is also restricted if the first and last nodes are the same (a
U-turn).
"""
if origin == dest:
# avoid u-turns
return True
from_bearing = from_attrs.get("bearing")
to_bearing = to_attrs.get("bearing")
offset = angle_offset(from_bearing, to_bearing)
if abs(offset) > 135:
# avoid making U-ish turns
return True
if turn not in self.restricted_nodes:
return False
from_ways = ensure_set(from_attrs["osmid"])
to_ways = ensure_set(to_attrs["osmid"])
for (node, invalid_from, invalid_to) in self.restrictions:
if node != turn:
continue
if (invalid_from & from_ways) and (invalid_to & to_ways):
return True
return False
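# Minimal standalone sketch (not part of the original module): the core of the
# turn-restriction check above reduces to set intersection over OSM way IDs. The
# node and way IDs below are made up purely for illustration.
if __name__ == "__main__":
    example_restrictions = [(7, {100, 101}, {200})]  # (via node, {from ways}, {to ways})
    example_from_ways, example_to_ways = {101}, {200}
    banned = any(
        node == 7 and (invalid_from & example_from_ways) and (invalid_to & example_to_ways)
        for (node, invalid_from, invalid_to) in example_restrictions
    )
    print(banned)  # True: turning from way 101 onto way 200 via node 7 is restricted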
| 33.509843
| 98
| 0.592845
|
ac2bfd3f8ca5020afd95b3b3edc68d8b70d76ccb
| 4,093
|
py
|
Python
|
mmtbx/refinement/real_space/weight.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 155
|
2016-11-23T12:52:16.000Z
|
2022-03-31T15:35:44.000Z
|
mmtbx/refinement/real_space/weight.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 590
|
2016-12-10T11:31:18.000Z
|
2022-03-30T23:10:09.000Z
|
mmtbx/refinement/real_space/weight.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 115
|
2016-11-15T08:17:28.000Z
|
2022-02-09T15:30:14.000Z
|
from __future__ import absolute_import, division, print_function
from cctbx.array_family import flex
import mmtbx.refinement.real_space.individual_sites
import random
from six.moves import range
class run(object):
def __init__(
self,
map_data,
xray_structure,
pdb_hierarchy,
geometry_restraints_manager,
gradients_method="fd",
ncs_groups=None,
rms_bonds_limit=0.015,
rms_angles_limit=2.0,
real_space_gradients_delta=1./4,
max_iterations = 100,
range_size=10,
n_ranges=10,
default_weight=50):
"""
Fast determination of optimal data/restraints weight for real-space refinement
of individual sites.
"""
self.msg_strings = []
# split chains into chunks
result = []
for model in pdb_hierarchy.models():
for chain in model.chains():
if(chain.is_protein() or chain.is_na()):
residue_range_sel = flex.size_t()
cntr = 0
for rg in chain.residue_groups():
i_seqs = rg.atoms().extract_i_seq()
cntr += 1
if(cntr<10):
residue_range_sel.extend(i_seqs)
else:
result.append(residue_range_sel)
residue_range_sel = flex.size_t()
residue_range_sel.extend(i_seqs)
cntr = 0
if(len(result)==0):
assert residue_range_sel.size()>0
result.append(residue_range_sel)
self.msg_strings.append("number of chunks: %d"%len(result))
# randomly pick chunks
random_chunks = []
if(len(result)>0):
if len(result) <= n_ranges:
# just try them all, no need to randomize
random_chunks = list(range(len(result)))
else:
while len(random_chunks) <= n_ranges:
          # Add only unique choices until the list is long enough.
          # Could be slightly slow when len(random_chunks) is only slightly > n_ranges
rc = random.choice(range(len(result)))
if rc not in random_chunks:
random_chunks.append(rc)
self.msg_strings.append("random chunks:"%random_chunks)
# setup refinery
xrs_dc = xray_structure.deep_copy_scatterers()
sel_all = flex.bool(xrs_dc.scatterers().size(), True)
grm_dc = geometry_restraints_manager.select(sel_all)
ro = mmtbx.refinement.real_space.individual_sites.box_refinement_manager(
xray_structure = xrs_dc,
target_map = map_data,
geometry_restraints_manager = grm_dc.geometry,
real_space_gradients_delta = real_space_gradients_delta,
max_iterations = max_iterations,
ncs_groups = ncs_groups,
gradients_method = gradients_method)
optimal_weights = flex.double()
# loop over chunks: determine best weight for each chunk
if(len(result)==0):
random_chunks = [None]
for chunk in random_chunks:
if(chunk is None): sel = flex.bool(xrs_dc.scatterers().size(), True)
else:
sel = result[chunk]
sel = flex.bool(xrs_dc.scatterers().size(), sel)
ro.refine(
selection = sel,
rms_bonds_limit = rms_bonds_limit,
rms_angles_limit = rms_angles_limit)
self.msg_strings.append("chunk %s optimal weight: %9.4f"%(
str(chunk), ro.weight_optimal))
if(ro.weight_optimal is not None):
optimal_weights.append(ro.weight_optimal)
# select overall best weight
sel = flex.sort_permutation(optimal_weights)
optimal_weights = optimal_weights.select(sel)
self.weight = flex.mean_default(
optimal_weights[:optimal_weights.size()//2], default_weight)
#mean = flex.mean(optimal_weights)
#sel = optimal_weights < mean*3
#sel &= optimal_weights > mean/3
#if(sel.count(True)>0):
# optimal_weights = optimal_weights.select(sel)
#self.weight = flex.mean_default(optimal_weights, default_weight)
self.msg_strings.append("overall best weight: %9.4f"%self.weight)
def show(self, log, prefix=""):
for m in self.msg_strings:
print("%s %s"%(prefix, m), file=log)
| 37.898148
| 78
| 0.645248
|
c4a8203576982b399f56c445eecafc281301430f
| 123
|
py
|
Python
|
CommentManagement/admin.py
|
officialrafsan/travelxpRED
|
2209fba418496c7e64dde8cba271cfb8020856ac
|
[
"MIT"
] | null | null | null |
CommentManagement/admin.py
|
officialrafsan/travelxpRED
|
2209fba418496c7e64dde8cba271cfb8020856ac
|
[
"MIT"
] | null | null | null |
CommentManagement/admin.py
|
officialrafsan/travelxpRED
|
2209fba418496c7e64dde8cba271cfb8020856ac
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import Comment
admin.site.register([Comment])
| 17.571429
| 32
| 0.788618
|
a98c6b223b5351f3333cb0749a3b8a5380ef7968
| 3,749
|
py
|
Python
|
test/websockets/test_poller.py
|
autokrator-uog/backend
|
0a2d46f9b52465ed8dfc9234858d6a93f3754c05
|
[
"MIT"
] | null | null | null |
test/websockets/test_poller.py
|
autokrator-uog/backend
|
0a2d46f9b52465ed8dfc9234858d6a93f3754c05
|
[
"MIT"
] | null | null | null |
test/websockets/test_poller.py
|
autokrator-uog/backend
|
0a2d46f9b52465ed8dfc9234858d6a93f3754c05
|
[
"MIT"
] | 1
|
2019-06-09T23:51:13.000Z
|
2019-06-09T23:51:13.000Z
|
import json
import pytest
from unittest.mock import patch
from endpoints.websockets.poller import NewInfoPollerThread
class TestPoller(object):
@pytest.fixture()
def patch_accounts_service_client(self):
p = patch("endpoints.websockets.poller.AccountsServiceClient")
yield p.start().return_value
p.stop()
@pytest.fixture()
def patch_get_all_account_ids_to_update(self):
p = patch("endpoints.websockets.poller.get_all_account_ids_to_update")
yield p.start()
p.stop()
@pytest.fixture()
def patch_get_socket_for_account_id(self):
p = patch("endpoints.websockets.poller.get_socket_for_account_id")
yield p.start()
p.stop()
@pytest.fixture()
def poller_thread(self, flask_app, patch_accounts_service_client, patch_get_all_account_ids_to_update, patch_get_socket_for_account_id):
return NewInfoPollerThread(flask_app)
def test_poller_nothing_to_poll(self, poller_thread, patch_accounts_service_client, patch_get_all_account_ids_to_update, patch_get_socket_for_account_id):
patch_get_all_account_ids_to_update.return_value = []
poller_thread.poll()
patch_accounts_service_client.get_account_statement.assert_not_called()
patch_get_socket_for_account_id.assert_not_called()
def test_poller_no_change(self, poller_thread, patch_accounts_service_client, patch_get_all_account_ids_to_update, patch_get_socket_for_account_id):
patch_get_all_account_ids_to_update.return_value = [1]
patch_accounts_service_client.get_account_statement.return_value = []
# first poll, sets up
poller_thread.poll()
assert patch_get_all_account_ids_to_update.called
patch_accounts_service_client.get_account_statement.assert_called_with(1)
patch_get_socket_for_account_id.assert_not_called()
# must reset the mocks
patch_get_all_account_ids_to_update.reset_mock()
patch_accounts_service_client.get_account_statement.reset_mock()
patch_get_socket_for_account_id.reset_mock()
# second poll, no change
poller_thread.poll()
assert patch_get_all_account_ids_to_update.called
patch_accounts_service_client.get_account_statement.assert_called_with(1)
patch_get_socket_for_account_id.assert_not_called()
def test_poller_changed(self, poller_thread, patch_accounts_service_client, patch_get_all_account_ids_to_update, patch_get_socket_for_account_id):
patch_get_all_account_ids_to_update.return_value = [1]
patch_accounts_service_client.get_account_statement.return_value = []
# first poll, sets up
poller_thread.poll()
assert patch_get_all_account_ids_to_update.called
patch_accounts_service_client.get_account_statement.assert_called_with(1)
patch_get_socket_for_account_id.assert_not_called()
# must reset the mocks
patch_get_all_account_ids_to_update.reset_mock()
patch_accounts_service_client.get_account_statement.reset_mock()
patch_get_socket_for_account_id.reset_mock()
# set up the change
patch_accounts_service_client.get_account_statement.return_value = [{"Amount": 1234}]
# second poll, should detect change and fire a message on websockets
poller_thread.poll()
assert patch_get_all_account_ids_to_update.called
patch_accounts_service_client.get_account_statement.assert_called_with(1)
patch_get_socket_for_account_id.return_value.send.assert_called_with(json.dumps({
"update_type": "new_statement_item",
"for_account_id": 1,
"data": {
"Amount": 1234
}
}))
| 39.882979
| 158
| 0.750067
|
84636c407610f85147e110c66ef90f189d473e39
| 3,053
|
py
|
Python
|
utils/python-rpc/framework/rpc.py
|
Aifolin/motifcoin
|
82c3c5378240f43e6cfde762c4c2dbc92b645cc3
|
[
"MIT"
] | null | null | null |
utils/python-rpc/framework/rpc.py
|
Aifolin/motifcoin
|
82c3c5378240f43e6cfde762c4c2dbc92b645cc3
|
[
"MIT"
] | null | null | null |
utils/python-rpc/framework/rpc.py
|
Aifolin/motifcoin
|
82c3c5378240f43e6cfde762c4c2dbc92b645cc3
|
[
"MIT"
] | 1
|
2019-08-05T13:04:45.000Z
|
2019-08-05T13:04:45.000Z
|
# Copyright (c) 2018 The Motif Project
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
# THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import requests
import json
class Response(dict):
def __init__(self, d):
for k in d.keys():
if type(d[k]) == dict:
self[k] = Response(d[k])
elif type(d[k]) == list:
self[k] = []
for i in range(len(d[k])):
if type(d[k][i]) == dict:
self[k].append(Response(d[k][i]))
else:
self[k].append(d[k][i])
else:
self[k] = d[k]
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = value
def __eq__(self, other):
if type(other) == dict:
return self == Response(other)
if self.keys() != other.keys():
return False
for k in self.keys():
if self[k] != other[k]:
return False
return True
class JSONRPC(object):
def __init__(self, url):
self.url = url
def send_request(self, path, inputs, result_field = None):
res = requests.post(
self.url + path,
data=json.dumps(inputs),
headers={'content-type': 'application/json'})
res = res.json()
assert 'error' not in res, res
if result_field:
res = res[result_field]
return Response(res)
def send_json_rpc_request(self, inputs):
return self.send_request("/json_rpc", inputs, 'result')
| 36.345238
| 89
| 0.636096
|
f97abad955bb08abe18f04537086fb478104bcbe
| 2,863
|
py
|
Python
|
tests/test_atprogram.py
|
martinabr/pydgilib
|
9e27b11e74518375ae78959a71f896e92a51cdb1
|
[
"BSD-3-Clause"
] | 2
|
2019-04-05T13:27:54.000Z
|
2020-10-09T22:56:22.000Z
|
tests/test_atprogram.py
|
martinabr/pydgilib
|
9e27b11e74518375ae78959a71f896e92a51cdb1
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_atprogram.py
|
martinabr/pydgilib
|
9e27b11e74518375ae78959a71f896e92a51cdb1
|
[
"BSD-3-Clause"
] | 1
|
2019-09-11T07:48:45.000Z
|
2019-09-11T07:48:45.000Z
|
"""This module holds the automated tests for atprogram."""
from atprogram.atprogram import (atprogram, get_device_info, get_project_size)
from os import path, getcwd
import pytest
project_path = path.join(getcwd(), "UnitTest", "UnitTest")
def test_atprogram_simple():
"""test_atprogram_simple."""
assert not atprogram(project_path=project_path,
clean=True, build=True, program=True)
# @pytest.mark.parametrize("verbose", (0, 3))
@pytest.mark.parametrize("verbose", (3,))
# @pytest.mark.parametrize("clean", (True, False))
@pytest.mark.parametrize("clean", (False,))
@pytest.mark.parametrize("build", (True, False))
@pytest.mark.parametrize("erase, program",
[(False, False), (True, False), (True, True)])
@pytest.mark.parametrize("verify", (False,))
# @pytest.mark.parametrize("verify",
# (pytest.param(True, marks=pytest.mark.xfail), False))
@pytest.mark.parametrize("return_output", (True, False))
@pytest.mark.parametrize("dry_run", (True, False))
def test_atprogram(
verbose, clean, build, erase, program, verify, return_output, dry_run):
"""test_atprogram."""
result = atprogram(
project_path=project_path, verbose=verbose, clean=clean, build=build,
erase=erase, program=program, verify=verify,
return_output=return_output, dry_run=dry_run)
assert not result or (return_output and result[-1] == '0')
@pytest.mark.parametrize("make_command, atprogram_command", [
("--version", "--version"),
(None, "--version"),
("--version", None),
pytest.param(None, None, marks=pytest.mark.xfail(
raises=ValueError), strict=True)])
def test_atprogram_command(make_command, atprogram_command):
"""test_atprogram_command."""
assert 0 == atprogram(make_command=make_command,
atprogram_command=atprogram_command)
def test_get_device_info():
"""test_get_device_info."""
info = get_device_info(verbose=2)
assert isinstance(info, dict)
assert info.keys() == set(('Target voltage', 'Device information',
'Memory Information'))
assert 'Name' in info['Device information']
assert info['Memory Information'].keys() == set((['base', 'fuses']))
def test_get_project_size():
"""test_get_project_size."""
atprogram(project_path=project_path, clean=False, build=True,
erase=False, program=False, verify=False)
size = get_project_size(project_path, verbose=2)
assert isinstance(size, dict)
size_keys = ('text', 'data', 'bss', 'dec', 'hex', 'filename')
assert size.keys() == set(size_keys)
for key in size_keys[:-1]:
assert isinstance(size[key], int)
def test_atprogram_invalid_command():
"""test_atprogram_not_a_command."""
assert atprogram(atprogram_command="invalid_command")
| 38.173333
| 80
| 0.670625
|
45a2ce77620071bcb68dc6184bc3c4f73fcdb744
| 986
|
py
|
Python
|
mautrix/util/config/string.py
|
tulir/mautrix-appservice-python
|
d180603445bb0bc465a7b2ff918c4ac28a5dbfc2
|
[
"MIT"
] | 1
|
2018-08-24T13:33:30.000Z
|
2018-08-24T13:33:30.000Z
|
mautrix/util/config/string.py
|
tulir/mautrix-appservice-python
|
d180603445bb0bc465a7b2ff918c4ac28a5dbfc2
|
[
"MIT"
] | 4
|
2018-07-10T11:43:46.000Z
|
2018-09-03T22:08:02.000Z
|
mautrix/util/config/string.py
|
tulir/mautrix-appservice-python
|
d180603445bb0bc465a7b2ff918c4ac28a5dbfc2
|
[
"MIT"
] | 2
|
2018-07-03T04:07:08.000Z
|
2018-09-10T03:13:59.000Z
|
# Copyright (c) 2022 Tulir Asokan
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import annotations
from abc import ABC
import io
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap
from .base import BaseConfig
from .recursive_dict import RecursiveDict
yaml = YAML()
yaml.indent(4)
yaml.width = 200
class BaseStringConfig(BaseConfig, ABC):
def __init__(self, data: str, base_data: str) -> None:
super().__init__()
self._data = yaml.load(data)
self._base = RecursiveDict(yaml.load(base_data), CommentedMap)
def load(self) -> None:
pass
def load_base(self) -> RecursiveDict[CommentedMap] | None:
return self._base
def save(self) -> str:
buf = io.StringIO()
yaml.dump(self._data, buf)
return buf.getvalue()
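# Illustrative sketch (not part of the original module): the same ruamel round trip
# that save() performs, shown standalone; the config keys below are made up.
if __name__ == "__main__":
    example = yaml.load("homeserver:\n    address: https://example.com\n")
    out = io.StringIO()
    yaml.dump(example, out)
    print(out.getvalue())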
| 25.947368
| 70
| 0.690669
|
c3c17b03c67cfe5844a9c17f022e8aac18ff91dc
| 1,752
|
py
|
Python
|
stackapp/utils.py
|
Taycode/teamwave-interview
|
9136920d58e945da750f462be2b3bbc22be0d7b1
|
[
"MIT"
] | null | null | null |
stackapp/utils.py
|
Taycode/teamwave-interview
|
9136920d58e945da750f462be2b3bbc22be0d7b1
|
[
"MIT"
] | 6
|
2020-06-05T20:34:24.000Z
|
2021-06-10T18:17:05.000Z
|
stackapp/utils.py
|
Taycode/teamwave-interview
|
9136920d58e945da750f462be2b3bbc22be0d7b1
|
[
"MIT"
] | null | null | null |
import requests
from datetime import datetime
class StackAPIQuestion:
tags = []
owner = {}
is_answered = False
view_count = None
answer_count = None
score = None
last_activity_date = None
creation_date = None
last_edit_date = None
question_id = None
link = None
title = None
def __init__(self, data):
self.tags = data['tags']
self.owner = data['owner']
self.is_answered = data['is_answered']
self.view_count = data['view_count']
self.answer_count = data['answer_count']
self.score = data['score']
self.last_activity_date = data['last_activity_date']
self.creation_date = data['creation_date']
        if 'last_edit_date' in data:
self.last_edit_date = data['last_edit_date']
self.question_id = data['question_id']
self.link = data['link']
self.title = data['title']
def __str__(self):
return self.title
class StackAPIConsumer:
url = 'https://api.stackexchange.com/2.2/search'
params = {'site': 'stackoverflow'}
@classmethod
def consume(cls, data):
date = data['fromdate']
if date is not None:
date = datetime(year=date.year, month=date.month, day=date.day)
            epoch_seconds = int(round(date.timestamp()))
            data['fromdate'] = epoch_seconds
date = data['todate']
if date is not None:
date = datetime(year=date.year, month=date.month, day=date.day)
            epoch_seconds = int(round(date.timestamp()))
            data['todate'] = epoch_seconds
print(data)
cls.params.update(data)
response = requests.get(cls.url, params=cls.params)
return response
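# Hypothetical usage sketch (not part of the original module): 'intitle' is a Stack
# Exchange search parameter, but the query below is made up and performs a live HTTP
# request when run.
if __name__ == "__main__":
    from datetime import date
    resp = StackAPIConsumer.consume({
        'intitle': 'django',
        'fromdate': date(2020, 1, 1),
        'todate': date(2020, 2, 1),
    })
    questions = [StackAPIQuestion(item) for item in resp.json().get('items', [])]
    print(len(questions))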
| 29.2
| 75
| 0.609018
|
69d31c8d4c59a0861108541d7832f94494cf6a53
| 423
|
py
|
Python
|
python/tb_web_crawler/venv/Scripts/pip3-script.py
|
GG-yuki/bugs
|
aabd576e9e57012a3390007af890b7c6ab6cdda8
|
[
"MIT"
] | null | null | null |
python/tb_web_crawler/venv/Scripts/pip3-script.py
|
GG-yuki/bugs
|
aabd576e9e57012a3390007af890b7c6ab6cdda8
|
[
"MIT"
] | null | null | null |
python/tb_web_crawler/venv/Scripts/pip3-script.py
|
GG-yuki/bugs
|
aabd576e9e57012a3390007af890b7c6ab6cdda8
|
[
"MIT"
] | null | null | null |
#!C:\Users\Yuki\Desktop\bugs\python\tb_web_crawler\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| 32.538462
| 74
| 0.673759
|
e6d5c8dd7d848239b73593327590af1705e8bc45
| 1,526
|
py
|
Python
|
Tests/Data/Elliptic/square_1x1_SteadyStateDiffusion_Python/bcs_laplace_eq.py
|
fwitte/ogs
|
0b367872fc58ecd4e1dbfe1dcebbc847da6639d7
|
[
"BSD-3-Clause"
] | 1
|
2021-06-25T13:43:06.000Z
|
2021-06-25T13:43:06.000Z
|
Tests/Data/Elliptic/square_1x1_SteadyStateDiffusion_Python/bcs_laplace_eq.py
|
fwitte/ogs
|
0b367872fc58ecd4e1dbfe1dcebbc847da6639d7
|
[
"BSD-3-Clause"
] | 13
|
2015-01-09T13:08:57.000Z
|
2018-01-25T12:56:17.000Z
|
Tests/Data/Elliptic/square_1x1_SteadyStateDiffusion_Python/bcs_laplace_eq.py
|
fwitte/ogs
|
0b367872fc58ecd4e1dbfe1dcebbc847da6639d7
|
[
"BSD-3-Clause"
] | 2
|
2018-03-01T13:07:12.000Z
|
2018-03-01T13:16:22.000Z
|
import OpenGeoSys
from math import pi, sin, cos, sinh, cosh
a = 2.0*pi/3.0
# analytical solution used to set the Dirichlet BCs
def solution(x, y):
return sin(a*x) * sinh(a*y)
# gradient of the analytical solution used to set the Neumann BCs
def grad_solution(x, y):
return a * cos(a*x) * sinh(a*y), \
a * sin(a*x) * cosh(a*y)
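# Note (illustrative, not part of the original input file): u(x, y) = sin(a*x)*sinh(a*y)
# is harmonic, since u_xx = -a^2*u and u_yy = +a^2*u, so u_xx + u_yy = 0; its exact
# gradient is the pair returned by grad_solution above.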
# Dirichlet BCs
class BCTop(OpenGeoSys.BoundaryCondition):
def getDirichletBCValue(self, t, coords, node_id, primary_vars):
x, y, z = coords
assert y == 1.0 and z == 0.0
value = solution(x, y)
return (True, value)
class BCLeft(OpenGeoSys.BoundaryCondition):
def getDirichletBCValue(self, t, coords, node_id, primary_vars):
x, y, z = coords
assert x == 0.0 and z == 0.0
value = solution(x, y)
return (True, value)
class BCBottom(OpenGeoSys.BoundaryCondition):
def getDirichletBCValue(self, t, coords, node_id, primary_vars):
x, y, z = coords
assert y == 0.0 and z == 0.0
value = solution(x, y)
return (True, value)
# Neumann BC
class BCRight(OpenGeoSys.BoundaryCondition):
def getFlux(self, t, coords, primary_vars):
x, y, z = coords
assert x == 1.0 and z == 0.0
value = grad_solution(x, y)[0]
Jac = [ 0.0 ] # value does not depend on primary variable
return (True, value, Jac)
# instantiate BC objects referenced in OpenGeoSys' prj file
bc_top = BCTop()
bc_right = BCRight()
bc_bottom = BCBottom()
bc_left = BCLeft()
| 28.792453
| 68
| 0.629096
|
3a9a158fa9dac7aa96a2b71a844672d46805c04b
| 3,586
|
py
|
Python
|
Rendering/Label/Testing/Python/labeledMesh.py
|
forestGzh/VTK
|
bc98327275bd5cfa95c5825f80a2755a458b6da8
|
[
"BSD-3-Clause"
] | 1,755
|
2015-01-03T06:55:00.000Z
|
2022-03-29T05:23:26.000Z
|
Rendering/Label/Testing/Python/labeledMesh.py
|
forestGzh/VTK
|
bc98327275bd5cfa95c5825f80a2755a458b6da8
|
[
"BSD-3-Clause"
] | 29
|
2015-04-23T20:58:30.000Z
|
2022-03-02T16:16:42.000Z
|
Rendering/Label/Testing/Python/labeledMesh.py
|
forestGzh/VTK
|
bc98327275bd5cfa95c5825f80a2755a458b6da8
|
[
"BSD-3-Clause"
] | 1,044
|
2015-01-05T22:48:27.000Z
|
2022-03-31T02:38:26.000Z
|
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# demonstrate use of point labeling and the selection window
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Create a selection window
xmin = 200
xLength = 100
xmax = xmin + xLength
ymin = 200
yLength = 100
ymax = ymin + yLength
pts = vtk.vtkPoints()
pts.InsertPoint(0, xmin, ymin, 0)
pts.InsertPoint(1, xmax, ymin, 0)
pts.InsertPoint(2, xmax, ymax, 0)
pts.InsertPoint(3, xmin, ymax, 0)
rect = vtk.vtkCellArray()
rect.InsertNextCell(5)
rect.InsertCellPoint(0)
rect.InsertCellPoint(1)
rect.InsertCellPoint(2)
rect.InsertCellPoint(3)
rect.InsertCellPoint(0)
selectRect = vtk.vtkPolyData()
selectRect.SetPoints(pts)
selectRect.SetLines(rect)
rectMapper = vtk.vtkPolyDataMapper2D()
rectMapper.SetInputData(selectRect)
rectActor = vtk.vtkActor2D()
rectActor.SetMapper(rectMapper)
# Create a sphere
sphere = vtk.vtkSphereSource()
sphereMapper = vtk.vtkPolyDataMapper()
sphereMapper.SetInputConnection(sphere.GetOutputPort())
sphereActor = vtk.vtkActor()
sphereActor.SetMapper(sphereMapper)
# Generate ids for labeling
ids = vtk.vtkIdFilter()
ids.SetInputConnection(sphere.GetOutputPort())
ids.PointIdsOn()
ids.CellIdsOn()
ids.FieldDataOn()
# Create labels for points
visPts = vtk.vtkSelectVisiblePoints()
visPts.SetInputConnection(ids.GetOutputPort())
visPts.SetRenderer(ren1)
visPts.SelectionWindowOn()
visPts.SetSelection(xmin, xmin + xLength, ymin, ymin + yLength)
ldm = vtk.vtkLabeledDataMapper()
ldm.SetInputConnection(visPts.GetOutputPort())
# ldm.SetLabelFormat("%g")
# ldm.SetLabelModeToLabelScalars()
# ldm.SetLabelModeToLabelNormals()
ldm.SetLabelModeToLabelFieldData()
# ldm.SetLabeledComponent(0)
pointLabels = vtk.vtkActor2D()
pointLabels.SetMapper(ldm)
# Create labels for cells
cc = vtk.vtkCellCenters()
cc.SetInputConnection(ids.GetOutputPort())
visCells = vtk.vtkSelectVisiblePoints()
visCells.SetInputConnection(cc.GetOutputPort())
visCells.SetRenderer(ren1)
visCells.SelectionWindowOn()
visCells.SetSelection(xmin, xmin + xLength, ymin, ymin + yLength)
cellMapper = vtk.vtkLabeledDataMapper()
cellMapper.SetInputConnection(visCells.GetOutputPort())
# cellMapper.SetLabelFormat("%g")
# cellMapper.SetLabelModeToLabelScalars()
# cellMapper.SetLabelModeToLabelNormals()
cellMapper.SetLabelModeToLabelFieldData()
cellMapper.GetLabelTextProperty().SetColor(0, 1, 0)
cellLabels = vtk.vtkActor2D()
cellLabels.SetMapper(cellMapper)
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(sphereActor)
ren1.AddActor2D(rectActor)
ren1.AddActor2D(pointLabels)
ren1.AddActor2D(cellLabels)
ren1.SetBackground(1, 1, 1)
renWin.SetSize(500, 500)
renWin.Render()
# render the image
#
def PlaceWindow (xmin, ymin):
global xLength, yLength
xmax = xmin + xLength
ymax = ymin + yLength
visPts.SetSelection(xmin, xmax, ymin, ymax)
visCells.SetSelection(xmin, xmax, ymin, ymax)
pts.InsertPoint(0, xmin, ymin, 0)
pts.InsertPoint(1, xmax, ymin, 0)
pts.InsertPoint(2, xmax, ymax, 0)
pts.InsertPoint(3, xmin, ymax, 0)
pts.Modified()
# because insertions don't modify object - performance reasons
renWin.Render()
def MoveWindow ():
y = 100
while y < 300:
x = 100
while x < 300:
PlaceWindow(x, y)
x += 25
y += 25
MoveWindow()
PlaceWindow(xmin, ymin)
#iren.Start()
| 24.731034
| 66
| 0.754322
|
5d53ef86436aec0e2952ea793119166be04857eb
| 10,429
|
py
|
Python
|
bela/lex.py
|
BLIPNTU/bela
|
49cd9087bef88020af28a04eb67ab7dc09e17753
|
[
"MIT"
] | 2
|
2022-01-26T14:11:45.000Z
|
2022-03-08T06:54:00.000Z
|
bela/lex.py
|
BLIPNTU/bela
|
49cd9087bef88020af28a04eb67ab7dc09e17753
|
[
"MIT"
] | null | null | null |
bela/lex.py
|
BLIPNTU/bela
|
49cd9087bef88020af28a04eb67ab7dc09e17753
|
[
"MIT"
] | 1
|
2022-03-16T11:31:40.000Z
|
2022-03-16T11:31:40.000Z
|
# -*- coding: utf-8 -*-
# This code is a part of BELA package: https://github.com/letuananh/bela
# :developer: Le Tuan Anh <tuananh.ke@gmail.com>
# :license: MIT, see LICENSE for more details.
"""
Lexical Analyser
"""
import logging
from pathlib import Path
from collections import defaultdict as dd
from collections import Counter
from chirptext import chio
from chirptext import ttl
from .common import tokenize
from .common import NLTK_AVAILABLE
from .common import _process_token, InvalidTokenException
def read_lexicon(lex_name):
p = Path(__file__).parent / 'data' / lex_name
forms = set()
with chio.open(p) as _lex_stream:
for line in _lex_stream:
if line.startswith("#"):
continue
else:
forms.add(line.strip())
return forms
# TODO add all BELA special keywords
_KEYWORDS = ['babyname', 'adultname', 'siblingname', 'strangername',
'...', 'english', 'chinese', 'malay', 'tamil']
if NLTK_AVAILABLE:
from nltk.corpus import words # English words
from nltk.corpus import stopwords
_ENGLISH_WORDS = set(words.words())
_ENGLISH_WORDS.update(stopwords.words('english'))
else:
_ENGLISH_WORDS = set()
_ENGLISH_WORDS.update(("'s", "ok", "'m", "'ll", "n't", "okay", "'re", "'d", "'ve"))
_MANDARIN_WORDS = read_lexicon('cmn_lex.txt.gz')
_MALAY_WORDS = read_lexicon('msa_lex.txt.gz')
# [2021-03-11 木 11:48]
# Adapted from https://github.com/letuananh/lelesk/blob/master/lelesk/util.py
# LeLESK: MIT License
if NLTK_AVAILABLE:
# from nltk.tokenize import word_tokenize
from nltk import pos_tag
from nltk.stem import WordNetLemmatizer
wnl = WordNetLemmatizer()
def ptpos_to_wn(ptpos, default='x'):
''' Penn Treebank Project POS to WN '''
if ptpos.startswith('JJ'):
return 'a'
elif ptpos.startswith('NN'):
return 'n'
elif ptpos.startswith('RB'):
return 'r'
elif ptpos.startswith('VB'):
return 'v'
else:
return default
def _tokenize(words):
if NLTK_AVAILABLE:
tags = pos_tag(words)
tokens = [(w, t, wnl.lemmatize(w, pos=ptpos_to_wn(t, default='n'))) for w, t in tags]
return tokens
else:
return words
class LexicalAnalyser:
def __init__(self, lang_lex_map=None, word_only=False, ellipsis=False, non_word='', lemmatizer=True, **kwargs):
self.utterances = ttl.Document()
self.word_sent_map = dd(set)
self.lang_word_sent_map = dd(lambda: dd(set))
self.lang_word_speaker_map = dd(lambda: dd(set))
self.word_only = word_only
self.ellipsis = ellipsis
self.non_word = non_word
self.lemmatizer = lemmatizer
# setup built-in language-lexicon map
self.lang_lex_map = {
'English': _ENGLISH_WORDS,
'Mandarin': _MANDARIN_WORDS,
'Malay': _MALAY_WORDS
}
# allow custom language_map
self.__custom_lang_lex_map = lang_lex_map if lang_lex_map else {}
self.word_speaker_map = dd(set)
self.word_map = dd(Counter)
def analyse(self, external_tokenizer=True):
self.word_sent_map.clear()
self.word_map.clear()
for utterance in self.utterances:
language = utterance.tag.language.value
speaker = utterance.tag.speaker.value
# source = utterance.tag.source.value
tokens = [t.lower() for t in tokenize(
utterance.text, language=language,
ellipsis=self.ellipsis, non_word=self.non_word,
word_only=self.word_only, nlp_tokenizer=external_tokenizer)]
self.word_map[language].update(tokens)
for token in tokens:
self.word_speaker_map[token] = speaker
self.word_sent_map[token].add(utterance.text)
self.lang_word_speaker_map[language][token].add(speaker)
self.lang_word_sent_map[language][token].add(utterance.text)
def gen_type_token_map(self):
ratio_map = {}
for lang, counter in self.word_map.items():
count_token = len(list(counter.elements()))
count_type = len(counter)
ratio = count_token / count_type if count_type > 0 else 0
ratio_map[lang] = (count_token, count_type, ratio)
return ratio_map
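    # Illustrative note (not part of the original source): a language with 120 tokens
    # spread over 30 distinct types is reported as (120, 30, 4.0); a higher
    # token/type ratio means more repetition in that language's utterances.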
def gen_type_token_list(self):
_list = [(lang, count_token, count_type, ratio) for lang, (count_token, count_type, ratio) in self.gen_type_token_map().items()]
_list.sort(key=lambda x: -x[3])
return _list
def add(self, text, language, **kwargs):
sent = self.utterances.sents.new(text)
sent.tag.language = language
for k, v in kwargs.items():
sent.tag[k] = v
def is_special_token(self, word, language):
''' Determine if a given token is a special token (keywords, markup, etc.) '''
return word == '###' or word.startswith(':')
def is_unknown(self, word, language):
        '''Check whether a word is unknown, i.e. missing from the current lexicon'''
if word in _KEYWORDS:
return False
elif language in self.lang_lex_map:
if word in self.lang_lex_map[language]:
return False
elif language not in self.__custom_lang_lex_map:
return True
else:
return word not in self.__custom_lang_lex_map[language]
elif language in self.__custom_lang_lex_map:
return word not in self.__custom_lang_lex_map[language]
else:
return False
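    # Illustrative note (not part of the original source): assuming NLTK's English
    # word list is available, is_unknown("walk", "English") is False, while a made-up
    # form such as is_unknown("blorptle", "English") is True unless a custom English
    # lexicon supplied at construction time contains it.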
def to_dict(self, ignore_empty=True):
stats_dict = {'languages': [], 'lexicon': [], 'errors': []}
__lemmatize_error = False
for lang, count_token, count_type, ratio in self.gen_type_token_list():
if ignore_empty and not count_type and not count_token and not ratio:
continue
stats_dict['languages'].append({
'language': lang,
'types': count_type,
'tokens': count_token,
'ratio': round(ratio, 2)
})
for lang, counter in self.word_map.items():
lang_lexicon = {'language': lang, 'vocabs': []}
for word, freq in counter.most_common():
_is_special = self.is_special_token(word, lang)
if _is_special:
try:
_process_token(word)
_is_unknown = False
except InvalidTokenException:
_is_unknown = True
else:
lemma = word
# try to lemmatize if possible
if NLTK_AVAILABLE and self.lemmatizer and not __lemmatize_error and lang == 'English':
try:
__, tag = pos_tag([word])[0]
lemma = wnl.lemmatize(word, pos=ptpos_to_wn(tag, default='n'))
except Exception as e:
# logging.getLogger(__name__).exception("BELA.Lemmatizer crashed")
# do not lemmatize if NLTK crashed
__lemmatize_error = True
if isinstance(e, LookupError):
if 'omw-1.4' in str(e):
stats_dict['errors'].append(f'Lexicon was generated without lemmatizer. OMW-1.4 data not found.')
else:
stats_dict['errors'].append(f'Lexicon was generated without lemmatizer. Unknown resource missing.')
else:
stats_dict['errors'].append('Lexicon was generated without lemmatizer. Unknown error was raised.')
_is_unknown = self.is_unknown(lemma, lang)
_lex_entry = {
'word': word,
'freq': freq,
'sents': list(self.lang_word_sent_map[lang][word]),
'speakers': list(self.lang_word_speaker_map[lang][word]),
'special_code': _is_special,
'unknown_word': _is_unknown
}
lang_lexicon['vocabs'].append(_lex_entry)
if not ignore_empty or lang_lexicon['vocabs']:
stats_dict['lexicon'].append(lang_lexicon)
return stats_dict
class CorpusLexicalAnalyser:
''' Analyse a corpus text '''
def __init__(self, filepath=':memory:', lang_lex_map=None, word_only=False, lemmatizer=True, **kwargs):
self.filepath = filepath
self.word_only = word_only
self.lemmatizer = lemmatizer
self.__lang_lex_map = {} if lang_lex_map is None else lang_lex_map
self.profiles = dd(self._create_lex_analyzer)
def _create_lex_analyzer(self):
return LexicalAnalyser(lang_lex_map=self.__lang_lex_map,
word_only=self.word_only,
lemmatizer=self.lemmatizer)
def read(self, **kwargs):
''' Read the CSV file content specified by self.filepath '''
for text, language, source, speaker in chio.read_csv_iter(self.filepath, **kwargs):
self.add(text, language, source=source, speaker=speaker)
return self
def add(self, text, language, source='', speaker=''):
if text is None:
text = ''
if language is None:
language = ''
if source is None:
source = ''
if speaker is None:
speaker = ''
self.profiles['ALL'].add(text, language, source=source, speaker=speaker)
self.profiles[speaker].add(text, language, source=source, speaker=speaker)
def analyse(self, external_tokenizer=True):
''' Analyse all available profiles (i.e. speakers) '''
for profile in self.profiles.values():
profile.analyse(external_tokenizer=external_tokenizer)
return self
def to_dict(self):
''' Export analysed result as a JSON-ready object '''
profile_list = []
for pname in sorted(self.profiles.keys()):
profile = self.profiles[pname]
profile_list.append({
'name': pname,
'stats': profile.to_dict()
})
return profile_list
| 39.059925
| 136
| 0.587784
|
f3f7ca17dc94a0e8163b08c43f193a17c5bf3785
| 3,505
|
py
|
Python
|
testing_selenium_capabilities/tests/test_template.py
|
ikostan/SELENIUM_WEBDRIVER_WORKING_WITH_ELEMENTS
|
5039e777e4831d1a89d4056047c24e54bbfec5b3
|
[
"Unlicense"
] | null | null | null |
testing_selenium_capabilities/tests/test_template.py
|
ikostan/SELENIUM_WEBDRIVER_WORKING_WITH_ELEMENTS
|
5039e777e4831d1a89d4056047c24e54bbfec5b3
|
[
"Unlicense"
] | 1
|
2021-06-02T00:02:27.000Z
|
2021-06-02T00:02:27.000Z
|
testing_selenium_capabilities/tests/test_template.py
|
ikostan/SELENIUM_WEBDRIVER_WORKING_WITH_ELEMENTS
|
5039e777e4831d1a89d4056047c24e54bbfec5b3
|
[
"Unlicense"
] | 1
|
2019-08-07T01:37:25.000Z
|
2019-08-07T01:37:25.000Z
|
import unittest
import datetime
from drivers.driver import Driver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class MyTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.driver = None
cls.test_url = ''
cls.test_title = ''
def setUp(self):
if self.driver is not None:
self.driver.quit()
self.driver = None
@unittest.skip('not ready')
def test_chrome(self):
browser = 'chrome'
self.generic_method(browser)
@unittest.skip('not ready')
def test_ie(self):
browser = 'ie'
self.generic_method(browser)
@unittest.skip('not ready')
def test_opera(self):
browser = 'opera'
self.generic_method(browser)
@unittest.skip('not ready')
def test_mozilla(self):
browser = 'mozilla'
self.generic_method(browser)
@unittest.skip('not ready')
def test_edge(self):
browser = 'edge'
self.generic_method(browser)
def generic_method(self, browser):
try:
self.open_test_web_page(browser)
except Exception as ec:
print('\nERROR: {}'.format(ec))
self.take_screen_shot()
raise
def open_test_web_page(self, browser):
# Open test web page and verify URL + Title
self.driver = Driver(browser).get_driver()
self.driver.get(self.test_url)
self.driver.maximize_window()
WebDriverWait(self.driver, 15).until(EC.title_contains(self.test_title))
self.assertEqual(self.test_url, self.driver.current_url)
self.assertEqual(self.test_title, self.driver.title)
def take_screen_shot(self):
"""Take a Screen-shot of the webpage when test Failed."""
now = datetime.datetime.now()
filename = 'screenshot-{}-{}.png'.format(self.driver.name,
datetime.datetime.strftime(now, '%Y-%m-%d_%H-%M-%S'))
self.driver.save_screenshot(filename)
print('\nScreenshot saved as {}'.format(filename))
def screenshots_collector(self):
'''
Collect all screenshots and put them into screenshots directory
:return:
'''
import os
import shutil
screenshots_folder = 'screenshots'
if not os.path.exists(os.curdir + '\\screenshots'):
os.mkdir(screenshots_folder)
now = datetime.datetime.now()
folder_name = '{}\\screenshots_{}_{}'.format(screenshots_folder,
self.driver.name,
datetime.datetime.strftime(now, '%Y-%m-%d_%H-%M-%S'))
files = os.listdir(os.curdir)
for file in files:
if '.png' in str(file):
if not os.path.exists(os.curdir + '\\' + folder_name):
os.mkdir(folder_name)
shutil.move(file.split('\\')[-1], os.curdir + '\\' + folder_name)
def tearDown(self):
self.screenshots_collector()
self.driver.stop_client()
self.driver.close()
@classmethod
def tearDownClass(cls):
if cls.driver is not None:
cls.driver.quit()
| 31.017699
| 106
| 0.602568
|
1a04aafeb0a59c6303deb04828a2a12be6da0a67
| 303
|
py
|
Python
|
data/multilingual/Latn.NOB/Mono_12/pdf_to_json_test_Latn.NOB_Mono_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T19:47:35.000Z
|
2021-09-19T19:47:35.000Z
|
data/multilingual/Latn.NOB/Mono_12/pdf_to_json_test_Latn.NOB_Mono_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
data/multilingual/Latn.NOB/Mono_12/pdf_to_json_test_Latn.NOB_Mono_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.NOB/Mono_12/udhr_Latn.NOB_Mono_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.3
| 73
| 0.811881
|
d6e47199339be553f3810ff709dd04f5f2692164
| 9,372
|
py
|
Python
|
SCRAPE/Lib/site-packages/twisted/trial/_dist/worker.py
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
09f6abfc3bcf10ee28f486d83b450c89a07e066e
|
[
"MIT"
] | 4,612
|
2015-01-01T12:57:23.000Z
|
2022-03-30T01:08:23.000Z
|
SCRAPE/Lib/site-packages/twisted/trial/_dist/worker.py
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
09f6abfc3bcf10ee28f486d83b450c89a07e066e
|
[
"MIT"
] | 1,243
|
2015-01-23T17:23:59.000Z
|
2022-03-28T13:46:17.000Z
|
SCRAPE/Lib/site-packages/twisted/trial/_dist/worker.py
|
Chinmoy-Prasad-Dutta/scrapy_scraper
|
09f6abfc3bcf10ee28f486d83b450c89a07e066e
|
[
"MIT"
] | 1,236
|
2015-01-13T14:41:26.000Z
|
2022-03-17T07:12:36.000Z
|
# -*- test-case-name: twisted.trial._dist.test.test_worker -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module implements the worker classes.
@since: 12.3
"""
import os
from zope.interface import implementer
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IAddress, ITransport
from twisted.internet.protocol import ProcessProtocol
from twisted.protocols.amp import AMP
from twisted.python.failure import Failure
from twisted.python.reflect import namedObject
from twisted.trial._dist import (
_WORKER_AMP_STDIN,
_WORKER_AMP_STDOUT,
managercommands,
workercommands,
)
from twisted.trial._dist.workerreporter import WorkerReporter
from twisted.trial.runner import TestLoader, TrialSuite
from twisted.trial.unittest import Todo
class WorkerProtocol(AMP):
"""
The worker-side trial distributed protocol.
"""
def __init__(self, forceGarbageCollection=False):
self._loader = TestLoader()
self._result = WorkerReporter(self)
self._forceGarbageCollection = forceGarbageCollection
def run(self, testCase):
"""
Run a test case by name.
"""
case = self._loader.loadByName(testCase)
suite = TrialSuite([case], self._forceGarbageCollection)
suite.run(self._result)
return {"success": True}
workercommands.Run.responder(run)
def start(self, directory):
"""
        Set up the worker, moving into the given directory so that tests run
        inside it.
"""
os.chdir(directory)
return {"success": True}
workercommands.Start.responder(start)
class LocalWorkerAMP(AMP):
"""
Local implementation of the manager commands.
"""
def addSuccess(self, testName):
"""
Add a success to the reporter.
"""
self._result.addSuccess(self._testCase)
return {"success": True}
managercommands.AddSuccess.responder(addSuccess)
def _buildFailure(self, error, errorClass, frames):
"""
Helper to build a C{Failure} with some traceback.
@param error: An C{Exception} instance.
@param errorClass: The class name of the C{error} class.
        @param frames: A flat list of strings representing the information needed
            to approximately rebuild C{Failure} frames.
@return: A L{Failure} instance with enough information about a test
error.
"""
errorType = namedObject(errorClass)
failure = Failure(error, errorType)
for i in range(0, len(frames), 3):
failure.frames.append(
(frames[i], frames[i + 1], int(frames[i + 2]), [], [])
)
return failure
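    # Illustrative note (not part of the original module): a flat frames list such as
    # ["f", "mod.py", "12", "g", "mod.py", "34"] is rebuilt into two Failure frames,
    # ("f", "mod.py", 12, [], []) and ("g", "mod.py", 34, [], []).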
def addError(self, testName, error, errorClass, frames):
"""
Add an error to the reporter.
"""
failure = self._buildFailure(error, errorClass, frames)
self._result.addError(self._testCase, failure)
return {"success": True}
managercommands.AddError.responder(addError)
def addFailure(self, testName, fail, failClass, frames):
"""
Add a failure to the reporter.
"""
failure = self._buildFailure(fail, failClass, frames)
self._result.addFailure(self._testCase, failure)
return {"success": True}
managercommands.AddFailure.responder(addFailure)
def addSkip(self, testName, reason):
"""
Add a skip to the reporter.
"""
self._result.addSkip(self._testCase, reason)
return {"success": True}
managercommands.AddSkip.responder(addSkip)
def addExpectedFailure(self, testName, error, todo):
"""
Add an expected failure to the reporter.
"""
_todo = Todo(todo)
self._result.addExpectedFailure(self._testCase, error, _todo)
return {"success": True}
managercommands.AddExpectedFailure.responder(addExpectedFailure)
def addUnexpectedSuccess(self, testName, todo):
"""
Add an unexpected success to the reporter.
"""
self._result.addUnexpectedSuccess(self._testCase, todo)
return {"success": True}
managercommands.AddUnexpectedSuccess.responder(addUnexpectedSuccess)
def testWrite(self, out):
"""
Print test output from the worker.
"""
self._testStream.write(out + "\n")
self._testStream.flush()
return {"success": True}
managercommands.TestWrite.responder(testWrite)
def _stopTest(self, result):
"""
Stop the current running test case, forwarding the result.
"""
self._result.stopTest(self._testCase)
return result
def run(self, testCase, result):
"""
Run a test.
"""
self._testCase = testCase
self._result = result
self._result.startTest(testCase)
testCaseId = testCase.id()
d = self.callRemote(workercommands.Run, testCase=testCaseId)
return d.addCallback(self._stopTest)
def setTestStream(self, stream):
"""
Set the stream used to log output from tests.
"""
self._testStream = stream
@implementer(IAddress)
class LocalWorkerAddress:
"""
A L{IAddress} implementation meant to provide stub addresses for
L{ITransport.getPeer} and L{ITransport.getHost}.
"""
@implementer(ITransport)
class LocalWorkerTransport:
"""
A stub transport implementation used to support L{AMP} over a
L{ProcessProtocol} transport.
"""
def __init__(self, transport):
self._transport = transport
def write(self, data):
"""
Forward data to transport.
"""
self._transport.writeToChild(_WORKER_AMP_STDIN, data)
def writeSequence(self, sequence):
"""
Emulate C{writeSequence} by iterating data in the C{sequence}.
"""
for data in sequence:
self._transport.writeToChild(_WORKER_AMP_STDIN, data)
def loseConnection(self):
"""
Closes the transport.
"""
self._transport.loseConnection()
def getHost(self):
"""
Return a L{LocalWorkerAddress} instance.
"""
return LocalWorkerAddress()
def getPeer(self):
"""
Return a L{LocalWorkerAddress} instance.
"""
return LocalWorkerAddress()
class LocalWorker(ProcessProtocol):
"""
Local process worker protocol. This worker runs as a local process and
communicates via stdin/out.
@ivar _ampProtocol: The L{AMP} protocol instance used to communicate with
the worker.
@ivar _logDirectory: The directory where logs will reside.
@ivar _logFile: The name of the main log file for tests output.
"""
def __init__(self, ampProtocol, logDirectory, logFile):
self._ampProtocol = ampProtocol
self._logDirectory = logDirectory
self._logFile = logFile
self.endDeferred = Deferred()
def connectionMade(self):
"""
When connection is made, create the AMP protocol instance.
"""
self._ampProtocol.makeConnection(LocalWorkerTransport(self.transport))
if not os.path.exists(self._logDirectory):
os.makedirs(self._logDirectory)
self._outLog = open(os.path.join(self._logDirectory, "out.log"), "wb")
self._errLog = open(os.path.join(self._logDirectory, "err.log"), "wb")
# Log data is received via AMP which is UTF-8 unicode.
# The log file should be written using a Unicode encoding, and not
# the default system encoding which might not be Unicode compatible.
self._testLog = open(
os.path.join(self._logDirectory, self._logFile),
"w",
encoding="utf-8",
errors="strict",
)
self._ampProtocol.setTestStream(self._testLog)
logDirectory = self._logDirectory
d = self._ampProtocol.callRemote(workercommands.Start, directory=logDirectory)
# Ignore the potential errors, the test suite will fail properly and it
# would just print garbage.
d.addErrback(lambda x: None)
def connectionLost(self, reason):
"""
        On connection lost, close the log files that we're managing for stdout,
        stderr, and test output.
"""
self._outLog.close()
self._errLog.close()
self._testLog.close()
def processEnded(self, reason):
"""
When the process closes, call C{connectionLost} for cleanup purposes
and forward the information to the C{_ampProtocol}.
"""
self.connectionLost(reason)
self._ampProtocol.connectionLost(reason)
self.endDeferred.callback(reason)
def outReceived(self, data):
"""
Send data received from stdout to log.
"""
self._outLog.write(data)
def errReceived(self, data):
"""
Write error data to log.
"""
self._errLog.write(data)
def childDataReceived(self, childFD, data):
"""
Handle data received on the specific pipe for the C{_ampProtocol}.
"""
if childFD == _WORKER_AMP_STDOUT:
self._ampProtocol.dataReceived(data)
else:
ProcessProtocol.childDataReceived(self, childFD, data)
| 29.564669
| 86
| 0.637431
|
519bbb1b087b99a7129a1eb0d99da5eff96f6331
| 2,923
|
py
|
Python
|
src/code/plots/thesis/dg_growth.py
|
dvaruas/minority_recommendations
|
8adcbf5af5c322e4b20d4336b12ecda62a5c4d5f
|
[
"MIT"
] | null | null | null |
src/code/plots/thesis/dg_growth.py
|
dvaruas/minority_recommendations
|
8adcbf5af5c322e4b20d4336b12ecda62a5c4d5f
|
[
"MIT"
] | null | null | null |
src/code/plots/thesis/dg_growth.py
|
dvaruas/minority_recommendations
|
8adcbf5af5c322e4b20d4336b12ecda62a5c4d5f
|
[
"MIT"
] | null | null | null |
# This plot is to get the degree growth for synthetic growth networks.
# Minority fraction = [0.1, 0.2, 0.3, 0.4, 0.5] (x-axis)
# homophily values = [0.0, 0.2, 0.5, 0.8, 1.0] (y-axis)
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir))
#--------------------------------------------------------------------------------
import matplotlib.pyplot as plt
from code.plot_computations.comp_job import ComputationJob
from code.plot_computations.degree_growth import DegreeGrowthPlotPoints
from code.growth_network.growth_data_manager import GrowNetwork
if __name__ == "__main__":
minority_fractions = [0.1, 0.2, 0.3, 0.4, 0.5]
homophilies = [0.0, 0.2, 0.5, 0.8, 1.0]
method = "top_rank" #["pa_homophily", "adamic_adar", "twitter_rank", "ranked_bandit", "top_rank"]
ranking_control = 1.0
fig, ax = plt.subplots(nrows=len(minority_fractions), ncols=len(homophilies), sharex=True, sharey=True, figsize=(11, 12))
obj = GrowNetwork()
for i, f in enumerate(minority_fractions):
for j, h in enumerate(homophilies):
params ={"homophily" : h,
"minority_probability" : f,
"ranking_control" : ranking_control if method in ["ranked_bandit", "top_rank"] else 0.0,
"job_type" : method,
"parameter_profile" : 0}
paths = obj.get_job_paths(all_params=params)
comp_jobs = []
for path in paths:
cobj = ComputationJob()
cobj.set_log_path(path=os.path.join(path, "raw", "logfile.txt"))
comp_jobs.append(cobj)
plot_data = DegreeGrowthPlotPoints.get_plot_points(job_objs=comp_jobs)
ax[i,j].plot(plot_data["minority"]["x"], plot_data["minority"]["y"], "r", label="minority")
ax[i,j].plot(plot_data["majority"]["x"], plot_data["majority"]["y"], "b", label="majority")
ax[i,j].tick_params(labelsize='large')
if (i == len(minority_fractions) - 1):
ax[i,j].set_xlabel("$t/t_{0}$", fontsize=12)
if (j == 0):
ax[i,j].set_ylabel("$\delta(t) / \delta(t_{0})$", fontsize=12)
plt.figtext(0.11, 0.97, "h = 0.0", fontsize=12)
plt.figtext(0.29, 0.97, "h = 0.2", fontsize=12)
plt.figtext(0.47, 0.97, "h = 0.5", fontsize=12)
plt.figtext(0.65, 0.97, "h = 0.8", fontsize=12)
plt.figtext(0.85, 0.97, "h = 1.0", fontsize=12)
plt.figtext(0.97, 0.13, "f = 0.5", fontsize=12, rotation=-90)
plt.figtext(0.97, 0.3, "f = 0.4", fontsize=12, rotation=-90)
plt.figtext(0.97, 0.48, "f = 0.3", fontsize=12, rotation=-90)
plt.figtext(0.97, 0.66, "f = 0.2", fontsize=12, rotation=-90)
plt.figtext(0.97, 0.84, "f = 0.1", fontsize=12, rotation=-90)
plt.subplots_adjust(left=0.05, bottom=0.06, right=0.95, top=0.95, wspace=0.1, hspace=0.1)
plt.show()
| 47.918033
| 125
| 0.588779
|
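For context on the axis labels in the script above: each panel plots a group's degree gain normalized by its value at the reference time t0. A tiny sketch of that normalization, assuming DegreeGrowthPlotPoints returns a raw degree-gain series; the numbers below are made up.

import numpy as np

t = np.arange(1, 11)                                                   # observation times
delta = np.array([2.0, 3.1, 4.0, 4.8, 5.5, 6.1, 6.6, 7.0, 7.4, 7.7])  # degree gain per time

x = t / t[0]          # t / t_0 on the x-axis
y = delta / delta[0]  # delta(t) / delta(t_0) on the y-axis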
c5575f12a27b27ee362320f4c33425e27a28c063
| 1,710
|
py
|
Python
|
test/test_ip_availabilities_list.py
|
displague/metal-python
|
96e64e9ac41025d85ff6f61693165e29e1c366db
|
[
"MIT"
] | null | null | null |
test/test_ip_availabilities_list.py
|
displague/metal-python
|
96e64e9ac41025d85ff6f61693165e29e1c366db
|
[
"MIT"
] | 3
|
2021-09-27T05:10:36.000Z
|
2021-09-27T06:10:57.000Z
|
test/test_ip_availabilities_list.py
|
displague/metal-python
|
96e64e9ac41025d85ff6f61693165e29e1c366db
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Metal API
This is the API for Equinix Metal. The API allows you to programmatically interact with all of your Equinix Metal resources, including devices, networks, addresses, organizations, projects, and your user account. The official API docs are hosted at <https://metal.equinix.com/developers/api>. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: support@equinixmetal.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import metal
from metal.types.ip_availabilities_list import IPAvailabilitiesList # noqa: E501
from metal.rest import ApiException
class TestIPAvailabilitiesList(unittest.TestCase):
"""IPAvailabilitiesList unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test IPAvailabilitiesList
        include_optional is a boolean: when False only required
        params are included, when True both required and
        optional params are included.
        """
# model = metal.models.ip_availabilities_list.IPAvailabilitiesList() # noqa: E501
        if include_optional:
return IPAvailabilitiesList(
available = [
''
]
)
        else:
return IPAvailabilitiesList(
)
def testIPAvailabilitiesList(self):
"""Test IPAvailabilitiesList"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| 31.090909
| 312
| 0.678947
|
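The generated stub above leaves the assertions for the maintainer to fill in. A hedged sketch of a completed test method, intended to live on TestIPAvailabilitiesList; attribute-style access to `available` is an assumption about the generated model, and the CIDR strings are made up.

    def test_available_round_trip(self):
        """Construct the model with data and read the field back."""
        blocks = ["10.0.0.0/31", "10.0.0.2/31"]
        inst = IPAvailabilitiesList(available=blocks)
        self.assertEqual(inst.available, blocks)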
a2a7cb6a8f371b48c84aaa69bbff95190fdd603c
| 11,046
|
py
|
Python
|
userbot/plugins/solarsystem.py
|
ghion266/SensibleUserbot
|
16ad83206fa14fe4315143fa8a94e5687eb06fcb
|
[
"MIT"
] | 44
|
2021-01-11T13:33:48.000Z
|
2022-02-05T17:53:33.000Z
|
userbot/plugins/solarsystem.py
|
ghion266/SensibleUserbot
|
16ad83206fa14fe4315143fa8a94e5687eb06fcb
|
[
"MIT"
] | 5
|
2020-08-25T15:58:13.000Z
|
2021-02-09T09:57:57.000Z
|
userbot/plugins/solarsystem.py
|
ghion266/SensibleUserbot
|
16ad83206fa14fe4315143fa8a94e5687eb06fcb
|
[
"MIT"
] | 226
|
2020-02-25T05:58:57.000Z
|
2022-03-12T04:12:33.000Z
|
from telethon import events
import asyncio
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern=r"solarsystem"))
async def _(event):
if event.fwd_from:
return
animation_interval = 0.1
animation_ttl = range(0, 549755813888)
await event.edit("Solar")
animation_chars = [
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️🌎◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n🌕◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️☀\n◼️◼️◼️◼️◼️`",
"`◼️🌕◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️☀◼️`",
"`◼️◼️◼️🌕◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️☀◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️🌎◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️◼️◼️◼️`",
"`◼️◼️◼️◼️◼️\n☀◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️🌕\n◼️◼️◼️◼️◼️`",
"`◼️☀◼️◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️◼️◼️🌕◼️`",
"`◼️◼️◼️☀◼️\n◼️◼️◼️◼️◼️\n◼️◼️🌎◼️◼️\n◼️◼️◼️◼️◼️\n◼️🌕◼️◼️◼️`",
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
        # Wrap the index by the frame count so the loop cannot run past the list.
        await event.edit(animation_chars[i % len(animation_chars)])
| 66.945455
| 76
| 0.080844
|
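The loop above indexes a fixed frame list by iteration count; an equivalent and slightly safer way to express the same animation is to cycle the frames explicitly, so the index can never run past the list. A self-contained sketch, with the frame strings and loop count left as placeholders:

import asyncio
from itertools import cycle

async def animate(event, frames, interval=0.1, repeats=10):
    """Edit the message through `frames` in order, `repeats` times."""
    frame_iter = cycle(frames)
    for _ in range(repeats * len(frames)):
        await asyncio.sleep(interval)
        await event.edit(next(frame_iter))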
6a5ec02889d3e5098dc3073d5c220a9693f85494
| 8,403
|
py
|
Python
|
dashboard/apps/app1.py
|
gamyers/solar-697
|
90ca38072456af385c98b1bdf3c3d563e2c71f15
|
[
"MIT"
] | 1
|
2021-08-24T00:00:23.000Z
|
2021-08-24T00:00:23.000Z
|
dashboard/apps/app1.py
|
gamyers/solar-697
|
90ca38072456af385c98b1bdf3c3d563e2c71f15
|
[
"MIT"
] | null | null | null |
dashboard/apps/app1.py
|
gamyers/solar-697
|
90ca38072456af385c98b1bdf3c3d563e2c71f15
|
[
"MIT"
] | 2
|
2021-08-30T20:36:36.000Z
|
2021-11-02T19:13:33.000Z
|
import sqlite3
import sys
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import logzero
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import yaml
from app import app
from dash.dependencies import Input, Output
from dash_table import DataTable
from logzero import logger
sys.path.append("../source")
import queries
import plot_tools
import ts_tools
# open and retrieve configuration data
try:
with open("../source/config.yml", "r") as config_in:
cfg = yaml.load(config_in, Loader=yaml.SafeLoader)
logger.info(f"{cfg}\n")
except (OSError, yaml.YAMLError) as exc:
    logger.error(f"config file open failure: {exc}")
exit(1)
db_path = cfg["file_paths"]["db_path"]
db_files = ts_tools.get_db_files(db_path)
logger.info(f"DB Path: {db_path}\n{db_files}\n")
# --------------------------begin layout--------------------------#
layout_app1 = html.Div(
[
# Dropdown row 0
dbc.Row(
[
dbc.Col(
dcc.Dropdown(
id="dd-db-selection",
options=[{"label": db, "value": db} for db in db_files],
value=cfg["file_names"]["default_db"],
placeholder="Select a database",
persistence=True,
persistence_type="session",
),
width={"size": 2, "offset": 0},
),
dbc.Col(
[
dcc.Dropdown(
id="dd-zipcode-selection",
placeholder="Select a Zip Code",
persistence=True,
),
html.H5(id="dd-zipcode-selection-locale"),
],
width={"size": 2, "offset": 1},
),
],
),
# Plots row 1
dbc.Row(
[
dbc.Col(
[
html.H6(
"Data View",
style={"display": "inline-block", "textAlign": "center"},
),
dcc.Graph(id="graph-data-view"),
],
width={"size": 6},
),
dbc.Col(
[
html.H6(
"Distribution View",
style={"display": "inline-block", "textAlign": "center"},
),
dcc.Graph(id="graph-dist-view"),
],
width={"size": 5},
),
],
),
# Plots row 2
dbc.Row(
[
dbc.Col(
[
html.H6(
"Meteorological View",
style={"display": "inline-block", "textAlign": "center"},
),
dcc.Graph(
id="graph-meteoro-view",
),
],
width={"size": 6, "offset": 0},
),
dbc.Col(
[
html.H6(
"Desciptive Statistics",
style={
"display": "inline-block",
"textAlign": "center",
},
),
DataTable(
id="table-desc-stats",
style_table={
"height": "395px",
},
                        style_cell={
                            "backgroundColor": "black",
                            "color": "white",
                        },
                        style_header={
                            "backgroundColor": "black",
                            "color": "gold",
                            "fontWeight": "bold",
                        },
),
],
width={"size": 5},
),
],
),
],
)
# --------------------------begin callbacks--------------------------#
@app.callback(
Output("dd-zipcode-selection", "options"),
Input("dd-db-selection", "value"),
)
def get_zipcodes(file_name):
logger.info(f"get_zipcodes callback: {file_name}")
conn = ts_tools.get_db_connection(db_path, file_name)
zipcodes = ts_tools.get_db_zipcodes(conn)
conn.close()
logger.info(f"app1 zipcodes retrieved\n{zipcodes}")
# return the list object to properly populate the dropdown!
return [{"label": zipcode, "value": zipcode} for zipcode in zipcodes]
# -------------------------------------------------------------------#
# @app.callback(
# Output("dd-zipcode-selection", "value"),
# [
# Input("dd-zipcode-selection", "options"),
# ],
# )
# def set_zipcode_value(options):
# logger.info(f"app1 zipcode selected: {options[0]['value']}")
# return options[0]["value"]
# -------------------------------------------------------------------#
@app.callback(
# [
Output("graph-data-view", "figure"),
Output("graph-dist-view", "figure"),
Output("graph-meteoro-view", "figure"),
Output("table-desc-stats", "data"),
Output("table-desc-stats", "columns"),
Output("dd-zipcode-selection-locale", "children"),
# -------------------------------------
Input("dd-db-selection", "value"),
Input("dd-zipcode-selection", "value"),
)
def graph_output(db_filename, zipcode):
cntx = dash.callback_context
context = cntx.triggered[0]["prop_id"].split(".")[0]
logger.info(f"app1 graph_output #1 Context = {context}\n")
# print(f"app1 graph_output #1 Context: {context}")
if context == "dd-db-selection":
conn = ts_tools.get_db_connection(db_path, db_filename)
# zipcodes = ts_tools.get_db_zipcodes(conn)
# zipcode = zipcodes[0]
locale_data = ts_tools.get_locale_data(conn, zipcode)
df = ts_tools.get_irr_data(conn, zipcode)
logger.info(f"app1 Made if: {db_filename}, {zipcode}")
elif context == "dd-zipcode-selection":
conn = ts_tools.get_db_connection(db_path, db_filename)
locale_data = ts_tools.get_locale_data(conn, zipcode)
df = ts_tools.get_irr_data(conn, zipcode)
logger.info(f"app1 Made elif: {db_filename}, {zipcode}")
else:
db_filename = cfg["file_names"]["default_db"]
conn = ts_tools.get_db_connection(db_path, db_filename)
zipcodes = ts_tools.get_db_zipcodes(conn)
if not zipcode:
zipcode = zipcodes[0]
locale_data = ts_tools.get_locale_data(conn, zipcode)
df = ts_tools.get_irr_data(conn, zipcode)
logger.info(f"app1 Made else: {db_filename}, {zipcode}")
logger.info(f"app1 passed if/elif/else")
df_desc = df.describe().transpose().round(decimals=2).reset_index(drop=False)
df_desc.rename(columns={"index": "feature"}, inplace=True)
df_desc.insert(loc=1, column="unit", value=[value for value in cfg["data_units"].values()])
desc_columns = [{"id": col, "name": col} for col in df_desc.columns]
logger.info(f"app1 passed df_desc")
title1 = "Irradiance Data"
fig1 = plot_tools.plot_irradiance(
df, title=title1, zipcode=zipcode, irr_columns=cfg["irradiance_columns"], locale=locale_data
)
logger.info(f"app1 passed {title1}")
title2 = "Data Distributions"
fig2 = plot_tools.plot_histograms(
df,
title=title2,
zipcode=zipcode,
)
logger.info(f"app1 passed {title2}")
title3 = "Meteorological Conditions"
fig3 = plot_tools.plot_multi_line(
df,
title=title3,
locale=locale_data,
columns=cfg["meteorological_fields"],
)
logger.info(f"app1 passed {title3}")
return (
fig1,
fig2,
fig3,
df_desc.to_dict("records"),
desc_columns,
f"{locale_data[0]}, {locale_data[2]}",
)
| 32.952941
| 100
| 0.471855
|
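graph_output above inspects dash.callback_context to learn which dropdown fired before deciding how to reload data. A stripped-down sketch of that pattern, reusing the dropdown ids from the layout above; the output id is made up and `app` is assumed to be the Dash app imported at the top of the module.

import dash
from dash.dependencies import Input, Output

@app.callback(
    Output("status-text", "children"),
    Input("dd-db-selection", "value"),
    Input("dd-zipcode-selection", "value"),
)
def show_trigger(db_value, zip_value):
    # prop_id looks like "dd-db-selection.value"; it is empty on the initial call.
    trigger = dash.callback_context.triggered[0]["prop_id"].split(".")[0]
    return f"last changed: {trigger or 'initial load'}"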
e96cfab2bcc41c52643b1b66805b3431e547d7aa
| 14,914
|
py
|
Python
|
lale/lib/sklearn/random_forest_regressor.py
|
chiragsahni/lale
|
a04adcdc14f2e05b846ebd5767186b66fe20463f
|
[
"Apache-2.0"
] | null | null | null |
lale/lib/sklearn/random_forest_regressor.py
|
chiragsahni/lale
|
a04adcdc14f2e05b846ebd5767186b66fe20463f
|
[
"Apache-2.0"
] | null | null | null |
lale/lib/sklearn/random_forest_regressor.py
|
chiragsahni/lale
|
a04adcdc14f2e05b846ebd5767186b66fe20463f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sklearn
import sklearn.ensemble
import lale.docstrings
import lale.operators
_hyperparams_schema = {
"description": "A random forest regressor.",
"allOf": [
{
"type": "object",
"required": [
"n_estimators",
"criterion",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"max_features",
"bootstrap",
],
"relevantToOptimizer": [
"n_estimators",
"criterion",
"max_depth",
"min_samples_split",
"min_samples_leaf",
"max_features",
"bootstrap",
],
"additionalProperties": False,
"properties": {
"n_estimators": {
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 10,
"maximumForOptimizer": 100,
"default": 10,
"description": "The number of trees in the forest.",
},
"criterion": {
"anyOf": [
{"enum": ["mae"], "forOptimizer": False},
{"enum": ["mse", "friedman_mse"]},
],
"default": "mse",
"description": "The function to measure the quality of a split.",
},
"max_depth": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 5,
},
{
"enum": [None],
"description": "Nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples.",
},
],
"default": None,
"description": "The maximum depth of the tree.",
},
"min_samples_split": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"laleMaximum": "X/maxItems", # number of rows
"minimumForOptimizer": 2,
"maximumForOptimizer": 5,
"forOptimizer": False,
"description": "Consider min_samples_split as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"maximum": 1.0,
"minimumForOptimizer": 0.01,
"maximumForOptimizer": 0.5,
"default": 0.05,
"description": "min_samples_split is a fraction and ceil(min_samples_split * n_samples) are the minimum number of samples for each split.",
},
],
"default": 2,
"description": "The minimum number of samples required to split an internal node.",
},
"min_samples_leaf": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"laleMaximum": "X/maxItems", # number of rows
"minimumForOptimizer": 1,
"maximumForOptimizer": 5,
"forOptimizer": False,
"description": "Consider min_samples_leaf as the minimum number.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 0.01,
"maximum": 0.5,
"default": 0.05,
"description": "min_samples_leaf is a fraction and ceil(min_samples_leaf * n_samples) are the minimum number of samples for each node.",
},
],
"default": 1,
"description": "The minimum number of samples required to be at a leaf node.",
},
"min_weight_fraction_leaf": {
"type": "number",
"minimum": 0.0,
"maximum": 0.5,
"default": 0.0,
"description": "The minimum weighted fraction of the sum total of weights (of all the input samples) required to be at a leaf node. Samples have equal weight when sample_weight is not provided.",
},
"max_features": {
"anyOf": [
{
"type": "integer",
"minimum": 2,
"forOptimizer": False,
"laleMaximum": "X/items/maxItems", # number of columns
"description": "Consider max_features features at each split.",
},
{
"type": "number",
"minimum": 0.0,
"exclusiveMinimum": True,
"minimumForOptimizer": 0.01,
"maximum": 1.0,
"default": 0.5,
"distribution": "uniform",
"description": "max_features is a fraction and int(max_features * n_features) features are considered at each split.",
},
{"enum": ["auto", "sqrt", "log2", None]},
],
"default": "auto",
"description": "The number of features to consider when looking for the best split.",
},
"max_leaf_nodes": {
"anyOf": [
{
"type": "integer",
"minimum": 1,
"minimumForOptimizer": 3,
"maximumForOptimizer": 1000,
},
{
"enum": [None],
"description": "Unlimited number of leaf nodes.",
},
],
"default": None,
"description": "Grow trees with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity.",
},
"min_impurity_decrease": {
"type": "number",
"minimum": 0.0,
"maximumForOptimizer": 10.0,
"default": 0.0,
"description": "A node will be split if this split induces a decrease of the impurity greater than or equal to this value.",
},
"min_impurity_split": {
"anyOf": [{"type": "number", "minimum": 0.0}, {"enum": [None]}],
"default": None,
"description": "Threshold for early stopping in tree growth.",
},
"bootstrap": {
"type": "boolean",
"default": True,
"description": "Whether bootstrap samples are used when building trees. If False, the whole datset is used to build each tree.",
},
"oob_score": {
"type": "boolean",
"default": False,
"description": "Whether to use out-of-bag samples to estimate the generalization accuracy.",
},
"n_jobs": {
"anyOf": [
{
"description": "1 unless in joblib.parallel_backend context.",
"enum": [None],
},
{"description": "Use all processors.", "enum": [-1]},
{
"description": "Number of CPU cores.",
"type": "integer",
"minimum": 1,
},
],
"default": None,
"description": "The number of jobs to run in parallel for both fit and predict.",
},
"random_state": {
"description": "Seed of pseudo-random number generator.",
"anyOf": [
{"laleType": "numpy.random.RandomState"},
{
"description": "RandomState used by np.random",
"enum": [None],
},
{"description": "Explicit seed.", "type": "integer"},
],
"default": None,
},
"verbose": {
"type": "integer",
"default": 0,
"description": "Controls the verbosity when fitting and predicting.",
},
"warm_start": {
"type": "boolean",
"default": False,
"description": "When set to True, reuse the solution of the previous call to fit and add more estimators to the ensemble, otherwise, just fit a whole new forest.",
},
},
}
],
}
_input_fit_schema = {
"description": "Build a forest of trees from the training set (X, y).",
"type": "object",
"required": ["X", "y"],
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
},
"y": {
"description": "The predicted classes.",
"anyOf": [
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{"type": "array", "items": {"type": "number"}},
],
},
"sample_weight": {
"anyOf": [
{"type": "array", "items": {"type": "number"}},
{"enum": [None], "description": "Samples are equally weighted."},
],
"description": "Sample weights.",
},
},
}
_input_predict_schema = {
"type": "object",
"properties": {
"X": {
"type": "array",
"description": "The outer array is over samples aka rows.",
"items": {
"type": "array",
"description": "The inner array is over features aka columns.",
"items": {"type": "number"},
},
}
},
}
_output_predict_schema = {
"description": "The predicted values.",
"anyOf": [
{
"type": "array",
"items": {"type": "array", "items": {"type": "number"}},
},
{"type": "array", "items": {"type": "number"}},
],
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """`Random forest regressor`_ from scikit-learn.
.. _`Random forest regressor`: https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html
""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.sklearn.random_forest_regressor.html",
"import_from": "sklearn.ensemble",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "regressor"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_schema,
"output_predict": _output_predict_schema,
},
}
RandomForestRegressor = lale.operators.make_operator(
sklearn.ensemble.RandomForestRegressor, _combined_schemas
)
if sklearn.__version__ >= "0.22":
# old: https://scikit-learn.org/0.20/modules/generated/sklearn.ensemble.RandomForestRegressor.html
# new: https://scikit-learn.org/0.23/modules/generated/sklearn.ensemble.RandomForestRegressor.html
from lale.schemas import AnyOf, Float, Int, Null
RandomForestRegressor = RandomForestRegressor.customize_schema(
n_estimators=Int(
desc="The number of trees in the forest.",
default=100,
forOptimizer=True,
minimumForOptimizer=10,
maximumForOptimizer=100,
),
ccp_alpha=Float(
desc="Complexity parameter used for Minimal Cost-Complexity Pruning. The subtree with the largest cost complexity that is smaller than ccp_alpha will be chosen. By default, no pruning is performed.",
default=0.0,
forOptimizer=False,
minimum=0.0,
maximumForOptimizer=0.1,
),
max_samples=AnyOf(
types=[
Null(desc="Draw X.shape[0] samples."),
Int(desc="Draw max_samples samples.", minimum=1),
Float(
desc="Draw max_samples * X.shape[0] samples.",
minimum=0.0,
exclusiveMinimum=True,
maximum=1.0,
exclusiveMaximum=True,
),
],
desc="If bootstrap is True, the number of samples to draw from X to train each base estimator.",
default=None,
),
set_as_available=True,
)
lale.docstrings.set_docstrings(RandomForestRegressor)
| 41.427778
| 215
| 0.436972
|
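A hedged sketch of how an operator defined this way is normally used from lale: configuring the planned operator validates the hyperparameters against the schema above, and fit/predict delegate to scikit-learn. The toy data is made up.

import numpy as np
from lale.lib.sklearn import RandomForestRegressor

X = np.random.rand(50, 4)
y = np.random.rand(50)

trainable = RandomForestRegressor(n_estimators=20, max_depth=3)  # schema-checked
trained = trainable.fit(X, y)
predictions = trained.predict(X)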
02b7a499c52f149ffc58ef593948b27a0ef09da9
| 47
|
py
|
Python
|
documentation/models/__init__.py
|
ElNahoko/HSE_ARNOSH
|
1a8661db454e6a9e7f775a3ffd58a3936a43bb59
|
[
"Apache-2.0"
] | 1
|
2019-08-10T17:57:58.000Z
|
2019-08-10T17:57:58.000Z
|
documentation/models/__init__.py
|
ElNahoko/HSE_ARNOSH
|
1a8661db454e6a9e7f775a3ffd58a3936a43bb59
|
[
"Apache-2.0"
] | null | null | null |
documentation/models/__init__.py
|
ElNahoko/HSE_ARNOSH
|
1a8661db454e6a9e7f775a3ffd58a3936a43bb59
|
[
"Apache-2.0"
] | 2
|
2019-08-14T18:08:04.000Z
|
2019-09-04T19:01:08.000Z
|
# -*- coding: utf-8 -*-
from . import DOC_HSE
| 11.75
| 23
| 0.574468
|
2194b0f4c81f699d0b0074a2249dc674e7199f6d
| 3,538
|
py
|
Python
|
pypeln/task/stage.py
|
quarckster/pypeln
|
f4160d0f4d4718b67f79a0707d7261d249459a4b
|
[
"MIT"
] | 1,281
|
2018-09-20T05:35:27.000Z
|
2022-03-30T01:29:48.000Z
|
pypeln/task/stage.py
|
webclinic017/pypeln
|
5231806f2cac9d2019dacbbcf913484fd268b8c1
|
[
"MIT"
] | 78
|
2018-09-18T20:38:12.000Z
|
2022-03-30T20:16:02.000Z
|
pypeln/task/stage.py
|
webclinic017/pypeln
|
5231806f2cac9d2019dacbbcf913484fd268b8c1
|
[
"MIT"
] | 88
|
2018-09-24T10:46:14.000Z
|
2022-03-28T09:34:50.000Z
|
import typing as tp
from dataclasses import dataclass
from pypeln import utils as pypeln_utils
from pypeln.utils import T, Kwargs
from .queue import IterableQueue, OutputQueues
from .worker import Worker, StageParams, TaskPool, ProcessFn
from .supervisor import Supervisor
from . import utils
@dataclass
class Stage(pypeln_utils.BaseStage[T], tp.Iterable[T]):
process_fn: ProcessFn
workers: int
maxsize: int
total_sources: int
timeout: float
dependencies: tp.List["Stage"]
on_start: tp.Optional[tp.Callable[..., tp.Union[Kwargs, tp.Awaitable[Kwargs]]]]
on_done: tp.Optional[tp.Callable[..., tp.Union[Kwargs, tp.Awaitable[Kwargs]]]]
f_args: tp.List[str]
def __hash__(self):
return id(self)
def build(
self,
built: tp.Dict["Stage", OutputQueues],
output_queue: IterableQueue,
main_queue: IterableQueue,
) -> tp.Iterable[Worker]:
if self in built:
built[self].append(output_queue)
return
else:
built[self] = OutputQueues([output_queue])
input_queue = IterableQueue(
maxsize=self.maxsize, total_sources=self.total_sources
)
stage_params = StageParams.create(
input_queue=input_queue,
output_queues=built[self],
)
yield Worker(
process_fn=self.process_fn,
timeout=self.timeout,
stage_params=stage_params,
main_queue=main_queue,
on_start=self.on_start,
on_done=self.on_done,
f_args=self.f_args,
tasks=TaskPool.create(workers=self.workers, timeout=self.timeout),
)
for dependency in self.dependencies:
yield from dependency.build(built, input_queue, main_queue)
def to_iterable(self, maxsize: int, return_index: bool) -> tp.Iterable[T]:
# create a running event loop in case it doesn't exist
utils.get_running_loop()
main_queue: IterableQueue[pypeln_utils.Element] = IterableQueue(
maxsize=maxsize,
total_sources=1,
)
built = {}
workers: tp.List[Worker] = list(self.build(built, main_queue, main_queue))
supervisor = Supervisor(workers=workers, main_queue=main_queue)
with supervisor:
for elem in main_queue:
if return_index:
yield elem
else:
yield elem.value
async def to_async_iterable(
self, maxsize: int, return_index: bool
) -> tp.AsyncIterable[T]:
# build stages first to verify reuse
main_queue: IterableQueue[pypeln_utils.Element] = IterableQueue(
maxsize=maxsize,
total_sources=1,
)
built = {}
workers: tp.List[Worker] = list(self.build(built, main_queue, main_queue))
supervisor = Supervisor(workers=workers, main_queue=main_queue)
async with supervisor:
async for elem in main_queue:
if return_index:
yield elem
else:
yield elem.value
def __iter__(self):
return self.to_iterable(maxsize=self.maxsize, return_index=False)
def __aiter__(self):
return self.to_async_iterable(maxsize=self.maxsize, return_index=False)
async def _await(self):
return [x async for x in self]
def __await__(self) -> tp.Generator[tp.Any, None, tp.List[T]]:
return self._await().__await__()
| 29.983051
| 83
| 0.619842
|
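Stage above is usually reached through the public pypeln.task helpers rather than constructed directly. A hedged sketch of the synchronous consumption path (the __iter__ branch above); the work function and sizes are made up, and result order is not guaranteed.

import asyncio
import pypeln as pl

async def slow_double(x):
    await asyncio.sleep(0.01)
    return 2 * x

stage = pl.task.map(slow_double, range(10), workers=4, maxsize=8)
print(list(stage))  # drives the stage from synchronous code

# Inside an already running event loop the stage could instead be awaited
# (`results = await stage`) or consumed with `async for`, per __await__/__aiter__ above.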
250030e6eefea385d925851a5e35a0f9d8d9b471
| 891
|
py
|
Python
|
modules/Greetings.py
|
TheReaper62/Embeded
|
9cfa9422d33d8e5586888227a89d7bf758e680d1
|
[
"Apache-2.0"
] | 1
|
2021-09-21T10:25:07.000Z
|
2021-09-21T10:25:07.000Z
|
modules/Greetings.py
|
TheReaper62/Embeded
|
9cfa9422d33d8e5586888227a89d7bf758e680d1
|
[
"Apache-2.0"
] | null | null | null |
modules/Greetings.py
|
TheReaper62/Embeded
|
9cfa9422d33d8e5586888227a89d7bf758e680d1
|
[
"Apache-2.0"
] | null | null | null |
import discord
from discord.ext import commands
from discord.app import slash_command
class Greetings(commands.Cog):
def __init__(self, client):
self.client = client
self._last_member = None
@commands.Cog.listener()
async def on_member_join(self, member):
channel = member.guild.system_channel
if channel is not None:
await channel.send(f'Welcome {member.mention}.')
@slash_command()
async def hello(self, ctx, *, member: discord.Member = None):
"""Says hello"""
member = member or ctx.author
if self._last_member is None or self._last_member.id != member.id:
await ctx.respond(f'Hello {member.name}~')
else:
await ctx.respond(f'Hello {member.name}... This feels familiar.')
self._last_member = member
def setup(client):
client.add_cog(Greetings(client))
| 31.821429
| 77
| 0.652076
|
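A hedged sketch of how a cog module like the one above is usually attached to a bot. The `discord.app` import above points at an early py-cord 2.0 alpha, so exact bot classes may differ; the module path and token handling here are assumptions.

import os
from discord.ext import commands

bot = commands.Bot(command_prefix="!")
bot.load_extension("modules.Greetings")  # invokes setup(client) defined above
bot.run(os.environ["DISCORD_TOKEN"])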
9a50963bf64b12aff4a81853feea3d71f2ce9a96
| 56,962
|
py
|
Python
|
sdk/python/pulumi_aws/rds/instance.py
|
JakeGinnivan/pulumi-aws
|
c91ef78932964ac74eda7f5da81f65b0f1798c93
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/rds/instance.py
|
JakeGinnivan/pulumi-aws
|
c91ef78932964ac74eda7f5da81f65b0f1798c93
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/rds/instance.py
|
JakeGinnivan/pulumi-aws
|
c91ef78932964ac74eda7f5da81f65b0f1798c93
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Instance(pulumi.CustomResource):
address: pulumi.Output[str]
"""
The hostname of the RDS instance. See also `endpoint` and `port`.
"""
allocated_storage: pulumi.Output[float]
"""
The allocated storage in gibibytes. If `max_allocated_storage` is configured, this argument represents the initial storage allocation and differences from the configuration will be ignored automatically when Storage Autoscaling occurs.
"""
allow_major_version_upgrade: pulumi.Output[bool]
"""
Indicates that major version
upgrades are allowed. Changing this parameter does not result in an outage and
the change is asynchronously applied as soon as possible.
"""
apply_immediately: pulumi.Output[bool]
"""
Specifies whether any database modifications
are applied immediately, or during the next maintenance window. Default is
`false`. See [Amazon RDS Documentation for more
information.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html)
"""
arn: pulumi.Output[str]
"""
The ARN of the RDS instance.
"""
auto_minor_version_upgrade: pulumi.Output[bool]
"""
Indicates that minor engine upgrades
will be applied automatically to the DB instance during the maintenance window.
Defaults to true.
"""
availability_zone: pulumi.Output[str]
"""
The AZ for the RDS instance.
"""
backup_retention_period: pulumi.Output[float]
"""
The days to retain backups for. Must be
between `0` and `35`. Must be greater than `0` if the database is used as a source for a Read Replica. [See Read Replica][1].
"""
backup_window: pulumi.Output[str]
"""
The daily time range (in UTC) during which
automated backups are created if they are enabled. Example: "09:46-10:16". Must
not overlap with `maintenance_window`.
"""
ca_cert_identifier: pulumi.Output[str]
"""
The identifier of the CA certificate for the DB instance.
"""
character_set_name: pulumi.Output[str]
"""
The character set name to use for DB
encoding in Oracle and Microsoft SQL instances (collation). This can't be changed. See [Oracle Character Sets
Supported in Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.OracleCharacterSets.html)
or [Server-Level Collation for Microsoft SQL Server](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.SQLServer.CommonDBATasks.Collation.html) for more information.
"""
copy_tags_to_snapshot: pulumi.Output[bool]
"""
Copy all Instance `tags` to snapshots. Default is `false`.
"""
db_subnet_group_name: pulumi.Output[str]
"""
Name of `DB subnet group`. DB instance will
be created in the VPC associated with the DB subnet group. If unspecified, will
be created in the `default` VPC, or in EC2 Classic, if available. When working
with read replicas, it should be specified only if the source database
specifies an instance in another AWS Region. See [DBSubnetGroupName in API
action CreateDBInstanceReadReplica](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstanceReadReplica.html)
    for additional read replica constraints.
"""
delete_automated_backups: pulumi.Output[bool]
"""
Specifies whether to remove automated backups immediately after the DB instance is deleted. Default is `true`.
"""
deletion_protection: pulumi.Output[bool]
"""
If the DB instance should have deletion protection enabled. The database can't be deleted when this value is set to `true`. The default is `false`.
"""
domain: pulumi.Output[str]
"""
The ID of the Directory Service Active Directory domain to create the instance in.
"""
domain_iam_role_name: pulumi.Output[str]
"""
The name of the IAM role to be used when making API calls to the Directory Service.
"""
enabled_cloudwatch_logs_exports: pulumi.Output[list]
"""
List of log types to enable for exporting to CloudWatch logs. If omitted, no logs will be exported. Valid values (depending on `engine`). MySQL and MariaDB: `audit`, `error`, `general`, `slowquery`. PostgreSQL: `postgresql`, `upgrade`. MSSQL: `agent` , `error`. Oracle: `alert`, `audit`, `listener`, `trace`.
"""
endpoint: pulumi.Output[str]
"""
The connection endpoint in `address:port` format.
"""
engine: pulumi.Output[str]
"""
(Required unless a `snapshot_identifier` or `replicate_source_db`
is provided) The database engine to use. For supported values, see the Engine parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html).
    Note that for Amazon Aurora instances the engine must match the `DB cluster`'s engine.
For information on the difference between the available Aurora MySQL engines
see [Comparison between Aurora MySQL 1 and Aurora MySQL 2](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AuroraMySQL.Updates.20180206.html)
in the Amazon RDS User Guide.
"""
engine_version: pulumi.Output[str]
"""
The engine version to use. If `auto_minor_version_upgrade`
is enabled, you can provide a prefix of the version such as `5.7` (for `5.7.10`) and
this attribute will ignore differences in the patch version automatically (e.g. `5.7.17`).
For supported values, see the EngineVersion parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html).
    Note that for Amazon Aurora instances the engine version must match the `DB cluster`'s engine version.
"""
final_snapshot_identifier: pulumi.Output[str]
"""
The name of your final DB snapshot
when this DB instance is deleted. Must be provided if `skip_final_snapshot` is
set to `false`.
"""
hosted_zone_id: pulumi.Output[str]
"""
The canonical hosted zone ID of the DB instance (to be used
in a Route 53 Alias record).
"""
iam_database_authentication_enabled: pulumi.Output[bool]
"""
    Specifies whether or not
    mappings of AWS Identity and Access Management (IAM) accounts to database
    accounts are enabled.
"""
identifier: pulumi.Output[str]
"""
The name of the RDS instance,
if omitted, this provider will assign a random, unique identifier.
"""
identifier_prefix: pulumi.Output[str]
"""
Creates a unique
identifier beginning with the specified prefix. Conflicts with `identifier`.
"""
instance_class: pulumi.Output[str]
"""
The instance type of the RDS instance.
"""
iops: pulumi.Output[float]
"""
The amount of provisioned IOPS. Setting this implies a
storage_type of "io1".
"""
kms_key_id: pulumi.Output[str]
"""
The ARN for the KMS encryption key. If creating an
encrypted replica, set this to the destination KMS ARN.
"""
license_model: pulumi.Output[str]
"""
(Optional, but required for some DB engines, i.e. Oracle
SE1) License model information for this DB instance.
"""
maintenance_window: pulumi.Output[str]
"""
The window to perform maintenance in.
Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00". See [RDS
Maintenance Window
docs](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow)
for more information.
"""
max_allocated_storage: pulumi.Output[float]
"""
When configured, the upper limit to which Amazon RDS can automatically scale the storage of the DB instance. Configuring this will automatically ignore differences to `allocated_storage`. Must be greater than or equal to `allocated_storage` or `0` to disable Storage Autoscaling.
"""
monitoring_interval: pulumi.Output[float]
"""
The interval, in seconds, between points
when Enhanced Monitoring metrics are collected for the DB instance. To disable
collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid
Values: 0, 1, 5, 10, 15, 30, 60.
"""
monitoring_role_arn: pulumi.Output[str]
"""
The ARN for the IAM role that permits RDS
to send enhanced monitoring metrics to CloudWatch Logs. You can find more
information on the [AWS
Documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html)
what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances.
"""
multi_az: pulumi.Output[bool]
"""
Specifies if the RDS instance is multi-AZ
"""
name: pulumi.Output[str]
"""
The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance. Note that this does not apply for Oracle or SQL Server engines. See the [AWS documentation](http://docs.aws.amazon.com/cli/latest/reference/rds/create-db-instance.html) for more details on what applies for those engines.
"""
option_group_name: pulumi.Output[str]
"""
Name of the DB option group to associate.
"""
parameter_group_name: pulumi.Output[str]
"""
Name of the DB parameter group to
associate.
"""
password: pulumi.Output[str]
"""
(Required unless a `snapshot_identifier` or `replicate_source_db`
is provided) Password for the master DB user. Note that this may show up in
logs, and it will be stored in the state file.
"""
performance_insights_enabled: pulumi.Output[bool]
"""
Specifies whether Performance Insights are enabled. Defaults to false.
"""
performance_insights_kms_key_id: pulumi.Output[str]
"""
The ARN for the KMS key to encrypt Performance Insights data. When specifying `performance_insights_kms_key_id`, `performance_insights_enabled` needs to be set to true. Once KMS key is set, it can never be changed.
"""
performance_insights_retention_period: pulumi.Output[float]
"""
The amount of time in days to retain Performance Insights data. Either 7 (7 days) or 731 (2 years). When specifying `performance_insights_retention_period`, `performance_insights_enabled` needs to be set to true. Defaults to '7'.
"""
port: pulumi.Output[float]
"""
The port on which the DB accepts connections.
"""
publicly_accessible: pulumi.Output[bool]
"""
Bool to control if instance is publicly
accessible. Default is `false`.
"""
replicas: pulumi.Output[list]
replicate_source_db: pulumi.Output[str]
"""
Specifies that this resource is a Replicate
database, and to use this value as the source database. This correlates to the
`identifier` of another Amazon RDS Database to replicate (if replicating within
a single region) or ARN of the Amazon RDS Database to replicate (if replicating
cross-region). Note that if you are
creating a cross-region replica of an encrypted database you will also need to
specify a `kms_key_id`. See [DB Instance Replication][1] and [Working with
PostgreSQL and MySQL Read Replicas](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html)
for more information on using Replication.
"""
resource_id: pulumi.Output[str]
"""
The RDS Resource ID of this instance.
"""
s3_import: pulumi.Output[dict]
"""
Restore from a Percona Xtrabackup in S3. See [Importing Data into an Amazon RDS MySQL DB Instance](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/MySQL.Procedural.Importing.html)
* `bucket_name` (`str`) - The bucket name where your backup is stored
* `bucket_prefix` (`str`) - Can be blank, but is the path to your backup
* `ingestionRole` (`str`) - Role applied to load the data.
* `sourceEngine` (`str`) - Source engine for the backup
* `sourceEngineVersion` (`str`) - Version of the source engine used to make the backup
"""
security_group_names: pulumi.Output[list]
"""
List of DB Security Groups to
associate. Only used for [DB Instances on the _EC2-Classic_
Platform](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html#USER_VPC.FindDefaultVPC).
"""
skip_final_snapshot: pulumi.Output[bool]
"""
Determines whether a final DB snapshot is
created before the DB instance is deleted. If true is specified, no DBSnapshot
is created. If false is specified, a DB snapshot is created before the DB
instance is deleted, using the value from `final_snapshot_identifier`. Default
is `false`.
"""
snapshot_identifier: pulumi.Output[str]
"""
Specifies whether or not to create this
database from a snapshot. This correlates to the snapshot ID you'd find in the
RDS console, e.g: rds:production-2015-06-26-06-05.
"""
status: pulumi.Output[str]
"""
The RDS instance status.
"""
storage_encrypted: pulumi.Output[bool]
"""
Specifies whether the DB instance is
encrypted. Note that if you are creating a cross-region read replica this field
is ignored and you should instead declare `kms_key_id` with a valid ARN. The
default is `false` if not specified.
"""
storage_type: pulumi.Output[str]
"""
One of "standard" (magnetic), "gp2" (general
purpose SSD), or "io1" (provisioned IOPS SSD). The default is "io1" if `iops` is
specified, "gp2" if not.
"""
tags: pulumi.Output[dict]
"""
A map of tags to assign to the resource.
"""
timezone: pulumi.Output[str]
"""
Time zone of the DB instance. `timezone` is currently
only supported by Microsoft SQL Server. The `timezone` can only be set on
creation. See [MSSQL User
Guide](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone)
for more information.
"""
username: pulumi.Output[str]
"""
(Required unless a `snapshot_identifier` or `replicate_source_db`
is provided) Username for the master DB user.
"""
vpc_security_group_ids: pulumi.Output[list]
"""
List of VPC security groups to
associate.
"""
def __init__(__self__, resource_name, opts=None, allocated_storage=None, allow_major_version_upgrade=None, apply_immediately=None, auto_minor_version_upgrade=None, availability_zone=None, backup_retention_period=None, backup_window=None, ca_cert_identifier=None, character_set_name=None, copy_tags_to_snapshot=None, db_subnet_group_name=None, delete_automated_backups=None, deletion_protection=None, domain=None, domain_iam_role_name=None, enabled_cloudwatch_logs_exports=None, engine=None, engine_version=None, final_snapshot_identifier=None, iam_database_authentication_enabled=None, identifier=None, identifier_prefix=None, instance_class=None, iops=None, kms_key_id=None, license_model=None, maintenance_window=None, max_allocated_storage=None, monitoring_interval=None, monitoring_role_arn=None, multi_az=None, name=None, option_group_name=None, parameter_group_name=None, password=None, performance_insights_enabled=None, performance_insights_kms_key_id=None, performance_insights_retention_period=None, port=None, publicly_accessible=None, replicate_source_db=None, s3_import=None, security_group_names=None, skip_final_snapshot=None, snapshot_identifier=None, storage_encrypted=None, storage_type=None, tags=None, timezone=None, username=None, vpc_security_group_ids=None, __props__=None, __name__=None, __opts__=None):
"""
Provides an RDS instance resource. A DB instance is an isolated database
environment in the cloud. A DB instance can contain multiple user-created
databases.
Changes to a DB instance can occur when you manually change a parameter, such as
`allocated_storage`, and are reflected in the next maintenance window. Because
of this, this provider may report a difference in its planning phase because a
modification has not yet taken place. You can use the `apply_immediately` flag
to instruct the service to apply the change immediately (see documentation
below).
When upgrading the major version of an engine, `allow_major_version_upgrade`
must be set to `true`.
> **Note:** using `apply_immediately` can result in a brief downtime as the
server reboots. See the AWS Docs on [RDS Maintenance][2] for more information.
> **Note:** All arguments including the username and password will be stored in
the raw state as plain-text.
## RDS Instance Class Types
Amazon RDS supports three types of instance classes: Standard, Memory Optimized,
and Burstable Performance. For more information please read the AWS RDS documentation
about [DB Instance Class Types](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html)
## Example Usage
### Basic Usage
```python
import pulumi
import pulumi_aws as aws
default = aws.rds.Instance("default",
allocated_storage=20,
engine="mysql",
engine_version="5.7",
instance_class="db.t2.micro",
name="mydb",
parameter_group_name="default.mysql5.7",
password="foobarbaz",
storage_type="gp2",
username="foo")
```
### Storage Autoscaling
```python
import pulumi
import pulumi_aws as aws
example = aws.rds.Instance("example",
allocated_storage=50,
max_allocated_storage=100)
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[float] allocated_storage: The allocated storage in gibibytes. If `max_allocated_storage` is configured, this argument represents the initial storage allocation and differences from the configuration will be ignored automatically when Storage Autoscaling occurs.
:param pulumi.Input[bool] allow_major_version_upgrade: Indicates that major version
upgrades are allowed. Changing this parameter does not result in an outage and
the change is asynchronously applied as soon as possible.
:param pulumi.Input[bool] apply_immediately: Specifies whether any database modifications
are applied immediately, or during the next maintenance window. Default is
`false`. See [Amazon RDS Documentation for more
information.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html)
:param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades
will be applied automatically to the DB instance during the maintenance window.
Defaults to true.
:param pulumi.Input[str] availability_zone: The AZ for the RDS instance.
:param pulumi.Input[float] backup_retention_period: The days to retain backups for. Must be
between `0` and `35`. Must be greater than `0` if the database is used as a source for a Read Replica. [See Read Replica][1].
:param pulumi.Input[str] backup_window: The daily time range (in UTC) during which
automated backups are created if they are enabled. Example: "09:46-10:16". Must
not overlap with `maintenance_window`.
:param pulumi.Input[str] ca_cert_identifier: The identifier of the CA certificate for the DB instance.
:param pulumi.Input[str] character_set_name: The character set name to use for DB
encoding in Oracle and Microsoft SQL instances (collation). This can't be changed. See [Oracle Character Sets
Supported in Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.OracleCharacterSets.html)
or [Server-Level Collation for Microsoft SQL Server](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.SQLServer.CommonDBATasks.Collation.html) for more information.
:param pulumi.Input[bool] copy_tags_to_snapshot: Copy all Instance `tags` to snapshots. Default is `false`.
:param pulumi.Input[str] db_subnet_group_name: Name of `DB subnet group`. DB instance will
be created in the VPC associated with the DB subnet group. If unspecified, will
be created in the `default` VPC, or in EC2 Classic, if available. When working
with read replicas, it should be specified only if the source database
specifies an instance in another AWS Region. See [DBSubnetGroupName in API
action CreateDBInstanceReadReplica](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstanceReadReplica.html)
               for additional read replica constraints.
:param pulumi.Input[bool] delete_automated_backups: Specifies whether to remove automated backups immediately after the DB instance is deleted. Default is `true`.
:param pulumi.Input[bool] deletion_protection: If the DB instance should have deletion protection enabled. The database can't be deleted when this value is set to `true`. The default is `false`.
:param pulumi.Input[str] domain: The ID of the Directory Service Active Directory domain to create the instance in.
:param pulumi.Input[str] domain_iam_role_name: The name of the IAM role to be used when making API calls to the Directory Service.
:param pulumi.Input[list] enabled_cloudwatch_logs_exports: List of log types to enable for exporting to CloudWatch logs. If omitted, no logs will be exported. Valid values (depending on `engine`). MySQL and MariaDB: `audit`, `error`, `general`, `slowquery`. PostgreSQL: `postgresql`, `upgrade`. MSSQL: `agent` , `error`. Oracle: `alert`, `audit`, `listener`, `trace`.
:param pulumi.Input[str] engine: (Required unless a `snapshot_identifier` or `replicate_source_db`
is provided) The database engine to use. For supported values, see the Engine parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html).
               Note that for Amazon Aurora instances the engine must match the `DB cluster`'s engine.
For information on the difference between the available Aurora MySQL engines
see [Comparison between Aurora MySQL 1 and Aurora MySQL 2](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AuroraMySQL.Updates.20180206.html)
in the Amazon RDS User Guide.
:param pulumi.Input[str] engine_version: The engine version to use. If `auto_minor_version_upgrade`
is enabled, you can provide a prefix of the version such as `5.7` (for `5.7.10`) and
this attribute will ignore differences in the patch version automatically (e.g. `5.7.17`).
For supported values, see the EngineVersion parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html).
Note that for Amazon Aurora instances the engine version must match the `DB cluster`'s engine version.
:param pulumi.Input[str] final_snapshot_identifier: The name of your final DB snapshot
when this DB instance is deleted. Must be provided if `skip_final_snapshot` is
set to `false`.
:param pulumi.Input[bool] iam_database_authentication_enabled: Specifies whether
mappings of AWS Identity and Access Management (IAM) accounts to database
accounts are enabled.
:param pulumi.Input[str] identifier: The name of the RDS instance,
if omitted, this provider will assign a random, unique identifier.
:param pulumi.Input[str] identifier_prefix: Creates a unique
identifier beginning with the specified prefix. Conflicts with `identifier`.
:param pulumi.Input[str] instance_class: The instance type of the RDS instance.
:param pulumi.Input[float] iops: The amount of provisioned IOPS. Setting this implies a
storage_type of "io1".
:param pulumi.Input[str] kms_key_id: The ARN for the KMS encryption key. If creating an
encrypted replica, set this to the destination KMS ARN.
:param pulumi.Input[str] license_model: (Optional, but required for some DB engines, i.e. Oracle
SE1) License model information for this DB instance.
:param pulumi.Input[str] maintenance_window: The window to perform maintenance in.
Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00". See [RDS
Maintenance Window
docs](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow)
for more information.
:param pulumi.Input[float] max_allocated_storage: When configured, the upper limit to which Amazon RDS can automatically scale the storage of the DB instance. Configuring this will automatically ignore differences to `allocated_storage`. Must be greater than or equal to `allocated_storage` or `0` to disable Storage Autoscaling.
:param pulumi.Input[float] monitoring_interval: The interval, in seconds, between points
when Enhanced Monitoring metrics are collected for the DB instance. To disable
collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid
Values: 0, 1, 5, 10, 15, 30, 60.
:param pulumi.Input[str] monitoring_role_arn: The ARN for the IAM role that permits RDS
to send enhanced monitoring metrics to CloudWatch Logs. You can find more
information on the [AWS
Documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html)
what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances.
:param pulumi.Input[bool] multi_az: Specifies if the RDS instance is multi-AZ.
:param pulumi.Input[str] name: The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance. Note that this does not apply for Oracle or SQL Server engines. See the [AWS documentation](http://docs.aws.amazon.com/cli/latest/reference/rds/create-db-instance.html) for more details on what applies for those engines.
:param pulumi.Input[str] option_group_name: Name of the DB option group to associate.
:param pulumi.Input[str] parameter_group_name: Name of the DB parameter group to
associate.
:param pulumi.Input[str] password: (Required unless a `snapshot_identifier` or `replicate_source_db`
is provided) Password for the master DB user. Note that this may show up in
logs, and it will be stored in the state file.
:param pulumi.Input[bool] performance_insights_enabled: Specifies whether Performance Insights are enabled. Defaults to false.
:param pulumi.Input[str] performance_insights_kms_key_id: The ARN for the KMS key to encrypt Performance Insights data. When specifying `performance_insights_kms_key_id`, `performance_insights_enabled` needs to be set to true. Once KMS key is set, it can never be changed.
:param pulumi.Input[float] performance_insights_retention_period: The amount of time in days to retain Performance Insights data. Either 7 (7 days) or 731 (2 years). When specifying `performance_insights_retention_period`, `performance_insights_enabled` needs to be set to true. Defaults to '7'.
:param pulumi.Input[float] port: The port on which the DB accepts connections.
:param pulumi.Input[bool] publicly_accessible: Bool to control if instance is publicly
accessible. Default is `false`.
:param pulumi.Input[str] replicate_source_db: Specifies that this resource is a Replicate
database, and to use this value as the source database. This correlates to the
`identifier` of another Amazon RDS Database to replicate (if replicating within
a single region) or ARN of the Amazon RDS Database to replicate (if replicating
cross-region). Note that if you are
creating a cross-region replica of an encrypted database you will also need to
specify a `kms_key_id`. See [DB Instance Replication][1] and [Working with
PostgreSQL and MySQL Read Replicas](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html)
for more information on using Replication.
:param pulumi.Input[dict] s3_import: Restore from a Percona Xtrabackup in S3. See [Importing Data into an Amazon RDS MySQL DB Instance](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/MySQL.Procedural.Importing.html)
:param pulumi.Input[list] security_group_names: List of DB Security Groups to
associate. Only used for [DB Instances on the _EC2-Classic_
Platform](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html#USER_VPC.FindDefaultVPC).
:param pulumi.Input[bool] skip_final_snapshot: Determines whether a final DB snapshot is
created before the DB instance is deleted. If true is specified, no DBSnapshot
is created. If false is specified, a DB snapshot is created before the DB
instance is deleted, using the value from `final_snapshot_identifier`. Default
is `false`.
:param pulumi.Input[str] snapshot_identifier: Specifies whether or not to create this
database from a snapshot. This correlates to the snapshot ID you'd find in the
RDS console, e.g: rds:production-2015-06-26-06-05.
:param pulumi.Input[bool] storage_encrypted: Specifies whether the DB instance is
encrypted. Note that if you are creating a cross-region read replica this field
is ignored and you should instead declare `kms_key_id` with a valid ARN. The
default is `false` if not specified.
:param pulumi.Input[str] storage_type: One of "standard" (magnetic), "gp2" (general
purpose SSD), or "io1" (provisioned IOPS SSD). The default is "io1" if `iops` is
specified, "gp2" if not.
:param pulumi.Input[dict] tags: A map of tags to assign to the resource.
:param pulumi.Input[str] timezone: Time zone of the DB instance. `timezone` is currently
only supported by Microsoft SQL Server. The `timezone` can only be set on
creation. See [MSSQL User
Guide](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone)
for more information.
:param pulumi.Input[str] username: (Required unless a `snapshot_identifier` or `replicate_source_db`
is provided) Username for the master DB user.
:param pulumi.Input[list] vpc_security_group_ids: List of VPC security groups to
associate.
The **s3_import** object supports the following:
* `bucket_name` (`pulumi.Input[str]`) - The bucket name where your backup is stored
* `bucket_prefix` (`pulumi.Input[str]`) - Can be blank, but is the path to your backup
* `ingestionRole` (`pulumi.Input[str]`) - Role applied to load the data.
* `sourceEngine` (`pulumi.Input[str]`) - Source engine for the backup
* `sourceEngineVersion` (`pulumi.Input[str]`) - Version of the source engine used to make the backup
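Example -- a minimal, illustrative sketch of restoring from an S3 backup; the
resource name, bucket, role ARN and credentials below are placeholders, not
values defined by this provider:

    import pulumi_aws as aws

    restored = aws.rds.Instance("restored-from-s3",
        engine="mysql",
        engine_version="5.7",
        instance_class="db.t3.micro",
        allocated_storage=20,
        username="admin",
        password="change-me",
        skip_final_snapshot=True,
        s3_import={
            "bucket_name": "my-backup-bucket",
            "bucket_prefix": "backups/",
            "ingestionRole": "arn:aws:iam::123456789012:role/rds-s3-import",
            "sourceEngine": "mysql",
            "sourceEngineVersion": "5.7.30",
        })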
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['allocated_storage'] = allocated_storage
__props__['allow_major_version_upgrade'] = allow_major_version_upgrade
__props__['apply_immediately'] = apply_immediately
__props__['auto_minor_version_upgrade'] = auto_minor_version_upgrade
__props__['availability_zone'] = availability_zone
__props__['backup_retention_period'] = backup_retention_period
__props__['backup_window'] = backup_window
__props__['ca_cert_identifier'] = ca_cert_identifier
__props__['character_set_name'] = character_set_name
__props__['copy_tags_to_snapshot'] = copy_tags_to_snapshot
__props__['db_subnet_group_name'] = db_subnet_group_name
__props__['delete_automated_backups'] = delete_automated_backups
__props__['deletion_protection'] = deletion_protection
__props__['domain'] = domain
__props__['domain_iam_role_name'] = domain_iam_role_name
__props__['enabled_cloudwatch_logs_exports'] = enabled_cloudwatch_logs_exports
__props__['engine'] = engine
__props__['engine_version'] = engine_version
__props__['final_snapshot_identifier'] = final_snapshot_identifier
__props__['iam_database_authentication_enabled'] = iam_database_authentication_enabled
__props__['identifier'] = identifier
__props__['identifier_prefix'] = identifier_prefix
if instance_class is None:
raise TypeError("Missing required property 'instance_class'")
__props__['instance_class'] = instance_class
__props__['iops'] = iops
__props__['kms_key_id'] = kms_key_id
__props__['license_model'] = license_model
__props__['maintenance_window'] = maintenance_window
__props__['max_allocated_storage'] = max_allocated_storage
__props__['monitoring_interval'] = monitoring_interval
__props__['monitoring_role_arn'] = monitoring_role_arn
__props__['multi_az'] = multi_az
__props__['name'] = name
__props__['option_group_name'] = option_group_name
__props__['parameter_group_name'] = parameter_group_name
__props__['password'] = password
__props__['performance_insights_enabled'] = performance_insights_enabled
__props__['performance_insights_kms_key_id'] = performance_insights_kms_key_id
__props__['performance_insights_retention_period'] = performance_insights_retention_period
__props__['port'] = port
__props__['publicly_accessible'] = publicly_accessible
__props__['replicate_source_db'] = replicate_source_db
__props__['s3_import'] = s3_import
__props__['security_group_names'] = security_group_names
__props__['skip_final_snapshot'] = skip_final_snapshot
__props__['snapshot_identifier'] = snapshot_identifier
__props__['storage_encrypted'] = storage_encrypted
__props__['storage_type'] = storage_type
__props__['tags'] = tags
__props__['timezone'] = timezone
__props__['username'] = username
__props__['vpc_security_group_ids'] = vpc_security_group_ids
__props__['address'] = None
__props__['arn'] = None
__props__['endpoint'] = None
__props__['hosted_zone_id'] = None
__props__['replicas'] = None
__props__['resource_id'] = None
__props__['status'] = None
super(Instance, __self__).__init__(
'aws:rds/instance:Instance',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, address=None, allocated_storage=None, allow_major_version_upgrade=None, apply_immediately=None, arn=None, auto_minor_version_upgrade=None, availability_zone=None, backup_retention_period=None, backup_window=None, ca_cert_identifier=None, character_set_name=None, copy_tags_to_snapshot=None, db_subnet_group_name=None, delete_automated_backups=None, deletion_protection=None, domain=None, domain_iam_role_name=None, enabled_cloudwatch_logs_exports=None, endpoint=None, engine=None, engine_version=None, final_snapshot_identifier=None, hosted_zone_id=None, iam_database_authentication_enabled=None, identifier=None, identifier_prefix=None, instance_class=None, iops=None, kms_key_id=None, license_model=None, maintenance_window=None, max_allocated_storage=None, monitoring_interval=None, monitoring_role_arn=None, multi_az=None, name=None, option_group_name=None, parameter_group_name=None, password=None, performance_insights_enabled=None, performance_insights_kms_key_id=None, performance_insights_retention_period=None, port=None, publicly_accessible=None, replicas=None, replicate_source_db=None, resource_id=None, s3_import=None, security_group_names=None, skip_final_snapshot=None, snapshot_identifier=None, status=None, storage_encrypted=None, storage_type=None, tags=None, timezone=None, username=None, vpc_security_group_ids=None):
"""
Get an existing Instance resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] address: The hostname of the RDS instance. See also `endpoint` and `port`.
:param pulumi.Input[float] allocated_storage: The allocated storage in gibibytes. If `max_allocated_storage` is configured, this argument represents the initial storage allocation and differences from the configuration will be ignored automatically when Storage Autoscaling occurs.
:param pulumi.Input[bool] allow_major_version_upgrade: Indicates that major version
upgrades are allowed. Changing this parameter does not result in an outage and
the change is asynchronously applied as soon as possible.
:param pulumi.Input[bool] apply_immediately: Specifies whether any database modifications
are applied immediately, or during the next maintenance window. Default is
`false`. See [Amazon RDS Documentation for more
information.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html)
:param pulumi.Input[str] arn: The ARN of the RDS instance.
:param pulumi.Input[bool] auto_minor_version_upgrade: Indicates that minor engine upgrades
will be applied automatically to the DB instance during the maintenance window.
Defaults to true.
:param pulumi.Input[str] availability_zone: The AZ for the RDS instance.
:param pulumi.Input[float] backup_retention_period: The days to retain backups for. Must be
between `0` and `35`. Must be greater than `0` if the database is used as a source for a Read Replica. [See Read Replica][1].
:param pulumi.Input[str] backup_window: The daily time range (in UTC) during which
automated backups are created if they are enabled. Example: "09:46-10:16". Must
not overlap with `maintenance_window`.
:param pulumi.Input[str] ca_cert_identifier: The identifier of the CA certificate for the DB instance.
:param pulumi.Input[str] character_set_name: The character set name to use for DB
encoding in Oracle and Microsoft SQL instances (collation). This can't be changed. See [Oracle Character Sets
Supported in Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.OracleCharacterSets.html)
or [Server-Level Collation for Microsoft SQL Server](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.SQLServer.CommonDBATasks.Collation.html) for more information.
:param pulumi.Input[bool] copy_tags_to_snapshot: Copy all Instance `tags` to snapshots. Default is `false`.
:param pulumi.Input[str] db_subnet_group_name: Name of `DB subnet group`. DB instance will
be created in the VPC associated with the DB subnet group. If unspecified, will
be created in the `default` VPC, or in EC2 Classic, if available. When working
with read replicas, it should be specified only if the source database
specifies an instance in another AWS Region. See [DBSubnetGroupName in API
action CreateDBInstanceReadReplica](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstanceReadReplica.html)
for additional read replica constraints.
:param pulumi.Input[bool] delete_automated_backups: Specifies whether to remove automated backups immediately after the DB instance is deleted. Default is `true`.
:param pulumi.Input[bool] deletion_protection: If the DB instance should have deletion protection enabled. The database can't be deleted when this value is set to `true`. The default is `false`.
:param pulumi.Input[str] domain: The ID of the Directory Service Active Directory domain to create the instance in.
:param pulumi.Input[str] domain_iam_role_name: The name of the IAM role to be used when making API calls to the Directory Service.
:param pulumi.Input[list] enabled_cloudwatch_logs_exports: List of log types to enable for exporting to CloudWatch logs. If omitted, no logs will be exported. Valid values depend on `engine`. MySQL and MariaDB: `audit`, `error`, `general`, `slowquery`. PostgreSQL: `postgresql`, `upgrade`. MSSQL: `agent`, `error`. Oracle: `alert`, `audit`, `listener`, `trace`.
:param pulumi.Input[str] endpoint: The connection endpoint in `address:port` format.
:param pulumi.Input[str] engine: (Required unless a `snapshot_identifier` or `replicate_source_db`
is provided) The database engine to use. For supported values, see the Engine parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html).
Note that for Amazon Aurora instances the engine must match the `DB cluster`'s engine.
For information on the difference between the available Aurora MySQL engines
see [Comparison between Aurora MySQL 1 and Aurora MySQL 2](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AuroraMySQL.Updates.20180206.html)
in the Amazon RDS User Guide.
:param pulumi.Input[str] engine_version: The engine version to use. If `auto_minor_version_upgrade`
is enabled, you can provide a prefix of the version such as `5.7` (for `5.7.10`) and
this attribute will ignore differences in the patch version automatically (e.g. `5.7.17`).
For supported values, see the EngineVersion parameter in [API action CreateDBInstance](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html).
Note that for Amazon Aurora instances the engine version must match the `DB cluster`'s engine version.
:param pulumi.Input[str] final_snapshot_identifier: The name of your final DB snapshot
when this DB instance is deleted. Must be provided if `skip_final_snapshot` is
set to `false`.
:param pulumi.Input[str] hosted_zone_id: The canonical hosted zone ID of the DB instance (to be used
in a Route 53 Alias record).
:param pulumi.Input[bool] iam_database_authentication_enabled: Specifies whether
mappings of AWS Identity and Access Management (IAM) accounts to database
accounts are enabled.
:param pulumi.Input[str] identifier: The name of the RDS instance,
if omitted, this provider will assign a random, unique identifier.
:param pulumi.Input[str] identifier_prefix: Creates a unique
identifier beginning with the specified prefix. Conflicts with `identifier`.
:param pulumi.Input[str] instance_class: The instance type of the RDS instance.
:param pulumi.Input[float] iops: The amount of provisioned IOPS. Setting this implies a
storage_type of "io1".
:param pulumi.Input[str] kms_key_id: The ARN for the KMS encryption key. If creating an
encrypted replica, set this to the destination KMS ARN.
:param pulumi.Input[str] license_model: (Optional, but required for some DB engines, i.e. Oracle
SE1) License model information for this DB instance.
:param pulumi.Input[str] maintenance_window: The window to perform maintenance in.
Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00". See [RDS
Maintenance Window
docs](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow)
for more information.
:param pulumi.Input[float] max_allocated_storage: When configured, the upper limit to which Amazon RDS can automatically scale the storage of the DB instance. Configuring this will automatically ignore differences to `allocated_storage`. Must be greater than or equal to `allocated_storage` or `0` to disable Storage Autoscaling.
:param pulumi.Input[float] monitoring_interval: The interval, in seconds, between points
when Enhanced Monitoring metrics are collected for the DB instance. To disable
collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid
Values: 0, 1, 5, 10, 15, 30, 60.
:param pulumi.Input[str] monitoring_role_arn: The ARN for the IAM role that permits RDS
to send enhanced monitoring metrics to CloudWatch Logs. You can find more
information on the [AWS
Documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html)
what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances.
:param pulumi.Input[bool] multi_az: Specifies if the RDS instance is multi-AZ.
:param pulumi.Input[str] name: The name of the database to create when the DB instance is created. If this parameter is not specified, no database is created in the DB instance. Note that this does not apply for Oracle or SQL Server engines. See the [AWS documentation](http://docs.aws.amazon.com/cli/latest/reference/rds/create-db-instance.html) for more details on what applies for those engines.
:param pulumi.Input[str] option_group_name: Name of the DB option group to associate.
:param pulumi.Input[str] parameter_group_name: Name of the DB parameter group to
associate.
:param pulumi.Input[str] password: (Required unless a `snapshot_identifier` or `replicate_source_db`
is provided) Password for the master DB user. Note that this may show up in
logs, and it will be stored in the state file.
:param pulumi.Input[bool] performance_insights_enabled: Specifies whether Performance Insights are enabled. Defaults to false.
:param pulumi.Input[str] performance_insights_kms_key_id: The ARN for the KMS key to encrypt Performance Insights data. When specifying `performance_insights_kms_key_id`, `performance_insights_enabled` needs to be set to true. Once KMS key is set, it can never be changed.
:param pulumi.Input[float] performance_insights_retention_period: The amount of time in days to retain Performance Insights data. Either 7 (7 days) or 731 (2 years). When specifying `performance_insights_retention_period`, `performance_insights_enabled` needs to be set to true. Defaults to '7'.
:param pulumi.Input[float] port: The port on which the DB accepts connections.
:param pulumi.Input[bool] publicly_accessible: Bool to control if instance is publicly
accessible. Default is `false`.
:param pulumi.Input[str] replicate_source_db: Specifies that this resource is a Replicate
database, and to use this value as the source database. This correlates to the
`identifier` of another Amazon RDS Database to replicate (if replicating within
a single region) or ARN of the Amazon RDS Database to replicate (if replicating
cross-region). Note that if you are
creating a cross-region replica of an encrypted database you will also need to
specify a `kms_key_id`. See [DB Instance Replication][1] and [Working with
PostgreSQL and MySQL Read Replicas](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html)
for more information on using Replication.
:param pulumi.Input[str] resource_id: The RDS Resource ID of this instance.
:param pulumi.Input[dict] s3_import: Restore from a Percona Xtrabackup in S3. See [Importing Data into an Amazon RDS MySQL DB Instance](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/MySQL.Procedural.Importing.html)
:param pulumi.Input[list] security_group_names: List of DB Security Groups to
associate. Only used for [DB Instances on the _EC2-Classic_
Platform](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html#USER_VPC.FindDefaultVPC).
:param pulumi.Input[bool] skip_final_snapshot: Determines whether a final DB snapshot is
created before the DB instance is deleted. If true is specified, no DBSnapshot
is created. If false is specified, a DB snapshot is created before the DB
instance is deleted, using the value from `final_snapshot_identifier`. Default
is `false`.
:param pulumi.Input[str] snapshot_identifier: Specifies whether or not to create this
database from a snapshot. This correlates to the snapshot ID you'd find in the
RDS console, e.g: rds:production-2015-06-26-06-05.
:param pulumi.Input[str] status: The RDS instance status.
:param pulumi.Input[bool] storage_encrypted: Specifies whether the DB instance is
encrypted. Note that if you are creating a cross-region read replica this field
is ignored and you should instead declare `kms_key_id` with a valid ARN. The
default is `false` if not specified.
:param pulumi.Input[str] storage_type: One of "standard" (magnetic), "gp2" (general
purpose SSD), or "io1" (provisioned IOPS SSD). The default is "io1" if `iops` is
specified, "gp2" if not.
:param pulumi.Input[dict] tags: A map of tags to assign to the resource.
:param pulumi.Input[str] timezone: Time zone of the DB instance. `timezone` is currently
only supported by Microsoft SQL Server. The `timezone` can only be set on
creation. See [MSSQL User
Guide](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone)
for more information.
:param pulumi.Input[str] username: (Required unless a `snapshot_identifier` or `replicate_source_db`
is provided) Username for the master DB user.
:param pulumi.Input[list] vpc_security_group_ids: List of VPC security groups to
associate.
The **s3_import** object supports the following:
* `bucket_name` (`pulumi.Input[str]`) - The bucket name where your backup is stored
* `bucket_prefix` (`pulumi.Input[str]`) - Can be blank, but is the path to your backup
* `ingestionRole` (`pulumi.Input[str]`) - Role applied to load the data.
* `sourceEngine` (`pulumi.Input[str]`) - Source engine for the backup
* `sourceEngineVersion` (`pulumi.Input[str]`) - Version of the source engine used to make the backup
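Example -- an illustrative lookup of an already existing instance; the resource
name and DB identifier below are placeholders:

    import pulumi_aws as aws

    existing = aws.rds.Instance.get("existing-db", id="my-db-identifier")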
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["address"] = address
__props__["allocated_storage"] = allocated_storage
__props__["allow_major_version_upgrade"] = allow_major_version_upgrade
__props__["apply_immediately"] = apply_immediately
__props__["arn"] = arn
__props__["auto_minor_version_upgrade"] = auto_minor_version_upgrade
__props__["availability_zone"] = availability_zone
__props__["backup_retention_period"] = backup_retention_period
__props__["backup_window"] = backup_window
__props__["ca_cert_identifier"] = ca_cert_identifier
__props__["character_set_name"] = character_set_name
__props__["copy_tags_to_snapshot"] = copy_tags_to_snapshot
__props__["db_subnet_group_name"] = db_subnet_group_name
__props__["delete_automated_backups"] = delete_automated_backups
__props__["deletion_protection"] = deletion_protection
__props__["domain"] = domain
__props__["domain_iam_role_name"] = domain_iam_role_name
__props__["enabled_cloudwatch_logs_exports"] = enabled_cloudwatch_logs_exports
__props__["endpoint"] = endpoint
__props__["engine"] = engine
__props__["engine_version"] = engine_version
__props__["final_snapshot_identifier"] = final_snapshot_identifier
__props__["hosted_zone_id"] = hosted_zone_id
__props__["iam_database_authentication_enabled"] = iam_database_authentication_enabled
__props__["identifier"] = identifier
__props__["identifier_prefix"] = identifier_prefix
__props__["instance_class"] = instance_class
__props__["iops"] = iops
__props__["kms_key_id"] = kms_key_id
__props__["license_model"] = license_model
__props__["maintenance_window"] = maintenance_window
__props__["max_allocated_storage"] = max_allocated_storage
__props__["monitoring_interval"] = monitoring_interval
__props__["monitoring_role_arn"] = monitoring_role_arn
__props__["multi_az"] = multi_az
__props__["name"] = name
__props__["option_group_name"] = option_group_name
__props__["parameter_group_name"] = parameter_group_name
__props__["password"] = password
__props__["performance_insights_enabled"] = performance_insights_enabled
__props__["performance_insights_kms_key_id"] = performance_insights_kms_key_id
__props__["performance_insights_retention_period"] = performance_insights_retention_period
__props__["port"] = port
__props__["publicly_accessible"] = publicly_accessible
__props__["replicas"] = replicas
__props__["replicate_source_db"] = replicate_source_db
__props__["resource_id"] = resource_id
__props__["s3_import"] = s3_import
__props__["security_group_names"] = security_group_names
__props__["skip_final_snapshot"] = skip_final_snapshot
__props__["snapshot_identifier"] = snapshot_identifier
__props__["status"] = status
__props__["storage_encrypted"] = storage_encrypted
__props__["storage_type"] = storage_type
__props__["tags"] = tags
__props__["timezone"] = timezone
__props__["username"] = username
__props__["vpc_security_group_ids"] = vpc_security_group_ids
return Instance(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 68.217964
| 1,380
| 0.708771
|
6ca3f27854ff5aba9aa7dcdc3ba1c65b78e62959
| 9,880
|
py
|
Python
|
botenv/lib/python3.9/site-packages/deprecated/classic.py
|
0xtuytuy/unit-crypto-ski-week-poap-bot
|
9bab0a6013a29db9ce76311d4f6fa1d0922ac5c1
|
[
"MIT"
] | 169
|
2017-12-05T15:22:20.000Z
|
2022-03-08T03:24:56.000Z
|
botenv/lib/python3.9/site-packages/deprecated/classic.py
|
0xtuytuy/unit-crypto-ski-week-poap-bot
|
9bab0a6013a29db9ce76311d4f6fa1d0922ac5c1
|
[
"MIT"
] | 48
|
2018-06-21T22:39:37.000Z
|
2022-01-07T17:57:59.000Z
|
botenv/lib/python3.9/site-packages/deprecated/classic.py
|
0xtuytuy/unit-crypto-ski-week-poap-bot
|
9bab0a6013a29db9ce76311d4f6fa1d0922ac5c1
|
[
"MIT"
] | 23
|
2018-06-21T22:36:48.000Z
|
2021-12-22T19:31:18.000Z
|
# -*- coding: utf-8 -*-
"""
Classic deprecation warning
===========================
Classic ``@deprecated`` decorator to deprecate old python classes, functions or methods.
.. _The Warnings Filter: https://docs.python.org/3/library/warnings.html#the-warnings-filter
"""
import functools
import inspect
import platform
import warnings
import wrapt
try:
# If the C extension for wrapt was compiled and wrapt/_wrappers.pyd exists, then the
# stack level that should be passed to warnings.warn should be 2. However, if using
# a pure-Python wrapt, an extra stacklevel is required.
import wrapt._wrappers
_routine_stacklevel = 2
_class_stacklevel = 2
except ImportError:
_routine_stacklevel = 3
if platform.python_implementation() == "PyPy":
_class_stacklevel = 2
else:
_class_stacklevel = 3
string_types = (type(b''), type(u''))
class ClassicAdapter(wrapt.AdapterFactory):
"""
Classic adapter -- *for advanced usage only*
This adapter is used to get the deprecation message according to the wrapped object type:
class, function, standard method, static method, or class method.
This is the base class of the :class:`~deprecated.sphinx.SphinxAdapter` class
which is used to update the wrapped object docstring.
You can also inherit this class to change the deprecation message.
In the following example, we change the message into "The ... is deprecated.":
.. code-block:: python
import inspect
from deprecated.classic import ClassicAdapter
from deprecated.classic import deprecated
class MyClassicAdapter(ClassicAdapter):
def get_deprecated_msg(self, wrapped, instance):
if instance is None:
if inspect.isclass(wrapped):
fmt = "The class {name} is deprecated."
else:
fmt = "The function {name} is deprecated."
else:
if inspect.isclass(instance):
fmt = "The class method {name} is deprecated."
else:
fmt = "The method {name} is deprecated."
if self.reason:
fmt += " ({reason})"
if self.version:
fmt += " -- Deprecated since version {version}."
return fmt.format(name=wrapped.__name__,
reason=self.reason or "",
version=self.version or "")
Then, you can use your ``MyClassicAdapter`` class like this in your source code:
.. code-block:: python
@deprecated(reason="use another function", adapter_cls=MyClassicAdapter)
def some_old_function(x, y):
return x + y
"""
def __init__(self, reason="", version="", action=None, category=DeprecationWarning):
"""
Construct a wrapper adapter.
:type reason: str
:param reason:
Reason message which documents the deprecation in your library (can be omitted).
:type version: str
:param version:
Version of your project which deprecates this feature.
If you follow the `Semantic Versioning <https://semver.org/>`_,
the version number has the format "MAJOR.MINOR.PATCH".
:type action: str
:param action:
A warning filter used to activate or suppress the deprecation warning.
Can be one of "error", "ignore", "always", "default", "module", or "once".
If ``None`` or empty, the global filtering mechanism is used.
See: `The Warnings Filter`_ in the Python documentation.
:type category: type
:param category:
The warning category to use for the deprecation warning.
By default, the category class is :class:`~DeprecationWarning`,
you can inherit this class to define your own deprecation warning category.
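These arguments are normally passed through the ``deprecated`` decorator
rather than by constructing the adapter directly. A small illustrative
example (the function name and messages are placeholders):
.. code-block:: python
    from deprecated.classic import deprecated
    @deprecated(reason="use new_function() instead", version="1.3.0",
                action="once", category=FutureWarning)
    def old_function(x, y):
        return x + y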
"""
self.reason = reason or ""
self.version = version or ""
self.action = action
self.category = category
super(ClassicAdapter, self).__init__()
def get_deprecated_msg(self, wrapped, instance):
"""
Get the deprecation warning message for the user.
:param wrapped: Wrapped class or function.
:param instance: The object to which the wrapped function was bound when it was called.
:return: The warning message.
"""
if instance is None:
if inspect.isclass(wrapped):
fmt = "Call to deprecated class {name}."
else:
fmt = "Call to deprecated function (or staticmethod) {name}."
else:
if inspect.isclass(instance):
fmt = "Call to deprecated class method {name}."
else:
fmt = "Call to deprecated method {name}."
if self.reason:
fmt += " ({reason})"
if self.version:
fmt += " -- Deprecated since version {version}."
return fmt.format(name=wrapped.__name__, reason=self.reason or "", version=self.version or "")
def __call__(self, wrapped):
"""
Decorate your class or function.
:param wrapped: Wrapped class or function.
:return: the decorated class or function.
.. versionchanged:: 1.2.4
Don't pass arguments to :meth:`object.__new__` (other than *cls*).
.. versionchanged:: 1.2.8
The warning filter is not set if the *action* parameter is ``None`` or empty.
"""
if inspect.isclass(wrapped):
old_new1 = wrapped.__new__
def wrapped_cls(cls, *args, **kwargs):
msg = self.get_deprecated_msg(wrapped, None)
if self.action:
with warnings.catch_warnings():
warnings.simplefilter(self.action, self.category)
warnings.warn(msg, category=self.category, stacklevel=_class_stacklevel)
else:
warnings.warn(msg, category=self.category, stacklevel=_class_stacklevel)
if old_new1 is object.__new__:
return old_new1(cls)
# actually, we don't know the real signature of *old_new1*
return old_new1(cls, *args, **kwargs)
wrapped.__new__ = staticmethod(wrapped_cls)
return wrapped
def deprecated(*args, **kwargs):
"""
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
**Classic usage:**
To use this, decorate your deprecated function with **@deprecated** decorator:
.. code-block:: python
from deprecated import deprecated
@deprecated
def some_old_function(x, y):
return x + y
You can also decorate a class or a method:
.. code-block:: python
from deprecated import deprecated
class SomeClass(object):
@deprecated
def some_old_method(self, x, y):
return x + y
@deprecated
class SomeOldClass(object):
pass
You can give a *reason* message to help the developer to choose another function/class,
and a *version* number to specify the starting version number of the deprecation.
.. code-block:: python
from deprecated import deprecated
@deprecated(reason="use another function", version='1.2.0')
def some_old_function(x, y):
return x + y
The *category* keyword argument allows you to specify the deprecation warning class of your choice.
By default, :exc:`DeprecationWarning` is used but you can choose :exc:`FutureWarning`,
:exc:`PendingDeprecationWarning` or a custom subclass.
.. code-block:: python
from deprecated import deprecated
@deprecated(category=PendingDeprecationWarning)
def some_old_function(x, y):
return x + y
The *action* keyword argument allows you to locally change the warning filtering.
*action* can be one of "error", "ignore", "always", "default", "module", or "once".
If ``None``, empty or missing, the global filtering mechanism is used.
See: `The Warnings Filter`_ in the Python documentation.
.. code-block:: python
from deprecated import deprecated
@deprecated(action="error")
def some_old_function(x, y):
return x + y
"""
if args and isinstance(args[0], string_types):
kwargs['reason'] = args[0]
args = args[1:]
if args and not callable(args[0]):
raise TypeError(repr(type(args[0])))
if args:
action = kwargs.get('action')
category = kwargs.get('category', DeprecationWarning)
adapter_cls = kwargs.pop('adapter_cls', ClassicAdapter)
adapter = adapter_cls(**kwargs)
wrapped = args[0]
if inspect.isclass(wrapped):
wrapped = adapter(wrapped)
return wrapped
elif inspect.isroutine(wrapped):
@wrapt.decorator(adapter=adapter)
def wrapper_function(wrapped_, instance_, args_, kwargs_):
msg = adapter.get_deprecated_msg(wrapped_, instance_)
if action:
with warnings.catch_warnings():
warnings.simplefilter(action, category)
warnings.warn(msg, category=category, stacklevel=_routine_stacklevel)
else:
warnings.warn(msg, category=category, stacklevel=_routine_stacklevel)
return wrapped_(*args_, **kwargs_)
return wrapper_function(wrapped)
else:
raise TypeError(repr(type(wrapped)))
return functools.partial(deprecated, **kwargs)
| 33.720137
| 102
| 0.606781
|
b1f631fc212ea7f8f3f2da1212da43e3df4e7e92
| 2,219
|
py
|
Python
|
Examples/Tests/ElectrostaticSphereEB/PICMI_inputs_3d.py
|
hklion/WarpX
|
3c2d0ee2815ab1df21b9f78d899fe7b1a9651758
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-06-23T23:38:50.000Z
|
2021-06-23T23:38:50.000Z
|
Examples/Tests/ElectrostaticSphereEB/PICMI_inputs_3d.py
|
hklion/WarpX
|
3c2d0ee2815ab1df21b9f78d899fe7b1a9651758
|
[
"BSD-3-Clause-LBNL"
] | 106
|
2021-06-08T23:57:54.000Z
|
2022-03-08T00:36:46.000Z
|
Examples/Tests/ElectrostaticSphereEB/PICMI_inputs_3d.py
|
hklion/WarpX
|
3c2d0ee2815ab1df21b9f78d899fe7b1a9651758
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-06-21T18:50:43.000Z
|
2021-06-21T18:50:43.000Z
|
#!/usr/bin/env python3
from pywarpx import picmi
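# This test sets up a spherical embedded boundary held at V_embedded_boundary
# inside a cubic domain whose walls are held at V_domain_boundary, and solves
# for the resulting electrostatic potential with the multigrid solver.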
##########################
# physics parameters
##########################
V_domain_boundary = 0.0
V_embedded_boundary = 1.0
##########################
# numerics parameters
##########################
dt = 1e-6
# --- Number of time steps
max_steps = 1
# --- grid
nx = 64
ny = 64
nz = 64
xmin = -0.5
xmax = 0.5
ymin = -0.5
ymax = 0.5
zmin = -0.5
zmax = 0.5
##########################
# numerics components
##########################
grid = picmi.Cartesian3DGrid(
number_of_cells = [nx, ny, nz],
lower_bound = [xmin, ymin, zmin],
upper_bound = [xmax, ymax, zmax],
lower_boundary_conditions = ['dirichlet', 'dirichlet', 'dirichlet'],
upper_boundary_conditions = ['dirichlet', 'dirichlet', 'dirichlet'],
lower_boundary_conditions_particles = ['absorbing', 'absorbing', 'absorbing'],
upper_boundary_conditions_particles = ['absorbing', 'absorbing', 'absorbing'],
warpx_potential_lo_x = V_domain_boundary,
warpx_potential_hi_x = V_domain_boundary,
warpx_potential_lo_y = V_domain_boundary,
warpx_potential_hi_y = V_domain_boundary,
warpx_potential_lo_z = V_domain_boundary,
warpx_potential_hi_z = V_domain_boundary,
warpx_blocking_factor=8,
warpx_max_grid_size = 128
)
solver = picmi.ElectrostaticSolver(
grid=grid, method='Multigrid', required_precision=1e-7
)
embedded_boundary = picmi.EmbeddedBoundary(
implicit_function="-(x**2+y**2+z**2-radius**2)",
potential=V_embedded_boundary,
radius = 0.3
)
##########################
# diagnostics
##########################
field_diag = picmi.FieldDiagnostic(
name = 'diag1',
grid = grid,
period = 1,
data_list = ['Ex', 'Ey', 'Ez', 'phi', 'rho'],
write_dir = '.',
warpx_file_prefix = 'Python_ElectrostaticSphereEB_plt'
)
##########################
# simulation setup
##########################
sim = picmi.Simulation(
solver = solver,
time_step_size = dt,
max_steps = max_steps,
warpx_embedded_boundary=embedded_boundary,
warpx_field_gathering_algo='momentum-conserving'
)
sim.add_diagnostic(field_diag)
##########################
# simulation run
##########################
sim.step(max_steps)
| 21.970297
| 82
| 0.601622
|
c17e927a9f9a9b00b4a9f1ae20cdbe984b907f2f
| 7,266
|
py
|
Python
|
nets/off.py
|
reallongnguyen/Optical-Flow-Guided-Feature
|
f05081d76ced407c7c68013d49d57f0a20c71715
|
[
"MIT"
] | 19
|
2018-12-21T03:45:02.000Z
|
2022-02-11T18:44:55.000Z
|
nets/off.py
|
islingio/Optical-Flow-Guided-Feature
|
f05081d76ced407c7c68013d49d57f0a20c71715
|
[
"MIT"
] | 11
|
2020-09-25T19:44:20.000Z
|
2021-09-07T23:55:41.000Z
|
nets/off.py
|
islingio/Optical-Flow-Guided-Feature
|
f05081d76ced407c7c68013d49d57f0a20c71715
|
[
"MIT"
] | 7
|
2018-10-30T04:03:36.000Z
|
2020-06-16T00:09:21.000Z
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
from tensorflow.contrib.layers import l2_regularizer, batch_norm, conv2d
from nets import nets_factory
# tf.add_check_numerics_ops()
# slim = tf.contrib.slim
_NUM_CHANELS = 128
sobel_x = tf.constant([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]], tf.float32)
sobel_x_filter = tf.reshape(sobel_x, [3, 3, 1, 1])
sobel_y_filter = tf.transpose(sobel_x_filter, [1, 0, 2, 3])
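# Depthwise Sobel filtering: every feature channel is convolved separately with
# the 3x3 Sobel kernels above, producing horizontal (fx) and vertical (fy)
# spatial gradients that are re-concatenated along the channel axis.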
def sobel(feature):
with tf.variable_scope('sobel'):
channels = tf.unstack(feature, axis=3)
fx = []
fy = []
for channel in channels:
channel = tf.expand_dims(channel, axis=3)
filtered_x = tf.nn.conv2d(channel, sobel_x_filter, [1, 1, 1, 1], padding='SAME')
filtered_y = tf.nn.conv2d(channel, sobel_y_filter, [1, 1, 1, 1], padding='SAME')
fx.append(filtered_x)
fy.append(filtered_y)
return tf.concat(fx, axis=3), tf.concat(fy, axis=3)
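# Zero-pad a feature map symmetrically in the spatial dimensions until its
# width/height equal out_size; padding_and_concat below uses this to align
# feature maps of different scales before concatenating them channel-wise.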
def _padding(tensor, out_size):
t_width = tensor.get_shape()[1]
delta = tf.subtract(out_size, t_width)
pad_left = tf.floor_div(delta, 2)
pad_right = delta - pad_left
return tf.pad(
tensor,
[
[0, 0],
[pad_left, pad_right],
[pad_left, pad_right],
[0, 0]
],
'CONSTANT'
)
def padding_and_concat(list_feature, out_size):
padded_list = []
for item in list_feature:
padded = tf.cond(tf.equal(out_size, item.get_shape()[1]),
lambda: item,
lambda: _padding(item, out_size))
shape = item.get_shape()
padded.set_shape([shape[0], out_size, out_size, shape[3]])
padded_list.append(padded)
return tf.concat(padded_list, axis=3)
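# One OFF (Optical Flow guided Feature) unit: both frames' features are reduced
# to _NUM_CHANELS channels with 1x1 convolutions, then the temporal difference
# ft = f(t) - f(t + delta) and the Sobel gradients fx, fy of f(t) are computed
# and concatenated together with the feature map coming from the lower tier.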
def off_unit(feature_t0, feature_t1, lower_unit):
with tf.variable_scope('off_unit', values=[feature_t0, feature_t1]):
# feature_t0 = batch_norm(feature_t0)
# feature_t0 = tf.nn.relu(feature_t0)
feature_t0 = conv2d(feature_t0, _NUM_CHANELS, 1, padding='SAME',
# weights_initializer=tf.truncated_normal_initializer(0, 0.01),
# biases_initializer=tf.zeros_initializer,
# weights_regularizer=l2_regularizer(1e-3),
# biases_regularizer=l2_regularizer(0.0001),
# normalizer_fn=batch_norm,
scope='conv1x1_t0')
# feature_t1 = batch_norm(feature_t1)
# feature_t1 = tf.nn.relu(feature_t1)
feature_t1 = conv2d(feature_t1, _NUM_CHANELS, 1, padding='SAME',
# weights_initializer=tf.truncated_normal_initializer(0, 0.01),
# biases_initializer=tf.zeros_initializer,
# weights_regularizer=l2_regularizer(1e-3),
# biases_regularizer=l2_regularizer(0.0001),
# normalizer_fn=batch_norm,
scope='conv1x1_t1')
ft = tf.subtract(feature_t0, feature_t1)
fx, fy = sobel(feature_t0)
return tf.concat(
[
fx,
fy,
ft,
lower_unit
],
axis=3
)
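# Variant of off_unit for the first tier, where no lower-tier feature map
# exists yet, so only fx, fy and ft are concatenated.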
def off_unit_first(feature_t0, feature_t1):
with tf.variable_scope('off_unit_first', values=[feature_t0, feature_t1]):
# feature_t0 = batch_norm(feature_t0)
# feature_t0 = tf.nn.relu(feature_t0)
feature_t0 = conv2d(feature_t0, _NUM_CHANELS, 1, padding='SAME',
# weights_initializer=tf.truncated_normal_initializer(0, 0.01),
# biases_initializer=tf.zeros_initializer,
# weights_regularizer=l2_regularizer(1e-3),
# biases_regularizer=l2_regularizer(0.0001),
# normalizer_fn=batch_norm,
scope='conv1x1_t0')
# feature_t1 = batch_norm(feature_t1)
# feature_t1 = tf.nn.relu(feature_t1)
feature_t1 = conv2d(feature_t1, _NUM_CHANELS, 1, padding='SAME',
# weights_initializer=tf.truncated_normal_initializer(0, 0.01),
# biases_initializer=tf.zeros_initializer,
# weights_regularizer=l2_regularizer(1e-3),
# biases_regularizer=l2_regularizer(0.0001),
# normalizer_fn=batch_norm,
scope='conv1x1_t1')
ft = tf.subtract(feature_t0, feature_t1)
fx, fy = sobel(feature_t0)
return tf.concat(
[
fx,
fy,
ft
],
axis=3
)
def off_sub_network(list_feature_k0, list_feature_k1,
list_feature_k20, list_feature_k21,
list_feature_k40, list_feature_k41, num_classes=11, is_training=True):
'''
:param list_feature_k0: list of features with maximum size (k size) of segment t
:param list_feature_k1: list of features with maximum size (k size) of segment t + delta_t
:param list_feature_k20: list of features with k/2 size of segment t
:param list_feature_k21: list of features with k/2 size of segment t + delta_t
:param list_feature_k40: list of features with k/4 size of segment t
:param list_feature_k41: list of features with k/4 size of segment t + delta_t
:param num_classes: number of classes
:return: logits, endpoints
'''
resnet_v2_20 = nets_factory.get_network_fn(
'resnet_v2_26',
num_classes=num_classes,
weight_decay=0.001,
is_training=is_training)
endpoints = {}
with tf.variable_scope('OFFSubNetwork'):
with tf.variable_scope('Tier1'):
feature_k0 = padding_and_concat(list_feature_k0, 111)
feature_k1 = padding_and_concat(list_feature_k1, 111)
net = off_unit_first(feature_k0, feature_k1)
logits, tier1_endpoints = resnet_v2_20(net)
endpoints['tier1'] = tier1_endpoints['OFF/OFFSubNetwork/Tier1/resnet_v2_26/block4']
endpoints['logits_tier1'] = logits
with tf.variable_scope('Tier2'):
feature_k20 = padding_and_concat(list_feature_k20, 56)
feature_k21 = padding_and_concat(list_feature_k21, 56)
net = off_unit(feature_k20, feature_k21, lower_unit=endpoints['tier1'])
logits, tier2_endpoint = resnet_v2_20(net)
endpoints['tier2'] = tier2_endpoint['OFF/OFFSubNetwork/Tier2/resnet_v2_26/block4']
endpoints['logits_tier2'] = logits
with tf.variable_scope('Tier3'):
feature_k40 = padding_and_concat(list_feature_k40, 28)
feature_k41 = padding_and_concat(list_feature_k41, 28)
net = off_unit(feature_k40, feature_k41, lower_unit=endpoints['tier2'])
logits, tier3_endpoints = resnet_v2_20(net)
endpoints['logits_tier3'] = logits
endpoints['predictions'] = tier3_endpoints['predictions']
return logits, endpoints
| 39.064516
| 94
| 0.5951
|
b6e37840740823f0061d05f93aa8117c4bd85acf
| 1,271
|
py
|
Python
|
src/mpl3115a2_simpletest.py
|
GAVLab/ros_mpl3115a2
|
ba6408044187857766b325bbee6c323a9753d051
|
[
"BSD-3-Clause"
] | null | null | null |
src/mpl3115a2_simpletest.py
|
GAVLab/ros_mpl3115a2
|
ba6408044187857766b325bbee6c323a9753d051
|
[
"BSD-3-Clause"
] | null | null | null |
src/mpl3115a2_simpletest.py
|
GAVLab/ros_mpl3115a2
|
ba6408044187857766b325bbee6c323a9753d051
|
[
"BSD-3-Clause"
] | null | null | null |
# Simple demo of the MPL3115A2 sensor.
# Will read the pressure and temperature and print them out every second.
# Author: Tony DiCola
import time
import board
import busio
import adafruit_mpl3115a2
# Initialize the I2C bus.
i2c = busio.I2C(board.SCL, board.SDA)
# Initialize the MPL3115A2.
sensor = adafruit_mpl3115a2.MPL3115A2(i2c)
# Alternatively you can specify a different I2C address for the device:
#sensor = adafruit_mpl3115a2.MPL3115A2(i2c, address=0x10)
# You can configure the pressure at sealevel to get better altitude estimates.
# This value has to be looked up from your local weather forecast or meteorological
# reports. It will change day by day and even hour by hour with weather
# changes. Remember altitude estimation from barometric pressure is not exact!
# Set this to a value in pascals:
sensor.sealevel_pressure = 102250
# Main loop to read the sensor values and print them every second.
while True:
pressure = sensor.pressure
print('Pressure: {0:0.3f} pascals'.format(pressure))
altitude = sensor.altitude
print('Altitude: {0:0.3f} meters'.format(altitude))
temperature = sensor.temperature
print('Temperature: {0:0.3f} degrees Celsius'.format(temperature))
time.sleep(1.0)
| 35.305556
| 82
| 0.742722
|
c45d976077c0f0fbbcbb00951bec3b8c35e0bc17
| 25,359
|
py
|
Python
|
SimulationToolsUI/ResultsViewerUI.py
|
keim-hs-esslingen/ki4robofleet
|
1ff1df5d53ab80c0dcd7b84d87c2df0071e0bf9f
|
[
"MIT"
] | 4
|
2021-07-06T03:55:25.000Z
|
2022-03-27T17:05:59.000Z
|
SimulationToolsUI/ResultsViewerUI.py
|
keim-hs-esslingen/ki4robofleet
|
1ff1df5d53ab80c0dcd7b84d87c2df0071e0bf9f
|
[
"MIT"
] | null | null | null |
SimulationToolsUI/ResultsViewerUI.py
|
keim-hs-esslingen/ki4robofleet
|
1ff1df5d53ab80c0dcd7b84d87c2df0071e0bf9f
|
[
"MIT"
] | 1
|
2022-02-23T11:53:05.000Z
|
2022-02-23T11:53:05.000Z
|
#!/usr/bin/env python3
# =============================================================================
# Created at Hochschule Esslingen - University of Applied Sciences
# Department: Anwendungszentrum KEIM
# Contact: emanuel.reichsoellner@hs-esslingen.de
# Date: May 2021
# License: MIT License
# =============================================================================
# This Script creates a User Interface to read the Simulationresults from the
# session_{timestamp}.json Files and to display them in a well-arranged way.
# As a nice feature the color theme of the chart can be changed :)
# =============================================================================
# Defining our Constants for the Result- Calculation:
# we assume 15kWh/100km to calculate the power consumption for the vehicles
vehiclePowerConsumption = 0.15
# we assume 401 grams / kWh (German energy mix 2019) to calculate the emissions
germanEnergyMix2019 = 401
# we assume 32 cent / kWh (German Energy price) to calculate the energy cost
germanEnergyPrice2021 = 0.32
# The usual rule of thumb for the monthly fleet cost is carPrice / 1000 * 40;
# we assume 22500€ / 1000 * 40 = 900€ / month.
# With a daily usage of 10 h, this gives a fleet cost of about 3€ / h per vehicle.
vehicleCostPerHour = 3
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import (
QApplication,
QWidget,
QFileDialog,
QTextEdit,
QPushButton,
QLabel,
QVBoxLayout,
)
from PyQt5.QtChart import (
QChart,
QChartView,
QBarSet,
QPercentBarSeries,
QBarCategoryAxis,
QValueAxis,
QStackedBarSeries,
)
from PyQt5.QtGui import QPainter, QIcon, QPixmap
from PyQt5.QtCore import Qt
import os
import json
import csv
import pandas
from PyQt5.QtWidgets import *
from PyQt5.QtChart import *
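# Holds the raw counters of one simulation run (read from the session JSON
# file) and derives all per-request metrics from them in the constructor:
# distances, riding/waiting times, power consumption, emissions and cost.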
class Result:
def __init__(
self,
emptyKm,
passengerKm,
fullfilledRequests,
roboTaxis,
strategy,
pRidingTime,
pWaitingTime,
lookAheadTime,
latenessFactor,
realisticTime,
simulationExitTime,
cpuTime,
):
self.emptyKm = float(emptyKm)
self.passengerKm = float(passengerKm)
self.fullfilledRequests = int(fullfilledRequests)
self.roboTaxis = int(roboTaxis)
self.strategy = strategy
self.pRidingTime = int(pRidingTime)
self.pWaitingTime = int(pWaitingTime)
self.lookAheadTime = str(lookAheadTime)
self.latenessFactor = str(latenessFactor)
self.realisticTime = str(realisticTime)
self.simulationExitTime = simulationExitTime
self.emptyKmPerRequest = self.emptyKm / self.fullfilledRequests
self.passengerKmPerRequest = self.passengerKm / self.fullfilledRequests
# Waiting Time in Minutes:
self.waitingTimePerRequest = self.pWaitingTime / self.fullfilledRequests // 60
# Riding Time in Minutes:
self.ridingTimePerRequest = self.pRidingTime / self.fullfilledRequests // 60
self.totalDistancePerRequest = (
self.emptyKmPerRequest + self.passengerKmPerRequest
)
self.powerConsumptionPerRequest = (
self.totalDistancePerRequest * vehiclePowerConsumption
)
self.emissionsPerRequest = self.powerConsumptionPerRequest * germanEnergyMix2019
self.energyCostPerRequest = (
self.powerConsumptionPerRequest * germanEnergyPrice2021
)
# to calculate the fleet cost, we consider the amount of time which was necessary to
# fulfill all requests (simulationExitTime[s]) multiplied by the fleet size (roboTaxis) and cost per vehicle (vehicleCostPerHour/3600s)
self.fleetCostPerRequest = (
(self.simulationExitTime * self.roboTaxis * vehicleCostPerHour)
/ self.fullfilledRequests
/ 3600
)
self.totalCostPerRequest = self.energyCostPerRequest + self.fleetCostPerRequest
self.costPerPassengerKm = self.totalCostPerRequest / self.passengerKmPerRequest
self.cpuTime = cpuTime
class ResultManager:
def __init__(self):
self.resultList = []
self.resultIndex = 0
self.numberOfResults = 0
def addResult(self, result):
self.resultList.append(result)
self.numberOfResults += 1
def getFirstResult(self):
if self.numberOfResults > 0:
return self.resultList[0]
else:
return None
def getNextResult(self):
if self.numberOfResults > 0:
if self.resultIndex < self.numberOfResults - 1:
self.resultIndex += 1
else:
self.resultIndex = 0
return self.resultList[self.resultIndex]
else:
return None
def getPreviousResult(self):
if self.numberOfResults > 0:
if self.resultIndex > 0:
self.resultIndex -= 1
else:
self.resultIndex = self.numberOfResults - 1
return self.resultList[self.resultIndex]
else:
return None
class ResultsViewerWindow(QWidget):
def __init__(self):
super().__init__()
self.currentSessionJsonFile = ""
self.setWindowTitle("KI4ROBOFLEET Results Viewer v0.4")
self.setGeometry(100, 100, 1800, 1200)
self.uiInit()
self.show()
def uiInit(self):
self.resultManager = ResultManager()
self.gridLayout = QGridLayout(self)
ctrlElements = QVBoxLayout()
self.sessionFileLabel = QLabel("List of Session Files")
self.sessionFileLabel.move(95, 20)
ctrlElements.addWidget(self.sessionFileLabel)
self.sessionFileList = QListWidget()
self.sessionFileList.move(50, 40)
ctrlElements.addWidget(self.sessionFileList)
ctrlElements1 = QVBoxLayout()
buttonReadFile = QPushButton("Read File - Overview", self)
buttonReadFile.clicked.connect(self.readFile)
ctrlElements1.addWidget(buttonReadFile)
nextRunResults = QPushButton("view Details: next Run", self)
nextRunResults.clicked.connect(self.nextResult)
ctrlElements1.addWidget(nextRunResults)
previousRunResults = QPushButton("view Details: previous Run", self)
previousRunResults.clicked.connect(self.previousResult)
ctrlElements1.addWidget(previousRunResults)
self.styleComboBox = QComboBox(self)
self.styleComboBox.addItem("Style: Bright Theme")
self.styleComboBox.addItem("Style: Blueprint Theme")
self.styleComboBox.addItem("Style: Dark Theme")
self.styleComboBox.addItem("Style: Sandy Theme")
self.styleComboBox.addItem("Style: Blue Theme")
self.styleComboBox.addItem("Style: Contrast Theme")
self.styleComboBox.addItem("Style: Icy Theme")
self.styleComboBox.setCurrentText("Style: Dark Theme")
self.styleComboBox.currentIndexChanged.connect(self.changeStyle)
ctrlElements1.addWidget(self.styleComboBox)
buttonResultCsv = QPushButton("Create Result CSV and HTML", self)
buttonResultCsv.clicked.connect(self.createResultCsv)
ctrlElements1.addWidget(buttonResultCsv)
# self.gridLayout.addLayout(ctrlElements, 0, 0, 1, 3)
self.gridLayout.addLayout(ctrlElements, 0, 0)
self.gridLayout.addLayout(ctrlElements1, 1, 0)
self.chart1 = QChart()
self.chartView1 = QChartView(self.chart1)
self.chart1.setTheme(QChart.ChartThemeDark)
self.gridLayout.addWidget(self.chartView1, 0, 1)
self.chart2 = QChart()
self.chartView2 = QChartView(self.chart2)
self.chart2.setTheme(QChart.ChartThemeDark)
self.gridLayout.addWidget(self.chartView2, 0, 2)
textResultsLeft = QVBoxLayout()
textResultsRight = QVBoxLayout()
self.leafIcon = QLabel("")
self.leafIcon.setFixedSize(60, 60)
self.coinIcon = QLabel("")
self.coinIcon.setFixedSize(60, 60)
self.simulationRunLabel = QLabel("")
self.simulationRunLabel.setFont(QFont("Arial", 18))
self.numberOfRoboTaxis = QLabel("")
self.numberOfRoboTaxis.setFont(QFont("Arial", 18))
self.numberOfRequests = QLabel("")
self.numberOfRequests.setFont(QFont("Arial", 18))
self.simulationStrategyLabel = QLabel("")
self.simulationStrategyLabel.setFont(QFont("Arial", 18))
self.simulationTimeLabel = QLabel("")
self.simulationTimeLabel.setFont(QFont("Arial", 18))
self.totalDistance = QLabel("")
self.totalDistance.setFont(QFont("Arial", 18))
self.distanceWithPassenger = QLabel("")
self.distanceWithPassenger.setFont(QFont("Arial", 18))
self.powerConsumption = QLabel("")
self.powerConsumption.setFont(QFont("Arial", 18))
self.energyCost = QLabel("")
self.energyCost.setFont(QFont("Arial", 18))
self.fleetCost = QLabel("")
self.fleetCost.setFont(QFont("Arial", 18))
self.totalCost = QLabel("")
self.totalCost.setFont(QFont("Arial", 18))
self.costPerPassengerKm = QLabel("")
self.costPerPassengerKm.setFont(QFont("Arial", 18))
self.emissions = QLabel("")
self.emissions.setFont(QFont("Arial", 18))
self.customerWaitingTime = QLabel("")
self.customerWaitingTime.setFont(QFont("Arial", 18))
textResultsLeft.setAlignment(Qt.AlignCenter)
textResultsLeft.addWidget(self.simulationRunLabel)
textResultsLeft.addWidget(self.numberOfRoboTaxis)
textResultsLeft.addWidget(self.numberOfRequests)
textResultsLeft.addWidget(self.simulationStrategyLabel)
textResultsLeft.addWidget(self.simulationTimeLabel)
textResultsLeft.addWidget(self.leafIcon)
textResultsLeft.addWidget(self.powerConsumption)
textResultsLeft.addWidget(self.emissions)
textResultsRight.setAlignment(Qt.AlignCenter)
textResultsRight.addWidget(self.totalDistance)
textResultsRight.addWidget(self.distanceWithPassenger)
textResultsRight.addWidget(self.customerWaitingTime)
textResultsRight.addWidget(self.coinIcon)
textResultsRight.addWidget(self.energyCost)
textResultsRight.addWidget(self.fleetCost)
textResultsRight.addWidget(self.totalCost)
textResultsRight.addWidget(self.costPerPassengerKm)
self.gridLayout.addLayout(textResultsLeft, 1, 1)
self.gridLayout.addLayout(textResultsRight, 1, 2)
self.gridLayout.setColumnStretch(0, 1)
self.gridLayout.setColumnStretch(1, 2)
self.gridLayout.setColumnStretch(2, 2)
self.gridLayout.setRowStretch(0, 1)
self.gridLayout.setRowStretch(1, 1)
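        # collect every session JSON file below the working directory and offer it for selection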
for r, d, f in os.walk("./"):
for file in f:
if (".json" in file) and ("session" in file):
self.sessionFileList.addItem(file)
def createChart(self, results):
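        # rebuild both bar charts from the given results: driving distance per passenger (left)
        # and riding/waiting time per passenger (right)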
self.gridLayout.removeWidget(self.chartView1)
self.gridLayout.removeWidget(self.chartView2)
self.chart1 = QChart()
self.chartView1 = QChartView(self.chart1)
self.gridLayout.addWidget(self.chartView1, 0, 1)
self.chart2 = QChart()
self.chartView2 = QChartView(self.chart2)
self.gridLayout.addWidget(self.chartView2, 0, 2)
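        # bar sets: distance split into empty vs. passenger km, time split into riding vs. waiting minutes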
set1 = []
set1.append(QBarSet("empty"))
set1.append(QBarSet("with passenger"))
set2 = []
set2.append(QBarSet("RidingTime"))
set2.append(QBarSet("WaitingTime"))
numberOfRoboTaxis = []
maxKm = 0
maxTime = 0
        for result in results:
            numberOfRoboTaxis.append(str(result.roboTaxis))
            set1[0].append(result.emptyKmPerRequest)
            set1[1].append(result.passengerKmPerRequest)
            set2[0].append(result.ridingTimePerRequest)
            set2[1].append(result.waitingTimePerRequest)
            # keep track of the largest values so the Y axes can be scaled to fit all bars
            maxKm = max(maxKm, result.emptyKmPerRequest, result.passengerKmPerRequest)
            maxTime = max(maxTime, result.ridingTimePerRequest, result.waitingTimePerRequest)
series1 = QBarSeries()
series2 = QBarSeries()
for i in range(len(set1)):
series1.append(set1[i])
for i in range(len(set2)):
series2.append(set2[i])
axisX1 = QBarCategoryAxis()
axisX1.setTitleText("Number of RoboTaxis")
axisY1 = QValueAxis()
axisY1.setTitleText("Distance [km]")
axisY1.setRange(0, maxKm)
axisX1.append(numberOfRoboTaxis)
axisX2 = QBarCategoryAxis()
axisX2.setTitleText("Number of RoboTaxis")
axisY2 = QValueAxis()
axisY2.setTitleText("Time [min]")
axisY2.setRange(0, maxTime)
axisX2.append(numberOfRoboTaxis)
self.chart1.addSeries(series1)
self.chart1.setTitle("Driving Distance per Passenger")
self.chart1.setAnimationOptions(QChart.SeriesAnimations)
self.chart1.addAxis(axisX1, Qt.AlignBottom)
self.chart1.addAxis(axisY1, Qt.AlignLeft)
self.chartView1.chart().legend().setAlignment(Qt.AlignBottom)
self.chart2.addSeries(series2)
self.chart2.setTitle("Time per Passenger")
self.chart2.setAnimationOptions(QChart.SeriesAnimations)
self.chart2.addAxis(axisX2, Qt.AlignBottom)
self.chart2.addAxis(axisY2, Qt.AlignLeft)
self.chartView2.chart().legend().setAlignment(Qt.AlignBottom)
self.changeStyle()
def changeStyle(self):
        # map the combo box index to the corresponding Qt chart theme
        themes = [
            QChart.ChartThemeLight,
            QChart.ChartThemeBlueCerulean,
            QChart.ChartThemeDark,
            QChart.ChartThemeBrownSand,
            QChart.ChartThemeBlueNcs,
            QChart.ChartThemeHighContrast,
            QChart.ChartThemeBlueIcy,
        ]
        theme = themes[self.styleComboBox.currentIndex()]
        self.chart1.setTheme(theme)
        self.chart2.setTheme(theme)
self.chart1.setAnimationOptions(QChart.NoAnimation)
self.chart1.setAnimationOptions(QChart.GridAxisAnimations)
# self.chart1.setAnimationOptions(QChart.SeriesAnimations)
self.chart2.setAnimationOptions(QChart.NoAnimation)
self.chart2.setAnimationOptions(QChart.GridAxisAnimations)
def readFile(self):
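        # parse the selected session JSON, sort its runs by fleet size and load them into the ResultManager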
try:
selectedFile = self.sessionFileList.currentItem().text()
self.currentSessionJsonFile = selectedFile
print("View Results for", selectedFile)
try:
self.resultManager = ResultManager()
# find path to file
pathToFile = ""
for r, d, f in os.walk("./"):
for file in f:
if selectedFile in file:
pathToFile = r + "/" + file
with open(pathToFile) as sessionFile:
data = json.load(sessionFile)
# sort Results by num_of_vehicles
sortedResults = sorted(
data["results"],
key=lambda x: x["num_of_vehicles"],
reverse=False,
)
for result in sortedResults:
lookAheadTime = ""
if "look_ahead_time" in result:
lookAheadTime = str(result["look_ahead_time"])
latenessFactor = ""
if "lateness_factor" in result:
latenessFactor = str(result["lateness_factor"])
realisticTime = ""
if "realistic_time" in result:
realisticTime = str(result["realistic_time"])
self.resultManager.addResult(
Result(
result["d_empty (km)"],
result["d_pass (km)"],
result["fullfilled requests"],
result["num_of_vehicles"],
result["strategy"],
result["t_drive (sec)"],
result["t_wait (sec)"],
lookAheadTime,
latenessFactor,
realisticTime,
result["simulation_exit_time (sec)"],
result["cpuTime (sec)"],
)
)
self.clearFields()
self.createChart(self.resultManager.resultList)
self.resultManager.resultIndex = self.resultManager.numberOfResults - 1
            except Exception as error:
                print("Problem while creating the graph:", error)
        except AttributeError:  # no session file selected in the list
            print("Please select a Session File!")
def nextResult(self):
self.viewResults(self.resultManager.getNextResult())
def previousResult(self):
self.viewResults(self.resultManager.getPreviousResult())
def clearFields(self):
self.simulationRunLabel.setText("")
self.numberOfRoboTaxis.setText("")
self.simulationStrategyLabel.setText("")
self.simulationTimeLabel.setText("")
self.totalDistance.setText("")
self.powerConsumption.setText("")
self.emissions.setText("")
self.energyCost.setText("")
self.customerWaitingTime.setText("")
self.leafIcon.clear()
self.coinIcon.clear()
self.numberOfRequests.setText("")
self.distanceWithPassenger.setText("")
self.fleetCost.setText("")
self.totalCost.setText("")
self.costPerPassengerKm.setText("")
def viewResults(self, result):
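        # show the metrics of a single simulation run in the text labels and redraw the charts for that run only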
self.leafIcon.setPixmap(
QPixmap("./SimulationToolsUI/icons/leaf.png").scaledToWidth(60)
)
self.coinIcon.setPixmap(
QPixmap("./SimulationToolsUI/icons/coin.png").scaledToWidth(60)
)
self.simulationRunLabel.setText(
"Run "
+ str(self.resultManager.resultIndex + 1)
+ " of "
+ str(self.resultManager.numberOfResults)
)
self.numberOfRoboTaxis.setText("RoboTaxis : " + str(result.roboTaxis))
self.numberOfRequests.setText("Requests : " + str(result.fullfilledRequests))
strategy = "Strategy: " + result.strategy
if "look_ahead" in result.strategy:
strategy += " " + result.lookAheadTime + "s"
if "shared" in result.strategy:
strategy += " LF=" + result.latenessFactor + " RT=" + result.realisticTime
self.simulationStrategyLabel.setText(strategy)
self.simulationTimeLabel.setText(
"requiredTime : "
+ str(int(result.simulationExitTime) // 3600)
+ "h "
+ str((int(result.simulationExitTime) % 3600) // 60)
+ "m "
+ str(int(result.simulationExitTime) % 60)
+ "s"
)
self.totalDistance.setText(
"Total Driving Distance: "
+ str(float("{:.2f}".format(result.totalDistancePerRequest)))
+ " km"
)
self.distanceWithPassenger.setText(
"Distance With Passenger: "
+ str(float("{:.2f}".format(result.passengerKmPerRequest)))
+ " km"
)
self.powerConsumption.setText(
"Power Consumption: "
+ str(float("{:.2f}".format(result.powerConsumptionPerRequest)))
+ " kWh"
)
self.emissions.setText(
"Emissions: "
+ str(float("{:.2f}".format(result.emissionsPerRequest)))
+ " g CO2"
)
self.energyCost.setText(
"Energy Cost: "
+ str(float("{:.2f}".format(result.energyCostPerRequest)))
+ " €"
)
self.fleetCost.setText(
"Fleet Cost: "
+ str(float("{:.2f}".format(result.fleetCostPerRequest)))
+ " €"
)
self.totalCost.setText(
"Total Cost: "
+ str(float("{:.2f}".format(result.totalCostPerRequest)))
+ " €"
)
self.costPerPassengerKm.setText(
"Cost per km: "
+ str(float("{:.2f}".format(result.costPerPassengerKm)))
+ " €"
)
self.customerWaitingTime.setText(
"Waiting Time: " + str(int(result.waitingTimePerRequest)) + " minutes"
)
results = []
results.append(result)
self.createChart(results)
    def createResultCsv(self):
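        # write one CSV row per simulation run of the current session and render the same table as HTML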
cwd = os.getcwd()
if len(self.currentSessionJsonFile) > 0:
resultCsvFileName = (
cwd + "/Results/" + self.currentSessionJsonFile.split(".")[0] + ".csv"
)
print("Writing Result CSV File...")
with open(resultCsvFileName, mode="w") as csv_file:
fieldnames = [
"Requests",
"Strategy",
"reqiredTime[s]",
"RoboTaxis",
"avDrivingDist[km]",
"avPassengerDist[km]",
"avWaitingTime[min]",
"avPowerConsumption[kWh]",
"avEmissions[gCO2]",
"avEnergyCost[€]",
"avFleetCost[€]",
"avTotalCost[€]",
"avCostPerPassengerKm[€]",
"cpuTime[s]",
]
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for result in self.resultManager.resultList:
strategy = result.strategy
if "look_ahead" in result.strategy:
strategy += " " + result.lookAheadTime + "s"
if "shared" in result.strategy:
strategy += (
" LF="
+ result.latenessFactor
+ " RT="
+ result.realisticTime
)
writer.writerow(
{
"Requests": result.fullfilledRequests,
"Strategy": strategy,
"reqiredTime[s]": result.simulationExitTime,
"RoboTaxis": result.roboTaxis,
"avDrivingDist[km]": str(
float("{:.2f}".format(result.totalDistancePerRequest))
),
"avPassengerDist[km]": str(
float("{:.2f}".format(result.passengerKmPerRequest))
),
"avWaitingTime[min]": str(
int(result.waitingTimePerRequest)
),
"avPowerConsumption[kWh]": str(
float(
"{:.2f}".format(result.powerConsumptionPerRequest)
)
),
"avEmissions[gCO2]": str(
float("{:.2f}".format(result.emissionsPerRequest))
),
"avEnergyCost[€]": str(
float("{:.2f}".format(result.energyCostPerRequest))
),
"avFleetCost[€]": str(
float("{:.2f}".format(result.fleetCostPerRequest))
),
"avTotalCost[€]": str(
float("{:.2f}".format(result.totalCostPerRequest))
),
"avCostPerPassengerKm[€]": str(
float("{:.2f}".format(result.costPerPassengerKm))
),
"cpuTime[s]": str(int(result.cpuTime)),
}
)
print("READY!")
print(resultCsvFileName, " was created")
# create Result HTML file
resultHtmlFileName = resultCsvFileName.split(".")[0] + ".html"
pandas.read_csv(resultCsvFileName).to_html(resultHtmlFileName)
print(resultHtmlFileName, " was created")
else:
print("Please first select and read a Result Session JSON File!")
| 36.698987
| 143
| 0.575417
|
1b678beb2bb546521cfc3b27dd7b3f043f67331e
| 1,555
|
py
|
Python
|
sdm/tests/test_sdm.py
|
dougalsutherland/py-sdm
|
9773bf33438986a8312db82b1c9086c81fae5099
|
[
"BSD-3-Clause"
] | 21
|
2015-02-17T13:59:52.000Z
|
2019-05-08T09:25:52.000Z
|
sdm/tests/test_sdm.py
|
beartiantian/py-sdm
|
9773bf33438986a8312db82b1c9086c81fae5099
|
[
"BSD-3-Clause"
] | 2
|
2015-01-19T17:08:23.000Z
|
2015-01-22T19:19:06.000Z
|
sdm/tests/test_sdm.py
|
dougalsutherland/py-sdm
|
9773bf33438986a8312db82b1c9086c81fae5099
|
[
"BSD-3-Clause"
] | 5
|
2016-01-07T06:45:29.000Z
|
2018-05-21T15:50:05.000Z
|
from functools import partial
import os
import numpy as np
from sklearn.preprocessing import LabelEncoder
from .. import SDC, NuSDC, Features
data_dir = os.path.join(os.path.dirname(__file__), 'data')
################################################################################
# TODO: add *real* tests
def _check_acc(acc):
assert acc >= .85, "accuracy is only {}".format(acc)
def test_simple():
div_funcs = ['hellinger', 'kl', 'l2',
'renyi:0.7', 'renyi:0.9', 'renyi:0.99']
Ks = [3, 8]
for name in ['gaussian-2d-mean0-std1,2']: # , 'gaussian-20d-mean0-std1,2']:
feats = Features.load_from_hdf5(os.path.join(data_dir, name + '.h5'))
le = LabelEncoder()
y = le.fit_transform(feats.categories)
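        # yield one cross-validation accuracy check per (divergence, K, classifier, weighting) combination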
for div_func in div_funcs:
for K in Ks:
for cls in [SDC, NuSDC]:
for wts in [None, np.random.uniform(.7, 1.3, len(feats))]:
clf = cls(div_func=div_func, K=K, n_proc=1)
acc, preds = clf.crossvalidate(
feats, y, sample_weight=wts, num_folds=3)
fn = partial(_check_acc, acc)
fn.description = "CV: {} - {}, K={}".format(
name, div_func, K)
yield fn
################################################################################
if __name__ == '__main__':
import warnings
warnings.filterwarnings('error', module='sdm')
import nose
nose.main()
| 31.1
| 80
| 0.485531
|
ed255a93edf617817fa081784ecf413134128468
| 36
|
py
|
Python
|
moviealert/exceptions.py
|
ArionMiles/Diomedes
|
50e50bae65bc35901230ead01fe5c259db1344b0
|
[
"MIT"
] | 10
|
2019-03-08T12:34:21.000Z
|
2020-07-20T17:05:34.000Z
|
moviealert/exceptions.py
|
ArionMiles/Diomedes
|
50e50bae65bc35901230ead01fe5c259db1344b0
|
[
"MIT"
] | 12
|
2019-05-04T18:56:34.000Z
|
2022-02-10T07:33:50.000Z
|
moviealert/exceptions.py
|
ArionMiles/Diomedes
|
50e50bae65bc35901230ead01fe5c259db1344b0
|
[
"MIT"
] | 1
|
2019-04-12T16:00:15.000Z
|
2019-04-12T16:00:15.000Z
|
class BMSError(Exception):
pass
| 12
| 26
| 0.722222
|
b30dba06436353d8faf55cd5df68ed17d7e1dd52
| 201
|
py
|
Python
|
packs/consul/actions/parse_nodes.py
|
userlocalhost2000/st2contrib
|
1a5f759e76401743ed9023d298a3d767e3885db1
|
[
"Apache-2.0"
] | 164
|
2015-01-17T16:08:33.000Z
|
2021-08-03T02:34:07.000Z
|
packs/consul/actions/parse_nodes.py
|
userlocalhost2000/st2contrib
|
1a5f759e76401743ed9023d298a3d767e3885db1
|
[
"Apache-2.0"
] | 442
|
2015-01-01T11:19:01.000Z
|
2017-09-06T23:26:17.000Z
|
packs/consul/actions/parse_nodes.py
|
userlocalhost2000/st2contrib
|
1a5f759e76401743ed9023d298a3d767e3885db1
|
[
"Apache-2.0"
] | 202
|
2015-01-13T00:37:40.000Z
|
2020-11-07T11:30:10.000Z
|
from lib import action
class ConsulParseNodesAction(action.ConsulBaseAction):
def run(self, data):
nodes = []
# Loop through the keys, and return the needful
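        # (no parsing implemented here yet; the empty node list is returned unchanged)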
return nodes
| 22.333333
| 55
| 0.671642
|
22eafde8bf7201393d6bda5decdbd7215c7458f8
| 1,324
|
py
|
Python
|
main.py
|
fm85/IoTB_Raspi_MQTT-Publish
|
3290b0ea81d110950bca16984cc88d0d923a58fb
|
[
"MIT"
] | null | null | null |
main.py
|
fm85/IoTB_Raspi_MQTT-Publish
|
3290b0ea81d110950bca16984cc88d0d923a58fb
|
[
"MIT"
] | null | null | null |
main.py
|
fm85/IoTB_Raspi_MQTT-Publish
|
3290b0ea81d110950bca16984cc88d0d923a58fb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
------------------------------------------------------------------
Projekt: MQTT Client Beispiel
Beschreibung: Publiziert exemlarisch einen Sensorwert über MQTT
Abhängigkeiten: paho-mqtt (vorher mit pip3 installieren)
Institution: GBS
Verfasser: F.Reifler
Datum: 27.08.2020
------------------------------------------------------------------
"""
import paho.mqtt.client as mqtt
import Adafruit_DHT
import socket
import sys
import time
myClient = mqtt.Client()
mySensorType = 11
mySensorPin = 4
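# sensor type 11 corresponds to a DHT11; the data line is connected to GPIO pin 4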
def on_connect(client, userdata, flags, rc):
print("Connected with result code" + str(rc))
def initMQTTClient():
myClient.on_connect = on_connect
keepalive = 60
myClient.connect("172.20.1.31",1883,keepalive)
myClient.loop_start()
def getHostname():
return socket.gethostname()
def start():
try:
initMQTTClient()
topic = getHostname() + "/temperature"
print("Publishing with the following Topic:" + topic)
while(True):
time.sleep(2)
            humidity, temperature = Adafruit_DHT.read(mySensorType, mySensorPin)
            if temperature is None:
                # DHT sensors occasionally fail to deliver a reading; skip this cycle and retry
                continue
            message = temperature
            myClient.publish(topic, message)
except (KeyboardInterrupt, SystemExit):
print("Interrupted")
sys.exit(1)
start()
| 25.960784
| 80
| 0.602719
|
eb45a785c3c38d4811ae4891ef509566f4e61c25
| 9,008
|
py
|
Python
|
trax/supervised/training_test.py
|
YannickWehr/trax
|
67dda3b236339a7f6de803a3f84a9e92d0f0442c
|
[
"Apache-2.0"
] | null | null | null |
trax/supervised/training_test.py
|
YannickWehr/trax
|
67dda3b236339a7f6de803a3f84a9e92d0f0442c
|
[
"Apache-2.0"
] | null | null | null |
trax/supervised/training_test.py
|
YannickWehr/trax
|
67dda3b236339a7f6de803a3f84a9e92d0f0442c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for supervised training: core classes and flows."""
import os
import time
from absl.testing import absltest
from jax import test_util # pylint: disable=unused-import
from jax.config import config
import numpy as np
from trax import fastmath
from trax import layers as tl
from trax import optimizers
from trax import shapes
from trax import test_utils
from trax.supervised import training
class TrainingTest(absltest.TestCase):
def setUp(self):
super().setUp()
test_utils.ensure_flag('test_tmpdir')
def test_loop_no_eval_task(self):
"""Runs a training loop with no eval task(s)."""
model = tl.Serial(tl.Dense(1))
task = training.TrainTask(
_very_simple_data(), tl.L2Loss(), optimizers.SGD(.01))
training_session = training.Loop(model, [task])
# Loop should initialize and run successfully, even with no eval task.
training_session.run(n_steps=5)
def test_loop_no_eval_task_tfnp(self):
"""Runs a training loop with no eval task(s), TFNP backend."""
with fastmath.use_backend(fastmath.Backend.TFNP):
model = tl.Serial(tl.Dense(1))
task = training.TrainTask(
_very_simple_data(), tl.L2Loss(), optimizers.Adam(.01))
training_session = training.Loop(model, [task])
# Loop should initialize and run successfully, even with no eval task.
training_session.run(n_steps=5)
def test_train_dense_layer(self):
"""Trains a very simple network on a very simple task."""
model = tl.Serial(tl.Dense(1))
task = training.TrainTask(
_very_simple_data(), tl.L2Loss(), optimizers.SGD(.01))
eval_task = training.EvalTask(
_very_simple_data(), # deliberately re-using training data
[tl.L2Loss()],
metric_names=['SGD.L2Loss'])
training_session = training.Loop(model, [task], eval_tasks=[eval_task],
eval_at=lambda step_n: step_n % 2 == 0)
self.assertEqual(0, training_session.step)
training_session.run(n_steps=15)
self.assertEqual(15, training_session.step)
training_session.run(n_steps=5)
self.assertEqual(20, training_session.step)
def test_train_dense_layer_with_momentum(self):
"""Trains with an optimizer that has slots / requires initialization."""
model = tl.Serial(tl.Dense(1))
task = training.TrainTask(
_very_simple_data(), tl.L2Loss(), optimizers.Momentum(.01))
eval_task = training.EvalTask(
_very_simple_data(), # deliberately re-using training data
[tl.L2Loss()],
metric_names=['Momentum.L2Loss'])
training_session = training.Loop(model, [task], eval_tasks=[eval_task],
eval_at=lambda step_n: step_n % 2 == 0)
self.assertEqual(0, training_session.step)
training_session.run(n_steps=20)
self.assertEqual(20, training_session.step)
def test_train_dense_layer_evals(self):
"""Trains a very simple network on a very simple task, 2 epochs."""
model = tl.Serial(tl.Dense(1))
task = training.TrainTask(
_very_simple_data(), tl.L2Loss(), optimizers.SGD(.01))
eval_task = training.EvalTask(
_very_simple_data(), # deliberately re-using training data
[tl.L2Loss()])
training_session = training.Loop(model, [task], eval_tasks=[eval_task],
eval_at=lambda step_n: False)
self.assertEqual(0, training_session.step)
training_session.run(n_steps=10)
self.assertEqual(10, training_session.step)
training_session.run_evals()
self.assertEqual(10, training_session.step) # Unchanged
def test_summaries_are_written(self):
"""Training writes down metrics when writting is turned on."""
model = tl.Serial(tl.Dense(1))
task = training.TrainTask(
_very_simple_data(), tl.L2Loss(), optimizers.SGD(.01))
eval_task = training.EvalTask(
_very_simple_data(), # deliberately re-using training data
[tl.L2Loss()],
metric_names=['SGD.L2Loss'])
tmp_dir = self.create_tempdir().full_path
training_session = training.Loop(model, [task], eval_tasks=[eval_task],
eval_at=lambda step_n: step_n % 2 == 0,
output_dir=tmp_dir)
expected_train_metric_dir = os.path.join(tmp_dir, 'train')
expected_eval_metric_dir = os.path.join(tmp_dir, 'eval')
for directory in [expected_train_metric_dir, expected_eval_metric_dir]:
self.assertFalse(
os.path.isdir(directory), 'Failed for directory %s.' % directory)
training_session.run(n_steps=15)
time.sleep(1) # wait for the files to be closed
for directory in [expected_train_metric_dir, expected_eval_metric_dir]:
self.assertTrue(
os.path.isdir(directory), 'Failed for directory %s.' % directory)
self.assertEqual(
1, _count_files(directory), 'Failed for directory %s.' % directory)
training_session.run(n_steps=5)
time.sleep(1) # wait for the files to be closed
for directory in [expected_train_metric_dir, expected_eval_metric_dir]:
self.assertEqual(
2, _count_files(directory), 'Failed for directory %s.' % directory)
def test_restores_step(self):
"""Training restores step from directory where it saved it."""
model = tl.Serial(tl.Dense(1))
task = training.TrainTask(
_very_simple_data(), tl.L2Loss(), optimizers.SGD(.01))
tmp_dir = self.create_tempdir().full_path
loop = training.Loop(model, [task],
checkpoint_at=lambda step_n: step_n % 2 == 0,
output_dir=tmp_dir)
loop.run(4)
loop2 = training.Loop(model, [task], output_dir=tmp_dir)
self.assertEqual(4, loop2.step)
def test_trains_on_two_tasks(self):
"""Trains a very simple network on two very simple tasks."""
model = tl.Serial(tl.Dense(3), tl.Dense(1))
task = training.TrainTask(
_very_simple_data(),
tl.L2Loss(),
optimizers.SGD(.01)
)
eval_task = training.EvalTask(
_very_simple_data(), # deliberately re-using training data
[tl.L2Loss()],
)
training_session = training.Loop(
model,
tasks=(task, task),
eval_tasks=(eval_task, eval_task),
which_task=lambda step_n: step_n % 2,
)
self.assertEqual(0, training_session.step)
training_session.run(n_steps=15)
self.assertEqual(15, training_session.step)
training_session.run(n_steps=5)
self.assertEqual(20, training_session.step)
def test_can_predict_with_trained_model(self):
model = tl.Serial(tl.Dense(3), tl.Branch(tl.Dense(1), tl.Dense(2)))
train_tasks, eval_tasks = [], []
for output_dim in [1, 2]:
# The head we select from the model: 0 for output_dim 1 and 1 for 2.
head_index = output_dim - 1
train_tasks.append(training.TrainTask(
_very_simple_data(output_dim),
tl.Serial(tl.Select([head_index], n_in=2), tl.L2Loss()),
optimizers.SGD(.01)
))
eval_tasks.append(training.EvalTask(
_very_simple_data(output_dim), # deliberately re-use training data
[tl.Serial(tl.Select([head_index], n_in=2), tl.L2Loss())]
))
tmp_dir = self.create_tempdir().full_path
training_session = training.Loop(
model,
tasks=train_tasks,
eval_tasks=eval_tasks,
checkpoint_at=lambda step_n: step_n == 1,
output_dir=tmp_dir,
which_task=lambda step_n: step_n % 2,
)
training_session.run(n_steps=2)
trained_model = training_session.eval_model
inp = next(_very_simple_data())[0]
out = trained_model(inp)
self.assertEqual(
shapes.signature(out),
(shapes.ShapeDtype((8, 1)), shapes.ShapeDtype((8, 2))),
)
def _very_simple_data(output_dim=1):
""""Returns stream of labeled data that maps small integers to constant pi."""
inputs_batch = np.arange(8).reshape((8, 1)) # 8 items per batch
targets_batch = np.pi * np.ones((8, output_dim))
labeled_batch = (inputs_batch, targets_batch, np.ones_like(targets_batch))
while True:
yield labeled_batch
def _count_files(path):
"""Returns number of files in a given directory."""
return len([filename for filename in os.listdir(path)
if os.path.isfile(os.path.join(path, filename))])
if __name__ == '__main__':
config.config_with_absl()
absltest.main()
| 39.336245
| 80
| 0.676399
|
63a383e7cbab3802c707edbaff1f31e38b9ee4b5
| 407
|
py
|
Python
|
src/imageclassifier/wsgi.py
|
ootsutsukee/Adv-Pro-Python2-final-project
|
50f0c8396d114be857f721918fa0013d2f15ff2c
|
[
"MIT"
] | null | null | null |
src/imageclassifier/wsgi.py
|
ootsutsukee/Adv-Pro-Python2-final-project
|
50f0c8396d114be857f721918fa0013d2f15ff2c
|
[
"MIT"
] | null | null | null |
src/imageclassifier/wsgi.py
|
ootsutsukee/Adv-Pro-Python2-final-project
|
50f0c8396d114be857f721918fa0013d2f15ff2c
|
[
"MIT"
] | 1
|
2022-02-26T17:50:12.000Z
|
2022-02-26T17:50:12.000Z
|
"""
WSGI config for imageclassifier project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'imageclassifier.settings')
application = get_wsgi_application()
| 23.941176
| 78
| 0.793612
|