text stringlengths 4 1.02M | meta dict |
|---|---|
"""
Python script create_data_libraries
Created by Anne Pajon under user 'pajon01' on 14/03/2016
Install bioblend
$ virtualenv venv
$ source venv/bin/activate
$ pip install bioblend
$ python create_data_libraries.py
"""
from bioblend import galaxy
import os
gi = galaxy.GalaxyInstance('http://52.55.43.141/', '3b8a7dffd8de121e8667aedd859b8dee')
lc = galaxy.libraries.LibraryClient(gi)
# Create training library every time from scratch
TRAINING_LIB_NAME = 'GalaxyCam Training'
training_lib = lc.get_libraries(name=TRAINING_LIB_NAME)
# Delete library
for l in training_lib:
lc.delete_library(library_id=l['id'])
# create library
training_lib = lc.create_library(name=TRAINING_LIB_NAME, description='Data Libraries for Galaxy Training course')
print training_lib
# Create training library folders and sub-folders
FOLDERS = ['data_visualisation', 'getting_started', 'interval_operations', 'loading_data', 'workflows']
for f in FOLDERS:
print f
training_folder = lc.create_folder(library_id=training_lib['id'], folder_name=f)
print training_folder
for filename in os.listdir(os.path.join('data_libraries', f)):
if os.path.isfile(os.path.join('data_libraries', f, filename)):
print filename
lc.upload_file_from_local_path(library_id=training_lib['id'], file_local_path=os.path.join('data_libraries', f, filename), folder_id=training_folder[0]['id'], dbkey='hg19')
for path, subdirs, files in os.walk(os.path.join('data_libraries', f)):
for subf in subdirs:
print subf
training_subfolder = lc.create_folder(library_id=training_lib['id'], base_folder_id=training_folder[0]['id'], folder_name=subf)
print training_subfolder
for filename in os.listdir(os.path.join('data_libraries', f, subf)):
if os.path.isfile(os.path.join('data_libraries', f, subf, filename)):
print filename
lc.upload_file_from_local_path(library_id=training_lib['id'], file_local_path=os.path.join('data_libraries', f, subf, filename), folder_id=training_subfolder[0]['id'], dbkey='hg19')
| {
"content_hash": "91d9f353a0b0eacb22d6bd9f295d0dcb",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 201,
"avg_line_length": 41.745098039215684,
"alnum_prop": 0.6965711601690935,
"repo_name": "galaxycam/galaxy-intro",
"id": "fef6e8b3e298685859403d98fc440e8db40b6bd7",
"size": "2129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "create_data_libraries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2129"
}
],
"symlink_target": ""
} |
__author__ = 'marvinsmith'
# Python Libraries
import curses, logging
# --------------------------------- #
# - Base Window Type - #
# --------------------------------- #
class Base_Window_Type(object):
    """Common base for top-level curses windows.

    Keeps a window title, the curses screen to render on, and a cursor
    position expressed as (current_field, sub_fields[current_field]),
    where sub_field_range[i] is the last valid sub-cursor of field i.
    """

    # Window title text
    window_title = ''
    # Curses screen the window renders onto
    screen = None
    # Index of the field the cursor is currently in
    current_field = 0
    # Per-field sub-cursor positions and their inclusive upper bounds
    sub_fields = [0]
    sub_field_range = [0]

    def __init__(self, title=None, screen=None, current_field=0):
        """Construct the window; omitted arguments keep the class defaults."""
        if title is not None:
            self.window_title = title
        if screen is not None:
            self.screen = screen
        self.current_field = current_field
        # Each instance gets its own cursor state.
        self.sub_fields = [0]
        self.sub_field_range = [0]

    def Render_Header(self):
        """Draw the title line with a full-width separator underneath."""
        self.screen.addstr(0, 0, self.window_title)
        self.screen.addstr(1, 0, '-' * (curses.COLS - 1))

    def Increment_Active_Field(self):
        """Move the cursor one sub-field forward, wrapping across fields."""
        fld = self.current_field
        self.sub_fields[fld] += 1
        if self.sub_fields[fld] <= self.sub_field_range[fld]:
            return
        # Ran past the end of this field: reset it and wrap to the next one.
        self.sub_fields[fld] = 0
        fld = (fld + 1) % len(self.sub_fields)
        self.sub_fields[fld] = 0
        self.current_field = fld

    def Decrement_Active_Field(self):
        """Move the cursor one sub-field backward, wrapping across fields."""
        fld = self.current_field
        self.sub_fields[fld] -= 1
        if self.sub_fields[fld] >= 0:
            return
        # Ran past the start of this field: wrap to the end of the previous.
        fld = (fld - 1) % len(self.sub_fields)
        self.sub_fields[fld] = self.sub_field_range[fld]
        self.current_field = fld
# ---------------------------------- #
# - Base Sub-Window Type - #
# ---------------------------------- #
class Base_Sub_Window_Type(object):
    """Common base for sub-windows rendered inside a parent curses screen."""

    # Window title text
    window_title = ''
    # Curses screen the sub-window renders onto
    screen = None
    # Cursor position: active field plus per-field sub-cursor state
    current_field = 0
    sub_fields = [0]
    sub_field_ranges = [0]
    # Per-field character cursor offsets used when highlighting
    cursors = []

    def __init__(self, screen, title=None):
        """Construct the sub-window.

        Arguments:
            screen - curses screen (or window) to render onto.
            title  - optional window title.
        """
        # BUG FIX: this previously assigned to 'self.title', leaving the
        # 'window_title' attribute (the one the class declares and that
        # Base_Window_Type uses) forever at its class default.
        if title is not None:
            self.window_title = title
        self.screen = screen

    def Render_Line(self, field, data, row, col, highlight, color_set=(0, 1)):
        """Render one 'label + value' line, optionally highlighted.

        Arguments:
            field     - label text; nothing is drawn when empty.
            data      - value text drawn right after the label.
            row, col  - screen position of the label.
            highlight - when True, draw 'data' with color_set[1] and
                        underline the character under the active cursor.
            color_set - pair of curses color-pair indices (normal, highlight).
                        NOTE: default changed from a mutable list to an
                        equivalent tuple; indexing behavior is identical.
        """
        if len(field) <= 0:
            return
        # Print the label.
        self.screen.addstr(row, col, field)
        # Pick the color pair for the value.
        cpair = curses.color_pair(color_set[0])
        if highlight:
            cpair = curses.color_pair(color_set[1])
        # The value starts right after the label.
        col_offset = col + len(field)
        # Don't print if the string is null.
        if len(data) <= 0:
            return
        self.screen.addstr(row, col_offset, data, cpair)
        # Underline the character the cursor currently sits on.
        if highlight:
            cpos = self.cursors[self.current_field]
            self.screen.addch(row, col_offset + cpos, data[cpos], cpair | curses.A_UNDERLINE)
| {
"content_hash": "9a6a3661488654b5f823679339a0286e",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 94,
"avg_line_length": 27.512345679012345,
"alnum_prop": 0.46017500560915414,
"repo_name": "marvins/LLNMS",
"id": "b6f875cb56f98710910fef4e9c65d11b0ca498d6",
"size": "4457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/core/python/llnms/viewer/ui/UI_Window_Base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1455677"
},
{
"name": "CMake",
"bytes": "7500"
},
{
"name": "PowerShell",
"bytes": "24325"
},
{
"name": "Python",
"bytes": "200978"
},
{
"name": "Shell",
"bytes": "252921"
}
],
"symlink_target": ""
} |
import os.path
import sublime
import sublime_plugin
PREF = "Preferences"
PREF_EXT = ".sublime-settings"
PREF_USER = PREF + PREF_EXT
PREF_SKIN = "Skins" + PREF_EXT
def decode_resource(name):
    """Load and decode a Sublime Text resource.

    Arguments:
        name - Name of the resource file to load.

    Returns:
        A dict with the decoded resource content. The dict is empty when
        loading or decoding fails; the failure is reported via the status
        bar and the console.
    """
    try:
        content = sublime.load_resource(name)
        return sublime.decode_value(content) or {}
    except Exception as error:
        message = "Skins: loading %s failed with %s" % (name, error)
        sublime.status_message(message)
        print(message)
        return {}
def validate_skin(skin_data, fallback_theme=None, fallback_colors=None):
    """Check skin integrity and return the boolean result.

    For a skin to be valid at least 'color_scheme' or 'theme' must exist.
    If one of both values is invalid, it may be replaced with a fallback
    value. Otherwise SublimeText's behavior when loading the skin is
    unpredictable.

    SublimeLinter automatically creates and applies patched color schemes if
    they don't contain linter icon scopes. To ensure not to break this
    feature this function ensures not to apply such a hacked color scheme
    directly so SublimeLinter can do its job correctly.

    Arguments:
        skin_data (dict):
            JSON object with all settings to apply for the skin; may be
            None or lack the "Preferences" key, in which case the skin
            is simply reported invalid.
        fallback_theme (string):
            A valid theme name to inject into skin_data, if skin_data does
            not contain a valid one.
        fallback_colors (string):
            A valid color_scheme path to inject into skin_data, if skin_data
            does not contain a valid one.

    Returns:
        Truthy if the skin can safely be applied, falsy otherwise.
    """
    # BUG FIX: callers pass dict.get(...) results and raw decoded JSON here,
    # so skin_data may be None or miss the "Preferences" key; the original
    # crashed with TypeError/KeyError instead of reporting an invalid skin.
    if not skin_data or PREF not in skin_data:
        return False
    # check theme file
    theme_name = skin_data[PREF].get("theme")
    theme_ok = theme_name and sublime.find_resources(theme_name)
    # check color scheme
    color_scheme_ok = False
    color_scheme_name = skin_data[PREF].get("color_scheme")
    if color_scheme_name:
        path, tail = os.path.split(color_scheme_name)
        # Strip the suffix SublimeLinter appends to its patched schemes.
        name = tail.replace(" (SL)", "")
        color_schemes = sublime.find_resources(name)
        if color_schemes:
            # Try to find the exact path from *.skins file
            resource_path = "/".join((path, name))
            for found in color_schemes:
                if found == resource_path:
                    color_scheme_ok = True
                    break
            # Use the first found color scheme which matches 'name'
            if not color_scheme_ok:
                skin_data[PREF]["color_scheme"] = color_schemes[0]
                color_scheme_ok = True
    valid = theme_ok or color_scheme_ok
    if valid:
        # Inject fallbacks so the skin never applies a half-valid pair.
        if fallback_theme and not theme_ok:
            skin_data[PREF]["theme"] = fallback_theme
        if fallback_colors and not color_scheme_ok:
            skin_data[PREF]["color_scheme"] = fallback_colors
    return valid
def load_user_skins():
    """Open the 'Saved Skins.skins' and read all valid skins from it."""
    raw_skins = decode_resource("Packages/User/Saved Skins.skins")
    return {name: data for name, data in raw_skins.items()
            if validate_skin(data)}
def save_user_skins(skins):
    """Save the skins to the 'Saved Skins.skins'."""
    target = os.path.join(
        sublime.packages_path(), "User", "Saved Skins.skins")
    with open(target, "w", encoding="utf-8") as handle:
        handle.write(sublime.encode_value(skins, True))
class SetSkinCommand(sublime_plugin.WindowCommand):
    """Implements the 'set_skin' command."""

    # A sublime.Settings object of the global Sublime Text settings
    prefs = None
    # The last selected row index - used to debounce the search so we
    # aren't applying a new theme with every keypress
    last_selected = -1

    def run(self, package=None, name=None):
        """Apply all visual settings stored in a skin.

        If 'set_skin' is called with both args 'package' and 'name',
        the provided information will be used to directly switch to
        the desired skin.

            sublime.run_command("set_skin", {
                "package": "User", "name": "Preset 1"})

        If 'package' is a string but name is not, a quick panel with all
        skins provided by the package is displayed.

        If at least one of the args is not a string, a quick panel with all
        available skins is displayed.

            sublime.run_command("set_skin")

        Arguments:
            package (string): name of the package providing the skin or (User)
            name (string): name of the skin in the <skins>.skins file
        """
        if not self.prefs:
            self.prefs = sublime.load_settings(PREF_USER)
        if isinstance(package, str):
            if isinstance(name, str):
                # directly apply new skin
                for skins_file in sublime.find_resources("*.skins"):
                    if package in skins_file:
                        skin = decode_resource(skins_file).get(name)
                        if validate_skin(skin):
                            self.set_skin(package, name, skin)
            else:
                # show only skins provided by the package
                self.show_quick_panel(filter=package)
        else:
            # prepare and show quick panel asynchronous
            self.show_quick_panel()

    def show_quick_panel(self, filter=None):
        """Display a quick panel with all available skins.

        Arguments:
            filter (string): if not None, list only skins of this package.
        """
        initial_color = self.prefs.get("color_scheme")
        initial_theme = self.prefs.get("theme")
        initial_skin = self.prefs.get("skin")
        initial_selected = -1
        # a dictionary with all preferences to restore on abort
        initial_prefs = {}
        # the icon to display next to the skin name
        icon = "💦 "
        # the package and skin name to display in the quick panel
        items = []
        # the skin objects with all settings
        skins = []
        # Create the lists of all available skins.
        for skins_file in sublime.find_resources("*.skins"):
            package = skins_file.split("/", 2)[1]
            if filter and filter != package:
                continue
            for name, skin in decode_resource(skins_file).items():
                if validate_skin(skin, initial_theme, initial_color):
                    if initial_skin == "/".join((package, name)):
                        initial_selected = len(items)
                    items.append([icon + name, package])
                    skins.append(skin)

        def on_done(index):
            """Apply selected skin if user pressed enter or revert changes.

            Arguments:
                index (int): Index of the selected skin if user pressed ENTER
                    or -1 if user aborted by pressing ESC.
            """
            if index == -1:
                # Restore everything the live preview may have changed.
                for key, val in initial_prefs.items():
                    if val:
                        self.prefs.set(key, val)
                    else:
                        self.prefs.erase(key)
                sublime.save_settings(PREF_USER)
                return
            name, package = items[index]
            # BUG FIX: was name.strip(icon); str.strip() treats its argument
            # as a SET of characters and removes them from both ends, so a
            # skin name starting/ending with a space was mangled. Slice off
            # the fixed-length icon prefix instead.
            self.set_skin(package, name[len(icon):], skins[index])

        def on_highlight(index):
            """Temporarily apply new skin, if quick panel selection changed.

            Arguments:
                index (int): Index of the highlighted skin.
            """
            if index == -1:
                return
            self.last_selected = index

            def preview_skin():
                # The selected row has changed since the timeout was created.
                if index != self.last_selected:
                    return
                for key, val in skins[index][PREF].items():
                    # backup settings before changing the first time
                    if key not in initial_prefs:
                        initial_prefs[key] = self.prefs.get(key)
                    if val:
                        self.prefs.set(key, val)
                    else:
                        self.prefs.erase(key)

            # start timer to delay the preview a little bit
            sublime.set_timeout_async(preview_skin, 250)

        self.window.show_quick_panel(
            items=items, selected_index=initial_selected,
            flags=sublime.KEEP_OPEN_ON_FOCUS_LOST,
            on_select=on_done, on_highlight=on_highlight)

    def set_skin(self, package, name, skin):
        """Apply all skin settings.

        Arguments:
            package (string): name of the package providing the skin or (User)
            name (string): name of the skin in the <skins>.skins file
            skin (dict): all settings to apply
        """
        self.prefs.set("skin", "/".join((package, name)))
        for pkg_name, pkg_prefs in skin.items():
            try:
                pkgs = sublime.load_settings(pkg_name + PREF_EXT)
                for key, val in pkg_prefs.items():
                    if isinstance(val, dict):
                        # Merge dict values into the existing setting.
                        # BUG FIX: default to {} -- pkgs.get(key) returned
                        # None for a missing key and None.update() aborted
                        # the whole package via the except below.
                        new = pkgs.get(key, {})
                        new.update(val)
                        val = new
                    if val:
                        pkgs.set(key, val)
                    else:
                        pkgs.erase(key)
                sublime.save_settings(pkg_name + PREF_EXT)
            except Exception:
                # Best effort: one broken entry must not break the skin.
                pass
class DeleteUserSkinCommand(sublime_plugin.WindowCommand):
    """Implements the 'delete_user_skin' command."""

    def is_visible(self):
        """Show command only if user skins exist."""
        return any(
            validate_skin(data) for data in decode_resource(
                "Packages/User/Saved Skins.skins").values())

    def run(self, name=None):
        """Delete a user defined skin or show quick panel to select one.

        Arguments:
            name (string): The name of the skin to delete.
        """
        skins = load_user_skins()
        if not skins:
            return

        def delete_skin(skin):
            """Delete the skin from 'Saved Skins.skins' file."""
            if skin not in skins:
                sublime.status_message("Skin not deleted!")
                return
            del skins[skin]
            save_user_skins(skins)
            sublime.status_message("Skin %s deleted!" % skin)

        if name:
            return delete_skin(name)
        # the icon to display next to the skin name
        icon = "🚮 "
        # built quick panel items
        items = [[
            icon + skin,
            "Delete existing skin."
        ] for skin in sorted(skins.keys())]

        def on_done(index):
            """A quick panel item was selected."""
            if index >= 0:
                # BUG FIX: was .lstrip(icon); str.lstrip() strips a character
                # SET and could also eat leading characters of the skin name
                # itself (e.g. a leading space), making the lookup fail.
                # Slice off the fixed-length icon prefix instead.
                delete_skin(items[index][0][len(icon):])

        # display a quick panel with all user skins
        self.window.show_quick_panel(
            items=items, on_select=on_done,
            flags=sublime.KEEP_OPEN_ON_FOCUS_LOST)
class SaveUserSkinCommand(sublime_plugin.WindowCommand):
    """Implements the 'save_user_skin' command."""

    def run(self, name=None):
        """Save visual settings as user defined skin.

        If the command is called without arguments, it shows an input panel
        to ask the user for the desired name to save the skin as.

            sublime.run_command("save_user_skin")

        The command can be called to save the current skin
        with a predefined name:

            sublime.run_command("save_user_skin", {"name": "Preset 1"})

        Arguments:
            name (string): If not None this names the skin to save the
                current visual settings as.
        """
        skins = load_user_skins()

        def save_skin(name):
            """Save the skin with provided name."""
            # Compose the new skin by loading all settings from all existing
            # <pkg_name>.sublime-settings files defined in <template>.
            template = sublime.load_settings(PREF_SKIN).get("skin-template")
            new_skin = {}
            for pkg_name, css in template.items():
                val = self.transform(decode_resource(
                    "Packages/User/%s.sublime-settings" % pkg_name), css)
                if val:
                    new_skin[pkg_name] = val
            # Check whether the minimum requirements are met.
            if not validate_skin(new_skin):
                sublime.status_message("Invalid skin %s not saved!" % name)
                return
            # Save the skin.
            skins[name] = new_skin
            save_user_skins(skins)
            sublime.status_message("Saved skin %s!" % name)

        if name:
            return save_skin(name)
        # the icon to display next to the skin name
        icon = "🔃 "
        # built quick panel items
        items = [[
            "💾 Save as new skin ...",
            "Enter the name in the following input panel, please."
        ]] + [[
            icon + skin,
            "Update existing skin."
        ] for skin in sorted(skins.keys())]

        def on_done(index):
            """A quick panel item was selected."""
            if index == 0:
                # Save as new skin ...
                self.window.show_input_panel(
                    "Enter skins name:", "", save_skin, None, None)
            elif index > 0:
                # Update existing skin.
                # BUG FIX: was .lstrip(icon); str.lstrip() strips a character
                # SET (and could eat leading characters of the name itself);
                # slice off the fixed-length icon prefix instead.
                save_skin(items[index][0][len(icon):])

        # display a quick panel with all user skins
        self.window.show_quick_panel(
            items=items, on_select=on_done,
            flags=sublime.KEEP_OPEN_ON_FOCUS_LOST)

    @classmethod
    def transform(cls, json, css):
        """Filter JSON object by a stylesheet.

        This function transforms the <json> object by recursively
        parsing it and returning only the child objects whose keys
        match the values in the cascaded stylesheet <css>.

        Arguments:
            json    The data source to filter
            css     The stylesheet used as filter
                    Each <object> must exist in <json>.
                    Each <key> and its value is read from <json> and
                    added to the returned object.

        EXAMPLE:
            <object> : [<key>, <key>, ...],
            <object> : {
                <object> : [<key>, <key>, ...]
            }
        """
        if json and css:
            if isinstance(css, dict):
                node = {}
                for key, style in css.items():
                    # NOTE(review): json[key] raises KeyError when <object>
                    # is missing -- documented precondition, kept as-is.
                    value = cls.transform(json[key], style)
                    # do not add empty objects
                    if value:
                        node[key] = value
                return node
            if isinstance(css, list):
                return {key: json[key] for key in css if key in json}
            elif css in json:
                return {css: json[css]}
        return None
| {
"content_hash": "dcb9a9d32413e1dc63c083fd7c3f3aa7",
"timestamp": "",
"source": "github",
"line_count": 407,
"max_line_length": 79,
"avg_line_length": 37.00982800982801,
"alnum_prop": 0.5513509924981743,
"repo_name": "deathaxe/sublime-skins",
"id": "ed714b184c442d3fde59bfd63af30c5a245f7d54",
"size": "15099",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15099"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the model 'NumeroDocumento' of app 'servico' to 'Documento'."""

    # Must be applied after the 0039 migration of the same app.
    dependencies = [
        ('servico', '0039_eventodestatus_init'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='NumeroDocumento',
            new_name='Documento',
        ),
    ]
| {
"content_hash": "2bfa377db95b0773f7e589f246e54481",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 48,
"avg_line_length": 19.705882352941178,
"alnum_prop": 0.6029850746268657,
"repo_name": "anselmobd/fo2",
"id": "361b4a5617879e0525e35765ae4acf6156eada92",
"size": "409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/servico/migrations/0040_numeronocumento_to_documento.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160899"
},
{
"name": "HTML",
"bytes": "855985"
},
{
"name": "JavaScript",
"bytes": "203109"
},
{
"name": "PLSQL",
"bytes": "2762"
},
{
"name": "Python",
"bytes": "3228268"
},
{
"name": "Shell",
"bytes": "2161"
}
],
"symlink_target": ""
} |
import copy
from oslo_log import log as logging
from tempest_lib.services.identity.v2.token_client import TokenClientJSON
from tempest_lib.services.identity.v3.token_client import V3TokenClientJSON
from tempest.common import cred_provider
from tempest.common import negative_rest_client
from tempest import config
from tempest import exceptions
from tempest import manager
from tempest.services.baremetal.v1.json.baremetal_client import \
BaremetalClient
from tempest.services import botoclients
from tempest.services.compute.json.agents_client import \
AgentsClient
from tempest.services.compute.json.aggregates_client import \
AggregatesClient
from tempest.services.compute.json.availability_zone_client import \
AvailabilityZoneClient
from tempest.services.compute.json.baremetal_nodes_client import \
BaremetalNodesClient
from tempest.services.compute.json.certificates_client import \
CertificatesClient
from tempest.services.compute.json.extensions_client import \
ExtensionsClient
from tempest.services.compute.json.fixed_ips_client import FixedIPsClient
from tempest.services.compute.json.flavors_client import FlavorsClient
from tempest.services.compute.json.floating_ip_pools_client import \
FloatingIPPoolsClient
from tempest.services.compute.json.floating_ips_bulk_client import \
FloatingIPsBulkClient
from tempest.services.compute.json.floating_ips_client import \
FloatingIPsClient
from tempest.services.compute.json.hosts_client import HostsClient
from tempest.services.compute.json.hypervisor_client import \
HypervisorClient
from tempest.services.compute.json.images_client import ImagesClient
from tempest.services.compute.json.instance_usage_audit_log_client import \
InstanceUsagesAuditLogClient
from tempest.services.compute.json.interfaces_client import \
InterfacesClient
from tempest.services.compute.json.keypairs_client import KeyPairsClient
from tempest.services.compute.json.limits_client import LimitsClient
from tempest.services.compute.json.migrations_client import \
MigrationsClient
from tempest.services.compute.json.networks_client import NetworksClient
from tempest.services.compute.json.quota_classes_client import \
QuotaClassesClient
from tempest.services.compute.json.quotas_client import QuotasClient
from tempest.services.compute.json.security_group_default_rules_client import \
SecurityGroupDefaultRulesClient
from tempest.services.compute.json.security_group_rules_client import \
SecurityGroupRulesClient
from tempest.services.compute.json.security_groups_client import \
SecurityGroupsClient
from tempest.services.compute.json.server_groups_client import \
ServerGroupsClient
from tempest.services.compute.json.servers_client import ServersClient
from tempest.services.compute.json.services_client import ServicesClient
from tempest.services.compute.json.tenant_networks_client import \
TenantNetworksClient
from tempest.services.compute.json.tenant_usages_client import \
TenantUsagesClient
from tempest.services.compute.json.versions_client import VersionsClient
from tempest.services.compute.json.volumes_extensions_client import \
VolumesExtensionsClient
from tempest.services.data_processing.v1_1.data_processing_client import \
DataProcessingClient
from tempest.services.database.json.flavors_client import \
DatabaseFlavorsClient
from tempest.services.database.json.limits_client import \
DatabaseLimitsClient
from tempest.services.database.json.versions_client import \
DatabaseVersionsClient
from tempest.services.identity.v2.json.identity_client import \
IdentityClient
from tempest.services.identity.v3.json.credentials_client import \
CredentialsClient
from tempest.services.identity.v3.json.endpoints_client import \
EndPointClient
from tempest.services.identity.v3.json.identity_client import \
IdentityV3Client
from tempest.services.identity.v3.json.policy_client import PolicyClient
from tempest.services.identity.v3.json.region_client import RegionClient
from tempest.services.identity.v3.json.service_client import \
ServiceClient
from tempest.services.image.v1.json.image_client import ImageClient
from tempest.services.image.v2.json.image_client import ImageClientV2
from tempest.services.messaging.json.messaging_client import \
MessagingClient
from tempest.services.network.json.network_client import NetworkClient
from tempest.services.object_storage.account_client import AccountClient
from tempest.services.object_storage.container_client import ContainerClient
from tempest.services.object_storage.object_client import ObjectClient
from tempest.services.orchestration.json.orchestration_client import \
OrchestrationClient
from tempest.services.telemetry.json.telemetry_client import \
TelemetryClient
from tempest.services.volume.json.admin.volume_hosts_client import \
VolumeHostsClient
from tempest.services.volume.json.admin.volume_quotas_client import \
VolumeQuotasClient
from tempest.services.volume.json.admin.volume_services_client import \
VolumesServicesClient
from tempest.services.volume.json.admin.volume_types_client import \
VolumeTypesClient
from tempest.services.volume.json.availability_zone_client import \
VolumeAvailabilityZoneClient
from tempest.services.volume.json.backups_client import BackupsClient
from tempest.services.volume.json.extensions_client import \
ExtensionsClient as VolumeExtensionClient
from tempest.services.volume.json.qos_client import QosSpecsClient
from tempest.services.volume.json.snapshots_client import SnapshotsClient
from tempest.services.volume.json.volumes_client import VolumesClient
from tempest.services.volume.v2.json.admin.volume_hosts_client import \
VolumeHostsV2Client
from tempest.services.volume.v2.json.admin.volume_quotas_client import \
VolumeQuotasV2Client
from tempest.services.volume.v2.json.admin.volume_services_client import \
VolumesServicesV2Client
from tempest.services.volume.v2.json.admin.volume_types_client import \
VolumeTypesV2Client
from tempest.services.volume.v2.json.availability_zone_client import \
VolumeV2AvailabilityZoneClient
from tempest.services.volume.v2.json.backups_client import BackupsClientV2
from tempest.services.volume.v2.json.extensions_client import \
ExtensionsV2Client as VolumeV2ExtensionClient
from tempest.services.volume.v2.json.qos_client import QosSpecsV2Client
from tempest.services.volume.v2.json.snapshots_client import \
SnapshotsV2Client
from tempest.services.volume.v2.json.volumes_client import VolumesV2Client
# Global tempest configuration object and module-level logger.
CONF = config.CONF
LOG = logging.getLogger(__name__)
class Manager(manager.Manager):
    """
    Top level manager for OpenStack tempest clients
    """

    # REST client keyword arguments shared by every service client.
    default_params = {
        'disable_ssl_certificate_validation':
            CONF.identity.disable_ssl_certificate_validation,
        'ca_certs': CONF.identity.ca_certificates_file,
        'trace_requests': CONF.debug.trace_requests
    }

    # NOTE: Tempest uses timeout values of compute API if project specific
    # timeout values don't exist.
    default_params_with_timeout_values = {
        'build_interval': CONF.compute.build_interval,
        'build_timeout': CONF.compute.build_timeout
    }
    default_params_with_timeout_values.update(default_params)
    def __init__(self, credentials=None, service=None):
        """Build every service client this manager exposes.

        Arguments:
            credentials: credentials object forwarded to the base manager;
                the base class initializer provides the self.auth_provider
                used for every client below.
            service: service name, only used by the negative REST client.
        """
        super(Manager, self).__init__(credentials=credentials)
        # Clients are grouped per OpenStack service in helper methods.
        self._set_compute_clients()
        self._set_database_clients()
        self._set_identity_clients()
        self._set_volume_clients()
        self._set_object_storage_clients()
        self.baremetal_client = BaremetalClient(
            self.auth_provider,
            CONF.baremetal.catalog_type,
            CONF.identity.region,
            endpoint_type=CONF.baremetal.endpoint_type,
            **self.default_params_with_timeout_values)
        self.network_client = NetworkClient(
            self.auth_provider,
            CONF.network.catalog_type,
            # Service-specific region falls back to the identity region.
            CONF.network.region or CONF.identity.region,
            endpoint_type=CONF.network.endpoint_type,
            build_interval=CONF.network.build_interval,
            build_timeout=CONF.network.build_timeout,
            **self.default_params)
        self.messaging_client = MessagingClient(
            self.auth_provider,
            CONF.messaging.catalog_type,
            CONF.identity.region,
            **self.default_params_with_timeout_values)
        # Optional services: only instantiated when marked available.
        if CONF.service_available.ceilometer:
            self.telemetry_client = TelemetryClient(
                self.auth_provider,
                CONF.telemetry.catalog_type,
                CONF.identity.region,
                endpoint_type=CONF.telemetry.endpoint_type,
                **self.default_params_with_timeout_values)
        if CONF.service_available.glance:
            self.image_client = ImageClient(
                self.auth_provider,
                CONF.image.catalog_type,
                CONF.image.region or CONF.identity.region,
                endpoint_type=CONF.image.endpoint_type,
                build_interval=CONF.image.build_interval,
                build_timeout=CONF.image.build_timeout,
                **self.default_params)
            self.image_client_v2 = ImageClientV2(
                self.auth_provider,
                CONF.image.catalog_type,
                CONF.image.region or CONF.identity.region,
                endpoint_type=CONF.image.endpoint_type,
                build_interval=CONF.image.build_interval,
                build_timeout=CONF.image.build_timeout,
                **self.default_params)
        self.orchestration_client = OrchestrationClient(
            self.auth_provider,
            CONF.orchestration.catalog_type,
            CONF.orchestration.region or CONF.identity.region,
            endpoint_type=CONF.orchestration.endpoint_type,
            build_interval=CONF.orchestration.build_interval,
            build_timeout=CONF.orchestration.build_timeout,
            **self.default_params)
        self.data_processing_client = DataProcessingClient(
            self.auth_provider,
            CONF.data_processing.catalog_type,
            CONF.identity.region,
            endpoint_type=CONF.data_processing.endpoint_type,
            **self.default_params_with_timeout_values)
        self.negative_client = negative_rest_client.NegativeRestClient(
            self.auth_provider, service, **self.default_params)
        # Generating EC2 credentials in tempest is only supported
        # with identity v2
        if CONF.identity_feature_enabled.api_v2 and \
                CONF.identity.auth_version == 'v2':
            # EC2 and S3 clients, if used, will check configured AWS
            # credentials and generate new ones if needed
            self.ec2api_client = botoclients.APIClientEC2(self.identity_client)
            self.s3_client = botoclients.ObjectClientS3(self.identity_client)
    def _set_compute_clients(self):
        """Create all clients for the compute (nova) API endpoint."""
        # Common keyword arguments shared by every compute client.
        params = {
            'service': CONF.compute.catalog_type,
            'region': CONF.compute.region or CONF.identity.region,
            'endpoint_type': CONF.compute.endpoint_type,
            'build_interval': CONF.compute.build_interval,
            'build_timeout': CONF.compute.build_timeout
        }
        params.update(self.default_params)
        self.agents_client = AgentsClient(self.auth_provider, **params)
        self.networks_client = NetworksClient(self.auth_provider, **params)
        self.migrations_client = MigrationsClient(self.auth_provider,
                                                  **params)
        self.security_group_default_rules_client = (
            SecurityGroupDefaultRulesClient(self.auth_provider, **params))
        self.certificates_client = CertificatesClient(self.auth_provider,
                                                      **params)
        self.servers_client = ServersClient(
            self.auth_provider,
            enable_instance_password=CONF.compute_feature_enabled
            .enable_instance_password,
            **params)
        self.server_groups_client = ServerGroupsClient(
            self.auth_provider, **params)
        self.limits_client = LimitsClient(self.auth_provider, **params)
        self.images_client = ImagesClient(self.auth_provider, **params)
        self.keypairs_client = KeyPairsClient(self.auth_provider, **params)
        self.quotas_client = QuotasClient(self.auth_provider, **params)
        self.quota_classes_client = QuotaClassesClient(self.auth_provider,
                                                       **params)
        self.flavors_client = FlavorsClient(self.auth_provider, **params)
        self.extensions_client = ExtensionsClient(self.auth_provider,
                                                  **params)
        self.floating_ip_pools_client = FloatingIPPoolsClient(
            self.auth_provider, **params)
        self.floating_ips_bulk_client = FloatingIPsBulkClient(
            self.auth_provider, **params)
        self.floating_ips_client = FloatingIPsClient(self.auth_provider,
                                                     **params)
        self.security_group_rules_client = SecurityGroupRulesClient(
            self.auth_provider, **params)
        self.security_groups_client = SecurityGroupsClient(
            self.auth_provider, **params)
        self.interfaces_client = InterfacesClient(self.auth_provider,
                                                  **params)
        self.fixed_ips_client = FixedIPsClient(self.auth_provider,
                                               **params)
        self.availability_zone_client = AvailabilityZoneClient(
            self.auth_provider, **params)
        self.aggregates_client = AggregatesClient(self.auth_provider,
                                                  **params)
        self.services_client = ServicesClient(self.auth_provider, **params)
        self.tenant_usages_client = TenantUsagesClient(self.auth_provider,
                                                       **params)
        self.hosts_client = HostsClient(self.auth_provider, **params)
        self.hypervisor_client = HypervisorClient(self.auth_provider,
                                                  **params)
        self.instance_usages_audit_log_client = \
            InstanceUsagesAuditLogClient(self.auth_provider, **params)
        self.tenant_networks_client = \
            TenantNetworksClient(self.auth_provider, **params)
        self.baremetal_nodes_client = BaremetalNodesClient(
            self.auth_provider, **params)
        # NOTE: The following client needs special timeout values because
        # the API is a proxy for the other component.
        params_volume = copy.deepcopy(params)
        params_volume.update({
            'build_interval': CONF.volume.build_interval,
            'build_timeout': CONF.volume.build_timeout
        })
        self.volumes_extensions_client = VolumesExtensionsClient(
            self.auth_provider, **params_volume)
        self.compute_versions_client = VersionsClient(self.auth_provider,
                                                      **params_volume)
def _set_database_clients(self):
    """Initialize the clients for the database (trove) service."""
    # All three database clients are built from the same positional
    # arguments and the same timeout-aware defaults.
    common = (
        self.auth_provider,
        CONF.database.catalog_type,
        CONF.identity.region,
    )
    self.database_flavors_client = DatabaseFlavorsClient(
        *common, **self.default_params_with_timeout_values)
    self.database_limits_client = DatabaseLimitsClient(
        *common, **self.default_params_with_timeout_values)
    self.database_versions_client = DatabaseVersionsClient(
        *common, **self.default_params_with_timeout_values)
def _set_identity_clients(self):
    """Initialize Keystone clients for the v2 and v3 identity APIs."""
    base = {
        'service': CONF.identity.catalog_type,
        'region': CONF.identity.region
    }
    base.update(self.default_params_with_timeout_values)
    auth = self.auth_provider

    # Each versioned client gets a copy of the base params carrying its
    # own endpoint type.
    # Client uses admin endpoint type of Keystone API v2.
    self.identity_client = IdentityClient(
        auth,
        **dict(base, endpoint_type=CONF.identity.v2_admin_endpoint_type))
    # Client uses public endpoint type of Keystone API v2.
    self.identity_public_client = IdentityClient(
        auth,
        **dict(base, endpoint_type=CONF.identity.v2_public_endpoint_type))
    # Client uses the endpoint type of Keystone API v3.
    self.identity_v3_client = IdentityV3Client(
        auth, **dict(base, endpoint_type=CONF.identity.v3_endpoint_type))

    self.endpoints_client = EndPointClient(auth, **base)
    self.service_client = ServiceClient(auth, **base)
    self.policy_client = PolicyClient(auth, **base)
    self.region_client = RegionClient(auth, **base)
    self.credentials_client = CredentialsClient(auth, **base)

    # Token clients do not use the catalog. They only need default_params.
    # They read auth_url, so they should only be set if the corresponding
    # API version is marked as enabled.
    if CONF.identity_feature_enabled.api_v2:
        if not CONF.identity.uri:
            raise exceptions.InvalidConfiguration(
                'Identity v2 API enabled, but no identity.uri set')
        self.token_client = TokenClientJSON(
            CONF.identity.uri, **self.default_params)
    if CONF.identity_feature_enabled.api_v3:
        if not CONF.identity.uri_v3:
            raise exceptions.InvalidConfiguration(
                'Identity v3 API enabled, but no identity.uri_v3 set')
        self.token_v3_client = V3TokenClientJSON(
            CONF.identity.uri_v3, **self.default_params)
def _set_volume_clients(self):
    """Initialize the Cinder (block storage) v1 and v2 clients."""
    params = {
        'service': CONF.volume.catalog_type,
        'region': CONF.volume.region or CONF.identity.region,
        'endpoint_type': CONF.volume.endpoint_type,
        'build_interval': CONF.volume.build_interval,
        'build_timeout': CONF.volume.build_timeout
    }
    params.update(self.default_params)
    auth = self.auth_provider

    self.volume_qos_client = QosSpecsClient(auth, **params)
    self.volume_qos_v2_client = QosSpecsV2Client(auth, **params)
    self.volume_services_v2_client = VolumesServicesV2Client(auth, **params)
    self.backups_client = BackupsClient(auth, **params)
    self.backups_v2_client = BackupsClientV2(auth, **params)
    self.snapshots_client = SnapshotsClient(auth, **params)
    self.snapshots_v2_client = SnapshotsV2Client(auth, **params)
    # Volume clients additionally carry the configured default volume size.
    self.volumes_client = VolumesClient(
        auth, default_volume_size=CONF.volume.volume_size, **params)
    self.volumes_v2_client = VolumesV2Client(
        auth, default_volume_size=CONF.volume.volume_size, **params)
    self.volume_types_client = VolumeTypesClient(auth, **params)
    self.volume_services_client = VolumesServicesClient(auth, **params)
    self.volume_hosts_client = VolumeHostsClient(auth, **params)
    self.volume_hosts_v2_client = VolumeHostsV2Client(auth, **params)
    self.volume_quotas_client = VolumeQuotasClient(auth, **params)
    self.volume_quotas_v2_client = VolumeQuotasV2Client(auth, **params)
    self.volumes_extension_client = VolumeExtensionClient(auth, **params)
    self.volumes_v2_extension_client = VolumeV2ExtensionClient(auth, **params)
    self.volume_availability_zone_client = VolumeAvailabilityZoneClient(
        auth, **params)
    self.volume_v2_availability_zone_client = VolumeV2AvailabilityZoneClient(
        auth, **params)
    self.volume_types_v2_client = VolumeTypesV2Client(auth, **params)
def _set_object_storage_clients(self):
    """Initialize the Swift (object storage) clients."""
    params = {
        'service': CONF.object_storage.catalog_type,
        'region': CONF.object_storage.region or CONF.identity.region,
        'endpoint_type': CONF.object_storage.endpoint_type
    }
    params.update(self.default_params_with_timeout_values)
    auth = self.auth_provider
    self.account_client = AccountClient(auth, **params)
    self.container_client = ContainerClient(auth, **params)
    self.object_client = ObjectClient(auth, **params)
class AdminManager(Manager):
    """Manager that authenticates with the configured admin credentials.

    Every client exposed by this manager therefore acts with admin rights.
    """

    def __init__(self, service=None):
        admin_creds = cred_provider.get_configured_credentials(
            'identity_admin')
        super(AdminManager, self).__init__(credentials=admin_creds,
                                           service=service)
| {
"content_hash": "967917a8fe22641489392523dca8d8ab",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 79,
"avg_line_length": 48.947136563876654,
"alnum_prop": 0.6653766537665377,
"repo_name": "dkalashnik/tempest",
"id": "c0d45857c3485fded19be64d80be0265d7f584d7",
"size": "22858",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tempest/clients.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2790231"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
} |
import os.path as op
import warnings
import inspect
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
from nose.tools import assert_true, assert_raises, assert_equal
from mne import find_events, pick_types
from mne.io import read_raw_egi
from mne.io.tests.test_raw import _test_raw_reader
from mne.io.egi.egi import _combine_triggers
from mne.utils import run_tests_if_main
from mne.datasets.testing import data_path, requires_testing_data
warnings.simplefilter('always')  # enable b/c these tests throw warnings
# Locate the bundled test-data directory relative to this file.
FILE = inspect.getfile(inspect.currentframe())
base_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
# EGI raw recording and its plain-text reference export.
egi_fname = op.join(base_dir, 'test_egi.raw')
egi_txt_fname = op.join(base_dir, 'test_egi.txt')
@requires_testing_data
def test_io_egi_mff():
    """Test importing EGI MFF simple binary files"""
    egi_fname_mff = op.join(data_path(), 'EGI', 'test_egi.mff')
    raw = read_raw_egi(egi_fname_mff, include=None)
    assert_true('RawMff' in repr(raw))

    include = ['DIN1', 'DIN2', 'DIN3', 'DIN4', 'DIN5', 'DIN7']
    raw = _test_raw_reader(read_raw_egi, input_fname=egi_fname_mff,
                           include=include, channel_naming='EEG %03d')

    # This recording is expected to carry 129 EEG channels.
    assert_equal('eeg' in raw, True)
    eeg_names = [name for name in raw.ch_names if 'EEG' in name]
    assert_equal(len(eeg_names), 129)
    eeg_picks = pick_types(raw.info, eeg=True)
    assert_equal(len(eeg_picks), 129)

    # The synthesized stim channel must expose the 8 known events.
    assert_equal('STI 014' in raw.ch_names, True)
    events = find_events(raw, stim_channel='STI 014')
    assert_equal(len(events), 8)
    assert_equal(np.unique(events[:, 1])[0], 0)
    assert_true(np.unique(events[:, 0])[0] != 0)
    assert_true(np.unique(events[:, 2])[0] != 0)

    # Unknown include/exclude names must raise.
    assert_raises(ValueError, read_raw_egi, egi_fname_mff, include=['Foo'],
                  preload=False)
    assert_raises(ValueError, read_raw_egi, egi_fname_mff, exclude=['Bar'],
                  preload=False)
    for code, name in enumerate(include, 1):
        assert_true(name in raw.event_id)
        assert_true(raw.event_id[name] == code)
def test_io_egi():
    """Test importing EGI simple binary files."""
    # Load the plain-text reference: first row is the time axis, the
    # remaining rows the channel data.
    with open(egi_txt_fname) as fid:
        data = np.loadtxt(fid)
    t = data[0]
    data = data[1:]
    data *= 1e-6  # μV

    # Reading without event selection must emit exactly one RuntimeWarning
    # about the missing usable event channel.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        raw = read_raw_egi(egi_fname, include=None)
    assert_true('RawEGI' in repr(raw))
    assert_equal(len(w), 1)
    assert_true(w[0].category == RuntimeWarning)
    msg = 'Did not find any event code with more than one event.'
    assert_true(msg in '%s' % w[0].message)

    # Channel data and times must match the text export.
    data_read, t_read = raw[:256]
    assert_allclose(t_read, t)
    assert_allclose(data_read, data, atol=1e-10)

    include = ['TRSP', 'XXX1']
    with warnings.catch_warnings(record=True):  # preload=None
        raw = _test_raw_reader(read_raw_egi, input_fname=egi_fname,
                               include=include)
    assert_equal('eeg' in raw, True)
    eeg_chan = [c for c in raw.ch_names if c.startswith('E')]
    assert_equal(len(eeg_chan), 256)
    picks = pick_types(raw.info, eeg=True)
    assert_equal(len(picks), 256)
    assert_equal('STI 014' in raw.ch_names, True)
    events = find_events(raw, stim_channel='STI 014')
    assert_equal(len(events), 2)  # ground truth
    assert_equal(np.unique(events[:, 1])[0], 0)
    assert_true(np.unique(events[:, 0])[0] != 0)
    assert_true(np.unique(events[:, 2])[0] != 0)

    # test trigger functionality
    # (a stale duplicate `triggers` assignment that was immediately
    # overwritten has been removed here)
    triggers = np.array([[0, 1, 0, 0], [0, 0, 1, 0]])
    events_ids = [12, 24]
    new_trigger = _combine_triggers(triggers, events_ids)
    assert_array_equal(np.unique(new_trigger), np.unique([0, 12, 24]))

    assert_raises(ValueError, read_raw_egi, egi_fname, include=['Foo'],
                  preload=False)
    assert_raises(ValueError, read_raw_egi, egi_fname, exclude=['Bar'],
                  preload=False)
    for ii, k in enumerate(include, 1):
        assert_true(k in raw.event_id)
        assert_true(raw.event_id[k] == ii)
# Execute this module's tests when it is run as a script.
run_tests_if_main()
| {
"content_hash": "fb8a67cfa5f04d39f6fa46a939d98ad8",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 75,
"avg_line_length": 37.270270270270274,
"alnum_prop": 0.6316171138506164,
"repo_name": "jaeilepp/mne-python",
"id": "90af188772ee4d2617f87c3d8a98a8c9cc9d95af",
"size": "4257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/io/egi/tests/test_egi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3928"
},
{
"name": "Python",
"bytes": "6113850"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
from keystone.common import environment
from keystone import config
from keystone import tests
# Module-level handle on the global keystone configuration object.
CONF = config.CONF
class IPv6TestCase(tests.TestCase):
    """Exercise the public and admin keystone APIs over IPv6."""

    @classmethod
    def setUpClass(cls):
        cls.skip_if_no_ipv6()

    def setUp(self):
        super(IPv6TestCase, self).setUp()
        self.load_backends()

    def _assert_root_ok(self, port):
        """GET / on the IPv6 loopback and expect a 300 Multiple Choices."""
        conn = environment.httplib.HTTPConnection('::1', port)
        conn.request('GET', '/')
        resp = conn.getresponse()
        self.assertEqual(resp.status, 300)

    def test_ipv6_ok(self):
        """Make sure both public and admin API work with ipv6."""
        self.public_server = self.serveapp('keystone', name='main',
                                           host="::1", port=0)
        self.admin_server = self.serveapp('keystone', name='admin',
                                          host="::1", port=0)
        # Verify the admin API first, then the public one.
        self._assert_root_ok(CONF.admin_port)
        self._assert_root_ok(CONF.public_port)
| {
"content_hash": "56c4fff37b81fce9a588780beae98924",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 74,
"avg_line_length": 33.06060606060606,
"alnum_prop": 0.5811182401466545,
"repo_name": "townbull/keystone-dtrust",
"id": "e55847da2de1c3daae0395278a94aa460aea7ffb",
"size": "1722",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev/domain-trusts",
"path": "keystone/tests/test_ipv6.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "2371560"
},
{
"name": "Shell",
"bytes": "31430"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy
from python_speech_features import sigproc
from scipy.fftpack import dct
def mfcc(signal, samplerate=16000, winlen=0.025, winstep=0.01, numcep=13,
         nfilt=26, nfft=512, lowfreq=0, highfreq=None, preemph=0.97,
         ceplifter=22, appendEnergy=True,
         winfunc=lambda x: numpy.ones((x,))):
    """Compute MFCC features from an audio signal.

    :param signal: the audio signal from which to compute features. Should be an N*1 array
    :param samplerate: the samplerate of the signal we are working with.
    :param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
    :param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
    :param numcep: the number of cepstrum to return, default 13
    :param nfilt: the number of filters in the filterbank, default 26.
    :param nfft: the FFT size. Default is 512.
    :param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
    :param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
    :param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
    :param ceplifter: apply a lifter to final cepstral coefficients. 0 is no lifter. Default is 22.
    :param appendEnergy: if this is true, the zeroth cepstral coefficient is replaced with the log of the total frame energy.
    :param winfunc: the analysis window to apply to each frame. By default no window is applied.
    :returns: A numpy array of size (NUMFRAMES by numcep) containing features. Each row holds 1 feature vector.
    """
    feat, energy = fbank(signal, samplerate, winlen, winstep, nfilt, nfft,
                         lowfreq, highfreq, preemph, winfunc)
    log_fb = numpy.log(feat)
    # Decorrelate the log filterbank energies and keep the first numcep
    # coefficients.
    cepstra = dct(log_fb, type=2, axis=1, norm='ortho')[:, :numcep]
    cepstra = lifter(cepstra, ceplifter)
    if appendEnergy:
        # Replace the 0th coefficient with the log of the total frame energy.
        cepstra[:, 0] = numpy.log(energy)
    return cepstra
def fbank(signal, samplerate=16000, winlen=0.025, winstep=0.01,
          nfilt=26, nfft=512, lowfreq=0, highfreq=None, preemph=0.97,
          winfunc=lambda x: numpy.ones((x,))):
    """Compute Mel-filterbank energy features from an audio signal.

    :param signal: the audio signal from which to compute features. Should be an N*1 array
    :param samplerate: the samplerate of the signal we are working with.
    :param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
    :param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
    :param nfilt: the number of filters in the filterbank, default 26.
    :param nfft: the FFT size. Default is 512.
    :param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
    :param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
    :param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
    :param winfunc: the analysis window to apply to each frame. By default no window is applied.
    :returns: 2 values. The first is a numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector. The
        second return value is the energy in each frame (total energy, unwindowed)
    """
    if not highfreq:
        highfreq = samplerate / 2
    eps = numpy.finfo(float).eps

    signal = sigproc.preemphasis(signal, preemph)
    frames = sigproc.framesig(signal, winlen * samplerate,
                              winstep * samplerate, winfunc)
    pspec = sigproc.powspec(frames, nfft)

    # Total (unwindowed) energy per frame; floor zeros so log() stays finite.
    energy = numpy.sum(pspec, 1)
    energy = numpy.where(energy == 0, eps, energy)

    fb = get_filterbanks(nfilt, nfft, samplerate, lowfreq, highfreq)
    feat = numpy.dot(pspec, fb.T)  # filterbank energies
    feat = numpy.where(feat == 0, eps, feat)  # floor zeros for later log()
    return feat, energy
def logfbank(signal, samplerate=16000, winlen=0.025, winstep=0.01,
             nfilt=26, nfft=512, lowfreq=0, highfreq=None, preemph=0.97):
    """Compute log Mel-filterbank energy features from an audio signal.

    :param signal: the audio signal from which to compute features. Should be an N*1 array
    :param samplerate: the samplerate of the signal we are working with.
    :param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
    :param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
    :param nfilt: the number of filters in the filterbank, default 26.
    :param nfft: the FFT size. Default is 512.
    :param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
    :param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
    :param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
    :returns: A numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector.
    """
    # The per-frame energies are not needed here, only the filterbank part.
    feat, _ = fbank(signal, samplerate, winlen, winstep, nfilt, nfft,
                    lowfreq, highfreq, preemph)
    return numpy.log(feat)
def ssc(signal, samplerate=16000, winlen=0.025, winstep=0.01,
        nfilt=26, nfft=512, lowfreq=0, highfreq=None, preemph=0.97,
        winfunc=lambda x: numpy.ones((x,))):
    """Compute Spectral Subband Centroid features from an audio signal.

    :param signal: the audio signal from which to compute features. Should be an N*1 array
    :param samplerate: the samplerate of the signal we are working with.
    :param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
    :param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
    :param nfilt: the number of filters in the filterbank, default 26.
    :param nfft: the FFT size. Default is 512.
    :param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
    :param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
    :param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
    :param winfunc: the analysis window to apply to each frame. By default no window is applied.
    :returns: A numpy array of size (NUMFRAMES by nfilt) containing features. Each row holds 1 feature vector.
    """
    if not highfreq:
        highfreq = samplerate / 2

    signal = sigproc.preemphasis(signal, preemph)
    frames = sigproc.framesig(signal, winlen * samplerate,
                              winstep * samplerate, winfunc)
    pspec = sigproc.powspec(frames, nfft)
    # Floor zeros to eps so the centroid division below is well defined.
    pspec = numpy.where(pspec == 0, numpy.finfo(float).eps, pspec)

    fb = get_filterbanks(nfilt, nfft, samplerate, lowfreq, highfreq)
    feat = numpy.dot(pspec, fb.T)  # filterbank energies
    # Frequency of every FFT bin, tiled across all frames.
    R = numpy.tile(numpy.linspace(1, samplerate / 2, numpy.size(pspec, 1)),
                   (numpy.size(pspec, 0), 1))
    # Power-weighted mean frequency per subband.
    return numpy.dot(pspec * R, fb.T) / feat
def hz2mel(hz):
    """Convert a value in Hertz to Mels.

    :param hz: a value in Hz. This can also be a numpy array, conversion
        proceeds element-wise.
    :returns: a value in Mels. If an array was passed in, an identical
        sized array is returned.
    """
    # O'Shaughnessy's formula: m = 2595 * log10(1 + f / 700)
    return 2595 * numpy.log10(hz / 700. + 1)
def mel2hz(mel):
    """Convert a value in Mels to Hertz (inverse of ``hz2mel``).

    :param mel: a value in Mels. This can also be a numpy array, conversion
        proceeds element-wise.
    :returns: a value in Hertz. If an array was passed in, an identical
        sized array is returned.
    """
    exponent = mel / 2595.0
    return 700 * (10 ** exponent - 1)
def get_filterbanks(nfilt=20, nfft=512, samplerate=16000, lowfreq=0, highfreq=None):
    """Compute a Mel-filterbank. The filters are stored in the rows, the columns correspond
    to fft bins. The filters are returned as an array of size nfilt * (nfft/2 + 1)

    :param nfilt: the number of filters in the filterbank, default 20.
    :param nfft: the FFT size. Default is 512.
    :param samplerate: the samplerate of the signal we are working with. Affects mel spacing.
    :param lowfreq: lowest band edge of mel filters, default 0 Hz
    :param highfreq: highest band edge of mel filters, default samplerate/2
    :returns: A numpy array of size nfilt * (nfft/2 + 1) containing filterbank. Each row holds 1 filter.
    """
    highfreq = highfreq or samplerate / 2
    assert highfreq <= samplerate / 2, "highfreq is greater than samplerate/2"

    # nfilt + 2 points evenly spaced on the mel scale, mapped back to Hz and
    # quantized to FFT bin numbers.
    lowmel = hz2mel(lowfreq)
    highmel = hz2mel(highfreq)
    melpoints = numpy.linspace(lowmel, highmel, nfilt + 2)
    # Renamed from `bin` to avoid shadowing the builtin of the same name.
    bins = numpy.floor((nfft + 1) * mel2hz(melpoints) / samplerate)

    fbank = numpy.zeros([nfilt, nfft // 2 + 1])
    for j in range(0, nfilt):
        # Rising edge of triangular filter j...
        for i in range(int(bins[j]), int(bins[j + 1])):
            fbank[j, i] = (i - bins[j]) / (bins[j + 1] - bins[j])
        # ...and its falling edge.
        for i in range(int(bins[j + 1]), int(bins[j + 2])):
            fbank[j, i] = (bins[j + 2] - i) / (bins[j + 2] - bins[j + 1])
    return fbank
def lifter(cepstra, L=22):
    """Apply a cepstral lifter to the matrix of cepstra.

    Liftering increases the magnitude of the high frequency DCT coefficients.

    :param cepstra: the matrix of mel-cepstra, will be numframes * numcep in size.
    :param L: the liftering coefficient to use. Default is 22. L <= 0 disables lifter.
    """
    if L <= 0:
        # Liftering disabled: pass the input through untouched.
        return cepstra
    _, ncoeff = numpy.shape(cepstra)
    coeff_idx = numpy.arange(ncoeff)
    # Sinusoidal lift; the 0th coefficient is left unscaled (lift[0] == 1).
    lift = 1 + (L / 2.) * numpy.sin(numpy.pi * coeff_idx / L)
    return lift * cepstra
def delta(feat, N):
    """Compute delta features from a feature vector sequence.

    :param feat: A numpy array of size (NUMFRAMES by number of features) containing features. Each row holds 1 feature vector.
    :param N: For each frame, calculate delta features based on preceding and following N frames
    :returns: A numpy array of size (NUMFRAMES by number of features) containing delta features. Each row holds 1 delta feature vector.
    :raises ValueError: if N is smaller than 1 (the old code silently
        divided by zero, producing inf/nan output).
    """
    if N < 1:
        raise ValueError('N must be an integer >= 1')
    NUMFRAMES = len(feat)
    # Pad by repeating the first and last frames so deltas are defined at
    # the sequence edges.
    feat = numpy.concatenate(([feat[0] for i in range(N)], feat,
                              [feat[-1] for i in range(N)]))
    denom = sum([2 * i * i for i in range(1, N + 1)])
    dfeat = []
    for j in range(NUMFRAMES):
        dfeat.append(numpy.sum([n * feat[N + j + n]
                                for n in range(-1 * N, N + 1)], axis=0) / denom)
    # Return an array (not a Python list) as documented; indexing and
    # iteration behave the same for existing callers.
    return numpy.array(dfeat)
| {
"content_hash": "7924b6ff9679396e58132a7b4b5715e8",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 137,
"avg_line_length": 56.74731182795699,
"alnum_prop": 0.7019422074846045,
"repo_name": "ybdarrenwang/python_speech_features",
"id": "fa902a351a1f979727d5b0414bc9198038f5ced0",
"size": "10681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_speech_features/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16674"
}
],
"symlink_target": ""
} |
"""
Visitor doing some postprocessing on the astroid tree.
Try to resolve definitions (namespace) dictionary, relationship...
"""
from __future__ import print_function
import collections
import os
import traceback
import astroid
from astroid import bases
from astroid import exceptions
from astroid import manager
from astroid import modutils
from astroid import node_classes
from pylint.pyreverse import utils
def _iface_hdlr(_):
"""Handler used by interfaces to handle suspicious interface nodes."""
return True
def _astroid_wrapper(func, modname):
print('parsing %s...' % modname)
try:
return func(modname)
except exceptions.AstroidBuildingException as exc:
print(exc)
except Exception as exc: # pylint: disable=broad-except
traceback.print_exc()
def interfaces(node, herited=True, handler_func=_iface_hdlr):
    """Return an iterator on interfaces implemented by the given class node.

    :param node: the astroid class node to inspect.
    :param herited: when false, only consider an ``__implements__`` declared
        directly on *node* (an inherited one is ignored).
    :param handler_func: predicate applied to each candidate interface node;
        only nodes it accepts are yielded.
    :raises exceptions.InferenceError: if an ``__implements__`` entry could
        not be inferred.
    """
    # FIXME: what if __implements__ = (MyIFace, MyParent.__implements__)...
    try:
        implements = bases.Instance(node).getattr('__implements__')[0]
    except exceptions.NotFoundError:
        # No __implements__ attribute at all: nothing to yield.
        return
    if not herited and implements.frame() is not node:
        # __implements__ was inherited but the caller asked for own-only.
        return
    found = set()
    missing = False
    for iface in node_classes.unpack_infer(implements):
        if iface is astroid.YES:
            # Entry could not be inferred; remember and keep scanning.
            missing = True
            continue
        if iface not in found and handler_func(iface):
            found.add(iface)  # de-duplicate repeated entries
            yield iface
    if missing:
        raise exceptions.InferenceError()
class IdGeneratorMixIn(object):
    """Mixin adding the ability to generate integer uid."""

    def __init__(self, start_value=0):
        self.id_count = start_value

    def init_counter(self, start_value=0):
        """Reset the id counter to ``start_value``."""
        self.id_count = start_value

    def generate_id(self):
        """Increment the counter and return the new identifier."""
        self.id_count += 1
        return self.id_count
class Linker(IdGeneratorMixIn, utils.LocalsVisitor):
    """Walk on the project tree and resolve relationships.

    According to options the following attributes may be
    added to visited nodes:

    * uid,
      a unique identifier for the node (on astroid.Project, astroid.Module,
      astroid.Class and astroid.locals_type). Only if the linker
      has been instantiated with tag=True parameter (False by default).

    * Function
      a mapping from locals names to their bounded value, which may be a
      constant like a string or an integer, or an astroid node
      (on astroid.Module, astroid.Class and astroid.Function).

    * instance_attrs_type
      as locals_type but for klass member attributes (only on astroid.Class)

    * implements,
      list of implemented interface _objects_ (only on astroid.Class nodes)
    """

    def __init__(self, project, inherited_interfaces=0, tag=False):
        IdGeneratorMixIn.__init__(self)
        utils.LocalsVisitor.__init__(self)
        # take inherited interface in consideration or not
        self.inherited_interfaces = inherited_interfaces
        # tag nodes or not
        self.tag = tag
        # visited project
        self.project = project

    def visit_project(self, node):
        """visit a pyreverse.utils.Project node

        * optionally tag the node with a unique id
        """
        if self.tag:
            node.uid = self.generate_id()
        for module in node.modules:
            self.visit(module)

    def visit_package(self, node):
        """visit an astroid.Package node

        * optionally tag the node with a unique id
        """
        if self.tag:
            node.uid = self.generate_id()
        for subelmt in node.values():
            self.visit(subelmt)

    def visit_module(self, node):
        """visit an astroid.Module node

        * set the locals_type mapping
        * set the depends mapping
        * optionally tag the node with a unique id
        """
        # already linked by an earlier visit: nothing to do
        if hasattr(node, 'locals_type'):
            return
        node.locals_type = collections.defaultdict(list)
        node.depends = []
        if self.tag:
            node.uid = self.generate_id()

    def visit_classdef(self, node):
        """visit an astroid.Class node

        * set the locals_type and instance_attrs_type mappings
        * set the implements list and build it
        * optionally tag the node with a unique id
        """
        # already linked by an earlier visit: nothing to do
        if hasattr(node, 'locals_type'):
            return
        node.locals_type = collections.defaultdict(list)
        if self.tag:
            node.uid = self.generate_id()
        # resolve ancestors: register this class as a specialization of each
        # direct base
        for baseobj in node.ancestors(recurs=False):
            specializations = getattr(baseobj, 'specializations', [])
            specializations.append(node)
            baseobj.specializations = specializations
        # resolve instance attributes
        node.instance_attrs_type = collections.defaultdict(list)
        for assattrs in node.instance_attrs.values():
            for assattr in assattrs:
                self.handle_assattr_type(assattr, node)
        # resolve implemented interface
        try:
            node.implements = list(interfaces(node, self.inherited_interfaces))
        except astroid.InferenceError:
            node.implements = ()

    def visit_functiondef(self, node):
        """visit an astroid.Function node

        * set the locals_type mapping
        * optionally tag the node with a unique id
        """
        # already linked by an earlier visit: nothing to do
        if hasattr(node, 'locals_type'):
            return
        node.locals_type = collections.defaultdict(list)
        if self.tag:
            node.uid = self.generate_id()

    # aliases kept for the older link_* visitor naming scheme
    link_project = visit_project
    link_module = visit_module
    link_class = visit_classdef
    link_function = visit_functiondef

    def visit_assignname(self, node):
        """visit an astroid.AssName node

        handle locals_type
        """
        # avoid double parsing done by different Linkers.visit
        # running over the same project:
        if hasattr(node, '_handled'):
            return
        node._handled = True
        if node.name in node.frame():
            frame = node.frame()
        else:
            # the name has been defined as 'global' in the frame and belongs
            # there.
            frame = node.root()
        try:
            if not hasattr(frame, 'locals_type'):
                # If the frame doesn't have a locals_type yet,
                # it means it wasn't yet visited. Visit it now
                # to add what's missing from it.
                if isinstance(frame, astroid.ClassDef):
                    self.visit_classdef(frame)
                elif isinstance(frame, astroid.FunctionDef):
                    self.visit_functiondef(frame)
                else:
                    self.visit_module(frame)
            # merge the newly inferred values into the existing ones
            current = frame.locals_type[node.name]
            values = set(node.infer())
            frame.locals_type[node.name] = list(set(current) | values)
        except astroid.InferenceError:
            pass

    @staticmethod
    def handle_assattr_type(node, parent):
        """handle an astroid.AssAttr node

        handle instance_attrs_type
        """
        try:
            values = set(node.infer())
            current = set(parent.instance_attrs_type[node.attrname])
            parent.instance_attrs_type[node.attrname] = list(current | values)
        except astroid.InferenceError:
            pass

    def visit_import(self, node):
        """visit an astroid.Import node

        resolve module dependencies
        """
        context_file = node.root().file
        for name in node.names:
            relative = modutils.is_relative(name[0], context_file)
            self._imported_module(node, name[0], relative)

    def visit_importfrom(self, node):
        """visit an astroid.From node

        resolve module dependencies
        """
        basename = node.modname
        context_file = node.root().file
        if context_file is not None:
            relative = modutils.is_relative(basename, context_file)
        else:
            relative = False
        for name in node.names:
            # wildcard imports carry no resolvable name
            if name[0] == '*':
                continue
            # analyze dependencies
            fullname = '%s.%s' % (basename, name[0])
            if fullname.find('.') > -1:
                try:
                    # TODO: don't use get_module_part,
                    # missing package precedence
                    fullname = modutils.get_module_part(fullname,
                                                        context_file)
                except ImportError:
                    continue
            if fullname != basename:
                self._imported_module(node, fullname, relative)

    def compute_module(self, context_name, mod_path):
        """return true if the module should be added to dependencies"""
        package_dir = os.path.dirname(self.project.path)
        # a module never depends on itself; only modules inside the project
        # package are recorded
        if context_name == mod_path:
            return 0
        elif modutils.is_standard_module(mod_path, (package_dir,)):
            return 1
        return 0

    def _imported_module(self, node, mod_path, relative):
        """Notify an imported module, used to analyze dependencies"""
        module = node.root()
        context_name = module.name
        if relative:
            # turn the relative path into an absolute dotted name
            mod_path = '%s.%s' % ('.'.join(context_name.split('.')[:-1]),
                                  mod_path)
        if self.compute_module(context_name, mod_path):
            # handle dependencies
            if not hasattr(module, 'depends'):
                module.depends = []
            mod_paths = module.depends
            if mod_path not in mod_paths:
                mod_paths.append(mod_path)
class Project(object):
    """A project handles a set of modules / packages."""

    def __init__(self, name=''):
        self.name = name
        self.path = None
        self.modules = []
        self.locals = {}
        # Expose the locals mapping through dict-like accessors bound on
        # the instance itself.
        self.__getitem__ = self.locals.__getitem__
        self.__iter__ = self.locals.__iter__
        self.values = self.locals.values
        self.keys = self.locals.keys
        self.items = self.locals.items

    def add_module(self, node):
        """Register a parsed module under its qualified name."""
        self.locals[node.name] = node
        self.modules.append(node)

    def get_module(self, name):
        """Return the module registered under *name*."""
        return self.locals[name]

    def get_children(self):
        """Return all registered modules."""
        return self.modules

    def __repr__(self):
        return '<Project %r at %s (%s modules)>' % (self.name, id(self),
                                                    len(self.modules))
def project_from_files(files, func_wrapper=_astroid_wrapper,
                       project_name="no name",
                       black_list=('CVS',)):
    """Return a Project built from a list of files or module names."""
    astroid_manager = manager.AstroidManager()
    project = Project(project_name)
    for entry in files:
        # Resolve the entry to an actual file path: a dotted module name,
        # a package directory, or a plain file.
        if not os.path.exists(entry):
            fpath = modutils.file_from_modpath(entry.split('.'))
        elif os.path.isdir(entry):
            fpath = os.path.join(entry, '__init__.py')
        else:
            fpath = entry
        module_ast = func_wrapper(astroid_manager.ast_from_file, fpath)
        if module_ast is None:
            continue
        # XXX why is first file defining the project.path ?
        project.path = project.path or module_ast.file
        project.add_module(module_ast)
        base_name = module_ast.name
        # recurse in package except if __init__ was explicitly given
        if module_ast.package and '__init__' not in entry:
            # recurse on others packages / modules if this is a package
            for sub_path in modutils.get_module_files(
                    os.path.dirname(module_ast.file), black_list):
                sub_ast = func_wrapper(astroid_manager.ast_from_file,
                                       sub_path)
                if sub_ast is None or sub_ast.name == base_name:
                    continue
                project.add_module(sub_ast)
    return project
| {
"content_hash": "2cbf493620296e03aa8d62bae486c7f4",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 79,
"avg_line_length": 33.896067415730336,
"alnum_prop": 0.5849838402254082,
"repo_name": "mith1979/ansible_automation",
"id": "4fd86b8a1e5eae9d6c6e14f8aae69dbb3a2130ff",
"size": "12929",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "applied_python/applied_python/lib/python2.7/site-packages/pylint/pyreverse/inspector.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1005"
},
{
"name": "C",
"bytes": "84868"
},
{
"name": "CSS",
"bytes": "50289"
},
{
"name": "HTML",
"bytes": "70428"
},
{
"name": "JavaScript",
"bytes": "105262"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "19073705"
},
{
"name": "Shell",
"bytes": "3747"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
} |
"""Support to help onboard new users."""
from typing import TYPE_CHECKING
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import ConfigType
from homeassistant.loader import bind_hass
from . import views
from .const import (
DOMAIN,
STEP_ANALYTICS,
STEP_CORE_CONFIG,
STEP_INTEGRATION,
STEP_USER,
STEPS,
)
# Key and major schema version for the persisted onboarding Store
# (version 4 corresponds to the migrations in OnboadingStorage below).
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 4
class OnboadingStorage(Store):
    """Store onboarding data."""

    async def _async_migrate_func(self, old_major_version, old_minor_version, old_data):
        """Migrate persisted onboarding data to the current schema version.

        Each major-version bump automatically marks one onboarding step as
        completed, so upgraded installs are not asked to redo it.
        """
        auto_completed_steps = (
            (2, STEP_INTEGRATION),
            (3, STEP_CORE_CONFIG),
            (4, STEP_ANALYTICS),
        )
        for introduced_in, step in auto_completed_steps:
            if old_major_version < introduced_in:
                old_data["done"].append(step)
        return old_data
@bind_hass
@callback
def async_is_onboarded(hass: HomeAssistant) -> bool:
    """Return if Home Assistant has been onboarded.

    Onboarding counts as done when the component has not stored any
    in-progress state (``None``) or has marked itself finished (``True``).
    """
    state = hass.data.get(DOMAIN)
    return state is None or state is True
@bind_hass
@callback
def async_is_user_onboarded(hass: HomeAssistant) -> bool:
    """Return if a user has been created as part of onboarding."""
    if async_is_onboarded(hass):
        return True
    return STEP_USER in hass.data[DOMAIN]["done"]
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the onboarding component.

    Loads persisted onboarding progress; if every step is already done,
    nothing further is registered. Otherwise the in-progress state is
    exposed via ``hass.data[DOMAIN]`` and the onboarding views are set up.
    """
    store = OnboadingStorage(hass, STORAGE_VERSION, STORAGE_KEY, private=True)
    if (data := await store.async_load()) is None:
        data = {"done": []}

    if TYPE_CHECKING:
        assert isinstance(data, dict)

    if STEP_USER not in data["done"]:
        # Users can already have created an owner account via the command line.
        # If so, mark the user step as done.
        if any(user.is_owner for user in await hass.auth.async_get_users()):
            data["done"].append(STEP_USER)
            await store.async_save(data)

    if set(data["done"]) == set(STEPS):
        return True

    hass.data[DOMAIN] = data

    await views.async_setup(hass, data, store)

    return True
| {
"content_hash": "11d754b235f1f2bc6bd71c084bc88906",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 88,
"avg_line_length": 28.710843373493976,
"alnum_prop": 0.6500209819555183,
"repo_name": "w1ll1am23/home-assistant",
"id": "c36f19fd28d9219941b6607f910ff5a356958d55",
"size": "2383",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/onboarding/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from vt_manager.communication.geni.v3.tests.mockers.resource import ResourceMocker
class VTAMDriverMocker():
    """Mock of the VT AM driver used by the GENIv3 tests.

    Every method fabricates :class:`ResourceMocker` instances instead of
    talking to real servers/VMs, so tests run without infrastructure.
    """

    def __init__(self):
        pass

    def get_specific_server_and_vms(self, urn):
        """Return 10 sliver resources; indexes 6-9 ready, the rest failed."""
        resources = list()
        for i in range(10):
            r = ResourceMocker()
            r.set_component_id("this_is_a_urn_WITH_SLIVER_%d" % i)
            if i > 5:
                r.set_operational_state("geni_ready")
            else:
                r.set_operational_state("geni_failed")
            resources.append(r)
        return resources

    def get_all_servers(self):
        """Return 5 server resources with sequential component ids."""
        resources = list()
        for i in range(5):
            r = ResourceMocker()
            r.set_component_id("this_is_a_urn_%d" % i)
            resources.append(r)
        return resources

    def create_vms(self, urn):
        """Return 2 freshly "created" VMs in the "geni_notready" state."""
        resources = list()
        for i in range(2):
            r = ResourceMocker()
            r.set_component_id("this_is_a_urn_%d" % i)
            # BUG FIX: the original did
            #     r.set_operational_state = "geni_notready"
            # which overwrote the method with a string instead of calling it,
            # leaving the operational state unset.
            r.set_operational_state("geni_notready")
            resources.append(r)
        return resources

    def reserve_vms(self, slice_urn, reservation, expiration=None):
        """Return 2 reserved VM resources (no state set)."""
        resources = list()
        for i in range(2):
            r = ResourceMocker()
            r.set_component_id("this_is_a_urn_%d" % i)
            resources.append(r)
        return resources

    def start_vm(self, urn):
        """Return a resource reporting "geni_ready"."""
        r = ResourceMocker()
        r.set_operational_state("geni_ready")
        return r

    def stop_vm(self, urn):
        """Return a resource reporting "geni_notready"."""
        r = ResourceMocker()
        r.set_operational_state("geni_notready")
        return r

    def reboot_vm(self, urn):
        """Return a resource reporting "geni_ready"."""
        r = ResourceMocker()
        r.set_operational_state("geni_ready")
        return r

    def delete_vm(self, urn):
        """Return a resource reporting "geni_notready"."""
        r = ResourceMocker()
        r.set_operational_state("geni_notready")
        return r

    def renew_vms(self, expiration, urn):
        """Return a resource reporting "geni_ready"."""
        r = ResourceMocker()
        r.set_operational_state("geni_ready")
        return r

    def get_geni_best_effort_mode(self):
        """Pretend best-effort mode is enabled."""
        return True

    def set_geni_best_effort_mode(self, value):
        """Accept any value and report success."""
        return True
| {
"content_hash": "dcb16c98f77b7e35b9b1965a4d049759",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 82,
"avg_line_length": 27.558441558441558,
"alnum_prop": 0.553722902921772,
"repo_name": "dana-i2cat/felix",
"id": "9ad59378d39b403a3c9f9eec29db1f12cc4849bd",
"size": "2122",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vt_manager/src/python/vt_manager/communication/geni/v3/tests/mockers/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "337811"
},
{
"name": "DTrace",
"bytes": "370"
},
{
"name": "Elixir",
"bytes": "17243"
},
{
"name": "Emacs Lisp",
"bytes": "1098"
},
{
"name": "Groff",
"bytes": "1735"
},
{
"name": "HTML",
"bytes": "660363"
},
{
"name": "Java",
"bytes": "18362"
},
{
"name": "JavaScript",
"bytes": "838960"
},
{
"name": "Makefile",
"bytes": "11211"
},
{
"name": "Perl",
"bytes": "5416"
},
{
"name": "Python",
"bytes": "7875883"
},
{
"name": "Shell",
"bytes": "258079"
}
],
"symlink_target": ""
} |
import signal
import socket
from time import monotonic
import stomp
from stomp.listener import TestListener
from .testutils import *
@pytest.fixture()
def testlistener():
    """Provide a fresh TestListener with verbose logging enabled."""
    listener = TestListener("123", print_to_log=True)
    yield listener
@pytest.fixture()
def conn(testlistener):
    """Yield a connected STOMP 1.1 connection; disconnect on teardown."""
    connection = stomp.Connection11(get_default_host())
    connection.set_listener("testlistener", testlistener)
    connection.connect(get_default_user(), get_default_password(), wait=True)
    yield connection
    connection.disconnect(receipt=None)
@pytest.fixture()
def invalidconn(testlistener):
    """Yield an unconnected connection aimed at a non-routable address."""
    # 192.0.2.0/24 is TEST-NET-1 (RFC 5737), guaranteed unreachable.
    unreachable = [("192.0.2.0", 60000)]
    connection = stomp.Connection(unreachable, timeout=5, reconnect_attempts_max=1)
    connection.set_listener("testlistener", testlistener)
    yield connection
@pytest.fixture()
def ipv4only(monkeypatch):
    """
    Restrict DNS resolution to IPv4 results for the duration of a test.

    Avoids long timeouts when a test would otherwise try the IPv6 address
    of a service that only listens on IPv4.
    """
    real_getaddrinfo = socket.getaddrinfo

    def ipv4_getaddrinfo(*args, **kwargs):
        results = real_getaddrinfo(*args, **kwargs)
        return [entry for entry in results if entry[0] == socket.AF_INET]

    monkeypatch.setattr(socket, "getaddrinfo", ipv4_getaddrinfo)
class TestBasic(object):
    """Integration tests against a live STOMP broker (hosts/credentials come
    from .testutils). Several tests use fixed sleeps, so they are inherently
    timing-sensitive on slow brokers."""

    def test_subscribe_and_send(self, conn, testlistener):
        """Round-trip a JSON message and verify content-type is preserved."""
        queuename = "/queue/test1-%s" % testlistener.timestamp
        conn.subscribe(destination=queuename, id=1, ack="auto")
        conn.send(body='{"val": "this is a test"}', destination=queuename,
                  content_type="application/json", receipt="123")
        validate_send(conn)
        (headers, body) = testlistener.get_latest_message()
        assert "content-type" in headers
        assert headers["content-type"] == "application/json"

    def test_default_to_localhost(self, ipv4only):
        """A Connection created with no host list should reach localhost."""
        conn = stomp.Connection()
        listener = TestListener("123", print_to_log=True)
        queuename = "/queue/test1-%s" % listener.timestamp
        conn.set_listener("testlistener", listener)
        conn.connect(get_rabbitmq_user(), get_rabbitmq_password(), wait=True)
        conn.send(body="this is a test", destination=queuename, receipt="123")
        conn.disconnect(receipt=None)

    def test_commit(self, conn):
        """Messages sent in a transaction arrive only after commit."""
        timestamp = time.strftime("%Y%m%d%H%M%S")
        queuename = "/queue/test2-%s" % timestamp
        conn.subscribe(destination=queuename, id=1, ack="auto")
        trans_id = conn.begin()
        conn.send(body="this is a test1", destination=queuename, transaction=trans_id)
        conn.send(body="this is a test2", destination=queuename, transaction=trans_id)
        conn.send(body="this is a test3", destination=queuename, transaction=trans_id, receipt="123")
        # Give the broker time to (not) deliver before the commit.
        time.sleep(3)
        listener = conn.get_listener("testlistener")
        assert listener.connections == 1, "should have received 1 connection acknowledgement"
        assert listener.messages == 0, "should not have received any messages"
        conn.commit(transaction=trans_id)
        listener.wait_for_message()
        time.sleep(3)
        assert listener.messages == 3, "should have received 3 messages"
        assert listener.errors == 0, "should not have received any errors"

    def test_abort(self, conn):
        """Messages sent in an aborted transaction are never delivered."""
        timestamp = time.strftime("%Y%m%d%H%M%S")
        queuename = "/queue/test3-%s" % timestamp
        conn.subscribe(destination=queuename, id=1, ack="auto")
        trans_id = conn.begin()
        conn.send(body="this is a test1", destination=queuename, transaction=trans_id)
        conn.send(body="this is a test2", destination=queuename, transaction=trans_id)
        conn.send(body="this is a test3", destination=queuename, transaction=trans_id)
        time.sleep(3)
        listener = conn.get_listener("testlistener")
        assert listener.connections == 1, "should have received 1 connection acknowledgement"
        assert listener.messages == 0, "should not have received any messages"
        conn.abort(transaction=trans_id)
        time.sleep(3)
        assert listener.messages == 0, "should not have received any messages"
        assert listener.errors == 0, "should not have received any errors"

    def test_timeout(self, invalidconn):
        """Connecting to an unreachable host fails only after the timeout."""
        ms = monotonic()
        try:
            invalidconn.connect("test", "test")
            pytest.fail("shouldn't happen")
        except stomp.exception.ConnectFailedException:
            pass  # success!
        ms = monotonic() - ms
        assert ms > 5.0, "connection timeout should have been at least 5 seconds"

    def test_childinterrupt(self, conn):
        """A SIGCHLD during normal operation must not break the connection."""
        def childhandler(signum, frame):
            print("received child signal")
        oldhandler = signal.signal(signal.SIGCHLD, childhandler)
        timestamp = time.strftime("%Y%m%d%H%M%S")
        queuename = "/queue/test5-%s" % timestamp
        conn.subscribe(destination=queuename, id=1, ack="auto", receipt="123")
        listener = conn.get_listener("testlistener")
        listener.wait_on_receipt()
        conn.send(body="this is an interrupt test 1", destination=queuename)
        print("causing signal by starting child process")
        # The child exiting raises SIGCHLD in this process.
        os.system("sleep 1")
        time.sleep(1)
        signal.signal(signal.SIGCHLD, oldhandler)
        print("completed signal section")
        conn.send(body="this is an interrupt test 2", destination=queuename, receipt="123")
        listener.wait_for_message()
        assert listener.connections == 1, "should have received 1 connection acknowledgment"
        assert listener.errors == 0, "should not have received any errors"
        assert conn.is_connected(), "should still be connected to STOMP provider"

    def test_clientack(self, conn):
        """Client-ack subscription: receive a message and ACK it."""
        timestamp = time.strftime("%Y%m%d%H%M%S")
        queuename = "/queue/testclientack-%s" % timestamp
        conn.subscribe(destination=queuename, id=1, ack="client")
        conn.send(body="this is a test", destination=queuename, receipt="123")
        listener = conn.get_listener("testlistener")
        listener.wait_for_message()
        (headers, _) = listener.get_latest_message()
        message_id = headers["message-id"]
        subscription = headers["subscription"]
        conn.ack(message_id, subscription)

    def test_clientnack(self, conn):
        """Client-ack subscription: receive a message and NACK it."""
        timestamp = time.strftime("%Y%m%d%H%M%S")
        queuename = "/queue/testclientnack-%s" % timestamp
        conn.subscribe(destination=queuename, id=1, ack="client")
        conn.send(body="this is a test", destination=queuename, receipt="123")
        listener = conn.get_listener("testlistener")
        listener.wait_for_message()
        (headers, _) = listener.get_latest_message()
        message_id = headers["message-id"]
        subscription = headers["subscription"]
        conn.nack(message_id, subscription)

    def test_specialchars(self, conn):
        """Header values with colon, backslash and newline survive transit."""
        timestamp = time.strftime("%Y%m%d%H%M%S")
        queuename = "/queue/testspecialchars-%s" % timestamp
        conn.subscribe(destination=queuename, id=1, ack="client")
        hdrs = {
            "special-1": "test with colon : test",
            "special-2": "test with backslash \\ test",
            "special-3": "test with newline \n"
        }
        conn.send(body="this is a test", headers=hdrs, destination=queuename, receipt="123")
        listener = conn.get_listener("testlistener")
        listener.wait_for_message()
        (headers, _) = listener.get_latest_message()
        _ = headers["message-id"]
        _ = headers["subscription"]
        assert "special-1" in headers
        assert "test with colon : test" == headers["special-1"]
        assert "special-2" in headers
        assert "test with backslash \\ test" == headers["special-2"]
        assert "special-3" in headers
        assert "test with newline \n" == headers["special-3"]

    def test_host_bind_port(self, ipv4only):
        """Connect while binding the local socket to an explicit host/port."""
        conn = stomp.Connection(bind_host_port=("localhost", next_free_port()))
        listener = TestListener("981", print_to_log=True)
        queuename = "/queue/testbind-%s" % listener.timestamp
        conn.set_listener("testlistener", listener)
        conn.connect(get_rabbitmq_user(), get_rabbitmq_password(), wait=True)
        conn.send(body="this is a test using local bind port", destination=queuename, receipt="981")
        conn.disconnect(receipt=None)
class TestConnectionErrors(object):
    """Tests for connection failures with invalid credentials."""

    def test_connect_wait_error(self):
        """connect(wait=True) with bad credentials must raise.

        The original wrapped ``pytest.fail`` in a bare ``except:``; on pytest
        versions where ``Failed`` subclasses ``Exception`` the failure itself
        was swallowed and the test could never fail. ``pytest.raises`` states
        the intent directly.
        """
        conn = stomp.Connection(get_default_host())
        with pytest.raises(Exception):
            conn.connect("invalid", "user", True)

    def test_connect_nowait_error(self):
        """connect(wait=False) with bad credentials must not raise."""
        conn = stomp.Connection(get_default_host())
        try:
            conn.connect("invalid", "user", False)
        except Exception:
            pytest.fail("Shouldn't happen")
        # Outside the try so an assertion failure is reported as itself,
        # not re-wrapped by the except clause.
        assert not conn.is_connected(), "Should not be connected"
| {
"content_hash": "ecd9ea65ce18d8582d0506fb4a61cd03",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 101,
"avg_line_length": 37.05416666666667,
"alnum_prop": 0.6439896547846621,
"repo_name": "jasonrbriggs/stomp.py",
"id": "75147ca7c1918f6022ca1dc233d8dfc32d7f5790",
"size": "8893",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/test_basic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2450"
},
{
"name": "Makefile",
"bytes": "5120"
},
{
"name": "Python",
"bytes": "229717"
},
{
"name": "Shell",
"bytes": "76"
}
],
"symlink_target": ""
} |
from os import path
from json import dumps
from flask import Flask, redirect, request
from flask.ext.autodoc import Autodoc
# Example blog app demonstrating Flask-Autodoc groups with a custom template.
app = Flask(__name__)
app.debug = True
auto = Autodoc(app)
# In-memory "database": every User/Post instance registers itself here.
users = []
posts = []
class User(object):
    """A blog user; instances register themselves in the module-level
    ``users`` list and take their position in it as their id."""

    def __init__(self, username):
        self.username = username
        users.append(self)
        # Just appended, so this instance sits at the end of the list.
        self.id = len(users) - 1

    def __repr__(self):
        # JSON view of the instance attributes (username, id).
        return dumps(self.__dict__)
class Post(object):
    """A blog post; instances register themselves in the module-level
    ``posts`` list and take their position in it as their id."""

    def __init__(self, title, content, author):
        self.title = title
        self.content = content
        # BUG FIX: ``author`` was accepted but never stored. Keep only the
        # author's id so __repr__ (json.dumps of __dict__) stays serializable.
        self.author_id = author.id
        posts.append(self)
        self.id = posts.index(self)

    def __repr__(self):
        return dumps(self.__dict__)
# Seed sample data so the endpoints return something out of the box.
u = User('acoomans')
Post('First post', 'This is the first awesome post', u)
Post('Second post', 'This is another even more awesome post', u)
@app.route('/')
@app.route('/posts')
@auto.doc(groups=['posts', 'public', 'private'])
def get_posts():
    """Return all posts."""
    # Each Post renders itself as JSON via __repr__.
    return str(posts)
@app.route('/post/<int:id>')
@auto.doc(groups=['posts', 'public', 'private'])
def get_post(id):
    """Return the post for the given id."""
    post = posts[id]
    return str(post)
@app.route('/post', methods=["POST"])
@auto.doc(groups=['posts', 'private'],
          form_data=['title', 'content', 'authorid'])
def post_post():
    """Create a new post."""
    # BUG FIX: form values are strings; the original indexed ``users`` with
    # a str (always TypeError). ``type=int`` converts, yielding None if the
    # field is missing or not numeric.
    authorid = request.form.get('authorid', type=int)
    Post(request.form['title'],
         request.form['content'],
         users[authorid])
    return redirect("/posts")
@app.route('/users')
@auto.doc(groups=['users', 'public', 'private'])
def get_users():
    """Return all users."""
    # Each User renders itself as JSON via __repr__.
    return str(users)
@app.route('/user/<int:id>')
@auto.doc(groups=['users', 'public', 'private'])
def get_user(id):
    """Return the user for the given id."""
    user = users[id]
    return str(user)
@app.route('/users', methods=['POST'])
@auto.doc(groups=['users', 'private'],
          form_data=['username'])
def post_user():
    """Creates a new user."""
    # BUG FIX: the view previously took an ``id`` parameter the route never
    # supplies (Flask would raise TypeError on dispatch), and the redirect
    # response was built but not returned.
    User(request.form['username'])
    return redirect('/users')
@app.route('/admin', methods=['GET'])
@auto.doc(groups=['private'])
def admin():
    """Admin interface."""
    page = 'Admin interface'
    return page
@app.route('/doc/')
@app.route('/doc/public')
def public_doc():
    """Render documentation for endpoints in the "public" group."""
    return auto.html(
        groups=['public'],
        title='Blog Documentation with Custom template',
        template="autodoc_custom.html",
    )
@app.route('/doc/private')
def private_doc():
    """Render documentation for endpoints in the "private" group."""
    return auto.html(
        groups=['private'],
        title='Private Documentation with Custom template',
        template="autodoc_custom.html",
    )
if __name__ == '__main__':
    # Run with Flask's built-in dev server (debug was enabled above).
    app.run()
| {
"content_hash": "a273854249d3087dc08b90863fc44033",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 124,
"avg_line_length": 22.654867256637168,
"alnum_prop": 0.613671875,
"repo_name": "acoomans/flask-autodoc",
"id": "f32436c897d00d4004ea25ee06a1db13a37ede43",
"size": "2560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/custom/blog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2619"
},
{
"name": "Python",
"bytes": "18869"
}
],
"symlink_target": ""
} |
"""
Circular Date & Time Picker for Kivy
====================================
(currently only time, date coming soon)
Based on [CircularLayout](https://github.com/kivy-garden/garden.circularlayout).
The main aim is to provide a date and time selector similar to the
one found in Android KitKat+.
Simple usage
------------
Import the widget with
```python
from kivy.garden.circulardatetimepicker import CircularTimePicker
```
then use it! That's it!
```python
c = CircularTimePicker()
c.bind(time=self.set_time)
root.add_widget(c)
```
in Kv language:
```
<TimeChooserPopup@Popup>:
BoxLayout:
orientation: "vertical"
CircularTimePicker
Button:
text: "Dismiss"
size_hint_y: None
height: "40dp"
on_release: root.dismiss()
```
"""
from kivy.animation import Animation
from kivy.clock import Clock
from kivymd.vendor.circleLayout import CircularLayout
from kivy.graphics import Line, Color, Ellipse
from kivy.lang import Builder
from kivy.properties import NumericProperty, BoundedNumericProperty, \
ObjectProperty, StringProperty, DictProperty, \
ListProperty, OptionProperty, BooleanProperty, \
ReferenceListProperty, AliasProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.metrics import dp
from kivymd.theming import ThemableBehavior
from math import atan, pi, radians, sin, cos
import sys
import datetime
if sys.version_info[0] > 2:
    # Python 3 has no ``xrange``; alias the built-in ``range``, which already
    # has the 1/2/3-argument semantics the old shim tried to emulate. The
    # shim was broken for one-argument calls (it produced range(first, None))
    # and used truthiness on the step, so an explicit step of 0 was dropped.
    xrange = range
def map_number(x, in_min, in_max, out_min, out_max):
    """Linearly remap ``x`` from [in_min, in_max] onto [out_min, out_max]."""
    # Same operation order as a plain one-liner, split for readability.
    scaled = (x - in_min) * (out_max - out_min) / (in_max - in_min)
    return scaled + out_min
def rgb_to_hex(*color):
    """Convert RGB(A) components in [0, 1] to a lowercase '#rrggbb...' string.

    Each component is scaled to 0-255 and rendered as two hex digits.
    """
    # "{:02x}" zero-pads and lowercases directly; the old hex()[2:] slicing
    # combined with a fragile ">02" format spec did the same less clearly.
    return "#" + "".join("{:02x}".format(int(col * 255)) for col in color)
Builder.load_string("""
<Number>:
text_size: self.size
valign: "middle"
halign: "center"
font_size: self.height * self.size_factor
<CircularNumberPicker>:
canvas.before:
PushMatrix
Scale:
origin: self.center_x + self.padding[0] - self.padding[2], self.center_y + self.padding[3] - self.padding[1]
x: self.scale
y: self.scale
canvas.after:
PopMatrix
<CircularTimePicker>:
orientation: "vertical"
spacing: "20dp"
FloatLayout:
anchor_x: "center"
anchor_y: "center"
size_hint_y: 1./3
size_hint_x: 1
size: root.size
pos: root.pos
GridLayout:
cols: 2
spacing: "10dp"
size_hint_x: None
width: self.minimum_width
pos_hint: {'center_x': .5, 'center_y': .5}
Label:
id: timelabel
text: root.time_text
markup: True
halign: "right"
valign: "middle"
# text_size: self.size
size_hint_x: None #.6
width: self.texture_size[0]
font_size: self.height * .75
Label:
id: ampmlabel
text: root.ampm_text
markup: True
halign: "left"
valign: "middle"
# text_size: self.size
size_hint_x: None #.4
width: self.texture_size[0]
font_size: self.height * .3
FloatLayout:
id: picker_container
#size_hint_y: 2./3
_bound: {}
""")
# Label subclass used for the digits on the dial; the kv rule above sets
# font_size to height * size_factor.
class Number(Label):
    """The class used to show the numbers in the selector.
    """
    size_factor = NumericProperty(.5)
    """Font size scale.
    :attr:`size_factor` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.5.
    """
class CircularNumberPicker(CircularLayout):
    """A circular number picker based on CircularLayout. A selector will
    help you pick a number. You can also set :attr:`multiples_of` to make
    it show only some numbers and use the space in between for the other
    numbers.
    """
    min = NumericProperty(0)
    """The first value of the range.
    :attr:`min` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.
    """
    max = NumericProperty(0)
    """The last value of the range. Note that it behaves like xrange, so
    the actual last displayed value will be :attr:`max` - 1.
    :attr:`max` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.
    """
    range = ReferenceListProperty(min, max)
    """Packs :attr:`min` and :attr:`max` into a list for convenience. See
    their documentation for further information.
    :attr:`range` is a :class:`~kivy.properties.ReferenceListProperty`.
    """
    multiples_of = NumericProperty(1)
    """Only show numbers that are multiples of this number. The other numbers
    will be selectable, but won't have their own label.
    :attr:`multiples_of` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 1.
    """
    # selector_color = ListProperty([.337, .439, .490])
    selector_color = ListProperty([1, 1, 1])
    """Color of the number selector. RGB.
    :attr:`selector_color` is a :class:`~kivy.properties.ListProperty` and
    defaults to [.337, .439, .490] (material green).
    """
    color = ListProperty([0, 0, 0])
    """Color of the number labels and of the center dot. RGB.
    :attr:`color` is a :class:`~kivy.properties.ListProperty` and
    defaults to [1, 1, 1] (white).
    """
    selector_alpha = BoundedNumericProperty(.3, min=0, max=1)
    """Alpha value for the transparent parts of the selector.
    :attr:`selector_alpha` is a :class:`~kivy.properties.BoundedNumericProperty` and
    defaults to 0.3 (min=0, max=1).
    """
    # NOTE(review): declared with None although the docstring says it
    # defaults to :attr:`min`; __init__ assigns self.selected = self.min.
    # Presumably this NumericProperty accepts None in this kivy version —
    # confirm.
    selected = NumericProperty(None)
    """Currently selected number.
    :attr:`selected` is a :class:`~kivy.properties.NumericProperty` and
    defaults to :attr:`min`.
    """
    number_size_factor = NumericProperty(.5)
    """Font size scale factor fot the :class:`Number`s.
    :attr:`number_size_factor` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 0.5.
    """
    number_format_string = StringProperty("{}")
    """String that will be formatted with the selected number as the first argument.
    Can be anything supported by :meth:`str.format` (es. "{:02d}").
    :attr:`number_format_string` is a :class:`~kivy.properties.StringProperty` and
    defaults to "{}".
    """
    scale = NumericProperty(1)
    """Canvas scale factor. Used in :class:`CircularTimePicker` transitions.
    :attr:`scale` is a :class:`~kivy.properties.NumericProperty` and
    defaults to 1.
    """
    # Handles to the canvas instructions drawn in __init__; updated in
    # on_selected as the selection/geometry changes.
    _selection_circle = ObjectProperty(None)
    _selection_line = ObjectProperty(None)
    _selection_dot = ObjectProperty(None)
    _selection_dot_color = ObjectProperty(None)
    _selection_color = ObjectProperty(None)
    _center_dot = ObjectProperty(None)
    _center_color = ObjectProperty(None)
    def _get_items(self):
        """Number of selectable values (max - min)."""
        return self.max - self.min
    items = AliasProperty(_get_items, None)
    def _get_shown_items(self):
        """Number of values that get their own label (multiples only)."""
        sh = 0
        for i in xrange(*self.range):
            if i % self.multiples_of == 0:
                sh += 1
        return sh
    shown_items = AliasProperty(_get_shown_items, None)
    def __init__(self, **kw):
        """Build the selector canvas instructions and wire up rebuild/redraw
        triggers for range and geometry changes."""
        self._trigger_genitems = Clock.create_trigger(self._genitems, -1)
        self.bind(min=self._trigger_genitems,
                  max=self._trigger_genitems,
                  multiples_of=self._trigger_genitems)
        super(CircularNumberPicker, self).__init__(**kw)
        self.selected = self.min
        self.bind(selected=self.on_selected,
                  pos=self.on_selected,
                  size=self.on_selected)
        # Center of the dial, compensated for asymmetric padding.
        cx = self.center_x + self.padding[0] - self.padding[2]
        cy = self.center_y + self.padding[3] - self.padding[1]
        sx, sy = self.pos_for_number(self.selected)
        # epos/esize: selection circle; dpos/dsize: small dot shown on
        # unlabeled values; cpos/csize: center dot.
        epos = [i - (self.delta_radii * self.number_size_factor) for i in (sx, sy)]
        esize = [self.delta_radii * self.number_size_factor * 2] * 2
        dsize = [i * .3 for i in esize]
        dpos = [i + esize[0] / 2. - dsize[0] / 2. for i in epos]
        csize = [i * .05 for i in esize]
        cpos = [i - csize[0] / 2. for i in (cx, cy)]
        # The dot is only visible when the selected value has no label.
        dot_alpha = 0 if self.selected % self.multiples_of == 0 else 1
        color = list(self.selector_color)
        with self.canvas:
            self._selection_color = Color(*(color + [self.selector_alpha]))
            self._selection_circle = Ellipse(pos=epos, size=esize)
            self._selection_line = Line(points=[cx, cy, sx, sy], width=dp(1.25))
            self._selection_dot_color = Color(*(color + [dot_alpha]))
            self._selection_dot = Ellipse(pos=dpos, size=dsize)
            self._center_color = Color(*self.color)
            self._center_dot = Ellipse(pos=cpos, size=csize)
        self.bind(selector_color=lambda ign, u: setattr(self._selection_color, "rgba", u + [self.selector_alpha]))
        self.bind(selector_color=lambda ign, u: setattr(self._selection_dot_color, "rgb", u))
        self.bind(selector_color=lambda ign, u: self.dot_is_none())
        self.bind(color=lambda ign, u: setattr(self._center_color, "rgb", u))
        Clock.schedule_once(self._genitems)
        Clock.schedule_once(self.on_selected)  # Just to make sure pos/size are set
    def dot_is_none(self, *args):
        """Show the selection dot only when the selected value is unlabeled."""
        dot_alpha = 0 if self.selected % self.multiples_of == 0 else 1
        if self._selection_dot_color:
            self._selection_dot_color.a = dot_alpha
    def _genitems(self, *a):
        """Rebuild the Number labels for all values that are multiples."""
        self.clear_widgets()
        for i in xrange(*self.range):
            if i % self.multiples_of != 0:
                continue
            n = Number(text=self.number_format_string.format(i), size_factor=self.number_size_factor, color=self.color)
            self.bind(color=n.setter("color"))
            self.add_widget(n)
    def on_touch_down(self, touch):
        # Grab the touch so subsequent moves keep updating the selection.
        if not self.collide_point(*touch.pos):
            return
        touch.grab(self)
        self.selected = self.number_at_pos(*touch.pos)
        # Wrap-around: position 60 is the same dial slot as 0.
        if self.selected == 60:
            self.selected = 0
    def on_touch_move(self, touch):
        if touch.grab_current is not self:
            return super(CircularNumberPicker, self).on_touch_move(touch)
        self.selected = self.number_at_pos(*touch.pos)
        if self.selected == 60:
            self.selected = 0
    def on_touch_up(self, touch):
        if touch.grab_current is not self:
            return super(CircularNumberPicker, self).on_touch_up(touch)
        touch.ungrab(self)
    def on_selected(self, *a):
        """Reposition the selector canvas instructions for the new selection
        (same geometry computation as in __init__)."""
        cx = self.center_x + self.padding[0] - self.padding[2]
        cy = self.center_y + self.padding[3] - self.padding[1]
        sx, sy = self.pos_for_number(self.selected)
        epos = [i - (self.delta_radii * self.number_size_factor) for i in (sx, sy)]
        esize = [self.delta_radii * self.number_size_factor * 2] * 2
        dsize = [i * .3 for i in esize]
        dpos = [i + esize[0] / 2. - dsize[0] / 2. for i in epos]
        csize = [i * .05 for i in esize]
        cpos = [i - csize[0] / 2. for i in (cx, cy)]
        dot_alpha = 0 if self.selected % self.multiples_of == 0 else 1
        if self._selection_circle:
            self._selection_circle.pos = epos
            self._selection_circle.size = esize
        if self._selection_line:
            self._selection_line.points = [cx, cy, sx, sy]
        if self._selection_dot:
            self._selection_dot.pos = dpos
            self._selection_dot.size = dsize
        if self._selection_dot_color:
            self._selection_dot_color.a = dot_alpha
        if self._center_dot:
            self._center_dot.pos = cpos
            self._center_dot.size = csize
    def pos_for_number(self, n):
        """Returns the center x, y coordinates for a given number.
        """
        if self.items == 0:
            return 0, 0
        radius = min(self.width - self.padding[0] - self.padding[2],
                     self.height - self.padding[1] - self.padding[3]) / 2.
        # Numbers sit on the ring midway between inner and outer radius.
        middle_r = radius * sum(self.radius_hint) / 2.
        cx = self.center_x + self.padding[0] - self.padding[2]
        cy = self.center_y + self.padding[3] - self.padding[1]
        sign = +1.
        angle_offset = radians(self.start_angle)
        if self.direction == 'cw':
            angle_offset = 2 * pi - angle_offset
            sign = -1.
        quota = 2 * pi / self.items
        mult_quota = 2 * pi / self.shown_items
        angle = angle_offset + n * sign * quota
        # Half-slot shift so the selector centers on the number's slot.
        if self.items == self.shown_items:
            angle += quota / 2
        else:
            angle -= mult_quota / 2
        # kived: looking it up, yes. x = cos(angle) * radius + centerx; y = sin(angle) * radius + centery
        x = cos(angle) * middle_r + cx
        y = sin(angle) * middle_r + cy
        return x, y
    def number_at_pos(self, x, y):
        """Returns the number at a given x, y position. The number is found
        using the widget's center as a starting point for angle calculations.
        Not thoroughly tested, may yield wrong results.
        """
        if self.items == 0:
            return self.min
        cx = self.center_x + self.padding[0] - self.padding[2]
        cy = self.center_y + self.padding[3] - self.padding[1]
        # Touch position relative to the dial center.
        lx = x - cx
        ly = y - cy
        quota = 2 * pi / self.items
        mult_quota = 2 * pi / self.shown_items
        # atan only covers (-pi/2, pi/2); fix up the quadrant by hand.
        if lx == 0 and ly > 0:
            angle = pi / 2
        elif lx == 0 and ly < 0:
            angle = 3 * pi / 2
        else:
            angle = atan(ly / lx)
            if lx < 0 < ly:
                angle += pi
            if lx > 0 > ly:
                angle += 2 * pi
            if lx < 0 and ly < 0:
                angle += pi
        angle += radians(self.start_angle)
        if self.direction == "cw":
            angle = 2 * pi - angle
        if mult_quota != quota:
            angle -= mult_quota / 2
        # Normalize into [0, 2*pi) before mapping the angle onto a slot.
        if angle < 0:
            angle += 2 * pi
        elif angle > 2 * pi:
            angle -= 2 * pi
        return int(angle / quota) + self.min
class CircularMinutePicker(CircularNumberPicker):
    """:class:`CircularNumberPicker` preconfigured for minutes (0-59).

    Labels appear every 5 minutes, zero-padded, laid out clockwise.
    """

    def __init__(self, **kw):
        super(CircularMinutePicker, self).__init__(**kw)
        self.min = 0
        self.max = 60
        self.multiples_of = 5
        self.number_format_string = "{:02d}"
        self.direction = "cw"
        self.bind(shown_items=self._update_start_angle)
        Clock.schedule_once(self._update_start_angle)
        Clock.schedule_once(self.on_selected)

    def _update_start_angle(self, *args):
        """Rotate the dial half a label-slot so "00" sits at twelve o'clock."""
        self.start_angle = -(180. / self.shown_items) - 90
class CircularHourPicker(CircularNumberPicker):
    """:class:`CircularNumberPicker` preconfigured for hours (1-12, clockwise).

    A 24-hour ("military") variant was sketched in the original source but
    never enabled; this implementation keeps the 12-hour behavior.
    """

    def __init__(self, **kw):
        super(CircularHourPicker, self).__init__(**kw)
        self.min = 1
        self.max = 13
        self.multiples_of = 1
        self.number_format_string = "{}"
        self.direction = "cw"
        self.bind(shown_items=self._update_start_angle)
        Clock.schedule_once(self._update_start_angle)
        Clock.schedule_once(self.on_selected)

    def _update_start_angle(self, *args):
        """Rotate the dial half a label-slot so "12" sits at twelve o'clock."""
        self.start_angle = (180. / self.shown_items) - 90
class CircularTimePicker(BoxLayout, ThemableBehavior):
"""Widget that makes use of :class:`CircularHourPicker` and
:class:`CircularMinutePicker` to create a user-friendly, animated
time picker like the one seen on Android.
See module documentation for more details.
"""
primary_dark = ListProperty([1, 1, 1])
hours = NumericProperty(0)
"""The hours, in military format (0-23).
:attr:`hours` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0 (12am).
"""
minutes = NumericProperty(0)
"""The minutes.
:attr:`minutes` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
"""
time_list = ReferenceListProperty(hours, minutes)
"""Packs :attr:`hours` and :attr:`minutes` in a list for convenience.
:attr:`time_list` is a :class:`~kivy.properties.ReferenceListProperty`.
"""
# military = BooleanProperty(False)
time_format = StringProperty(
"[color={hours_color}][ref=hours]{hours}[/ref][/color][color={primary_dark}][ref=colon]:[/ref][/color]\
[color={minutes_color}][ref=minutes]{minutes:02d}[/ref][/color]")
"""String that will be formatted with the time and shown in the time label.
Can be anything supported by :meth:`str.format`. Make sure you don't
remove the refs. See the default for the arguments passed to format.
:attr:`time_format` is a :class:`~kivy.properties.StringProperty` and
defaults to "[color={hours_color}][ref=hours]{hours}[/ref][/color]:[color={minutes_color}][ref=minutes]\
{minutes:02d}[/ref][/color]".
"""
ampm_format = StringProperty(
"[color={am_color}][ref=am]AM[/ref][/color]\n[color={pm_color}][ref=pm]PM[/ref][/color]")
"""String that will be formatted and shown in the AM/PM label.
Can be anything supported by :meth:`str.format`. Make sure you don't
remove the refs. See the default for the arguments passed to format.
:attr:`ampm_format` is a :class:`~kivy.properties.StringProperty` and
defaults to "[color={am_color}][ref=am]AM[/ref][/color]\n[color={pm_color}][ref=pm]PM[/ref][/color]".
"""
picker = OptionProperty("hours", options=("minutes", "hours"))
"""Currently shown time picker. Can be one of "minutes", "hours".
:attr:`picker` is a :class:`~kivy.properties.OptionProperty` and
defaults to "hours".
"""
# selector_color = ListProperty([.337, .439, .490])
selector_color = ListProperty([0, 0, 0])
"""Color of the number selector and of the highlighted text. RGB.
:attr:`selector_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [.337, .439, .490] (material green).
"""
color = ListProperty([1, 1, 1])
"""Color of the number labels and of the center dot. RGB.
:attr:`color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 1, 1] (white).
"""
selector_alpha = BoundedNumericProperty(.3, min=0, max=1)
"""Alpha value for the transparent parts of the selector.
:attr:`selector_alpha` is a :class:`~kivy.properties.BoundedNumericProperty` and
defaults to 0.3 (min=0, max=1).
"""
_am = BooleanProperty(True)
_h_picker = ObjectProperty(None)
_m_picker = ObjectProperty(None)
_bound = DictProperty({})
def _get_time(self):
    """Return the current selection as a :class:`datetime.time`.

    When ``time_list`` holds an invalid hour/minute pair, the minutes
    are reset to zero and the corrected time is returned instead.
    """
    hour_minute = self.time_list
    try:
        return datetime.time(*hour_minute)
    except ValueError:
        # Invalid combination -- fall back to "<hours>:00" and retry.
        self.time_list = [self.hours, 0]
        return datetime.time(*self.time_list)
def set_time(self, dt):
    """Setter for the ``time`` alias property.

    :param dt: object exposing ``hour`` and ``minute`` attributes
        (e.g. ``datetime.datetime`` or ``datetime.time``).

    Flips the AM/PM flag for afternoon hours and stores the pair in
    ``time_list`` (which drives the dials via its binding).
    """
    if dt.hour >= 12:
        # BUGFIX: removed a dead `dt.strftime("%I:%M")` call here whose
        # result was discarded -- it had no effect.
        self._am = False
    self.time_list = [dt.hour, dt.minute]
# Alias exposing the selection as a datetime.time (get via _get_time,
# set via set_time); re-evaluated whenever time_list changes.
time = AliasProperty(_get_time, set_time, bind=("time_list",))
"""Selected time as a datetime.time object.
:attr:`time` is an :class:`~kivy.properties.AliasProperty`.
"""
def _get_picker(self):
    # Return the dial widget matching the current picker mode.
    if self.picker == "hours":
        return self._h_picker
    return self._m_picker
# Read-only alias for the currently active dial widget.
_picker = AliasProperty(_get_picker, None)
def _get_time_text(self):
    # Build the markup for the "HH:MM" label.  The active picker's part
    # is rendered black; the inactive part uses the (halved) primary
    # dark colour so the user can see which unit is being edited.
    hc = rgb_to_hex(0, 0, 0) if self.picker == "hours" else rgb_to_hex(*self.primary_dark)
    mc = rgb_to_hex(0, 0, 0) if self.picker == "minutes" else rgb_to_hex(*self.primary_dark)
    # 24h -> 12h display hour: 0 -> 12, 1..12 unchanged, 13..23 -> 1..11.
    h = self.hours == 0 and 12 or self.hours <= 12 and self.hours or self.hours - 12
    m = self.minutes
    primary_dark = rgb_to_hex(*self.primary_dark)
    return self.time_format.format(hours_color=hc,
                                   minutes_color=mc,
                                   hours=h,
                                   minutes=m,
                                   primary_dark=primary_dark)
# Markup shown in the time label; recomputed when the time, the format
# string or the active picker changes.
time_text = AliasProperty(_get_time_text, None, bind=("hours", "minutes", "time_format", "picker"))
def _get_ampm_text(self, *args):
    # Highlight the active half-day (black) and dim the other one with
    # the halved primary-dark colour.
    amc = rgb_to_hex(0, 0, 0) if self._am else rgb_to_hex(*self.primary_dark)
    pmc = rgb_to_hex(0, 0, 0) if not self._am else rgb_to_hex(*self.primary_dark)
    return self.ampm_format.format(am_color=amc,
                                   pm_color=pmc)
# Markup shown in the AM/PM label; recomputed when the half-day flips.
ampm_text = AliasProperty(_get_ampm_text, None, bind=("hours", "ampm_format", "_am"))
def __init__(self, **kw):
    """Build the picker: derive colours from the app theme, create the
    two dial widgets and schedule the initialisation steps that need
    the kv ids to exist."""
    super(CircularTimePicker, self).__init__(**kw)
    # Colours come from the theme; primary_dark is halved to serve as
    # the dimmed colour for inactive label parts.
    self.selector_color = self.theme_cls.primary_color[0], self.theme_cls.primary_color[1], \
        self.theme_cls.primary_color[2]
    self.color = self.theme_cls.text_color
    self.primary_dark = self.theme_cls.primary_dark[0] / 2, self.theme_cls.primary_dark[1] / 2, \
        self.theme_cls.primary_dark[2] / 2
    # Normalise hours into the currently selected half-day.
    self.on_ampm()
    if self.hours >= 12:
        self._am = False
    self.bind(time_list=self.on_time_list,
              picker=self._switch_picker,
              _am=self.on_ampm,
              primary_dark=self._get_ampm_text)
    self._h_picker = CircularHourPicker()
    self.h_picker_touch = False
    self._m_picker = CircularMinutePicker()
    self.animating = False
    # Deferred: these rely on properties/ids that are only ready after
    # the first frame.
    Clock.schedule_once(self.on_selected)
    Clock.schedule_once(self.on_time_list)
    Clock.schedule_once(self._init_later)
    Clock.schedule_once(lambda *a: self._switch_picker(noanim=True))
def _init_later(self, *args):
    """Deferred setup: hook the markup-link callbacks once the kv ids exist."""
    for label in (self.ids.timelabel, self.ids.ampmlabel):
        label.bind(on_ref_press=self.on_ref_press)
def on_ref_press(self, ign, ref):
    """React to taps on the [ref=...] links in the time/AMPM labels.

    Ignored while a dial-swap animation is running.
    """
    if self.animating:
        return
    if ref in ("hours", "minutes"):
        self.picker = ref
    elif ref == "am":
        self._am = True
    elif ref == "pm":
        self._am = False
def on_selected(self, *a):
    """Propagate the active dial's ``selected`` value into hours/minutes.

    The hour dial shows 1..12; AM/PM and the 12-o'clock special cases
    (12 AM -> 0, 12 PM -> 12) are resolved here.
    """
    active = self._picker
    if not active:
        return
    if self.picker == "minutes":
        self.minutes = active.selected
        return
    if self.picker == "hours":
        chosen = active.selected
        if not self._am:
            chosen += 12
            if chosen == 24:  # dial shows 12, PM -> noon
                chosen = 12
        elif chosen == 12:    # dial shows 12, AM -> midnight
            chosen = 0
        self.hours = chosen
def on_time_list(self, *a):
    # Push the current hours/minutes values back onto the two dials.
    if not self._picker:
        return
    # Map the 24h hour onto the 12h dial: 0 -> 12, AM hours unchanged,
    # PM hours -> hour - 12.
    # NOTE(review): for hours == 12 with _am False this evaluates to 0
    # on the dial rather than 12 -- looks suspicious; confirm against
    # the dial's expected 1..12 range before changing.
    self._h_picker.selected = self.hours == 0 and 12 or self._am and self.hours or self.hours - 12
    self._m_picker.selected = self.minutes
    self.on_selected()
def on_ampm(self, *a):
    """Normalise ``hours`` into the half-day selected by ``_am``."""
    if self._am and self.hours >= 12:
        self.hours -= 12
    elif not self._am and self.hours < 12:
        self.hours += 12
def is_animating(self, *args):
    """Mark the widget as mid-animation; input handlers check this flag."""
    self.animating = True

def is_not_animating(self, *args):
    """Clear the flag set by :meth:`is_animating`."""
    self.animating = False
def on_touch_down(self, touch):
    """Remember whether the touch began on the hour dial, then defer to Kivy."""
    self.h_picker_touch = bool(self._h_picker.collide_point(*touch.pos))
    super(CircularTimePicker, self).on_touch_down(touch)
def on_touch_up(self, touch):
    # Auto-advance from the hour dial to the minute dial when the user
    # lifts a touch that started on the hour dial (and no dial owns the
    # grab, i.e. the touch was a tap rather than a drag on the dial).
    try:
        if not self.h_picker_touch:
            # NOTE(review): this early return also skips the
            # super().on_touch_up call below -- confirm that is the
            # intended event propagation.
            return
        if not self.animating:
            if touch.grab_current is not self:
                if self.picker == "hours":
                    self.picker = "minutes"
    except AttributeError:
        # h_picker_touch may not exist before the first touch_down.
        pass
    super(CircularTimePicker, self).on_touch_up(touch)
def _switch_picker(self, *a, **kw):
    """Show the dial matching :attr:`picker` inside ``picker_container``.

    Pass ``noanim=True`` to swap instantly; otherwise the outgoing dial
    zooms/fades out while the incoming one zooms/fades in.
    """
    noanim = "noanim" in kw
    if noanim:
        noanim = kw["noanim"]
    try:
        container = self.ids.picker_container
    except (AttributeError, NameError):
        # kv ids are not populated yet -- retry on the next frame.
        Clock.schedule_once(lambda *a: self._switch_picker(noanim=noanim))
        # BUGFIX: the original fell through here with `container`
        # unbound, raising NameError below; bail out until the retry.
        return
    if self.picker == "hours":
        picker = self._h_picker
        prevpicker = self._m_picker
    elif self.picker == "minutes":
        picker = self._m_picker
        prevpicker = self._h_picker
    # Re-route selection and appearance bindings to the incoming dial.
    if len(self._bound) > 0:
        prevpicker.unbind(selected=self.on_selected)
        self.unbind(**self._bound)
    picker.bind(selected=self.on_selected)
    self._bound = {"selector_color": picker.setter("selector_color"),
                   "color": picker.setter("color"),
                   "selector_alpha": picker.setter("selector_alpha")}
    self.bind(**self._bound)
    # Keep the active dial glued to the container's geometry.
    if len(container._bound) > 0:
        container.unbind(**container._bound)
    container._bound = {"size": picker.setter("size"),
                       "pos": picker.setter("pos")}
    container.bind(**container._bound)
    picker.pos = container.pos
    picker.size = container.size
    picker.selector_color = self.selector_color
    picker.color = self.color
    picker.selector_alpha = self.selector_alpha
    if noanim:
        # Instant swap: drop the old dial, reparent and attach the new one.
        if prevpicker in container.children:
            container.remove_widget(prevpicker)
        if picker.parent:
            picker.parent.remove_widget(picker)
        container.add_widget(picker)
    else:
        self.is_animating()
        if prevpicker in container.children:
            anim = Animation(scale=1.5, d=.5, t="in_back") & Animation(opacity=0, d=.5, t="in_cubic")
            anim.start(prevpicker)
            Clock.schedule_once(lambda *y: container.remove_widget(prevpicker), .5)  # .31)
        picker.scale = 1.5
        picker.opacity = 0
        if picker.parent:
            picker.parent.remove_widget(picker)
        container.add_widget(picker)
        anim = Animation(scale=1, d=.5, t="out_back") & Animation(opacity=1, d=.5, t="out_cubic")
        anim.bind(on_complete=self.is_not_animating)
        Clock.schedule_once(lambda *y: anim.start(picker), .3)
# Manual test entry point: run this module directly to display the picker.
if __name__ == "__main__":
    from kivy.base import runTouchApp
    c = CircularTimePicker()
    runTouchApp(c)
| {
"content_hash": "fe3221aa9d144b85d4291a9c215e0c7f",
"timestamp": "",
"source": "github",
"line_count": 768,
"max_line_length": 120,
"avg_line_length": 34.946614583333336,
"alnum_prop": 0.5836283020976937,
"repo_name": "cruor99/KivyMD",
"id": "fbc739546c9d6799da2785e8439def6741de204a",
"size": "26864",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "kivymd/vendor/circularTimePicker/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "296545"
}
],
"symlink_target": ""
} |
import unittest
import transaction
from pyramid import testing
from .models import DBSession
from .assets import (
get_current_time,
convert_to_utc_seconds,
convert_to_datetime,
)
class TestTunerSetup(unittest.TestCase):
    """Index-view tests against an in-memory SQLite DB seeded with one tuner."""

    def setUp(self):
        self.config = testing.setUp()
        from sqlalchemy import create_engine
        from .models import (
            Base,
            DBSession,
            Tuner,
        )
        engine = create_engine('sqlite://')
        DBSession.configure(bind=engine)
        Base.metadata.create_all(engine)
        # Seed a single tuner for the index view to report.
        with transaction.manager:
            DBSession.add(Tuner(name='Base Tuner'))

    def tearDown(self):
        DBSession.remove()
        testing.tearDown()

    def test_it(self):
        from .views import index
        from .models import Tuner
        page = index(testing.DummyRequest())
        all_tuners = DBSession.query(Tuner).all()
        self.assertEqual(page['recordings'], [])
        self.assertEqual(page['tuners'], all_tuners)
class TestSetRecording(unittest.TestCase):
    """Recording-API tests with a single tuner.

    ``dvr.assets.get_current_time`` is monkeypatched to a deterministic
    clock; the original function is restored in tearDown.
    """

    def setUp(self):
        self.config = testing.setUp()
        import dvr
        from sqlalchemy import create_engine
        from .models import (
            Base,
            DBSession,
            Tuner,
        )
        # BUGFIX: remember the real clock so tearDown can restore it --
        # the original patch leaked into every subsequent test in the run.
        self._real_get_current_time = dvr.assets.get_current_time
        dvr.assets.get_current_time = lambda: convert_to_datetime(0)
        engine = create_engine('sqlite://')
        DBSession.configure(bind=engine)
        Base.metadata.create_all(engine)
        with transaction.manager:
            model = Tuner(name='Base Tuner')
            DBSession.add(model)

    def tearDown(self):
        import dvr
        # Restore the real clock patched out in setUp.
        dvr.assets.get_current_time = self._real_get_current_time
        DBSession.remove()
        testing.tearDown()

    def test_it(self):
        import dvr
        from .views import (
            api_get_recordings,
            api_post_recordings,
        )
        # Make sure no recordings exist
        get_request = testing.DummyRequest()
        page = api_get_recordings(get_request)
        self.assertEqual(page, [])
        self.assertEqual(dvr.assets.get_current_time(), convert_to_datetime(0))
        # Create Recording
        start_time = convert_to_utc_seconds(dvr.assets.get_current_time())
        end_time = start_time + 300
        post_request = testing.DummyRequest(post={
            'channel': 3,
            'start_time': start_time,
            'end_time': end_time,
        })
        page = api_post_recordings(post_request)
        self.assertEqual(page, {
            'id': 1,
            'channel': 3,
            'start_time': start_time,
            'end_time': end_time,
            'tuner': 1,
        })
        # Ensure that two recordings cannot happen at the same time
        page = api_post_recordings(post_request)
        self.assertEqual(page, {
            "status": "failed",
            "message": "No tuner is available.",
        })
        # Ensure that two recordings cannot overlap
        start_time = 150
        end_time = 400
        post_request = testing.DummyRequest(post={
            'channel': 3,
            'start_time': start_time,
            'end_time': end_time,
        })
        page = api_post_recordings(post_request)
        self.assertEqual(page, {
            "status": "failed",
            "message": "No tuner is available.",
        })
        # Ensure that we cannot push recording to a non-existent tuner
        post_request = testing.DummyRequest(post={
            'channel': 3,
            'start_time': start_time,
            'end_time': end_time,
            'tuner': 2,
        })
        page = api_post_recordings(post_request)
        self.assertEqual(page, {
            "status": "failed",
            "message": "Tuner does not exist",
        })
        # Ensure that Time formats are correct
        post_request = testing.DummyRequest(post={
            'channel': 3,
            'start_time': "12:00am",
            'end_time': "7:00pm",
        })
        page = api_post_recordings(post_request)
        self.assertEqual(page, {
            "status": "failed",
            "message": "Invalid start time",
        })
        # Ensure that channel is an int
        post_request = testing.DummyRequest(post={
            'channel': "a",
            'start_time': start_time,
            'end_time': end_time,
        })
        page = api_post_recordings(post_request)
        self.assertEqual(page, {
            "status": "failed",
            "message": "Invalid channel",
        })
        # Ensure Recording Exists
        get_request = testing.DummyRequest()
        page = api_get_recordings(get_request)
        self.assertEqual(page, [{
            'id': 1,
            'channel': 3,
            'start_time': 0,
            'end_time': 300,
            'tuner': 1,
        }])
        # Create Recording with start date before current time
        dvr.assets.get_current_time = lambda: convert_to_datetime(1300)
        start_time = convert_to_utc_seconds(dvr.assets.get_current_time()) - 250
        end_time = 1550
        post_request = testing.DummyRequest(post={
            'channel': 3,
            'start_time': start_time,
            'end_time': end_time,
        })
        page = api_post_recordings(post_request)
        self.assertEqual(page, {
            'id': 2,
            'channel': 3,
            'start_time': 1300,
            'end_time': 1550,
            'tuner': 1,
        })
        # Check for invalid time ranges
        dvr.assets.get_current_time = lambda: convert_to_datetime(300)
        start_time = 900
        end_time = 600
        post_request = testing.DummyRequest(post={
            'channel': 3,
            'start_time': start_time,
            'end_time': end_time,
        })
        page = api_post_recordings(post_request)
        self.assertEqual(page, {
            "status": "failed",
            "message": "Invalid time range",
        })
class TestMultipleTunerRecording(unittest.TestCase):
    """Recording-API tests with two tuners (overlap falls over to tuner 2).

    ``dvr.assets.get_current_time`` is monkeypatched to a deterministic
    clock; the original function is restored in tearDown.
    """

    def setUp(self):
        self.config = testing.setUp()
        import dvr
        from sqlalchemy import create_engine
        from .models import (
            Base,
            DBSession,
            Tuner,
        )
        # BUGFIX: remember the real clock so tearDown can restore it --
        # the original patch leaked into every subsequent test in the run.
        self._real_get_current_time = dvr.assets.get_current_time
        dvr.assets.get_current_time = lambda: convert_to_datetime(0)
        engine = create_engine('sqlite://')
        DBSession.configure(bind=engine)
        Base.metadata.create_all(engine)
        with transaction.manager:
            model = Tuner(name='Base Tuner')
            DBSession.add(model)
            model = Tuner(name='Secondary Tuner')
            DBSession.add(model)

    def tearDown(self):
        import dvr
        # Restore the real clock patched out in setUp.
        dvr.assets.get_current_time = self._real_get_current_time
        DBSession.remove()
        testing.tearDown()

    def test_it(self):
        import dvr
        from .views import (
            api_get_recordings,
            api_post_recordings,
        )
        # Make sure no recordings exist
        get_request = testing.DummyRequest()
        page = api_get_recordings(get_request)
        self.assertEqual(page, [])
        self.assertEqual(dvr.assets.get_current_time(), convert_to_datetime(0))
        # Create Recording on first tuner
        start_time = convert_to_utc_seconds(dvr.assets.get_current_time())
        end_time = start_time + 300
        post_request = testing.DummyRequest(post={
            'channel': 3,
            'start_time': start_time,
            'end_time': end_time,
        })
        page = api_post_recordings(post_request)
        self.assertEqual(page, {
            'id': 1,
            'channel': 3,
            'start_time': start_time,
            'end_time': end_time,
            'tuner': 1,
        })
        dvr.assets.get_current_time = lambda: convert_to_datetime(150)
        # Create Recording on second tuner with overlap
        start_time = convert_to_utc_seconds(dvr.assets.get_current_time())
        end_time = start_time + 300
        post_request = testing.DummyRequest(post={
            'channel': 3,
            'start_time': start_time,
            'end_time': end_time,
        })
        page = api_post_recordings(post_request)
        self.assertEqual(page, {
            'id': 2,
            'channel': 3,
            'start_time': start_time,
            'end_time': end_time,
            'tuner': 2,
        })
        dvr.assets.get_current_time = lambda: convert_to_datetime(200)
        # Ensure that three recordings cannot happen at the same time
        page = api_post_recordings(post_request)
        self.assertEqual(page, {
            "status": "failed",
            "message": "No tuner is available.",
        })
| {
"content_hash": "95b8e5282e87b0b6e0be02ac32aba248",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 80,
"avg_line_length": 29.651724137931033,
"alnum_prop": 0.5384347017095011,
"repo_name": "demophoon/dvr",
"id": "4b37c06a38356f962cb70b9b53923e28c4bf1e36",
"size": "8599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dvr/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2874"
},
{
"name": "Python",
"bytes": "24090"
}
],
"symlink_target": ""
} |
import datetime
import json
import logging
import re
from typing import Literal, Optional
import requests
from backend.common.models.keys import Year
from backend.common.sitevars.fms_api_secrets import FMSApiSecrets
TCompLevel = Literal["qual", "playoff"]
class FRCAPI:
    """Thin client for the FIRST FRC events API (frc-api.firstinspires.org).

    Supports an optional simulation mode: when ``sim_time`` is given,
    responses are replayed from archived payloads in cloud storage
    instead of hitting the live API.
    """

    STORAGE_BUCKET_PATH = "tbatv-prod-hrd.appspot.com"
    STORAGE_BUCKET_BASE_DIR = "frc-api-response"

    class ValidationError(Exception):
        """Raised when arguments to an endpoint helper are inconsistent."""
        pass

    @classmethod
    def with_credentials(cls, username: str, authkey: str) -> "FRCAPI":
        """Build a client from a username/authkey credential pair."""
        auth_token = FMSApiSecrets.generate_auth_token(username, authkey)
        return cls(auth_token)

    def __init__(
        self,
        auth_token: Optional[str] = None,
        sim_time: Optional[datetime.datetime] = None,
        sim_api_version: Optional[str] = None,
    ):
        # Load auth_token from Sitevar if not specified
        if not auth_token:
            auth_token = FMSApiSecrets.auth_token()
        if not auth_token:
            raise Exception(
                f"Missing FRC API auth token. Setup {FMSApiSecrets.key()} sitevar."
            )
        self.session = requests.Session()
        self.session.headers.update({"Authorization": f"Basic {auth_token}"})
        self._sim_time = sim_time
        self._sim_api_version = sim_api_version

    def root(self) -> requests.Response:
        """Fetch the API root (useful as a connectivity/auth check)."""
        return self._get("/")

    def team_details(self, year: Year, team_number: int) -> requests.Response:
        endpoint = f"/{year}/teams?teamNumber={team_number}"
        return self._get(endpoint)

    def team_avatar(self, year: Year, team_number: int) -> requests.Response:
        endpoint = f"/{year}/avatars?teamNumber={team_number}"
        return self._get(endpoint)

    def event_list(self, year: Year) -> requests.Response:
        endpoint = f"/{year}/events"
        return self._get(endpoint)

    def event_info(self, year: Year, event_short: str) -> requests.Response:
        endpoint = f"/{year}/events?eventCode={event_short}"
        return self._get(endpoint)

    def event_teams(self, year: Year, event_short: str, page: int) -> requests.Response:
        endpoint = f"/{year}/teams?eventCode={event_short}&page={page}"
        return self._get(endpoint)

    def event_team_avatars(
        self, year: Year, event_short: str, page: int
    ) -> requests.Response:
        endpoint = f"/{year}/avatars?eventCode={event_short}&page={page}"
        return self._get(endpoint)

    def alliances(self, year: Year, event_short: str) -> requests.Response:
        endpoint = f"/{year}/alliances/{event_short}"
        return self._get(endpoint)

    def rankings(self, year: Year, event_short: str) -> requests.Response:
        endpoint = f"/{year}/rankings/{event_short}"
        return self._get(endpoint)

    # CONSISTENCY FIX: return annotations added to match the sibling
    # endpoint helpers (no behavior change).
    def match_schedule(self, year: Year, event_short: str, level: TCompLevel) -> requests.Response:
        # This does not include results
        endpoint = f"/{year}/schedule/{event_short}?tournamentLevel={level}"
        return self._get(endpoint)

    def matches(self, year: Year, event_short: str, level: TCompLevel) -> requests.Response:
        # This includes both played/unplayed matches at the event
        # but doesn't include full results
        endpoint = f"/{year}/matches/{event_short}?tournamentLevel={level}"
        return self._get(endpoint)

    def match_scores(
        self, year: Year, event_short: str, level: TCompLevel
    ) -> requests.Response:
        # technically "qual"/"playoff" are invalid tournament levels as per the docs,
        # but they seem to work?
        endpoint = f"/{year}/scores/{event_short}/{level}"
        return self._get(endpoint)

    def awards(
        self,
        year: Year,
        event_code: Optional[str] = None,
        team_number: Optional[int] = None,
    ) -> requests.Response:
        """Fetch awards, filtered by event, team, or both.

        Raises:
            FRCAPI.ValidationError: if neither filter is provided.
        """
        if not event_code and not team_number:
            raise FRCAPI.ValidationError(
                "awards expects either an event_code, team_number, or both"
            )
        if event_code is not None and team_number is not None:
            endpoint = f"/{year}/awards/eventteam/{event_code}/{team_number}"
        elif event_code is not None:
            endpoint = f"/{year}/awards/event/{event_code}"
        else:  # team_number is not None
            endpoint = f"/{year}/awards/team/{team_number}"
        return self._get(endpoint)

    def district_list(self, year: Year) -> requests.Response:
        endpoint = f"/{year}/districts"
        return self._get(endpoint)

    def district_rankings(
        self, year: Year, district_short: str, page: int
    ) -> requests.Response:
        endpoint = (
            f"/{year}/rankings/district?districtCode={district_short}&page={page}"
        )
        return self._get(endpoint)

    def _get(self, endpoint: str, version: str = "v3.0") -> requests.Response:
        """Attempt to fetch the endpoint from the FRC API.

        DOC FIX: this docstring previously floated at class level and
        described the return as "the Flask response object"; it is the
        ``requests.Response`` and belongs here.

        Returns:
            The requests.Response object - should be used by the consumer.
        """
        # Remove any leading / - we'll add it later (safer then adding a slash)
        versioned_endpoint = f"{version}/{endpoint.lstrip('/')}"
        if self._sim_time is not None:
            return self._get_simulated(endpoint, self._sim_api_version or version)
        url = f"https://frc-api.firstinspires.org/{versioned_endpoint}"
        headers = {
            "Accept": "application/json",
            "Cache-Control": "no-cache, max-age=10",
            "Pragma": "no-cache",
        }
        return self.session.get(url, headers=headers)

    def _get_simulated(self, endpoint: str, version: str) -> requests.Response:
        """Replay the archived response for *endpoint* nearest (<=) sim_time."""
        from unittest.mock import Mock

        from backend.common.storage import (
            get_files as cloud_storage_get_files,
            read as cloud_storage_read,
        )

        if version == "v2.0" and "/schedule" in endpoint and "hybrid" not in endpoint:
            # The hybrid schedule endpoint doesn't exist in newer versions,
            # so hack the URLs to make things work with older data
            # /2022/schedule/CADA/qual/hybrid
            # /2022/schedule/CADA?tournamentLevel=qual
            regex = re.search(r"/(\d+)/schedule/(\w+)\?tournamentLevel=(\w+)", endpoint)
            if regex:
                endpoint = f"/{regex.group(1)}/schedule/{regex.group(2)}/{regex.group(3)}/hybrid"
        # Get list of responses
        try:
            gcs_dir_name = (
                f"{self.STORAGE_BUCKET_BASE_DIR}/{version}/{endpoint.lstrip('/')}/"
            )
            gcs_files = cloud_storage_get_files(gcs_dir_name)
            # Find appropriate timed response: the last archive whose
            # timestamp does not exceed the simulated clock.
            last_file_name = None
            for filename in gcs_files:
                time_str = (
                    filename.replace(gcs_dir_name, "").replace(".json", "").strip()
                )
                file_time = datetime.datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S.%f")
                if file_time <= self._sim_time:
                    last_file_name = filename
                else:
                    break
            # Fetch response
            content: Optional[str] = None
            if last_file_name:
                content = cloud_storage_read(last_file_name)
            if content is None:
                # No archive applies yet -- pretend the API returned {}.
                empty_response = Mock(spec=requests.Response)
                empty_response.status_code = 200
                empty_response.json.return_value = {}
                empty_response.url = ""
                return empty_response
            full_response = Mock(spec=requests.Response)
            full_response.status_code = 200
            full_response.json.return_value = json.loads(content)
            full_response.url = ""
            return full_response
        except Exception:
            logging.exception("Error fetching sim frc api")
            error_response = Mock(spec=requests.Response)
            error_response.status_code = 500
            error_response.url = ""
            return error_response
| {
"content_hash": "6ea78eb979bdc021990b9577bf3fb5fe",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 97,
"avg_line_length": 37.14418604651163,
"alnum_prop": 0.5954169797145004,
"repo_name": "the-blue-alliance/the-blue-alliance",
"id": "14604026d0c5319e2df34a618e68640c211922f7",
"size": "7986",
"binary": false,
"copies": "1",
"ref": "refs/heads/py3",
"path": "src/backend/common/frc_api/frc_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "359032"
},
{
"name": "Dockerfile",
"bytes": "2503"
},
{
"name": "HTML",
"bytes": "5877313"
},
{
"name": "JavaScript",
"bytes": "755910"
},
{
"name": "Less",
"bytes": "244218"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Pug",
"bytes": "1857"
},
{
"name": "Python",
"bytes": "4321885"
},
{
"name": "Ruby",
"bytes": "4677"
},
{
"name": "Shell",
"bytes": "27698"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    # Squashed migration (0041 + 0042): the first AlterField pair
    # (nullable CharFields) is immediately superseded by the second pair
    # (default='' CharFields).  Django applies operations in order, so
    # the final schema is the non-null variant.  Kept verbatim because
    # rewriting an already-applied migration would break migration
    # history on deployed databases.

    dependencies = [
        ('stein', '0040_auto_20171105_1439'),
    ]

    operations = [
        migrations.AlterField(
            model_name='quizanswer',
            name='feedback_correct',
            field=models.CharField(blank=True, max_length=500, null=True, verbose_name='feedback if answered correctly'),
        ),
        migrations.AlterField(
            model_name='quizanswer',
            name='feedback_incorrect',
            field=models.CharField(blank=True, max_length=500, null=True, verbose_name='feedback if answered incorrectly'),
        ),
        migrations.AlterField(
            model_name='quizanswer',
            name='feedback_correct',
            field=models.CharField(blank=True, default='', max_length=500, verbose_name='feedback if answered correctly'),
        ),
        migrations.AlterField(
            model_name='quizanswer',
            name='feedback_incorrect',
            field=models.CharField(blank=True, default='', max_length=500, verbose_name='feedback if answered incorrectly'),
        ),
    ]
| {
"content_hash": "30fc5837525a8f2a940a3a16f5ff554f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 124,
"avg_line_length": 36.903225806451616,
"alnum_prop": 0.6118881118881119,
"repo_name": "GeoMatDigital/django-geomat",
"id": "492659b4fc6193e63ccecdd23ff64e0fdfd35ff0",
"size": "1219",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "geomat/stein/migrations/0041_auto_20171102_1755_squashed_0042_auto_20171105_1732.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16597"
},
{
"name": "Dockerfile",
"bytes": "1091"
},
{
"name": "HTML",
"bytes": "14474"
},
{
"name": "JavaScript",
"bytes": "31354"
},
{
"name": "Makefile",
"bytes": "371"
},
{
"name": "Python",
"bytes": "197468"
},
{
"name": "Shell",
"bytes": "674"
}
],
"symlink_target": ""
} |
'''
whoisLib.py
.checks for PKL/CSV existence, if not notifies user and gives
chance to cancel, then performs the full scrape of tld/whois pairings
-u --update runs updateTLD to update and then export
-w --whois runs getWhoisServer to find tld passed, if not found, scrapes
iana for the TLD, adds it to dict, then runs getWhois
-i --ip runs getRWhois
.then calls dictToCSV and dictToPKL to export dictionary
.then returns results of whatever was called, then exits
#######################
# TO DO LIST
#
# XXMove main into respective functions
# XXBuild out args and test
# XXBuild out PKL and CSV check
# --Build out updateTLD func and logic
# --Build out getWhoisServer to check imported
# CSV/PKL for existing, if not lookup
# TLD and add its whois server to the
# CSV/PKL. Set flag that it has been updated
# in order to call export at end.
# --Build out getWhois - perform whois lookup
# --Build out TLDWhois Class - See notes below
# --Build out getRWhois - perform IP lookup
# XXBuild out dictToCSV
# XXBuild out dictToPKL
# --Tidy up documentation and comments
#
# #####################
# #
# # Class for all info for a TLD
# # Will include TLD suffix, WHOIS
# # server, name servers, rWHOIS
# # server, Sponsoring Org, admin
# # contact info, and technical
# # contact info.
# #
# # Pass a TLD suffix, and it will
# # build the rest as possible
# #
# # follow defs:
# # __init__(self, suffix) - pass suffix and
# # then initialize all attributes
# # build* - one for each bit of info, called
# # from within init to fully initialize
# # attributes
# # getInfo - gets background info,
# # all contained in single string, maybe
# # getWhois - returns whois server
# # getRWhois - returns rwhois server
# # getNS - returns list of Name Servers
# #
# #####################
#
#######################
'''
#########
# IMPORTS
#########
import requests
import re
import sys
import argparse
import os.path
import csv
import timeit
import pickle
#########
# VARS
#########
programName="whoisLib.py"
programDescription="WHOIS utility module"
programVersion="1.0"
# IANA root zone database page, scraped by updateTLD().
tldDB='https://www.iana.org/domains/root/db'
tldURIList = []  # URIs of the per-TLD pages scraped from the root DB page
whoisDictionary = {}  # maps '.tld' -> whois server hostname
verbose=False
##################################################
# FUNCTIONS
##################################################
#############
# GET ARGS
#############
def getArgs():
    """Parse and return the command-line options for the whois utility."""
    cli = argparse.ArgumentParser(prog=programName, description=programDescription)
    cli.add_argument("-a", "--arg", help="ARG HELP", required=False)
    cli.add_argument("-v", "--verbose", help="Increase verbosity", action="store_true", required=False)
    cli.add_argument("-u", "--update", help="Update TLD/Whois pairing", action="store_true", required=False)
    cli.add_argument("-w", "--whois", help="Perform whois lookup", required=False)
    cli.add_argument("-i", "--ip", help="Perform IP lookup", required=False)
    return cli.parse_args()
###############################################
# OTHER NOTES
#
# For groups of args [in this case one of the two is required]:
# group = parser.add_mutually_exclusive_group(required=True)
# group.add_argument("-a1", "--arg1", help="ARG HELP")
# group.add_argument("-a2", "--arg2", help="ARG HELP")
#
# To make a bool thats true:
# parser.add_argument("-a","--arg",help="ARG HELP", action="store_true")
#
###############################################
#############
# END OF ARGS
#############
#############
# MAIN
#############
def main(args):
if args.verbose:
startTime = timeit.default_timer()
if checkForFile("tldwhois.pkl") == True:
tldPKLExists = True
if args.verbose:
print "PKL file exists."
#If it exists, import PKL to dictionary
if checkForFile("tldwhois.csv") == True:
tldCSVExists = True
if args.verbose:
print "CSV file exists."
#If it exists, but not PKL, import CSV to dictionary
if (not checkForFile("tldwhois.pkl")) and (not checkForFile("tldwhois.csv")):
if args.verbose:
print "Neither the PKL or CSV file exist. Full update is needed..."
updateTLD()
global whoisDictionary
dictToPKL(whoisDictionary,"tldwhois.pkl")
dictToCSV(whoisDictionary,"tldwhois.csv")
#If neither exists. Notify user, and ask if want to create. Then update, export to PKL and CSV, then continue
if (checkForFile("tldwhois.pkl") == True) and (checkForFile("tldwhois.csv") == False):
#Import PKL.
if args.verbose:
print "PKL exists... Importing..."
whoisDictionary = pickle.load(open("tldwhois.pkl", "rb"))
#Export to CSV.
if args.verbose:
print "CSV doesn't exist... Exporting the PKL to CSV..."
dictToCSV(whoisDictionary,"tldwhois.csv")
if (checkForFile("tldwhois.pkl") == False) and (checkForFile("tldwhois.csv") == True):
#Import CSV.
if args.verbose:
print "CSV exists... Importing..."
reader = csv.reader(open('tldwhois.csv', 'rb'))
whoisDictionary = dict(x for x in reader)
#Export to PKL.
if args.verbose:
print "PKL doesn't exist... Exporting the CSV to PKL..."
dictToPKL(whoisDictionary,"tldwhois.pkl")
if (checkForFile("tldwhois.pkl") == True) and (checkForFile("tldwhois.csv") == True):
#Both true, continue.
if args.verbose:
print "PKL and CSV exist. Continuing."
if args.update:
#go through entire process to create PKL and CSV files. Might have to clear existing. Who knows
updateTLD()
if args.whois:
getWhois(getWhoisServer(args.whois))
if args.ip:
getRWhois(ip)
endTime = timeit.default_timer()
if args.verbose:
print endTime - startTime
# Once completed print out the dictionary
#print whoisDictionary
#############
# END OF MAIN
#############
#############
# checkForPKL
#############
def checkForFile(filename):
    """Return True if *filename* exists beside this script (sys.path[0]).

    Uses os.path.join instead of manual '/'-concatenation (portable),
    and returns the isfile() result directly instead of the redundant
    if/else True/False.
    """
    return os.path.isfile(os.path.join(sys.path[0], filename))
#############
# END OF checkForPKL
#############
#############
# updateTLD
#############
def updateTLD():
    # Scrape the IANA root zone database and rebuild the module-global
    # '.tld' -> whois-server mapping from scratch.
    # NOTE(review): relies on the module-level `args` bound in the
    # __main__ guard, so it raises NameError when this module is
    # imported as a library -- confirm intended usage.
    global whoisDictionary
    if args.verbose:
        print "Fully updating TLD list..."
    r = requests.get(tldDB)
    # Match the per-TLD anchor tags on the root DB page; brittle against
    # IANA markup changes.
    regex = '<span class="domain tld">+<a href="\/\w*\/\w*\/\w*\/\w*\.\w*">'
    matches = re.findall(regex, r.text)
    # For each entry in the list of matches, trim off the
    # unnecessary bits and slap 'em into the tldURIList list
    for match in matches:
        tldURIList.append(match[34:-2])
    # For every URI found on the main page, loop
    progress = 0
    for uri in tldURIList:
        # Set each 'key' in dict to the '.*' value
        key = '.' + uri[17:-5]
        # Generate a full URI for each TLD and retrieve
        tldURI = 'https://www.iana.org' + uri
        r = requests.get(tldURI)
        # Find place in page for the WHOIS server and match
        regex = 'WHOIS Server:.*'
        matches = re.findall(regex, r.text)
        # If a match is found continue, or else the TLD
        # Does not have a WHOIS server and should be
        # ignored.
        if matches:
            # Entry will be the first, trim the first 18
            # Characters to remove the B.S.
            match = matches[0]
            match = match[18:]
            # Add entry to the dictionary of the '.*' for the key
            # and the WHOIS server for the match
            whoisDictionary[key] = match
            # The following is for progress notification
            progress += 1
            if args.verbose:
                print 'WHOIS server found for ' + key + ' at ' + match
            if (progress % 50) == 0:
                print 'Number of domains found: ' + str(progress)
#############
# END OF updateTLD
#############
#############
# getWhoisServer
#############
def getWhoisServer(tldSuffix):
    # TODO stub: per the module docstring, should return the WHOIS
    # server for tldSuffix from whoisDictionary, scraping IANA on a
    # cache miss.  Currently only prints its own name.
    print "getWhoisServer"
#############
# END OF getWhoisServer
#############
#############
# getWhois
#############
def getWhois(tld):
    # TODO stub: should run a WHOIS query against the server for `tld`.
    print "getWhois"
#############
# END OF getWhois
#############
#############
# getRWhois
#############
def getRWhois(ip):
    # TODO stub: should perform an rWHOIS/IP lookup.
    print "getRWhois"
#############
# END OF getRWhois
#############
#############
# dictToCSV
#############
def dictToCSV(dictionary,filename):
if args.verbose:
print "Exporting dictionary to CSV..."
writer = csv.writer(open(filename, 'wb'))
for key, value in dictionary.items():
writer.writerow([key, value])
#############
# END OF dictToCSV
#############
#############
# dictToPKL
#############
def dictToPKL(dictionary,pklname):
if args.verbose:
print "Exporting dictionary to PKL..."
pickle.dump(dictionary,open(pklname,"wb"))
#############
# END OF dictToPKL
#############
##################################################
# END OF FUNCTIONS
##################################################
###########################
# PROG DECLARE
###########################
# Script entry point: parse the CLI options once and hand off to main().
if __name__ == '__main__':
    args = getArgs()
    main(args)
| {
"content_hash": "ca6e0fe89eb26abc0c6a06aafd776d80",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 111,
"avg_line_length": 25.497041420118343,
"alnum_prop": 0.6109306103504293,
"repo_name": "teekayzed/whopys",
"id": "9f3fb7cec185a95a971e99983e6fd8621aa835ec",
"size": "9487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "whoisLib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9487"
}
],
"symlink_target": ""
} |
"""This module contains all of the core logic for beets' command-line
interface. To invoke the CLI, just call beets.ui.main(). The actual
CLI commands are implemented in the ui.commands module.
"""
from __future__ import print_function
import locale
import optparse
import textwrap
import sys
from difflib import SequenceMatcher
import logging
import sqlite3
import errno
import re
import struct
import traceback
from beets import library
from beets import plugins
from beets import util
from beets.util.functemplate import Template
from beets import config
from beets.util import confit
from beets.autotag import mb
# On Windows platforms, use colorama to support "ANSI" terminal colors.
if sys.platform == 'win32':
    try:
        import colorama
    except ImportError:
        # colorama is optional; without it, Windows terminals will show
        # raw escape sequences instead of colors.
        pass
    else:
        colorama.init()
# Constants.
PF_KEY_QUERIES = {
'comp': 'comp:true',
'singleton': 'singleton:true',
}
# UI exception. Commands should throw this in order to display
# nonrecoverable errors to the user.
class UserError(Exception):
    """A nonrecoverable, user-facing error.

    Commands raise this so that `main` can log a friendly message and
    exit with a non-zero status instead of printing a traceback.
    """
    pass
# Main logger.
# Shared by the whole CLI; its level is set in _raw_main based on the
# "verbose" config option.
log = logging.getLogger('beets')
# Utilities.
def _encoding():
    """Tries to guess the encoding used by the terminal."""
    # An explicit setting in the configuration always wins.
    encoding = config['terminal_encoding'].get()
    if not encoding:
        try:
            encoding = locale.getdefaultlocale()[1]
        except ValueError:
            # Invalid locale environment variable setting. To avoid
            # failing entirely for no good reason, assume UTF-8.
            encoding = None
    return encoding or 'utf8'
def decargs(arglist):
    """Given a list of command-line argument bytestrings, attempts to
    decode them to Unicode strings.
    """
    # The terminal encoding is invariant across the list; look it up
    # once instead of re-running the config/locale probe per argument.
    encoding = _encoding()
    return [s.decode(encoding) for s in arglist]
def print_(*strings):
    """Like print, but rather than raising an error when a character
    is not in the terminal's encoding's character set, just silently
    replaces it.
    """
    if not strings:
        txt = u''
    elif isinstance(strings[0], unicode):
        txt = u' '.join(strings)
    else:
        txt = ' '.join(strings)
    # Encode unicode text to terminal bytes, substituting characters
    # the terminal cannot represent instead of raising.
    if isinstance(txt, unicode):
        txt = txt.encode(_encoding(), 'replace')
    print(txt)
def input_(prompt=None):
    """Like `raw_input`, but decodes the result to a Unicode string.
    Raises a UserError if stdin is not available. The prompt is sent to
    stdout rather than stderr; a space is printed between the prompt
    and the input cursor.
    """
    # raw_input incorrectly sends prompts to stderr, not stdout
    # (http://bugs.python.org/issue1927), so display the prompt with
    # print() ourselves.
    if prompt:
        shown = (prompt.encode(_encoding(), 'replace')
                 if isinstance(prompt, unicode) else prompt)
        print(shown, end=' ')
    try:
        raw = raw_input()
    except EOFError:
        raise UserError('stdin stream ended while input required')
    return raw.decode(sys.stdin.encoding or 'utf8', 'ignore')
def input_options(options, require=False, prompt=None, fallback_prompt=None,
                  numrange=None, default=None, max_width=72):
    """Prompts a user for input. The sequence of `options` defines the
    choices the user has. A single-letter shortcut is inferred for each
    option; the user's choice is returned as that single, lower-case
    letter. The options should be provided as lower-case strings unless
    a particular shortcut is desired; in that case, only that letter
    should be capitalized.

    By default, the first option is the default. `default` can be provided to
    override this. If `require` is provided, then there is no default. The
    prompt and fallback prompt are also inferred but can be overridden.

    If numrange is provided, it is a pair of `(high, low)` (both ints)
    indicating that, in addition to `options`, the user may enter an
    integer in that inclusive range.

    `max_width` specifies the maximum number of columns in the
    automatically generated prompt string.
    """
    # Assign single letters to each option. Also capitalize the options
    # to indicate the letter.
    letters = {}
    display_letters = []
    capitalized = []
    first = True
    for option in options:
        # Is a letter already capitalized?
        for letter in option:
            if letter.isalpha() and letter.upper() == letter:
                found_letter = letter
                break
        else:
            # Infer a letter.
            for letter in option:
                if not letter.isalpha():
                    continue  # Don't use punctuation.
                if letter not in letters:
                    found_letter = letter
                    break
            else:
                raise ValueError('no unambiguous lettering found')

        letters[found_letter.lower()] = option
        index = option.index(found_letter)

        # Mark the option's shortcut letter for display.
        if not require and ((default is None and not numrange and first) or
                            (isinstance(default, basestring) and
                             found_letter.lower() == default.lower())):
            # The first option is the default; mark it.
            show_letter = '[%s]' % found_letter.upper()
            is_default = True
        else:
            show_letter = found_letter.upper()
            is_default = False

        # Colorize the letter shortcut.
        show_letter = colorize('turquoise' if is_default else 'blue',
                               show_letter)

        # Insert the highlighted letter back into the word.
        capitalized.append(
            option[:index] + show_letter + option[index+1:]
        )
        display_letters.append(found_letter.upper())
        first = False

    # The default is just the first option if unspecified.
    if require:
        default = None
    elif default is None:
        if numrange:
            default = numrange[0]
        else:
            default = display_letters[0].lower()

    # Make a prompt if one is not provided.
    if not prompt:
        prompt_parts = []
        prompt_part_lengths = []
        if numrange:
            if isinstance(default, int):
                default_name = str(default)
                default_name = colorize('turquoise', default_name)
                tmpl = '# selection (default %s)'
                prompt_parts.append(tmpl % default_name)
                # Measure using the uncolored text: the ANSI escape
                # codes added by colorize occupy no terminal columns.
                prompt_part_lengths.append(len(tmpl % str(default)))
            else:
                prompt_parts.append('# selection')
                prompt_part_lengths.append(len(prompt_parts[-1]))
        prompt_parts += capitalized
        prompt_part_lengths += [len(s) for s in options]

        # Wrap the query text.
        prompt = ''
        line_length = 0
        for i, (part, length) in enumerate(zip(prompt_parts,
                                               prompt_part_lengths)):
            # Add punctuation.
            if i == len(prompt_parts) - 1:
                part += '?'
            else:
                part += ','
            length += 1

            # Choose either the current line or the beginning of the next.
            if line_length + length + 1 > max_width:
                prompt += '\n'
                line_length = 0

            if line_length != 0:
                # Not the beginning of the line; need a space.
                part = ' ' + part
                length += 1

            prompt += part
            line_length += length

    # Make a fallback prompt too. This is displayed if the user enters
    # something that is not recognized.
    if not fallback_prompt:
        fallback_prompt = 'Enter one of '
        if numrange:
            fallback_prompt += '%i-%i, ' % numrange
        fallback_prompt += ', '.join(display_letters) + ':'

    resp = input_(prompt)
    while True:
        resp = resp.strip().lower()

        # Try default option.
        if default is not None and not resp:
            resp = default

        # Try an integer input if available.
        if numrange:
            try:
                resp = int(resp)
            except ValueError:
                pass
            else:
                low, high = numrange
                if low <= resp <= high:
                    return resp
                else:
                    # Out-of-range number: fall through to re-prompt.
                    resp = None

        # Try a normal letter input.
        if resp:
            resp = resp[0]
            if resp in letters:
                return resp

        # Prompt for new input.
        resp = input_(fallback_prompt)
def input_yn(prompt, require=False):
    """Prompts the user for a "yes" or "no" response. The default is
    "yes" unless `require` is `True`, in which case there is no default.
    """
    return input_options(('y', 'n'), require, prompt, 'Enter Y or N:') == 'y'
def human_bytes(size):
    """Formats size, a number of bytes, in a human-readable way."""
    # Walk up the unit ladder until the value fits below 1024.
    for unit in ('B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'HB'):
        if size < 1024:
            return "%3.1f %s" % (size, unit)
        size /= 1024.0
    # Beyond the largest unit in the table.
    return "big"
def human_seconds(interval):
    """Formats interval, a number of seconds, as a human-readable time
    interval using English words.
    """
    units = [
        (1, 'second'),
        (60, 'minute'),
        (60, 'hour'),
        (24, 'day'),
        (7, 'week'),
        (52, 'year'),
        (10, 'decade'),
    ]
    # Scale down through successive units until the value no longer
    # reaches the next unit's threshold.
    for (increment, suffix), (next_increment, _) in zip(units, units[1:]):
        interval /= float(increment)
        if interval < next_increment:
            break
    else:
        # Ran off the end of the table: report in the largest unit.
        increment, suffix = units[-1]
        interval /= float(increment)
    return "%3.1f %ss" % (interval, suffix)
def human_seconds_short(interval):
    """Formats a number of seconds as a short human-readable M:SS
    string.
    """
    minutes, seconds = divmod(int(interval), 60)
    return u'%i:%02i' % (minutes, seconds)
# ANSI terminal colorization code heavily inspired by pygments:
# http://dev.pocoo.org/hg/pygments-main/file/b2deea5b5030/pygments/console.py
# (pygments is by Tim Hatch, Armin Ronacher, et al.)
COLOR_ESCAPE = "\x1b["
# A color's list index maps to its ANSI SGR code (30 + index) in
# _colorize below; LIGHT_COLORS additionally get the bold attribute.
DARK_COLORS = ["black", "darkred", "darkgreen", "brown", "darkblue",
               "purple", "teal", "lightgray"]
LIGHT_COLORS = ["darkgray", "red", "green", "yellow", "blue",
                "fuchsia", "turquoise", "white"]
RESET_COLOR = COLOR_ESCAPE + "39;49;00m"
def _colorize(color, text):
    """Returns a string that prints the given text in the given color
    in a terminal that is ANSI color-aware. The color must be something
    in DARK_COLORS or LIGHT_COLORS.

    Raises ValueError for an unknown color name.
    """
    if color in DARK_COLORS:
        escape = COLOR_ESCAPE + "%im" % (DARK_COLORS.index(color) + 30)
    elif color in LIGHT_COLORS:
        # Light colors also enable the bold ("01") attribute.
        escape = COLOR_ESCAPE + "%i;01m" % (LIGHT_COLORS.index(color) + 30)
    else:
        # Format the message eagerly: unlike logging calls, ValueError
        # does not interpolate printf-style arguments, so the original
        # two-argument form produced an unformatted message tuple.
        raise ValueError('no such color %s' % color)
    return escape + text + RESET_COLOR
def colorize(color, text):
    """Colorize text if colored output is enabled. (Like _colorize but
    conditional.)
    """
    return _colorize(color, text) if config['color'] else text
def _colordiff(a, b, highlight='red', minor_highlight='lightgray'):
    """Given two values, return the same pair of strings except with
    their differences highlighted in the specified color. Strings are
    highlighted intelligently to show differences; other values are
    stringified and highlighted in their entirety.
    """
    if not isinstance(a, basestring) or not isinstance(b, basestring):
        # Non-strings: use ordinary equality.
        a = unicode(a)
        b = unicode(b)
        if a == b:
            return a, b
        else:
            return colorize(highlight, a), colorize(highlight, b)

    if isinstance(a, bytes) or isinstance(b, bytes):
        # A path field.
        a = util.displayable_path(a)
        b = util.displayable_path(b)

    a_out = []
    b_out = []

    # The junk predicate is constant False: every character counts.
    matcher = SequenceMatcher(lambda x: False, a, b)
    for op, a_start, a_end, b_start, b_end in matcher.get_opcodes():
        if op == 'equal':
            # In both strings.
            a_out.append(a[a_start:a_end])
            b_out.append(b[b_start:b_end])
        elif op == 'insert':
            # Right only.
            b_out.append(colorize(highlight, b[b_start:b_end]))
        elif op == 'delete':
            # Left only.
            a_out.append(colorize(highlight, a[a_start:a_end]))
        elif op == 'replace':
            # Right and left differ. Colorise with second highlight if
            # it's just a case change.
            if a[a_start:a_end].lower() != b[b_start:b_end].lower():
                color = highlight
            else:
                color = minor_highlight
            a_out.append(colorize(color, a[a_start:a_end]))
            b_out.append(colorize(color, b[b_start:b_end]))
        else:
            # get_opcodes() emits only the four tags handled above.
            assert(False)

    return u''.join(a_out), u''.join(b_out)
def colordiff(a, b, highlight='red'):
    """Colorize differences between two values if color is enabled.
    (Like _colordiff but conditional.)
    """
    if not config['color']:
        return unicode(a), unicode(b)
    return _colordiff(a, b, highlight)
def color_diff_suffix(a, b, highlight='red'):
    """Colorize the differing suffix between two strings."""
    a, b = unicode(a), unicode(b)
    # With color disabled, or identical strings, there is nothing to
    # mark up.
    if not config['color'] or a == b:
        return a, b
    # Index of the first differing character; if one string is a prefix
    # of the other, the divergence starts where the shorter one ends.
    limit = min(len(a), len(b))
    first_diff = next((i for i in range(limit) if a[i] != b[i]), limit)
    # Colorize from the first difference on.
    return (a[:first_diff] + colorize(highlight, a[first_diff:]),
            b[:first_diff] + colorize(highlight, b[first_diff:]))
def get_path_formats(subview=None):
    """Get the configuration's path formats as a list of query/template
    pairs.
    """
    subview = subview or config['paths']
    return [
        # Expand single-word shorthands (e.g. 'comp') to full queries.
        (PF_KEY_QUERIES.get(query, query), Template(view.get(unicode)))
        for query, view in subview.items()
    ]
def get_replacements():
    """Confit validation function that reads regex/string pairs.
    """
    replacements = []
    for pattern, repl in config['replace'].get(dict).items():
        try:
            compiled = re.compile(pattern)
        except re.error:
            raise UserError(
                u'malformed regular expression in replace: {0}'.format(
                    pattern
                )
            )
        # A missing replacement means "delete the match".
        replacements.append((compiled, repl or ''))
    return replacements
def get_plugin_paths():
    """Get the list of search paths for plugins from the config file.
    The value for "pluginpath" may be a single string or a list of
    strings.
    """
    paths = config['pluginpath'].get()
    if isinstance(paths, basestring):
        # Normalize the single-string form to a one-element list.
        paths = [paths]
    elif not isinstance(paths, list):
        raise confit.ConfigTypeError(
            u'pluginpath must be string or a list of strings'
        )
    return map(util.normpath, paths)
def _pick_format(album, fmt=None):
    """Pick a format string for printing Album or Item objects,
    falling back to config options and defaults.
    """
    if fmt:
        return fmt
    option = 'list_format_album' if album else 'list_format_item'
    return config[option].get(unicode)
def print_obj(obj, lib, fmt=None):
    """Print an Album or Item object. If `fmt` is specified, use that
    format string. Otherwise, use the configured template.
    """
    is_album = isinstance(obj, library.Album)
    chosen = _pick_format(is_album, fmt)
    # Accept either a pre-built Template or a plain format string.
    template = chosen if isinstance(chosen, Template) else Template(chosen)
    print_(obj.evaluate_template(template))
def term_width():
    """Get the width (columns) of the terminal."""
    fallback = config['ui']['terminal_width'].get(int)

    # The fcntl and termios modules are not available on non-Unix
    # platforms, so we fall back to a constant.
    try:
        import fcntl
        import termios
    except ImportError:
        return fallback

    winsize = None
    try:
        winsize = fcntl.ioctl(0, termios.TIOCGWINSZ, ' ' * 4)
    except IOError:
        pass
    if winsize is None:
        return fallback
    try:
        _height, width = struct.unpack('hh', winsize)
    except struct.error:
        return fallback
    return width
# Subcommand parsing infrastructure.
# This is a fairly generic subcommand parser for optparse. It is
# maintained externally here:
# http://gist.github.com/462717
# There you will also find a better description of the code and a more
# succinct example program.
class Subcommand(object):
    """A subcommand of a root command-line application that may be
    invoked by a SubcommandOptionParser.
    """
    def __init__(self, name, parser=None, help='', aliases=()):
        """Creates a new subcommand. name is the primary way to invoke
        the subcommand; aliases are alternate names. parser is an
        OptionParser responsible for parsing the subcommand's options.
        help is a short description of the command. If no parser is
        given, it defaults to a new, empty OptionParser.
        """
        self.name = name
        # `help` shadows the builtin, but it is part of the public
        # keyword interface and must keep its name.
        self.parser = parser or optparse.OptionParser()
        self.aliases = aliases
        self.help = help
class SubcommandsOptionParser(optparse.OptionParser):
    """A variant of OptionParser that parses subcommands and their
    arguments.
    """
    # A singleton command used to give help on other subcommands.
    _HelpSubcommand = Subcommand('help', optparse.OptionParser(),
                                 help='give detailed help on a specific sub-command',
                                 aliases=('?',))

    def __init__(self, *args, **kwargs):
        """Create a new subcommand-aware option parser. All of the
        options to OptionParser.__init__ are supported in addition
        to subcommands, a sequence of Subcommand objects.
        """
        # The subcommand array, with the help command included.
        self.subcommands = list(kwargs.pop('subcommands', []))
        self.subcommands.append(self._HelpSubcommand)

        # A more helpful default usage.
        if 'usage' not in kwargs:
            kwargs['usage'] = """
  %prog COMMAND [ARGS...]
  %prog help COMMAND"""

        # Super constructor.
        optparse.OptionParser.__init__(self, *args, **kwargs)

        # Adjust the help-visible name of each subcommand.
        for subcommand in self.subcommands:
            subcommand.parser.prog = '%s %s' % \
                (self.get_prog_name(), subcommand.name)

        # Our root parser needs to stop on the first unrecognized argument.
        self.disable_interspersed_args()

    def add_subcommand(self, cmd):
        """Adds a Subcommand object to the parser's list of commands.
        """
        self.subcommands.append(cmd)

    # Add the list of subcommands to the help message.
    def format_help(self, formatter=None):
        # Get the original help message, to which we will append.
        out = optparse.OptionParser.format_help(self, formatter)
        if formatter is None:
            formatter = self.formatter

        # Subcommands header.
        result = ["\n"]
        result.append(formatter.format_heading('Commands'))
        formatter.indent()

        # Generate the display names (including aliases).
        # Also determine the help position.
        disp_names = []
        help_position = 0
        for subcommand in self.subcommands:
            name = subcommand.name
            if subcommand.aliases:
                name += ' (%s)' % ', '.join(subcommand.aliases)
            disp_names.append(name)

            # Set the help position based on the max width.
            proposed_help_position = len(name) + formatter.current_indent + 2
            if proposed_help_position <= formatter.max_help_position:
                help_position = max(help_position, proposed_help_position)

        # Add each subcommand to the output.
        for subcommand, name in zip(self.subcommands, disp_names):
            # Lifted directly from optparse.py.
            name_width = help_position - formatter.current_indent - 2
            if len(name) > name_width:
                name = "%*s%s\n" % (formatter.current_indent, "", name)
                indent_first = help_position
            else:
                name = "%*s%-*s " % (formatter.current_indent, "",
                                     name_width, name)
                indent_first = 0
            result.append(name)
            help_width = formatter.width - help_position
            help_lines = textwrap.wrap(subcommand.help, help_width)
            result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
            result.extend(["%*s%s\n" % (help_position, "", line)
                           for line in help_lines[1:]])
        formatter.dedent()

        # Concatenate the original help message with the subcommand
        # list.
        return out + "".join(result)

    def _subcommand_for_name(self, name):
        """Return the subcommand in self.subcommands matching the
        given name. The name may either be the name of a subcommand or
        an alias. If no subcommand matches, returns None.
        """
        for subcommand in self.subcommands:
            if name == subcommand.name or \
               name in subcommand.aliases:
                return subcommand
        return None

    def parse_args(self, a=None, v=None):
        """Like OptionParser.parse_args, but returns these four items:
        - options: the options passed to the root parser
        - subcommand: the Subcommand object that was invoked
        - suboptions: the options passed to the subcommand parser
        - subargs: the positional arguments passed to the subcommand
        """
        options, args = optparse.OptionParser.parse_args(self, a, v)

        if not args:
            # No command given: show help and exit.
            self.print_help()
            self.exit()
        else:
            cmdname = args.pop(0)
            subcommand = self._subcommand_for_name(cmdname)
            if not subcommand:
                self.error('unknown command ' + cmdname)

        suboptions, subargs = subcommand.parser.parse_args(args)

        if subcommand is self._HelpSubcommand:
            if subargs:
                # Help for a particular command.
                cmdname = subargs[0]
                helpcommand = self._subcommand_for_name(cmdname)
                if not helpcommand:
                    self.error('no command named {0}'.format(cmdname))
                helpcommand.parser.print_help()
                self.exit()
            else:
                # General help.
                self.print_help()
                self.exit()

        return options, subcommand, suboptions, subargs
# Register "callback" as a typed action so optparse consumes one
# argument before invoking the callback; vararg_callback below relies
# on receiving that first value in its `value` parameter.
optparse.Option.ALWAYS_TYPED_ACTIONS += ('callback',)
def vararg_callback(option, opt_str, value, parser):
    """Callback for an option with variable arguments.
    Manually collect arguments right of a callback-action
    option (ie. with action="callback"), and add the resulting
    list to the destination var.

    Usage:
        parser.add_option("-c", "--callback", dest="vararg_attr",
                          action="callback", callback=vararg_callback)

    Details:
    http://docs.python.org/2/library/optparse.html#callback-example-6-variable-arguments
    """
    # `value` already holds the first argument: because "callback" is a
    # typed action (see ALWAYS_TYPED_ACTIONS above), optparse consumed
    # one argument from rargs before invoking us.
    value = [value]

    def floatable(arg):
        # True if `arg` parses as a float, so "-3" / "-3.0" count as
        # values rather than options.
        try:
            float(arg)
            return True
        except ValueError:
            return False

    for arg in parser.rargs:
        # stop on --foo like options
        if arg[:2] == "--" and len(arg) > 2:
            break
        # stop on -a, but not on -3 or -3.0
        if arg[:1] == "-" and len(arg) > 1 and not floatable(arg):
            break
        value.append(arg)

    # Only len(value) - 1 elements were taken from rargs (the first came
    # from `value` itself, already consumed by optparse). Deleting
    # len(value) elements, as before, silently swallowed the option-like
    # argument that terminated collection.
    del parser.rargs[:len(value) - 1]
    setattr(parser.values, option.dest, value)
# The root parser and its main function.
def _raw_main(args):
    """A helper function for `main` without top-level exception
    handling.
    """
    # Temporary: Migrate from 1.0-style configuration.
    # NOTE(review): imported here rather than at module top, presumably
    # to avoid an import cycle with beets.ui — confirm.
    from beets.ui import migrate
    migrate.automigrate()

    # Get the default subcommands.
    from beets.ui.commands import default_commands

    # Add plugin paths.
    sys.path += get_plugin_paths()
    # Load requested plugins.
    plugins.load_plugins(config['plugins'].as_str_seq())
    plugins.send("pluginload")

    # Construct the root parser.
    commands = list(default_commands)
    commands += plugins.commands()
    commands.append(migrate.migrate_cmd)  # Temporary.
    parser = SubcommandsOptionParser(subcommands=commands)
    parser.add_option('-l', '--library', dest='library',
                      help='library database file to use')
    parser.add_option('-d', '--directory', dest='directory',
                      help="destination music directory")
    parser.add_option('-v', '--verbose', dest='verbose', action='store_true',
                      help='print debugging information')

    # Parse the command-line!
    options, subcommand, suboptions, subargs = parser.parse_args(args)
    # Command-line options override config-file values.
    config.set_args(options)

    # Open library file.
    dbpath = config['library'].as_filename()
    try:
        lib = library.Library(
            dbpath,
            config['directory'].as_filename(),
            get_path_formats(),
            get_replacements(),
        )
    except sqlite3.OperationalError:
        raise UserError(u"database file {0} could not be opened".format(
            util.displayable_path(dbpath)
        ))
    plugins.send("library_opened", lib=lib)

    # Configure the logger.
    if config['verbose'].get(bool):
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)
    log.debug(u'data directory: {0}\n'
              u'library database: {1}\n'
              u'library directory: {2}'.format(
                  util.displayable_path(config.config_dir()),
                  util.displayable_path(lib.path),
                  util.displayable_path(lib.directory),
              ))

    # Configure the MusicBrainz API.
    mb.configure()

    # Invoke the subcommand.
    subcommand.func(lib, suboptions, subargs)
    plugins.send('cli_exit', lib=lib)
def main(args=None):
    """Run the main command-line interface for beets. Includes top-level
    exception handlers that print friendly error messages.
    """
    try:
        _raw_main(args)
    except UserError as exc:
        message = exc.args[0] if exc.args else None
        log.error(u'error: {0}'.format(message))
        sys.exit(1)
    except util.HumanReadableException as exc:
        exc.log(log)
        sys.exit(1)
    except confit.ConfigError as exc:
        # NOTE(review): unlike the cases above, a configuration error
        # does not produce a non-zero exit status — confirm intended.
        log.error(u'configuration error: {0}'.format(exc))
    except IOError as exc:
        if exc.errno == errno.EPIPE:
            # "Broken pipe". End silently.
            pass
        else:
            raise
    except KeyboardInterrupt:
        # Silently ignore ^C except in verbose mode.
        log.debug(traceback.format_exc())
| {
"content_hash": "2007dbbfbb76d379a8599c54322d40d2",
"timestamp": "",
"source": "github",
"line_count": 814,
"max_line_length": 88,
"avg_line_length": 33.56879606879607,
"alnum_prop": 0.5959377859103385,
"repo_name": "google-code-export/beets",
"id": "488974deec487f6cf05aac1cefeecbc40036d7a0",
"size": "27972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beets/ui/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2643"
},
{
"name": "HTML",
"bytes": "3133"
},
{
"name": "JavaScript",
"bytes": "85314"
},
{
"name": "Python",
"bytes": "936710"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
from datetime import date, datetime, timedelta
def get_sql_engine(db_dict):
    """Create a SQLAlchemy engine from a connection dictionary.

    `db_dict` must provide the keys 'user', 'password', 'host' and
    'db'; extra keys (such as 'tables') are ignored.
    """
    import sqlalchemy
    url = 'mysql://{user}:{password}@{host}/{db}?charset=utf8'.format(**db_dict)
    return sqlalchemy.create_engine(url)
def monitor_db_today(db_property, dt):
    """Monitor DB's input data today.

    Args:
        db_property: A dictionary mapping DB names to connection info.
            Example:
            {
                "your_db": {
                    "host": "your_host",
                    "user": "user",
                    "password": "psw",
                    "db": "your_db",
                    "tables": ["table1", "table2"]
                }
            }
        dt: A string. Shared column for filtering data by date.

    Returns:
        msg_ls: A list. Messages for DB tables with no today data.
    """
    # Today's date is loop-invariant: compute it once, not per table.
    today_str = datetime.today().strftime('%Y%m%d')
    msg_ls = []
    for db in db_property:
        # Build one engine per DB instead of one per table.
        engine = get_sql_engine(db_property[db])
        for table in db_property[db]['tables']:
            table_df = pd.read_sql_table(table, engine)
            today_flag = table_df[dt].str.contains(today_str)
            if len(table_df[today_flag]) == 0:
                msg_ls.append(
                    'DB: {0}, Table: {1}, Rows: {2}'.format(db, table, 0))
    return msg_ls
| {
"content_hash": "01a2b5f1b645f0a137a24151f9ff3afe",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 70,
"avg_line_length": 30.64,
"alnum_prop": 0.5502610966057441,
"repo_name": "bowen0701/python_handson",
"id": "90a1021cfe399f5925d6e13067a724ca47343699",
"size": "1532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db_monitor.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "62005"
}
],
"symlink_target": ""
} |
import os
import unittest
from telemetry.internal.results import base_test_results_unittest
from telemetry.internal.results import page_test_results
from telemetry import page as page_module
from telemetry.page import page_set
from telemetry.timeline import trace_data
from telemetry.value import failure
from telemetry.value import histogram
from telemetry.value import scalar
from telemetry.value import skip
from telemetry.value import trace
class PageTestResultsTest(base_test_results_unittest.BaseTestResultsUnittest):
  """Tests for PageTestResults: failure/skip bookkeeping, value
  validation, and trace-value cleanup.
  """

  def setUp(self):
    # A three-page story set shared by every test case below.
    ps = page_set.PageSet(base_dir=os.path.dirname(__file__))
    ps.AddStory(page_module.Page("http://www.bar.com/", ps, ps.base_dir))
    ps.AddStory(page_module.Page("http://www.baz.com/", ps, ps.base_dir))
    ps.AddStory(page_module.Page("http://www.foo.com/", ps, ps.base_dir))
    self.page_set = ps

  @property
  def pages(self):
    return self.page_set.pages

  def testFailures(self):
    # A page with a FailureValue is reported as failed; one without, ok.
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.AddValue(
        failure.FailureValue(self.pages[0], self.CreateException()))
    results.DidRunPage(self.pages[0])

    results.WillRunPage(self.pages[1])
    results.DidRunPage(self.pages[1])

    self.assertEqual(set([self.pages[0]]), results.pages_that_failed)
    self.assertEqual(set([self.pages[1]]), results.pages_that_succeeded)

    self.assertEqual(2, len(results.all_page_runs))
    self.assertTrue(results.all_page_runs[0].failed)
    self.assertTrue(results.all_page_runs[1].ok)

  def testSkips(self):
    # A skipped page still counts as succeeded.
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.AddValue(skip.SkipValue(self.pages[0], 'testing reason'))
    results.DidRunPage(self.pages[0])

    results.WillRunPage(self.pages[1])
    results.DidRunPage(self.pages[1])

    self.assertTrue(results.all_page_runs[0].skipped)
    self.assertEqual(self.pages[0], results.all_page_runs[0].story)
    self.assertEqual(set([self.pages[0], self.pages[1]]),
                     results.pages_that_succeeded)

    self.assertEqual(2, len(results.all_page_runs))
    self.assertTrue(results.all_page_runs[0].skipped)
    self.assertTrue(results.all_page_runs[1].ok)

  def testBasic(self):
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3))
    results.DidRunPage(self.pages[0])

    results.WillRunPage(self.pages[1])
    results.AddValue(scalar.ScalarValue(self.pages[1], 'a', 'seconds', 3))
    results.DidRunPage(self.pages[1])

    results.PrintSummary()

    values = results.FindPageSpecificValuesForPage(self.pages[0], 'a')
    self.assertEquals(1, len(values))
    v = values[0]
    self.assertEquals(v.name, 'a')
    self.assertEquals(v.page, self.pages[0])

    values = results.FindAllPageSpecificValuesNamed('a')
    assert len(values) == 2

  def testUrlIsInvalidValue(self):
    # 'url' is a reserved value name.
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    self.assertRaises(
        AssertionError,
        lambda: results.AddValue(scalar.ScalarValue(
            self.pages[0], 'url', 'string', 'foo')))

  def testAddSummaryValueWithPageSpecified(self):
    # Summary values must not be tied to a specific page.
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    self.assertRaises(
        AssertionError,
        lambda: results.AddSummaryValue(scalar.ScalarValue(self.pages[0],
                                                           'a', 'units', 3)))

  def testUnitChange(self):
    # The same value name must keep the same units across pages.
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3))
    results.DidRunPage(self.pages[0])

    results.WillRunPage(self.pages[1])
    self.assertRaises(
        AssertionError,
        lambda: results.AddValue(scalar.ScalarValue(
            self.pages[1], 'a', 'foobgrobbers', 3)))

  def testTypeChange(self):
    # The same value name must keep the same value type across pages.
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3))
    results.DidRunPage(self.pages[0])

    results.WillRunPage(self.pages[1])
    self.assertRaises(
        AssertionError,
        lambda: results.AddValue(histogram.HistogramValue(
            self.pages[1], 'a', 'seconds',
            raw_value_json='{"buckets": [{"low": 1, "high": 2, "count": 1}]}')))

  def testGetPagesThatSucceededAllPagesFail(self):
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3))
    results.AddValue(failure.FailureValue.FromMessage(self.pages[0], 'message'))
    results.DidRunPage(self.pages[0])

    results.WillRunPage(self.pages[1])
    results.AddValue(scalar.ScalarValue(self.pages[1], 'a', 'seconds', 7))
    results.AddValue(failure.FailureValue.FromMessage(self.pages[1], 'message'))
    results.DidRunPage(self.pages[1])

    results.PrintSummary()
    self.assertEquals(0, len(results.pages_that_succeeded))

  def testGetSuccessfulPageValuesMergedNoFailures(self):
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3))
    self.assertEquals(1, len(results.all_page_specific_values))
    results.DidRunPage(self.pages[0])

  def testGetAllValuesForSuccessfulPages(self):
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    value1 = scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3)
    results.AddValue(value1)
    results.DidRunPage(self.pages[0])

    results.WillRunPage(self.pages[1])
    value2 = scalar.ScalarValue(self.pages[1], 'a', 'seconds', 3)
    results.AddValue(value2)
    results.DidRunPage(self.pages[1])

    results.WillRunPage(self.pages[2])
    value3 = scalar.ScalarValue(self.pages[2], 'a', 'seconds', 3)
    results.AddValue(value3)
    results.DidRunPage(self.pages[2])

    self.assertEquals(
        [value1, value2, value3], results.all_page_specific_values)

  def testGetAllValuesForSuccessfulPagesOnePageFails(self):
    # Failure values are kept alongside page-specific values.
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    value1 = scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3)
    results.AddValue(value1)
    results.DidRunPage(self.pages[0])

    results.WillRunPage(self.pages[1])
    value2 = failure.FailureValue.FromMessage(self.pages[1], 'Failure')
    results.AddValue(value2)
    results.DidRunPage(self.pages[1])

    results.WillRunPage(self.pages[2])
    value3 = scalar.ScalarValue(self.pages[2], 'a', 'seconds', 3)
    results.AddValue(value3)
    results.DidRunPage(self.pages[2])

    self.assertEquals(
        [value1, value2, value3], results.all_page_specific_values)

  def testTraceValue(self):
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.AddValue(trace.TraceValue(None, trace_data.TraceData({'test' : 1})))
    results.DidRunPage(self.pages[0])

    results.WillRunPage(self.pages[1])
    results.AddValue(trace.TraceValue(None, trace_data.TraceData({'test' : 2})))
    results.DidRunPage(self.pages[1])

    results.PrintSummary()

    values = results.FindAllTraceValues()
    self.assertEquals(2, len(values))

  def testCleanUpCleansUpTraceValues(self):
    results = page_test_results.PageTestResults()
    v0 = trace.TraceValue(None, trace_data.TraceData({'test': 1}))
    v1 = trace.TraceValue(None, trace_data.TraceData({'test': 2}))

    results.WillRunPage(self.pages[0])
    results.AddValue(v0)
    results.DidRunPage(self.pages[0])

    results.WillRunPage(self.pages[1])
    results.AddValue(v1)
    results.DidRunPage(self.pages[1])

    results.CleanUp()
    self.assertTrue(v0.cleaned_up)
    self.assertTrue(v1.cleaned_up)

  def testNoTracesLeftAfterCleanUp(self):
    results = page_test_results.PageTestResults()
    v0 = trace.TraceValue(None, trace_data.TraceData({'test': 1}))
    v1 = trace.TraceValue(None, trace_data.TraceData({'test': 2}))

    results.WillRunPage(self.pages[0])
    results.AddValue(v0)
    results.DidRunPage(self.pages[0])

    results.WillRunPage(self.pages[1])
    results.AddValue(v1)
    results.DidRunPage(self.pages[1])

    results.CleanUp()
    self.assertFalse(results.FindAllTraceValues())
class PageTestResultsFilterTest(unittest.TestCase):
  """Tests for PageTestResults' value_can_be_added_predicate filtering."""

  def setUp(self):
    ps = page_set.PageSet(base_dir=os.path.dirname(__file__))
    ps.AddStory(page_module.Page('http://www.foo.com/', ps, ps.base_dir))
    ps.AddStory(page_module.Page('http://www.bar.com/', ps, ps.base_dir))
    self.page_set = ps

  @property
  def pages(self):
    return self.page_set.pages

  def testFilterValue(self):
    """Only values accepted by the predicate are recorded."""
    def AcceptValueNamed_a(value, _):
      return value.name == 'a'
    results = page_test_results.PageTestResults(
        value_can_be_added_predicate=AcceptValueNamed_a)
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3))
    results.AddValue(scalar.ScalarValue(self.pages[0], 'b', 'seconds', 3))
    results.DidRunPage(self.pages[0])
    results.WillRunPage(self.pages[1])
    results.AddValue(scalar.ScalarValue(self.pages[1], 'a', 'seconds', 3))
    results.AddValue(scalar.ScalarValue(self.pages[1], 'd', 'seconds', 3))
    results.DidRunPage(self.pages[1])
    results.PrintSummary()
    # assertEqual replaces the deprecated assertEquals alias.
    self.assertEqual(
        [('a', 'http://www.foo.com/'), ('a', 'http://www.bar.com/')],
        [(v.name, v.page.url) for v in results.all_page_specific_values])

  def testFilterIsFirstResult(self):
    """The predicate also receives whether a value came from the first run."""
    def AcceptSecondValues(_, is_first_result):
      return not is_first_result
    results = page_test_results.PageTestResults(
        value_can_be_added_predicate=AcceptSecondValues)
    # First results (filtered out)
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(self.pages[0], 'a', 'seconds', 7))
    results.AddValue(scalar.ScalarValue(self.pages[0], 'b', 'seconds', 8))
    results.DidRunPage(self.pages[0])
    results.WillRunPage(self.pages[1])
    results.AddValue(scalar.ScalarValue(self.pages[1], 'a', 'seconds', 5))
    results.AddValue(scalar.ScalarValue(self.pages[1], 'd', 'seconds', 6))
    results.DidRunPage(self.pages[1])
    # Second results
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(self.pages[0], 'a', 'seconds', 3))
    results.AddValue(scalar.ScalarValue(self.pages[0], 'b', 'seconds', 4))
    results.DidRunPage(self.pages[0])
    results.WillRunPage(self.pages[1])
    results.AddValue(scalar.ScalarValue(self.pages[1], 'a', 'seconds', 1))
    results.AddValue(scalar.ScalarValue(self.pages[1], 'd', 'seconds', 2))
    results.DidRunPage(self.pages[1])
    results.PrintSummary()
    expected_values = [
        ('a', 'http://www.foo.com/', 3),
        ('b', 'http://www.foo.com/', 4),
        ('a', 'http://www.bar.com/', 1),
        ('d', 'http://www.bar.com/', 2)]
    actual_values = [(v.name, v.page.url, v.value)
                     for v in results.all_page_specific_values]
    self.assertEqual(expected_values, actual_values)

  def testFailureValueCannotBeFiltered(self):
    """Failure values bypass the filtering predicate."""
    def AcceptValueNamed_a(value, _):
      return value.name == 'a'
    results = page_test_results.PageTestResults(
        value_can_be_added_predicate=AcceptValueNamed_a)
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(self.pages[0], 'b', 'seconds', 8))
    failure_value = failure.FailureValue.FromMessage(self.pages[0], 'failure')
    results.AddValue(failure_value)
    results.DidRunPage(self.pages[0])
    results.PrintSummary()
    # Although predicate says only accept values named 'a', the failure value is
    # added anyway.
    self.assertEqual(len(results.all_page_specific_values), 1)
    self.assertIn(failure_value, results.all_page_specific_values)

  def testSkipValueCannotBeFiltered(self):
    """Skip values bypass the filtering predicate."""
    def AcceptValueNamed_a(value, _):
      return value.name == 'a'
    results = page_test_results.PageTestResults(
        value_can_be_added_predicate=AcceptValueNamed_a)
    results.WillRunPage(self.pages[0])
    skip_value = skip.SkipValue(self.pages[0], 'skip for testing')
    results.AddValue(scalar.ScalarValue(self.pages[0], 'b', 'seconds', 8))
    results.AddValue(skip_value)
    results.DidRunPage(self.pages[0])
    results.PrintSummary()
    # Although predicate says only accept value with named 'a', skip value is
    # added anyway.
    self.assertEqual(len(results.all_page_specific_values), 1)
    self.assertIn(skip_value, results.all_page_specific_values)
| {
"content_hash": "cb86dd7fb765732b88a34d166162a254",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 80,
"avg_line_length": 38.16265060240964,
"alnum_prop": 0.6951854775059195,
"repo_name": "SaschaMester/delicium",
"id": "dcfbb6488950331d4472d9e4482f6ef4f54f1710",
"size": "12833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/internal/results/page_test_results_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "23829"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "4171711"
},
{
"name": "C++",
"bytes": "243066171"
},
{
"name": "CSS",
"bytes": "935112"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27211018"
},
{
"name": "Java",
"bytes": "14285999"
},
{
"name": "JavaScript",
"bytes": "20413885"
},
{
"name": "Makefile",
"bytes": "23496"
},
{
"name": "Objective-C",
"bytes": "1725804"
},
{
"name": "Objective-C++",
"bytes": "9880229"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "178732"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "478406"
},
{
"name": "Python",
"bytes": "8261413"
},
{
"name": "Shell",
"bytes": "482077"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
from asyncio import iscoroutinefunction
from typing import Any, Callable
__all__ = ("VirtualStep",)
class VirtualStep:
    """Thin wrapper around a step callable.

    Exposes the callable's name and coroutine-ness, and delegates calls
    straight through to the wrapped function.
    """

    def __init__(self, orig_step: Callable[..., None]) -> None:
        self._orig_step = orig_step

    @property
    def name(self) -> str:
        """Return the wrapped callable's ``__name__``."""
        return self._orig_step.__name__

    def is_coro(self) -> bool:
        """Return True when the wrapped callable is an async function."""
        return iscoroutinefunction(self._orig_step)

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        """Invoke the wrapped callable with the given arguments."""
        return self._orig_step(*args, **kwargs)

    def __repr__(self) -> str:
        return "<{} {!r}>".format(type(self).__name__, self._orig_step.__name__)

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, type(self)):
            return False
        return vars(self) == vars(other)
| {
"content_hash": "dd79df21895c1a329e23dcc1915cfd59",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 86,
"avg_line_length": 29.28,
"alnum_prop": 0.5819672131147541,
"repo_name": "nikitanovosibirsk/vedro",
"id": "6f5893ce9f46511622327e72f288653fc5f653e4",
"size": "732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vedro/core/_virtual_step.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1384"
},
{
"name": "Python",
"bytes": "416588"
}
],
"symlink_target": ""
} |
import sys, os,getopt,shutil
from subprocess import call, Popen, PIPE
def precompile():
    """Ensure ../data.common contains lineNumber100000h2.csv.gz.

    If the file is missing, imports the sibling ``ampFile`` module from that
    directory and asks it to generate the gzip file, then unloads the module
    so repeated calls behave the same way. The working directory is restored
    even if generation fails.
    """
    original_dir = os.getcwd()
    os.chdir('../data.common')
    try:
        if not os.path.isfile('lineNumber100000h2.csv.gz'):
            # Parenthesized single-arg print works on both Python 2 and 3.
            print('File does not exist, creating.')
            sys.path.append(os.getcwd())
            try:
                amp = __import__('ampFile')
                amp.createGzipFile('lineNumber', 100000, 2)
                del amp
                # Drop the cached module so a later import re-reads the file.
                del sys.modules['ampFile']
            finally:
                sys.path.remove(os.getcwd())
    finally:
        # Always restore the caller's working directory.
        os.chdir(original_dir)
| {
"content_hash": "13f2f574db4bf2d89c8b08e731b2f2de",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 55,
"avg_line_length": 30.3125,
"alnum_prop": 0.6144329896907217,
"repo_name": "hildrum/streamsx.fastcsv",
"id": "cd80ab03dd72c6dc86d827e10ffa0b0c3a6ff9fe",
"size": "507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/source/compressedHeader/scenario.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "7241"
},
{
"name": "Perl",
"bytes": "1063"
},
{
"name": "Python",
"bytes": "15294"
}
],
"symlink_target": ""
} |
import numpy as np
from .base import LinearClassifierMixin, SparseCoefMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..svm.base import BaseLibLinear
class LogisticRegression(BaseLibLinear, LinearClassifierMixin,
                         _LearntSelectorMixin, SparseCoefMixin):
    """Logistic Regression (aka logit, MaxEnt) classifier.

    In the multiclass case, the training algorithm uses a one-vs.-all (OvA)
    scheme, rather than the "true" multinomial LR.

    This class implements L1 and L2 regularized logistic regression using the
    `liblinear` library. It can handle both dense and sparse input. Use
    C-ordered arrays or CSR matrices containing 64-bit floats for optimal
    performance; any other input format will be converted (and copied).

    Parameters
    ----------
    penalty : string, 'l1' or 'l2'
        Used to specify the norm used in the penalization.

    dual : boolean
        Dual or primal formulation. Dual formulation is only
        implemented for l2 penalty. Prefer dual=False when
        n_samples > n_features.

    C : float, optional (default=1.0)
        Inverse of regularization strength; must be a positive float.
        Like in support vector machines, smaller values specify stronger
        regularization.

    fit_intercept : bool, default: True
        Specifies if a constant (a.k.a. bias or intercept) should be
        added to the decision function.

    intercept_scaling : float, default: 1
        when self.fit_intercept is True, instance vector x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight.
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased.

    class_weight : {dict, 'auto'}, optional
        Over-/undersamples the samples of each class according to the given
        weights. If not given, all classes are supposed to have weight one.
        The 'auto' mode selects weights inversely proportional to class
        frequencies in the training set.

    tol : float, optional
        Tolerance for stopping criteria.

    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data.

    Attributes
    ----------
    `coef_` : array, shape = [n_classes, n_features]
        Coefficient of the features in the decision function.

        `coef_` is readonly property derived from `raw_coef_` that \
        follows the internal memory layout of liblinear.

    `intercept_` : array, shape = [n_classes]
        Intercept (a.k.a. bias) added to the decision function.
        If `fit_intercept` is set to False, the intercept is set to zero.

    See also
    --------
    LinearSVC

    Notes
    -----
    The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon,
    to have slightly different results for the same input data. If
    that happens, try with a smaller tol parameter.

    References:

    LIBLINEAR -- A Library for Large Linear Classification
    http://www.csie.ntu.edu.tw/~cjlin/liblinear/

    Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
    methods for logistic regression and maximum entropy models.
    Machine Learning 85(1-2):41-75.
    http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
    """

    def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
                 fit_intercept=True, intercept_scaling=1, class_weight=None,
                 random_state=None):
        # loss='lr' selects liblinear's logistic-regression solvers.
        super(LogisticRegression, self).__init__(
            penalty=penalty, dual=dual, loss='lr', tol=tol, C=C,
            fit_intercept=fit_intercept, intercept_scaling=intercept_scaling,
            class_weight=class_weight, random_state=random_state)

    def predict_proba(self, X):
        """Probability estimates.

        The returned estimates for all classes are ordered by the
        label of classes.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        T : array-like, shape = [n_samples, n_classes]
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in ``self.classes_``.
        """
        # _predict_proba_lr is inherited (not defined here — presumably from
        # LinearClassifierMixin; verify against that mixin).
        return self._predict_proba_lr(X)

    def predict_log_proba(self, X):
        """Log of probability estimates.

        The returned estimates for all classes are ordered by the
        label of classes.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        T : array-like, shape = [n_samples, n_classes]
            Returns the log-probability of the sample for each class in the
            model, where classes are ordered as they are in ``self.classes_``.
        """
        return np.log(self.predict_proba(X))
| {
"content_hash": "70b9b655066de68800f078350e860dfb",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 78,
"avg_line_length": 37.98571428571429,
"alnum_prop": 0.6572019556224145,
"repo_name": "treycausey/scikit-learn",
"id": "e347c37dc4deb45a9bcf697e0f41be33c9845ce0",
"size": "5401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/linear_model/logistic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18150950"
},
{
"name": "C++",
"bytes": "1807769"
},
{
"name": "JavaScript",
"bytes": "20564"
},
{
"name": "Python",
"bytes": "5083789"
},
{
"name": "Shell",
"bytes": "3768"
}
],
"symlink_target": ""
} |
"""A widget to select a file path.
The widget stores previously selected paths in QSettings
Example
-------
def func(t):
print(t)
from cmt.widgets.filepathwidget import FilePathWidget
widget = FilePathWidget(label='My File',
file_mode=FilePathWidget.existing_file,
name='unique_name',
file_filter='Python Files (*.py)')
widget.path_changed.connect(func)
widget.show()
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from PySide2.QtCore import Signal
from PySide2.QtWidgets import (
QWidget,
QHBoxLayout,
QLabel,
QComboBox,
QSizePolicy,
QPushButton,
QDialog,
QFileDialog,
)
from cmt.ui.stringcache import StringCache
class FilePathWidget(QWidget):
    """Widget allowing file path selection with a persistent cache.

    Users should connect to the path_changed signal.
    """

    # File-dialog modes; each is an index into the QFileDialog.FileMode list
    # built in show_dialog().
    any_file = 0
    existing_file = 1
    directory = 2
    # Emitted with the new path whenever a valid file/directory is entered.
    path_changed = Signal(str)

    def __init__(
        self, label=None, file_mode=any_file, file_filter=None, name=None, parent=None
    ):
        """Constructor

        :param label: Optional label text.
        :param file_mode: Sets the file dialog mode. One of
            FilePathWidget.[any_file|existing_file|directory].
        :param file_filter: File filter text example 'Python Files (*.py)'.
        :param name: Unique name used to query persistent data.
        :param parent: Parent QWidget.
        """
        super(FilePathWidget, self).__init__(parent)
        self.file_mode = file_mode
        if file_filter is None:
            file_filter = "Any File (*)"
        self.file_filter = file_filter
        # Persistent recent-paths model keyed by the widget's unique name.
        self.cache = StringCache("cmt.filepathwidget.{}".format(name), parent=self)
        self._layout = QHBoxLayout(self)
        self._layout.setContentsMargins(0, 0, 0, 0)
        self.setLayout(self._layout)
        if label:
            label = QLabel(label, self)
            label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
            self._layout.addWidget(label)
        # Editable combo box backed by the cache so recent paths are offered.
        self._combo_box = QComboBox(self)
        self._combo_box.setEditable(True)
        self._combo_box.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
        self._combo_box.setInsertPolicy(QComboBox.InsertAtTop)
        self._combo_box.setMinimumWidth(50)
        self._combo_box.setModel(self.cache)
        self._combo_box.editTextChanged.connect(self.edit_changed)
        self._layout.addWidget(self._combo_box)
        button = QPushButton("Browse", self)
        button.released.connect(self.show_dialog)
        button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
        self._layout.addWidget(button)

    @property
    def path(self):
        """str: The path text currently shown in the combo box."""
        return self._combo_box.currentText()

    @path.setter
    def path(self, value):
        self._combo_box.setEditText(value)

    def edit_changed(self, text):
        """Slot called whenever the text changes in the combobox.

        :param text: New text.
        """
        if not text:
            return
        # Normalize to forward slashes for cross-platform consistency.
        text = text.replace("\\", "/")
        # Only emit/cache paths that actually exist and match the mode.
        if (os.path.isfile(text) and self.file_mode != FilePathWidget.directory) or (
            os.path.isdir(text) and self.file_mode == FilePathWidget.directory
        ):
            self.path_changed.emit(text)
            # Block signals so pushing into the cache model does not
            # re-trigger this slot recursively.
            self._combo_box.blockSignals(True)
            self._push(text)
            self._combo_box.blockSignals(False)

    def show_dialog(self):
        """Show the file browser dialog."""
        dialog = QFileDialog(self)
        dialog.setNameFilter(self.file_filter)
        file_mode = [
            QFileDialog.AnyFile,
            QFileDialog.ExistingFile,
            QFileDialog.Directory,
        ][self.file_mode]
        dialog.setFileMode(file_mode)
        dialog.setModal(True)
        if self.cache:
            dialog.setHistory(self.cache.stringList())
            # Start browsing from the most recent cached path that still exists.
            for value in self.cache.stringList():
                if os.path.exists(value):
                    if os.path.isfile(value):
                        directory = os.path.dirname(value)
                        dialog.selectFile(value)
                    else:
                        directory = value
                    dialog.setDirectory(directory)
                    break
        if dialog.exec_() == QDialog.Accepted:
            path = dialog.selectedFiles()
            if path:
                self._push(path[0])

    def _push(self, path):
        """Push a new path onto the cache.

        :param path: Path value.
        """
        self.cache.push(path)
        # Select index 0 — presumably where StringCache.push places the new
        # entry; verify against StringCache.
        self._combo_box.setCurrentIndex(0)
| {
"content_hash": "794de0ea875e027d29c1991bd4df017a",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 86,
"avg_line_length": 31.06,
"alnum_prop": 0.605494741360807,
"repo_name": "chadmv/cmt",
"id": "c854000d13861c4fb4c8bb2156618c2531949cb8",
"size": "4659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/cmt/ui/widgets/filepathwidget.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "439"
},
{
"name": "C",
"bytes": "1335"
},
{
"name": "C++",
"bytes": "125760"
},
{
"name": "CMake",
"bytes": "1790"
},
{
"name": "Python",
"bytes": "624011"
}
],
"symlink_target": ""
} |
import abc
import collections
import threading
import queue
import os
import time
import types
from nltk import BlanklineTokenizer, PunktSentenceTokenizer, WhitespaceTokenizer
from xml.dom.minidom import Node, parseString
import re
class ComparableMixin(object):
    """
    Mixin implementing the full rich-comparison interface by delegating to a
    single comparable key that subclasses supply via _cmp_key().

    This class is Python3 compatible.

    NOTE: This is a slightly modified version of the one suggested by the following blog:
    https://regebro.wordpress.com/2010/12/13/python-implementing-rich-comparison-the-correct-way/
    """

    def _compare(self, other, method):
        # Custom exceptions raised by _cmp_checks propagate unchanged.
        self._cmp_checks(other)
        try:
            return method(self._cmp_key(), other._cmp_key())
        except (AttributeError, TypeError):
            # other has no _cmp_key, or the keys are not comparable,
            # so comparison with "other" is impossible.
            return NotImplemented

    def __eq__(self, other):
        return self._compare(other, lambda mine, theirs: mine == theirs)

    def __ne__(self, other):
        return self._compare(other, lambda mine, theirs: mine != theirs)

    def __lt__(self, other):
        return self._compare(other, lambda mine, theirs: mine < theirs)

    def __le__(self, other):
        return self._compare(other, lambda mine, theirs: mine <= theirs)

    def __gt__(self, other):
        return self._compare(other, lambda mine, theirs: mine > theirs)

    def __ge__(self, other):
        return self._compare(other, lambda mine, theirs: mine >= theirs)

    def _cmp_key(self):
        """
        Implement the delegation method.

        :return: comparable member
        """
        raise NotImplementedError()

    def _cmp_checks(self, other):
        """
        Hook for extra checks that must hold for the comparison to make sense.
        Any custom exceptions thrown here are preserved and propagated in
        original form.

        :param other:
        :return:
        """
        pass

    def __hash__(self):
        # Defining __eq__ would otherwise set __hash__ to None; keep the
        # default identity hash.
        return object.__hash__(self)
class RingBufferWithCallback(collections.deque):
    """
    Bounded deque that calls a callback when an item falls out of the buffer
    due to an append beyond maxlen. On manual removal it does not — that is
    the user's responsibility.
    """
    _callback = None

    def __init__(self, iterable=(), maxlen=None, callback=None):
        """
        :param iterable: Initial contents.
        :param maxlen: Capacity; None means unbounded (callback never fires).
        :param callback: Callable invoked with each evicted item.
        :raises ValueError: If callback is neither None nor callable.
        """
        if callback is not None and not callable(callback):
            raise ValueError('Callback: {} is not callable'.format(callback))
        self._callback = callback
        super(RingBufferWithCallback, self).__init__(iterable, maxlen)

    def append(self, item):
        """Append item, routing any evicted element through the callback."""
        # Guard against maxlen=None: `len(self) >= None` raises TypeError on
        # Python 3, so an unbounded buffer must skip the eviction path.
        if self.maxlen is not None and len(self) >= self.maxlen:
            if self._callback is not None:
                self._callback(self.popleft())
        super(RingBufferWithCallback, self).append(item)
class StoppableThread(threading.Thread):
    """
    Thread class with a stop() method. The thread itself has to check
    regularly for the stopped() condition.
    """

    def __init__(self, *args, **kwargs):
        super(StoppableThread, self).__init__(*args, **kwargs)
        self._stopper = threading.Event()

    def stop(self):
        """Signal the thread that it should stop."""
        self._stopper.set()

    def stopped(self):
        """Return True once stop() has been requested."""
        # Event.is_set() replaces the deprecated camelCase isSet() alias.
        return self._stopper.is_set()
class RotatingFileBufferStopped(Exception):
    """Raised when appending to a RotatingFileBuffer whose deletion thread has been stopped."""
    pass
class RotatingFileBuffer(RingBufferWithCallback):
    """
    This class holds the given number of file names and when they are pushed out of the buffer it deletes
    them asynchronously. Preferably just the names and not open file handles.
    """
    # Worker thread performing deletions (None when asynchronous=False).
    _deletion_thread = None
    # Queue of file names waiting to be deleted by the worker.
    _deletion_queue = None

    def __init__(self, maxlen, asynchronous=True):
        """
        :param maxlen: Number of file names retained before eviction/deletion.
        :param asynchronous: When True, deletions run on a daemon worker thread.
        """
        super(RotatingFileBuffer, self).__init__(maxlen=maxlen, callback=self.delete_file)
        # In this case threads make sense since it is I/O we are going to be waiting for and that is releasing the GIL.
        # Deletion is the means for us to send down files for deletion to the other thread(maybe process later)....
        self._deletion_queue = queue.Queue()
        if asynchronous is True:
            self._deletion_thread = StoppableThread(
                target=self._delete_thread_loop,
                kwargs={'q': self._deletion_queue}
            )
            # Ensuring the thread will not leave us hanging
            self._deletion_thread.daemon = True
            self._deletion_thread.start()

    @classmethod
    def _do_delete(cls, files_waiting):
        """Try to delete every path in files_waiting; return the paths that failed."""
        failed_files = []
        # Now we can try to see if we have anything to delete
        while files_waiting:
            item = files_waiting.pop()
            full_path = os.path.abspath(item)
            if os.path.exists(item):
                # File is still there try to delete it
                # If not we do nothing. The loop discards the name
                try:
                    os.remove(full_path)
                except IOError:
                    # Horrible! Quick, put it back... NEXT
                    failed_files.append(item)
        return failed_files

    @classmethod
    def _do_consume(cls, q, files_waiting, default_wait):
        """Pull at most one name from q (waiting default_wait seconds), then run a deletion pass."""
        try:
            files_waiting.append(q.get(timeout=default_wait))
        except queue.Empty:
            pass
        failed_files = cls._do_delete(files_waiting)
        return failed_files

    @classmethod
    def _delete_thread_loop(cls, q):
        """Worker loop: consume names until stopped and q is drained, then retry leftovers."""
        files_waiting = []
        default_wait = 0.2
        while not threading.current_thread().stopped() or not q.empty():
            files_waiting = cls._do_consume(q=q, files_waiting=files_waiting, default_wait=default_wait)
            time.sleep(0.1)
        # Keep retrying any paths that failed to delete before exiting.
        while files_waiting:
            files_waiting = cls._do_delete(files_waiting=files_waiting)
            time.sleep(0.1)

    def delete_file(self, item):
        """
        This function hands the file down to our worker thread to deal with it.

        :param item: File name to delete.
        :return:
        """
        self._deletion_queue.put(item)
        if self._deletion_thread is None:
            # Synchronous mode: drain the queue inline on the caller's thread.
            files_waiting = []
            default_wait = 0.1
            while files_waiting or not self._deletion_queue.empty():
                files_waiting = self._do_consume(
                    q=self._deletion_queue,
                    files_waiting=files_waiting,
                    default_wait=default_wait
                )

    def append(self, item):
        """
        This override makes sure that we don't add to an asynchronously managed buffer that is about to be shut down.

        :param item: The file name
        :return:
        :raises RotatingFileBufferStopped: if the deletion thread was stopped.
        """
        if self._deletion_thread is not None:
            if self._deletion_thread.stopped():
                raise RotatingFileBufferStopped('File deletion thread is stopped!')
        super(RotatingFileBuffer, self).append(item)
def tokenize_english_document(input_text):
    """
    This is a crude tokenizer for input conversations in English.

    Splits the text into blank-line-separated blocks (one speaker each), then
    sentences, then greedily wraps words into subtitle groups.

    :param input_text: raw conversation text
    :return: list of subtitles, each a list of at most 2 lines of up to 38
             characters (a single word longer than 38 characters keeps its
             own, over-long line)
    """
    end_list = []
    block_tokenizer = BlanklineTokenizer()
    sentence_tokenizer = PunktSentenceTokenizer()
    word_tokenizer = WhitespaceTokenizer()
    # using the 38 characters in one line rule from ITV subtitle guidelines
    characters_per_line = 38
    lines_per_subtitle = 2
    blocks = block_tokenizer.tokenize(input_text)
    for block in blocks:
        # We have one speaker
        sentences = sentence_tokenizer.tokenize(block)
        # We have the sentences
        for sentence in sentences:
            words = word_tokenizer.tokenize(sentence)
            # Reversed copy so words can be consumed front-to-back with pop().
            reverse_words = words[::-1]
            lines = []
            current_line = ''
            line_full = False
            while reverse_words:
                word = reverse_words.pop()
                longer_line = ' '.join([current_line, word]).strip()
                if len(longer_line) > characters_per_line and len(current_line):
                    # The longer line is overreaching boundaries
                    # Put the word back; it starts the next line.
                    reverse_words.append(word)
                    line_full = True
                elif len(word) >= characters_per_line:
                    # Very long words
                    current_line = longer_line
                    line_full = True
                else:
                    current_line = longer_line
                if line_full:
                    lines.append(current_line)
                    current_line = ''
                    line_full = False
                if len(lines) >= lines_per_subtitle:
                    # Subtitle is full: flush its lines as one entry.
                    end_list.append(lines)
                    lines = []
            # Flush any partially filled line/subtitle at end of sentence.
            if current_line:
                lines.append(current_line)
            if lines:
                end_list.append(lines)
    return end_list
def _assert_asm_is_defined(value, member_name, class_name):
    """Default AbstractStaticMember validator: the value must be defined.

    :param value: concrete value assigned to the member on a subclass
    :param member_name: name of the abstract static member
    :param class_name: name of the class being validated
    :raises TypeError: if the member is still None or NotImplemented
    """
    if value in (None, NotImplemented):
        # The original message used '\`' — an invalid escape sequence that
        # leaked literal backslashes into the text (and warns on modern
        # Python); plain backticks are intended.
        raise TypeError(
            'Abstract static member: `{}.{}` does not match the criteria'.format(
                class_name,
                member_name
            )
        )
def validate_types_only(value, member_name, class_name):
    """AbstractStaticMember validator accepting a type/class (or the ANY
    wildcard), or a tuple of them.

    :raises TypeError: if any candidate is neither a type nor ANY.
    """
    candidates = value if isinstance(value, tuple) else (value,)
    for candidate in candidates:
        # Check isinstance first so ANY is only consulted for non-types.
        if not isinstance(candidate, type) and candidate is not ANY:
            raise TypeError(
                'Abstract static member: \'{}.{}\' is not a type or class'.format(
                    class_name,
                    member_name
                )
            )
class AnyType(object):
    """Sentinel type whose instances compare equal to every other object."""

    def __eq__(self, other):
        return True

    def __ne__(self, other):
        # Always the negation of __eq__, i.e. always False.
        return not self.__eq__(other)

    def __repr__(self):
        return '<ANY>'


# Module-level wildcard singleton used by the type validators.
ANY = AnyType()
class AbstractStaticMember(object):
    """
    Placeholder requiring subclasses to define certain static attributes via a
    customizable validator. The idea is that all static members should be
    initialized to a value by the time abstract functions have all been
    implemented.
    """

    _validation_func = None

    def __init__(self, validation_func=None):
        # Fall back to the default "must be defined" validator.
        self._validation_func = (
            _assert_asm_is_defined if validation_func is None else validation_func
        )

    def validate(self, value, member_name, class_name):
        """Run the configured validator against a concrete member value."""
        self._validation_func(value, member_name, class_name)
class AutoRegisteringABCMeta(abc.ABCMeta):
    """
    This metaclass gets us automatic class registration and cooperates with AbstractStaticMember.
    If none of the 2 features are needed it just provides the basic abc.ABCMeta functionality.
    For the auto registration an abstract class needs to implement the auto_register_impl classmethod.
    """
    def __new__(mcls, name, bases, namespace):
        cls = super(AutoRegisteringABCMeta, mcls).__new__(mcls, name, bases, namespace)
        # Members declared as AbstractStaticMember directly on this class body.
        abstract_members = set(name
                               for name, value in list(namespace.items())
                               if isinstance(value, AbstractStaticMember))
        abstracts = getattr(cls, "__abstractmethods__", set())
        if not abstracts:
            # This means the class is not abstract so we should not have any abstract static members
            validated_members = set()
            for base in bases:
                if isinstance(base, mcls):
                    for base_member in getattr(base, '_abc_static_members', set()):
                        if base_member in validated_members:
                            continue
                        value = getattr(cls, base_member, NotImplemented)
                        if isinstance(value, AbstractStaticMember) or value is NotImplemented:
                            # Still unimplemented on this concrete class.
                            abstract_members.add(base_member)
                        else:
                            # Validate the concrete value with the validator
                            # declared on the base's AbstractStaticMember.
                            getattr(base, base_member).validate(value, base_member, name)
                            validated_members.add(base_member)
                    # Hook: lets abstract bases register their implementations.
                    base.auto_register_impl(cls)
            if abstract_members:
                raise TypeError('{} must implement abstract static members: [{}]'.format(
                    name,
                    ', '.join(abstract_members)
                ))
        if namespace.get('auto_register_impl') is None:
            # Default registration hook is a no-op classmethod.
            cls.auto_register_impl = classmethod(lambda x, y: None)
        cls._abc_static_members = frozenset(abstract_members)
        # A class that explicitly declares `__metaclass__` in its own body is
        # treated as an un-instantiable interface (project convention —
        # presumably a Python 2 holdover; verify against subclasses).
        cls._abc_interface = '__metaclass__' in list(namespace.keys())
        return cls
    def __call__(cls, *args, **kwargs):
        # Refuse to instantiate classes marked as interfaces.
        if cls._abc_interface is True:
            raise TypeError('Can\'t instantiate {} is an abstract base class.'.format(cls))
        instance = super(AutoRegisteringABCMeta, cls).__call__(*args, **kwargs)
        return instance
HTTPProxyConfig = collections.namedtuple('HTTPProxyConfig', ['host', 'port'])
# The following section is taken from https://github.com/django/django/blob/master/django/test/utils.py
# This is a relatively simple XML comparator implementation based on Python's minidom library.
# NOTE: different namespace aliases can break this code. The code superficial on namespaces. It ignores them
# In very rare cases when an element has 2 attributes with the same localName but their namespaces differ
# this implementation might say the document differs. It also avoids attribute sorting by comparing
# and attr_dict that it builds from minidom attributes.
#
# The Django Project is protected by the BSD Licence.
def strip_quotes(want, got):
    """Remove surrounding quote characters from doctest output values.

    Both values must be quoted the same way — either as plain string
    literals ('foo' / "foo") or as u'...'-style unicode literals; mixed or
    unquoted values are returned unchanged.
    """
    def _plain_quoted(s):
        s = s.strip()
        return len(s) >= 2 and s[0] == s[-1] and s[0] in ('"', "'")

    def _unicode_quoted(s):
        s = s.strip()
        return len(s) >= 3 and s[0] == 'u' and s[1] == s[-1] and s[1] in ('"', "'")

    if _plain_quoted(want) and _plain_quoted(got):
        return want.strip()[1:-1], got.strip()[1:-1]
    if _unicode_quoted(want) and _unicode_quoted(got):
        return want.strip()[2:-1], got.strip()[2:-1]
    return want, got
def compare_xml(want, got):
    """Attempt an 'xml-comparison' of want and got.

    Plain string comparison is too strict because, for instance, attribute
    order must not matter. Comment nodes are ignored, and leading/trailing
    whitespace is stripped from both chunks.

    Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py

    This is a close but not full implementation of fn:deep-equals. A false
    positive is possible when an element carries two attributes with the same
    local name in different namespaces:
    i.e.: <elem ns1:myattr="1" /> != <elem ns2:myattr="1" /> if ns1 != ns2
    """
    whitespace_run = re.compile(r'[ \t\n][ \t\n]+')

    def collapse_ws(text):
        return whitespace_run.sub(' ', text)

    def direct_text(elem):
        return ''.join(child.data for child in elem.childNodes
                       if child.nodeType == Node.TEXT_NODE)

    def element_children(elem):
        return [child for child in elem.childNodes
                if child.nodeType == Node.ELEMENT_NODE]

    def attributes_of(elem):
        return dict(list(elem.attributes.items()))

    def elements_equal(expected, actual):
        if expected.tagName != actual.tagName:
            return False
        if collapse_ws(direct_text(expected)) != collapse_ws(direct_text(actual)):
            return False
        if attributes_of(expected) != attributes_of(actual):
            return False
        expected_kids = element_children(expected)
        actual_kids = element_children(actual)
        if len(expected_kids) != len(actual_kids):
            return False
        return all(elements_equal(e_kid, a_kid)
                   for e_kid, a_kid in zip(expected_kids, actual_kids))

    def root_element(document):
        # First non-comment top-level node.
        for node in document.childNodes:
            if node.nodeType != Node.COMMENT_NODE:
                return node

    want, got = strip_quotes(want, got)
    want = want.strip().replace('\\n', '\n')
    got = got.strip().replace('\\n', '\n')
    # If the string is not a complete xml document, add a root element so
    # fragments such as "<foo/><bar/>" can be compared too.
    if not want.startswith('<?xml'):
        want = '<root>%s</root>' % want
        got = '<root>%s</root>' % got
    # Parse both strings and compare the resulting trees.
    return elements_equal(root_element(parseString(want)),
                          root_element(parseString(got)))
"content_hash": "e5ecc5345782c509c0e484b066284035",
"timestamp": "",
"source": "github",
"line_count": 472,
"max_line_length": 119,
"avg_line_length": 35.692796610169495,
"alnum_prop": 0.6010565679349439,
"repo_name": "bbc/ebu-tt-live-toolkit",
"id": "00b3bc09c832a879cc405ed6a093f0d85faaf6dd",
"size": "16847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ebu_tt_live/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "827"
},
{
"name": "CSS",
"bytes": "1835"
},
{
"name": "Gherkin",
"bytes": "184126"
},
{
"name": "HTML",
"bytes": "16970"
},
{
"name": "JavaScript",
"bytes": "156508"
},
{
"name": "Makefile",
"bytes": "1320"
},
{
"name": "Python",
"bytes": "665429"
}
],
"symlink_target": ""
} |
"""
Test the analysis code for the model chat task.
"""
import glob
import json
import os
import re
from typing import Any, Dict, List
import pytest
from pytest_regressions.file_regression import FileRegressionFixture
import parlai.utils.testing as testing_utils
try:
from parlai.crowdsourcing.tasks.model_chat.analysis.compile_results import (
ModelChatResultsCompiler,
)
from parlai.crowdsourcing.utils.tests import check_stdout
    class TestModelChatResultsCompiler(ModelChatResultsCompiler):
        """Results compiler whose task data comes from sample JSON files on disk.

        Overrides get_task_data() so the analysis can run without a live
        Mephisto backend, reading saved chats from self.results_folder.
        """

        def get_task_data(self) -> List[Dict[str, Any]]:
            """Return fake Mephisto-style task-data dicts built from sample files.

            Scans self.results_folder for subfolders named YYYY_MM_DD and wraps
            each JSON file found there in the dict structure the compiler
            expects from completed live assignments.
            """
            fake_jsons = []
            # Load paths
            date_strings = sorted(
                [
                    obj
                    for obj in os.listdir(self.results_folder)
                    if os.path.isdir(os.path.join(self.results_folder, obj))
                    and re.fullmatch(r'\d\d\d\d_\d\d_\d\d', obj)
                ]
            )
            folders = [os.path.join(self.results_folder, str_) for str_ in date_strings]
            for folder in folders:
                for file_name in sorted(os.listdir(folder)):
                    # Read in file
                    with open(os.path.join(folder, file_name), 'rb') as f:
                        data = json.load(f)
                    # Mimic the shape of a completed Mephisto assignment.
                    worker_id = data['workers'][0]
                    assignment_id = data['assignment_ids'][0]
                    fake_jsons.append(
                        {
                            'data': {'save_data': {'custom_data': data}},
                            'worker_id': worker_id,
                            'assignment_id': assignment_id,
                            'status': 'completed',
                        }
                    )
            return fake_jsons
class TestCompileResults:
    """
    Test the analysis code for the model chat task.
    """

    # Dictionary of cases to test, as well as flags to use with those cases
    CASES = {'basic': '--problem-buckets None', 'with_personas_and_buckets': ''}

    @pytest.fixture(scope="module")
    def setup_teardown(self):
        """
        Call code to set up and tear down tests.

        Run this only once because we'll be running all analysis code before
        checking any results.

        Yields a dict keyed by f'{case}__...' holding the filtered stdout,
        expected-stdout path, and the contents of each generated results file;
        the test methods below only read from this dict.
        """
        outputs = {}
        for case, flag_string in self.CASES.items():
            # Paths
            analysis_samples_folder = os.path.join(
                os.path.dirname(os.path.abspath(__file__)), 'analysis_samples', case
            )
            analysis_outputs_folder = os.path.join(
                os.path.dirname(os.path.abspath(__file__)),
                'test_model_chat_analysis',
            )
            outputs[f'{case}__expected_stdout_path'] = os.path.join(
                analysis_outputs_folder, f'{case}__test_stdout.txt'
            )
            prefixes = ['results', 'worker_results']
            with testing_utils.tempdir() as tmpdir:
                # Run analysis
                with testing_utils.capture_output() as output:
                    arg_string = f"""--output-folder {tmpdir} {flag_string}"""
                    parser_ = TestModelChatResultsCompiler.setup_args()
                    args_ = parser_.parse_args(arg_string.split())
                    compiler = TestModelChatResultsCompiler(vars(args_))
                    compiler.results_folder = analysis_samples_folder
                    compiler.compile_and_save_results()
                    stdout = output.getvalue()
                # Define output structure
                filtered_stdout = '\n'.join(
                    [
                        line
                        for line in stdout.split('\n')
                        if not line.endswith('.csv')
                    ]
                )
                # Don't track lines that record where a file was saved to, because
                # filenames are timestamped
                outputs[f'{case}__stdout'] = filtered_stdout
                for prefix in prefixes:
                    # Exactly one timestamped file per prefix is expected in
                    # the temp output folder.
                    results_path = list(
                        glob.glob(os.path.join(tmpdir, f'{prefix}_*'))
                    )[0]
                    with open(results_path) as f:
                        outputs[f'{case}__{prefix}'] = f.read()
        yield outputs
        # All code after this will be run upon teardown

    def test_stdout(self, setup_teardown):
        """
        Check the output against what it should be.
        """
        outputs = setup_teardown
        for case in self.CASES.keys():
            check_stdout(
                actual_stdout=outputs[f'{case}__stdout'],
                expected_stdout_path=outputs[f'{case}__expected_stdout_path'],
            )

    def test_results_file(
        self, setup_teardown, file_regression: FileRegressionFixture
    ):
        """
        Check the results file against what it should be.

        We don't use DataFrameRegression fixture because the results might include
        non-numeric data.
        """
        for case in self.CASES.keys():
            prefix = f'{case}__results'
            outputs = setup_teardown
            file_regression.check(outputs[prefix], basename=prefix)

    def test_worker_results_file(
        self, setup_teardown, file_regression: FileRegressionFixture
    ):
        """
        Check the worker_results file against what it should be.

        We don't use DataFrameRegression fixture because the results might include
        non-numeric data.
        """
        for case in self.CASES.keys():
            prefix = f'{case}__worker_results'
            outputs = setup_teardown
            file_regression.check(outputs[prefix], basename=prefix)
except ImportError:
pass
| {
"content_hash": "04da2051ee2a517b26e4d27fd2126d6b",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 88,
"avg_line_length": 37.457317073170735,
"alnum_prop": 0.4956861468337946,
"repo_name": "facebookresearch/ParlAI",
"id": "d2d2a1c3a85a886eab6c0fb8ff0dab3983ab4da4",
"size": "6342",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/crowdsourcing/tasks/model_chat/test_model_chat_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
} |
from ._deepgram import Deepgram
| {
"content_hash": "c82e34df62ad7229299a9d992f2d912f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 32,
"alnum_prop": 0.8125,
"repo_name": "crdunwel/deepgram-python-wrapper",
"id": "3e2f101f2c02b72433fd50ee2ac203afb4ec4d25",
"size": "32",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepgram/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2289"
}
],
"symlink_target": ""
} |
"""
Rename status field values
Create Date: 2016-04-22 14:38:04.330718
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = "5599d1769f25"
down_revision = "33459bd8b70d"
TRANSLATION_TABLE = {
"Open": "Not Started",
"Finished": "Ready for Review",
"Final": "Completed"
}
TABLES = ["assessments", "requests"]
def upgrade():
    """Rename status values: widen the enum, translate rows, then narrow it."""
    for table in TABLES:
        # Step 1: extend the enum so both the legacy and the new status
        # names are simultaneously valid while rows are being rewritten.
        op.execute("""
ALTER TABLE {table} CHANGE status status
ENUM("Open","In Progress","Finished", "Verified", "Final", "Not Started",
"Ready for Review", "Completed") NOT NULL;""".format(
            table=table))
        # Step 2: rewrite each legacy value to its new name
        # (TRANSLATION_TABLE maps old name -> new name).
        for old_value, new_value in TRANSLATION_TABLE.items():
            op.execute("""
UPDATE {table} SET status="{new_value}" WHERE status="{old_value}";""".format(
                table=table,
                new_value=new_value,
                old_value=old_value
            ))
        # Step 3: shrink the enum down to only the new set of names.
        op.execute("""
ALTER TABLE {table} CHANGE status status
ENUM("Not Started", "In Progress", "Ready for Review", "Verified",
"Completed") NOT NULL;""".format(
            table=table))
def downgrade():
    """Revert status values to their pre-migration names.

    TRANSLATION_TABLE maps old name -> new name, so when downgrading it is
    applied in reverse: rows holding the *new* name are rewritten back to
    the *old* name.  (The previous code reused the names ``old_value`` /
    ``new_value`` and silently swapped them in the format() kwargs, which
    was easy to misread; the loop variables are now named for what they
    actually are.)
    """
    for table in TABLES:
        # Step 1: temporarily widen the enum so both new and legacy names
        # are valid while rows are being rewritten.
        op.execute("""
ALTER TABLE {table} CHANGE status status
ENUM("Not Started", "In Progress", "Ready for Review", "Verified",
"Completed", "Open", "Finished", "Final") NOT NULL;""".format(
            table=table))
        # Step 2: reverse translation -- restore the legacy value wherever
        # the upgraded value is currently stored.
        for legacy_value, upgraded_value in TRANSLATION_TABLE.items():
            op.execute("""
UPDATE {table} SET status="{new_value}" WHERE status="{old_value}";""".format(
                table=table,
                new_value=legacy_value,
                old_value=upgraded_value
            ))
        # Step 3: narrow the enum down to the legacy set of names only.
        op.execute("""
ALTER TABLE {table} CHANGE status status
ENUM("Open","In Progress","Finished", "Verified", "Final") NOT NULL;""".format(
            table=table))
| {
"content_hash": "3074e5d8b449805b243aaf571b237eff",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 27.953846153846154,
"alnum_prop": 0.6461199779856907,
"repo_name": "prasannav7/ggrc-core",
"id": "4f3dde67cf0f9f051661b9493261ae2fa464336b",
"size": "2059",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/ggrc/migrations/versions/20160422143804_5599d1769f25_rename_status_field_values.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "167445"
},
{
"name": "Cucumber",
"bytes": "139629"
},
{
"name": "HTML",
"bytes": "1098331"
},
{
"name": "JavaScript",
"bytes": "1447363"
},
{
"name": "Makefile",
"bytes": "6225"
},
{
"name": "Mako",
"bytes": "2559"
},
{
"name": "Python",
"bytes": "2370461"
},
{
"name": "Shell",
"bytes": "33089"
}
],
"symlink_target": ""
} |
class User:
    """Minimal in-memory user record for the RESTful example app."""

    def __init__(self, _id, username, password):
        # `_id` has a leading underscore so the parameter does not shadow
        # the builtin `id`; it is stored under the plain `id` attribute.
        self.id = _id
        self.username = username
        self.password = password
"content_hash": "2bc12a89240b8622a71aaa0ae6027439",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 48,
"avg_line_length": 29.6,
"alnum_prop": 0.581081081081081,
"repo_name": "ysabel31/Python",
"id": "17acc43870e95b70f043da89141fcf572457ad78",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask-04-RESTful/code/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2376"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "93466"
}
],
"symlink_target": ""
} |
import codecs
import lzma
import argparse
import sys
def main(wordmap, infile, outfile, wordsep):
    """Map every word of every input line through a word map.

    Args:
        wordmap: iterable of lines of the form "word mapped-form"; only the
            first whitespace-separated field is the key, the remainder of
            the line (which may itself contain spaces) is the mapped form.
        infile: iterable of input lines of whitespace-separated words.
        outfile: writable text file object receiving one output line per
            input line.
        wordsep: separator placed between mapped words on each output line.

    Each line is wrapped in sentence markers ("<s>" ... "</s>") if not
    already present, and words missing from the map are emitted as "<UNK>".

    Fix vs. the original: a blank input line used to raise IndexError on
    ``parts[0]``; it now yields just the sentence markers.
    """
    mapping = {p.split()[0]: p.strip().split(None, 1)[1] for p in wordmap}
    # Sentence markers always map to themselves.
    mapping["<s>"] = "<s>"
    mapping["</s>"] = "</s>"
    for line in infile:
        words = line.strip().split()
        if not words or words[0] != "<s>":
            words = ["<s>"] + words
        if words[-1] != "</s>":
            words.append("</s>")
        print(wordsep.join(mapping.get(w, "<UNK>") for w in words), file=outfile)
if __name__ == "__main__":
    # Command-line entry point.  infile/outfile are optional and default to
    # stdin/stdout, wrapped via codecs so the stream encoding is explicitly
    # UTF-8 regardless of the locale.
    parser = argparse.ArgumentParser()
    parser.add_argument('wordmap', type=argparse.FileType('r', encoding='utf-8'))
    parser.add_argument('infile', nargs='?', type=argparse.FileType('r', encoding='utf-8'), default=codecs.getreader('utf-8')(sys.stdin.buffer))
    parser.add_argument('outfile', nargs='?', type=argparse.FileType('w', encoding='utf-8'), default=codecs.getwriter('utf-8')(sys.stdout.buffer))
    # Separator between mapped tokens; defaults to a single space.
    parser.add_argument('wordsep', nargs='?', default=" ")
    args = parser.parse_args()
    main(args.wordmap, args.infile, args.outfile, args.wordsep)
| {
"content_hash": "60903c1e18ccdcf491879c81a06fff40",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 146,
"avg_line_length": 33.029411764705884,
"alnum_prop": 0.6073018699910953,
"repo_name": "psmit/kaldi-recipes",
"id": "1105f1089ada9a2a777a63aceafe86b2994ccb7b",
"size": "1146",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "common/matched_morph_approach_stage3.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "5556"
},
{
"name": "Python",
"bytes": "83019"
},
{
"name": "Shell",
"bytes": "367059"
}
],
"symlink_target": ""
} |
from pyspark.mllib.classification import NaiveBayes
from pyspark.mllib.regression import LabeledPoint
from numpy import array
from pyspark import SparkContext, SparkConf
import sys
import timeit
appName = "DDE"
# Setup Spark configuration
# NOTE(review): the master is hard-coded to "local", so this script always
# runs single-node regardless of any cluster configuration.
conf = SparkConf().setAppName(appName).setMaster("local")
sc = SparkContext(conf=conf)
def parsePoint(line):
    """Parse one CSV line into a LabeledPoint: label first, features after."""
    fields = [float(field) for field in line.split(',')]
    label, features = fields[0], fields[1:]
    return LabeledPoint(label, features)
start = timeit.timeit()
# Read the input data file
data = sc.textFile("../Data/"+sys.argv[1])
# Parse the data to construct a classification dataset
parsedData = data.map(parsePoint)
# Build the classification model
model = NaiveBayes.train(parsedData)
end = timeit.timeit()
print "Training Time = " + str(end-start)
# Evaluating the model on training data
start = timeit.timeit()
labelsAndPreds = parsedData.map(lambda p: (p.label, model.predict(p.features)))
trainErr = labelsAndPreds.filter(lambda (v, p): v != p).count() / float(parsedData.count())
end = timeit.timeit()
print("Training Error = " + str(trainErr))
print "Training Time = " + str(end-start)
| {
"content_hash": "71d61a1b7d94f886c32fa332bc6eac3d",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 91,
"avg_line_length": 25.431818181818183,
"alnum_prop": 0.7363717605004468,
"repo_name": "monkeyGoCrazy/cloudComputing",
"id": "0e478b7c9ec5b7af9cafdd1f8ed3192ac45d0775",
"size": "1119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/machinelearning/NaiveBayes.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "252351"
},
{
"name": "HTML",
"bytes": "3022149"
},
{
"name": "JavaScript",
"bytes": "8137376"
},
{
"name": "Python",
"bytes": "19318"
}
],
"symlink_target": ""
} |
import yaml
def format_assignment_yaml(content):
    """Render *content* as a block-style YAML document opened by '---'."""
    body = yaml.safe_dump(content, default_flow_style=False)
    return '---\n' + body
| {
"content_hash": "f36943c52577d1c31846a0d67732cd82",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 70,
"avg_line_length": 24.4,
"alnum_prop": 0.7131147540983607,
"repo_name": "StoDevX/cs251-toolkit",
"id": "18f4406086583b471a59b2f6a19b670433d2d1ea",
"size": "122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cs251tk/formatters/yaml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3299"
},
{
"name": "Dockerfile",
"bytes": "1946"
},
{
"name": "Makefile",
"bytes": "180"
},
{
"name": "Python",
"bytes": "132033"
},
{
"name": "Ruby",
"bytes": "1867"
},
{
"name": "Shell",
"bytes": "4723"
}
],
"symlink_target": ""
} |
"""Here is defined the EArray class."""
import numpy
from tables.utils import convert_to_np_atom2, SizeType
from tables.carray import CArray
from tables._past import previous_api, previous_api_property
# default version for EARRAY objects
# obversion = "1.0" # initial version
# obversion = "1.1" # support for complex datatypes
# obversion = "1.2" # This adds support for time datatypes.
# obversion = "1.3" # This adds support for enumerated datatypes.
obversion = "1.4" # Numeric and numarray flavors are gone.
class EArray(CArray):
    """This class represents extendable, homogeneous datasets in an HDF5 file.

    The main difference between an EArray and a CArray (see
    :ref:`CArrayClassDescr`), from which it inherits, is that the former
    can be enlarged along one of its dimensions, the *enlargeable
    dimension*. That means that the :attr:`Leaf.extdim` attribute (see
    :class:`Leaf`) of any EArray instance will always be non-negative.
    Multiple enlargeable dimensions might be supported in the future.

    New rows can be added to the end of an enlargeable array by using the
    :meth:`EArray.append` method.

    Parameters
    ----------
    parentnode
        The parent :class:`Group` object.

        .. versionchanged:: 3.0
           Renamed from *parentNode* to *parentnode*.

    name : str
        The name of this node in its parent group.
    atom
        An `Atom` instance representing the *type* and *shape*
        of the atomic objects to be saved.
    shape
        The shape of the new array. One (and only one) of
        the shape dimensions *must* be 0. The dimension being 0
        means that the resulting `EArray` object can be extended
        along it. Multiple enlargeable dimensions are not supported
        right now.
    title
        A description for this node (it sets the ``TITLE``
        HDF5 attribute on disk).
    filters
        An instance of the `Filters` class that provides information
        about the desired I/O filters to be applied during the life
        of this object.
    expectedrows
        A user estimate about the number of row elements that will
        be added to the growable dimension in the `EArray` node.
        If not provided, the default value is ``EXPECTED_ROWS_EARRAY``
        (see ``tables/parameters.py``). If you plan to create either
        a much smaller or a much bigger `EArray` try providing a guess;
        this will optimize the HDF5 B-Tree creation and management
        process time and the amount of memory used.
    chunkshape
        The shape of the data chunk to be read or written in a single
        HDF5 I/O operation. Filters are applied to those chunks of data.
        The dimensionality of `chunkshape` must be the same as that of
        `shape` (beware: no dimension should be 0 this time!).
        If ``None``, a sensible value is calculated based on the
        `expectedrows` parameter (which is recommended).
    byteorder
        The byteorder of the data *on disk*, specified as 'little' or
        'big'. If this is not specified, the byteorder is that of the
        platform.

    Examples
    --------
    See below a small example of the use of the `EArray` class. The
    code is available in ``examples/earray1.py``::

        import tables
        import numpy

        fileh = tables.open_file('earray1.h5', mode='w')
        a = tables.StringAtom(itemsize=8)

        # Use ``a`` as the object type for the enlargeable array.
        array_c = fileh.create_earray(fileh.root, 'array_c', a, (0,),
                                      \"Chars\")
        array_c.append(numpy.array(['a'*2, 'b'*4], dtype='S8'))
        array_c.append(numpy.array(['a'*6, 'b'*8, 'c'*10], dtype='S8'))

        # Read the string ``EArray`` we have created on disk.
        for s in array_c:
            print('array_c[%s] => %r' % (array_c.nrow, s))
        # Close the file.
        fileh.close()

    The output for the previous script is something like::

        array_c[0] => 'aa'
        array_c[1] => 'bbbb'
        array_c[2] => 'aaaaaa'
        array_c[3] => 'bbbbbbbb'
        array_c[4] => 'cccccccc'

    """

    # Class identifier.
    _c_classid = 'EARRAY'

    _c_classId = previous_api_property('_c_classid')

    # Special methods
    # ~~~~~~~~~~~~~~~
    def __init__(self, parentnode, name,
                 atom=None, shape=None, title="",
                 filters=None, expectedrows=None,
                 chunkshape=None, byteorder=None,
                 _log=True):

        # Specific of EArray
        if expectedrows is None:
            expectedrows = parentnode._v_file.params['EXPECTED_ROWS_EARRAY']
        self._v_expectedrows = expectedrows
        """The expected number of rows to be stored in the array."""

        # Call the parent (CArray) init code
        super(EArray, self).__init__(parentnode, name, atom, shape, title,
                                     filters, chunkshape, byteorder, _log)

    # Public and private methods
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~
    def _g_create(self):
        """Create a new array in file (specific part)."""

        # Pre-conditions and extdim computation
        zerodims = numpy.sum(numpy.array(self.shape) == 0)
        if zerodims > 0:
            if zerodims == 1:
                self.extdim = list(self.shape).index(0)
            else:
                raise NotImplementedError(
                    "Multiple enlargeable (0-)dimensions are not "
                    "supported.")
        else:
            raise ValueError(
                "When creating EArrays, you need to set one of "
                "the dimensions of the Atom instance to zero.")

        # Finish the common part of the creation process
        return self._g_create_common(self._v_expectedrows)

    def _check_shape_append(self, nparr):
        "Test that nparr shape is consistent with underlying EArray."

        # The arrays conforms self expandibility?
        myrank = len(self.shape)
        narank = len(nparr.shape) - len(self.atom.shape)
        if myrank != narank:
            raise ValueError(("the ranks of the appended object (%d) and the "
                              "``%s`` EArray (%d) differ")
                             % (narank, self._v_pathname, myrank))
        for i in range(myrank):
            if i != self.extdim and self.shape[i] != nparr.shape[i]:
                raise ValueError(("the shapes of the appended object and the "
                                  "``%s`` EArray differ in non-enlargeable "
                                  "dimension %d") % (self._v_pathname, i))

    _checkShapeAppend = previous_api(_check_shape_append)

    def append(self, sequence):
        """Add a sequence of data to the end of the dataset.

        The sequence must have the same type as the array; otherwise a
        TypeError is raised. In the same way, the dimensions of the
        sequence must conform to the shape of the array, that is, all
        dimensions must match, with the exception of the enlargeable
        dimension, which can be of any length (even 0!). If the shape
        of the sequence is invalid, a ValueError is raised.

        """

        self._g_check_open()
        self._v_file._check_writable()

        # Convert the sequence into a NumPy object
        nparr = convert_to_np_atom2(sequence, self.atom)
        # Check if it has a consistent shape with underlying EArray
        self._check_shape_append(nparr)
        # If the size of the nparr is zero, don't do anything else
        if nparr.size > 0:
            self._append(nparr)

    def _g_copy_with_stats(self, group, name, start, stop, step,
                           title, filters, chunkshape, _log, **kwargs):
        """Private part of Leaf.copy() for each kind of leaf."""

        (start, stop, step) = self._process_range_read(start, stop, step)
        # Build the new EArray object
        maindim = self.maindim
        shape = list(self.shape)
        shape[maindim] = 0
        # The number of final rows
        nrows = len(xrange(0, stop - start, step))
        # Build the new EArray object.  The local was previously named
        # ``object``, which shadowed the builtin; renamed for clarity.
        new_earray = EArray(
            group, name, atom=self.atom, shape=shape, title=title,
            filters=filters, expectedrows=nrows, chunkshape=chunkshape,
            _log=_log)
        # Now, fill the new earray with values from source
        nrowsinbuf = self.nrowsinbuf
        # The slices parameter for self.__getitem__
        slices = [slice(0, dim, 1) for dim in self.shape]
        # This is a hack to prevent doing unnecessary conversions
        # when copying buffers
        self._v_convert = False
        # Start the copy itself
        for start2 in xrange(start, stop, step * nrowsinbuf):
            # Save the records on disk
            stop2 = start2 + step * nrowsinbuf
            if stop2 > stop:
                stop2 = stop
            # Set the proper slice in the extensible dimension
            slices[maindim] = slice(start2, stop2, step)
            new_earray._append(self.__getitem__(tuple(slices)))
        # Active the conversion again (default)
        self._v_convert = True
        nbytes = numpy.prod(self.shape, dtype=SizeType) * self.atom.itemsize

        return (new_earray, nbytes)

    _g_copyWithStats = previous_api(_g_copy_with_stats)
## Local Variables:
## mode: python
## py-indent-offset: 4
## tab-width: 4
## fill-column: 72
## End:
| {
"content_hash": "313c58a32890cf12e6ea8ef9cff36838",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 78,
"avg_line_length": 37.963855421686745,
"alnum_prop": 0.6004443033957474,
"repo_name": "tp199911/PyTables",
"id": "82b6e987673d7139d145cf1bb1613477330a5c54",
"size": "9729",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tables/earray.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "865539"
},
{
"name": "C++",
"bytes": "97380"
},
{
"name": "CMake",
"bytes": "18981"
},
{
"name": "Gnuplot",
"bytes": "2104"
},
{
"name": "Makefile",
"bytes": "4159"
},
{
"name": "Objective-C",
"bytes": "31966"
},
{
"name": "Python",
"bytes": "3444753"
},
{
"name": "Shell",
"bytes": "18147"
}
],
"symlink_target": ""
} |
"""
Django settings for django-example project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a committed key is acceptable only because this is an
# example project.
SECRET_KEY = '_yqep=)2912$-ra55l^5!p*31i(0^3&7lcyb8_5vb9x#7l%cu+'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)

ROOT_URLCONF = 'settings.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'settings.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# A single on-disk SQLite database in the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/

STATIC_URL = '/static/'
| {
"content_hash": "116e669ee56cbf39cd26c8d1e8c269e3",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 71,
"avg_line_length": 25.980392156862745,
"alnum_prop": 0.6916981132075471,
"repo_name": "madron/django-example",
"id": "19903519cc05ef23a2f768d796540783b87335b3",
"size": "2650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Nginx",
"bytes": "1510"
},
{
"name": "Python",
"bytes": "4111"
},
{
"name": "Shell",
"bytes": "372"
}
],
"symlink_target": ""
} |
'''
Created on Apr 3, 2014
@author: tangliuxiang
'''
from SmaliEntry import SmaliEntry
class SmaliClass(SmaliEntry):
    '''
    Smali entry representing a class definition.
    '''

    def getSimpleString(self):
        '''Return a short "<type> <class name>" description of this entry.'''
        return "%s %s" % (self.getType(), self.getClassName())
| {
"content_hash": "66954da044ecbb3bf79dd807050833e9",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 62,
"avg_line_length": 16.6,
"alnum_prop": 0.6265060240963856,
"repo_name": "HackerTool/tools",
"id": "8188e6fb6badfd896579a2e32ad290ed0fa48c2b",
"size": "249",
"binary": false,
"copies": "3",
"ref": "refs/heads/lollipop-5.0",
"path": "smaliparser/SmaliClass.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "16339"
},
{
"name": "Python",
"bytes": "366775"
},
{
"name": "Shell",
"bytes": "86584"
}
],
"symlink_target": ""
} |
import pygame
from core import addVectors
from math import pi as Pi, sin, cos, hypot, atan2
from surface import screen, width, height
#-- PARTICLE DEFINITION ---------------------------->>>
class Particle:
    '''
    A screen-bound circle with a polar velocity (speed + angle) subject to
    gravity, drag, and elastic collisions with the window edges.

    All you need to is a coordinate tuple and a radius to instantiate.
    e.g. particle = Particl((50, 50), 10)
    '''
    def __init__(self, (x, y), radius):
        # Position and size.
        self.x = x
        self.y = y
        self.radius = radius
        # Rendering attributes (outline color and stroke width).
        self.color = (255, 0, 0)
        self.thickness = 2
        # Velocity in polar form: magnitude plus heading in radians.
        self.speed = 1
        self.angle = 0 #EAST
        # Collision boundaries: the rim of the circle (not its centre)
        # touches the screen edge.
        self.x_border = width - self.radius
        self.y_border = height - self.radius
        #-- constants --------------------------->>>
        # gravity is an (angle, magnitude) pair combined into the velocity
        # by addVectors each frame.  NOTE(review): the magnitude is negative
        # while the angle is 3*Pi/2 -- confirm addVectors' convention before
        # changing either number.
        self.gravity = (3*Pi/2, -0.4)
        self.drag = 0.9999
        self.elasticity = 0.5

    def move(self):
        '''
        A particle.angle of Pi/2, orients vertically at a given speed.
        Use the unit circle for all other motions:
            right => 0 rad.
            left => Pi rad.
            up => Pi/2 rad.
            down => 3 * Pi/2 rad. ...
            and all the angles betwixt
        '''
        # Advance the position along the current heading...
        self.x += cos(self.angle) * self.speed
        self.y += sin(self.angle) * self.speed
        # ...then fold gravity into the velocity (polar vector addition)
        # and decay the speed by the drag factor.
        (self.angle, self.speed) = addVectors((self.angle, self.speed), self.gravity)
        self.speed *= self.drag

    def bounce(self):
        '''
        The collision border is width|height - radius.
        Displacement beyond the border is e.g. dx = x - x_border.
        If position is beyond the border, displacement is positive...
        To accomplish the "bounce", we essentially subtract displacement from border.
        This effectively restores the position to the boundary.
        Also, the angle needs to be inverted.
        '''
        # Right edge: reflect position back inside, mirror the heading
        # horizontally, and lose energy per the elasticity factor.
        if self.x > self.x_border:
            self.x = 2 * (self.x_border) - self.x
            self.angle = Pi - self.angle
            self.speed *= self.elasticity
        # Left edge.
        elif self.x < self.radius:
            self.x = 2 * (self.radius) - self.x
            self.angle = Pi - self.angle
            self.speed *= self.elasticity
        # Bottom edge: mirror the heading vertically.
        if self.y > self.y_border:
            self.y = 2 * (self.y_border) - self.y
            self.angle = - self.angle
            self.speed *= self.elasticity
        # Top edge.
        elif self.y < self.radius:
            self.y = 2 * (self.radius) - self.y
            self.angle = - self.angle
            self.speed *= self.elasticity

    def display(self, screen):
        '''
        Renders a particle on a `screen`.
        '''
        # Coordinates are truncated to ints as required by pygame.draw.
        pygame.draw.circle(screen, self.color, (int(self.x), int(self.y)), self.radius, self.thickness)
| {
"content_hash": "cb36f6be115b5d334be8b2aeea0e7224",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 103,
"avg_line_length": 33.743589743589745,
"alnum_prop": 0.540273556231003,
"repo_name": "withtwoemms/pygame-explorations",
"id": "82c7110887f1572929f177ee2de731cd1bb57ced",
"size": "2632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "particle-physics/game_objects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28922"
}
],
"symlink_target": ""
} |
"""A model representing configuration config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from google.appengine.api import memcache
from google.appengine.ext import ndb
from loaner.web_app.backend.lib import utils
_CONFIG_NOT_FOUND_MSG = 'No such name "%s" exists in default configurations.'
class Config(ndb.Model):
    """Datastore model representing a config name.

    The default values are stored in the config_defaults.yaml file. A config
    name has an option to store a value of type string, integer, boolean, or
    list as its value. The same config name can not have a value of multiple
    types.

    Attributes:
      string_value: str, value for a given config name.
      integer_value: int, value for a given config name.
      bool_value: bool, value for a given config name.
      list_value: list, value for a given config name.
    """
    string_value = ndb.StringProperty()
    integer_value = ndb.IntegerProperty()
    bool_value = ndb.BooleanProperty()
    list_value = ndb.StringProperty(repeated=True)

    @classmethod
    def get(cls, name):
        """Checks memcache for name, if not available, check datastore.

        Args:
          name: str, name of config name.

        Returns:
          The config value from memcache, datastore, or config file.

        Raises:
          KeyError: An error occurred when name does not exist.
        """
        # FIX: compare against None explicitly -- falsy cached values
        # (False, 0, '', []) are legitimate config values and previously
        # caused a cache miss on every read.
        memcache_config = memcache.get(name)
        if memcache_config is not None:
            return memcache_config

        cached_config = None
        stored_config = cls.get_by_id(name, use_memcache=False)
        if stored_config:
            # At most one value property is populated per entity; use
            # `is not None` so stored falsy values (empty string, 0,
            # False) are not silently skipped.  Unset repeated properties
            # are empty lists, so truthiness is the right test for
            # list_value.
            if stored_config.string_value is not None:
                cached_config = stored_config.string_value
            elif stored_config.integer_value is not None:
                cached_config = stored_config.integer_value
            elif stored_config.bool_value is not None:
                cached_config = stored_config.bool_value
            elif stored_config.list_value:
                cached_config = stored_config.list_value

        # Conversion from use_asset_tags to device_identifier_mode.
        if name == 'device_identifier_mode' and not cached_config:
            if cls.get('use_asset_tags'):
                cached_config = DeviceIdentifierMode.BOTH_REQUIRED
                cls.set(name, cached_config)

        if cached_config is not None:
            memcache.set(name, cached_config)
            return cached_config

        # Fall back to the YAML defaults; persist the value so subsequent
        # reads are served from datastore/memcache.
        config_defaults = utils.load_config_from_yaml()
        if name in config_defaults:
            value = config_defaults[name]
            cls.set(name, value)
            return value
        # FIX: format the message (matches the raise in set()); previously
        # the template and name were passed as a two-element tuple.
        raise KeyError(_CONFIG_NOT_FOUND_MSG % name)

    @classmethod
    def set(cls, name, value, validate=True):
        """Stores values for a config name in memcache and datastore.

        Args:
          name: str, name of the config setting.
          value: str, int, bool, list value to set or change config setting.
          validate: bool, checks keys against config_defaults if enabled.

        Raises:
          KeyError: Error raised when name does not exist in config.py file.
        """
        if validate:
            config_defaults = utils.load_config_from_yaml()
            if name not in config_defaults:
                raise KeyError(_CONFIG_NOT_FOUND_MSG % name)
        # bool is tested before int because bool is a subclass of int; the
        # original `isinstance(value, bool) and isinstance(value, int)` was
        # redundant for the same reason.  Values of any other type are only
        # written to memcache, preserving the original behavior.
        if isinstance(value, six.string_types):
            stored_config = cls.get_or_insert(name)
            stored_config.string_value = value
            stored_config.put()
        elif isinstance(value, bool):
            stored_config = cls.get_or_insert(name)
            stored_config.bool_value = value
            stored_config.put()
        elif isinstance(value, int):
            stored_config = cls.get_or_insert(name)
            stored_config.integer_value = value
            stored_config.put()
        elif isinstance(value, list):
            stored_config = cls.get_or_insert(name)
            stored_config.list_value = value
            stored_config.put()
        memcache.set(name, value)
class DeviceIdentifierMode(object):
    """Constants defining supported means of identifying devices."""

    # Identify devices by asset tag only.
    ASSET_TAG = 'asset_tag'
    # Identify devices by serial number only.
    SERIAL_NUMBER = 'serial_number'
    # Require both an asset tag and a serial number.
    BOTH_REQUIRED = 'both_required'
| {
"content_hash": "5941ee6cc26f2675288b8f32ea714b74",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 79,
"avg_line_length": 33.94957983193277,
"alnum_prop": 0.6915841584158415,
"repo_name": "google/loaner",
"id": "8df1a27fb56245a5e1a955ae55a56c12b955f91f",
"size": "4637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "loaner/web_app/backend/models/config_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18441"
},
{
"name": "HTML",
"bytes": "95191"
},
{
"name": "JavaScript",
"bytes": "18378"
},
{
"name": "Python",
"bytes": "1039799"
},
{
"name": "Shell",
"bytes": "10190"
},
{
"name": "Starlark",
"bytes": "91293"
},
{
"name": "TypeScript",
"bytes": "851896"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import setup
setup(
    name='pyddp',
    version='0.4.0',
    description='Distributed Data Protocol (DDP)',
    author='Peter Sutton',
    author_email='foxxy@foxdogstudios.com',
    url='https://github.com/foxdog-studios/pyddp',
    license='Apache License v2.0',
    # Packages are listed explicitly (no find_packages()), so only these
    # subpackages ship in the distribution.
    packages=[
        'ddp',
        'ddp.messages',
        'ddp.messages.client',
        'ddp.messages.server',
        'ddp.pod',
        'ddp.pubsub',
    ],
    package_data={
        '': ['LICENSE.txt'],
    },
    # Runtime dependencies; trollius (the asyncio backport) is pinned.
    install_requires=[
        'autobahn[asyncio,accelerate]',
        'trollius==0.3',
    ],
    test_suite='tests',
    classifiers=[
        'Development Status :: 1 - Planning',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| {
"content_hash": "9f4d270b9d5d0b05391507b98fb11dc2",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 71,
"avg_line_length": 26.58139534883721,
"alnum_prop": 0.5870516185476815,
"repo_name": "foxdog-studios/pyddp",
"id": "ea55b4825398d32f3987043514a61a85df1d9421",
"size": "1765",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "109"
},
{
"name": "Python",
"bytes": "182982"
},
{
"name": "Shell",
"bytes": "3484"
}
],
"symlink_target": ""
} |
import shelve
import random
import textwrap
from time import sleep, time
from collections import namedtuple
from bearlibterminal import terminal as term
import spaceship.strings as strings
from spaceship.scene import Scene
from spaceship.screen_functions import *
import spaceship.tools as tools
import spaceship.action as actions
from spaceship.gamelog import GameLogger
from spaceship.classes.item import Item, Potion, sort
from spaceship.classes.wild import wilderness
from spaceship.classes.player import Player
from spaceship.classes.world import World
from spaceship.classes.city import City
from spaceship.classes.cave import Cave
from spaceship.classes.point import Point, spaces
enter_maps = {
'cave': Cave,
'city': City,
'wild': wilderness
}
# Player map-height markers: WORLD (0) is the overworld map, LOCAL (1) the
# first in-map level (heights > 1 are sublevels -- see change_map_location).
# GLOBAL (-1) is presumably a zoomed-out view -- TODO confirm against callers.
class Level: GLOBAL, WORLD, LOCAL = -1, 0, 1
# Enumerates map kinds; passed to actions.go_up_stairs (see action_stairs_up).
class Maps: CITY, CAVE, WILD, WORLD = range(4)
class Start(Scene):
def __init__(self, sid='start_game'):
super().__init__(scene_id=sid)
def reset(self):
self.log = []
self.turns = 0
self.world = None
self.player = None
self.location = None
self.waiting = False
self.turn_inc = False
self.do_action = False
self.map_change = False
self.gamelog = None
self.reset_size()
def setup(self):
# self.reset()
self.actions = {
0: {
# '@': self.draw_screens,
'q': self.draw_screens,
'v': self.draw_screens,
'S': self.action_save,
'>': self.action_enter_map, # Done
},
1: {
# '@': self.draw_screens,
'q': self.draw_screens,
'v': self.draw_screens,
'<': self.action_stairs_up, # done
'>': self.action_stairs_down, # done
'c': self.action_door_close, # done
'o': self.action_door_open, # done
',': self.action_item_pickup,
'd': self.action_item_drop,
'u': self.action_item_use,
'e': self.action_item_eat,
't': self.action_unit_talk,
'T': self.actions_ranged,
'z': self.actions_ranged,
'l': self.actions_ranged,
's': self.draw_spells,
},
}
# player screen variables
self.row_spacing = 2 if self.height > 25 else 1
self.screen_col, self.screen_row = 1, 3
self.status_col, self.status_row = 0, 1
self.main_x, self.main_y = 14, 0
self.log_width, self.log_height = self.width, 2
self.main_width = self.width - self.main_x
self.main_height = self.height - self.log_height
def run(self):
# self.reset_size()
self.reset()
if isinstance(self.ret['kwargs']['player'], Player):
self.player = self.ret['kwargs']['player']
self.world = self.ret['kwargs']['world']
self.turns = self.ret['kwargs']['turns']
else:
player = self.ret['kwargs']['player']
name = self.ret['kwargs']['name']
world_map_path = strings.IMG_PATH + "worldmap.png"
self.player = Player(player, name)
self.location = self.world = World(map_name="Calabston",
map_link=world_map_path)
self.location.units_add([self.player])
self.gamelog = GameLogger(
width=self.main_width,
screenlinelimit=3 if self.height <= 25 else 4,
footer="_" + self.player.name + "_" + self.player.job)
while self.proceed and self.player.is_alive:
# term.clear()
self.draw()
term.refresh()
# self.location.process()
self.process_units()
if self.map_change:
self.change_map_location()
# term.delay (1000 // 75)
self.proceed = True
if hasattr(self, 'ret'):
return self.ret
def get_input(self):
'''Handles input reading and parsing unrecognized keys'''
key = term.read()
if key in (term.TK_SHIFT, term.TK_CONTROL, term.TK_ALT):
# skip any non-action keys
key = term.read()
shifted = term.state(term.TK_SHIFT)
return key, shifted
def key_input(self):
'''Handles keyboard input and keypress transformation
Cases:
Skips any pre-inputs and non-read keys
if key read is a close command -- close early or set proceed to false
Elif key is valid command return the command from command list with continue
Else return invalid action tuple with continue value
'''
action = tuple(None for _ in range(4))
key, shifted = self.get_input()
if key in (term.TK_ESCAPE, term.TK_CLOSE):
# exit command -- maybe need a back to menu screen?
if shifted:
exit('Early Exit')
elif self.player.height >= Level.WORLD:
self.draw_log('Escape key disabled.')
else:
self.ret['scene'] = 'main_menu'
self.proceed = False
try:
# discover the command and set as current action
action = actions.commands_player[(key, shifted)]
except KeyError:
pass
return action
def process_units(self):
# for unit in self.location.units:
# self.unit = unit
# self.process_turn()
if isinstance(self.location, World):
for unit in self.location.units:
self.unit = unit
self.process_turn()
elif len(list(self.location.units)) == 1:
self.unit = self.player
self.process_turn()
else:
# for unit in self.location.units:
# self.unit = unit
# self.process_turn()
for self.unit in self.location.units:
self.unit.energy.gain()
if any(u.energy.ready() for u in self.location.units):
# for self.unit in self.location.units:
# if self.unit == self.player and not self.unit.energy.ready():
# break
for self.unit in self.location.units:
for _ in range(self.unit.energy.turns):
# print(self.unit.unit_id, self.unit.race)
self.unit.energy.reset()
self.process_turn()
# else:
# for unit in self.location.units:
# unit.energy.gain()
# for unit in self.location.units:
# self.unit = unit
# for turn in range(self.unit.energy.turns):
# self.unit.energy.reset()
# self.process_turn()
if self.turn_inc:
self.turns += 1
self.turn_inc = False
# if isinstance(self.location, World):
# self.process_turn_player()
# else:
# for unit in self.location.units:
# self.unit = unit
# if self.unit.energy.ready():
# self.unit.energy.reset()
# self.process_turn_unit()
# else:
# self.unit.energy.gain()
# if self.player.energy.ready():
# self.process_turn_player()
# else:
# self.player.energy.gain()
def process_turn(self):
if isinstance(self.unit, Player):
self.process_turn_player()
else:
self.process_turn_unit()
def process(self):
action = None
if isinstance(self.unit, player):
action = self.key_input()
else:
if hasattr(self.unit, 'acts'):
units = {u.local: u for u in self.location.units if u != self.units}
positions = self.location.fov_calc_blocks(*self.unit.local,
self.unit.sight_norm)
tiles = {position: self.location.square(*position) for position in positions}
action = self.unit.acts(units, tiles)
if not self.player.is_alive:
self.proceed = False
return
if not self.proceed:
return
def process_turn_player(self):
action = self.key_input()
if not self.proceed:
return
self.process_handler(*action)
def process_turn_unit(self):
if hasattr(self.unit, 'acts'):
units = {u.local: u for u in self.location.units if u != self.unit}
# subset of positions possible that can be seen due to sight
positions = self.location.fov_calc_blocks(*self.unit.local,
self.unit.sight_norm)
# units = {self.location.unit_at(*position).position:
# self.location.unit_at(*position)
# for position in positions if self.location.unit_at(*position) }
# if self.player not in units.values():
# return
# tile info for every position that can be seen
tiles = { position: self.location.square(*position)
for position in positions }
# get the action variable after putting in
# all the info into unit.act
action = self.unit.acts(units, tiles)
if action:
self.process_handler_unit(*action)
if not self.player.is_alive:
self.process = False
return
def process_handler_unit(self, x, y, k, key):
if k is not None:
pass
elif all(z is not None for z in [x, y]):
self.process_movement_unit(x, y)
else:
return 'skipped-turn'
    def process_handler(self, x, y, k, key,):
        '''Checks actions linearly by case:
        (1) processes non-movement action
            Actions not in movement groupings
        (2) processes movement action
            Keyboard shortcut action grouping
        (3) If action template is empty:
            Return skip-action command
        '''
        if k is not None:
            self.process_action(k)
        elif all(z is not None for z in [x, y]):
            self.process_movement(x, y)
        else:
            return 'skipped-turn'
def process_action(self, action):
'''
Player class should return a height method and position method
Position method should return position based on height
So height would be independent and position would be depenedent on height
'''
try:
divided = self.actions[max(0, min(self.player.height, 1))]
try:
self.unit, self.location, self.log = divided[action]()
except TypeError:
divided[action](action)
except KeyError:
raise
invalid_command = strings.cmd_invalid.format(action)
self.log.append(invalid_command)
except KeyError:
raise
invalid_command = strings.cmd_invalid.format(action)
self.log.append(invalid_command)
def process_move_unit_to_empty(self, x, y):
occupied_player = self.player.local == (x, y)
occupied_unit = self.location.occupied(x, y)
if not occupied_player and not occupied_unit:
self.unit.local = Point(x, y)
return None
return occupied_player, occupied_unit
def process_movement_unit(self, x, y):
if (x, y) != (0, 0):
point = self.unit.local + (x, y)
if self.location.walkable(*point):
unit_bools = self.process_move_unit_to_empty(*point)
if not unit_bools:
return
else:
occupied_player, occupied_unit = unit_bools
if occupied_unit:
unit = self.location.unit_at(*point)
else:
unit = self.player
player = isinstance(unit, Player)
safe_location = isinstance(self.location, City)
friendly_unit = unit.friendly(self.unit)
if safe_location or friendly_unit:
self.unit.displace(unit)
unit.energy.reset()
# log = strings.movement_unit_displace.format(
# self.unit.__class__.__name__,
# unit.race if not player else "you")
# self.log.append(log)
else:
chance = self.unit.calculate_attack_chance()
if chance == 0:
pass
log = "The {} tries attacking {} but misses".format(
self.unit.race,
"you" if player else "the " + unit.race)
self.log.append(log)
else:
damage = self.unit.calculate_attack_damage()
if chance == 2:
damage *= 2
unit.cur_hp -= damage
# if self.location.check_light_level(*point):
# term.layer(1)
# term.puts(
# *(point + (self.main_x, self.main_y)),
# '[c=red]*[/c]')
# term.refresh()
# term.clear_area(*(point + (self.main_x, self.main_y)),
# 1, 1)
# term.layer(0)
log = "The {} attacks {} for {} damage".format(
self.unit.race,
"you" if player else "the " + unit.race,
damage)
self.log.append(log)
if not unit.is_alive:
log = "The {} has killed {}!".format(
self.unit.race,
"you" if player else "the " + unit.race)
self.log.append(log)
# if player:
# exit('DEAD')
item = None
if hasattr(unit, 'drops'):
item = unit.drops()
if item:
self.location.item_add(*unit.local, item)
self.log.append("The {} has dropped {}".format(
unit.race, item.name))
self.location.unit_remove(unit)
def process_movement(self, x, y):
# moving on world map
if self.player.height == Level.WORLD:
if (x, y) == (0, 0):
self.log.append(strings.movement_wait_world)
else:
point = self.player.world + (x, y)
if self.location.walkable(*point):
self.player.save_location()
self.player.travel(x, y)
else:
self.log.append(strings.movement_move_error)
# moving on local map
else:
if (x, y) == (0, 0):
self.log.append(strings.movement_wait_local)
self.turn_inc = True
else:
point = self.player.local + (x, y)
if self.location.walkable(*point):
if not self.location.occupied(*point):
self.player.move(x, y)
msg_chance = random.randint(0, 5)
if self.location.items_at(*point) and msg_chance:
item_message = random.randint(
a=0,
b=len(strings.pass_by_item) - 1)
self.log.append(strings.pass_by_item[item_message])
else:
unit = self.location.unit_at(*point)
safe_location = isinstance(self.location, City)
friendly_unit = unit.friendly(self.unit)
if safe_location or friendly_unit:
self.player.displace(unit)
unit.energy.reset()
switch = "You switch places with the {}.".format(
unit.__class__.__name__.lower())
self.log.append(switch)
else:
chance = self.player.calculate_attack_chance()
if chance == 0:
log = f"You miss the {unit.race}."
self.log.append(log)
else:
damage = self.player.calculate_attack_damage()
# if chance returns crit ie. a value of 2
# then multiply damage by 2
if chance == 2:
damage *= 2
unit.cur_hp -= damage
# if self.location.check_light_level(*point):
# term.puts(
# *(point + (self.main_x, self.main_y)),
# '[c=red]*[/c]')
# term.refresh()
log = "You{}attack the {} for {} damage. ".format(
" crit and " if chance == 2 else " ",
unit.race,
damage)
self.log.append(log)
if unit.cur_hp < 1:
self.log.append("You have killed the {}! ".format(unit.race))
self.log.append("You gain {} exp.".format(unit.xp))
self.player.gain_exp(unit.xp)
if self.player.check_exp():
log = "You level up. You are now level {}.".format(self.player.level)
self.log.append(log)
self.log.append("You feel much stronger now.")
item = unit.drops()
if item:
self.location.item_add(*unit.local, item)
self.log.append("The {} has dropped {}.".format(unit.race,
item.name))
self.location.unit_remove(unit)
else:
log += "The {} has {} health left.".format(
unit.race,
max(0, unit.cur_hp))
self.log.append(log)
self.turn_inc = True
else:
'''
moving outside of current map
moving on top level (one level below world) then
try to move into the new map if it is not water
'''
if self.location.out_of_bounds(*point):
self.log.append(strings.movement_move_oob)
else:
ch = self.location.square(*point).char
if ch == "~":
log = strings.movement_move_swim
else:
log = strings.movement_move_block.format(
strings.movement_move_chars[ch])
self.log.append(log)
def draw(self):
self.draw_log(refresh=False)
self.draw_status()
self.draw_world()
def draw_log(self, log=None, color="white", refresh=False):
self.gamelog.draw(log if log else " ".join(self.log), color, refresh)
if self.log:
self.log = []
def draw_world(self):
'''Handles drawing of world features and map'''
point = self.player.local if self.player.height >= 1 \
else self.player.world
g0 = isinstance(self.location, World)
if g0:
sight = self.player.sight_world
elif isinstance(self.location, City):
sight = self.player.sight_city
else:
sight = self.player.sight_norm
self.location.fov_calc([(*point, sight)])
for (x, y), string in self.location.output(*point):
term.puts(x=x + self.main_x,
y=y + self.main_y,
s=string)
# sets the location name at the bottom of the status bar
if g0:
location = None
if self.player.world in self.world.cities.keys():
location = self.world.cities[self.player.world]
elif self.player.world in self.world.dungeons.keys():
location = self.world.dungeons[self.player.world]
if location:
self.draw_screen_header(location)
def draw_screen_header(self, header=None):
'''Draws a line across the top of the window'''
term.bkcolor('dark grey')
term.puts(self.main_x, 0, ' ' * (self.width - self.main_x))
term.bkcolor('black')
if header:
string = surround(header)
term.puts(center(string, self.width + self.main_x), 0, string)
def draw_status(self):
'''Handles player status screen'''
term.puts(self.status_col,
self.status_row,
strings.status.format(*self.player.status(), self.turns))
def clear_status(self):
term.clear_area(0, 0, self.width - self.main_width, self.height)
def draw_profile(self):
'''Handles player profile screen'''
# draws header border
term.puts(i, 0, '#' * self.width)
term.puts(center('profile ', self.width), 0, ' Profile ')
for colnum, column in enumerate(list(self.player.profile())):
term.puts(x=self.screen_col + (20 * colnum),
y=self.screen_row,
s=column)
def clear_main(self):
term.clear_area(self.main_x, 0,
self.width - self.main_x,
self.height - self.log_height - 1)
def clear_item_box(self):
term.clear_area(
self.width // 2,
2,
self.width // 2,
self.height - 5)
def draw_equipment(self):
'''Handles equipment screen'''
self.draw_screen_header('Equipment')
equipment = list(self.player.equipment)
for index, (part, item) in enumerate(equipment):
if item:
item = item.__str__()
else:
item = ""
body = ". {:<10}: ".format(
part.replace("eq_", "").replace("_", " "))
letter = chr(ord('a') + index)
term.puts(
x=self.screen_col + self.main_x,
y=self.screen_row + index * self.row_spacing,
s=letter + body + item)
def draw_item_grouping(self, group, items, index, item):
'''Handler to determine if we need to draw items or not'''
if items:
if group not in 'food others'.split():
group = list(group + 's')
group[0] = group[0].upper()
group = "".join(group)
term.puts(x=self.screen_col + self.main_x,
y=self.screen_row + index * self.row_spacing,
s=" __" + group + "__")
index += 1
for i in items:
letter = chr(ord('a') + item) + ". "
term.puts(x=self.screen_col + self.main_x,
y=self.screen_row + index * self.row_spacing,
s=letter + i.__str__())
index += 1
item += 1
return index + 1, item
return index, item
def draw_inventory(self, items, index, row, string=strings.cmd_inv_none):
'''Handles drawing of the inventory screen along with the specific
groupings of each item type and their modification effects
'''
self.draw_screen_header('Inventory')
if not items:
string_pos = center(string, self.main_width)
term.puts(string_pos + self.main_x, 3, string)
else:
for group, items in list(sort(items).items()):
index, row = self.draw_item_grouping(group, items, index, row)
return index, row
def draw_pickup(self, items, index, row):
'''Handles drawing of the items located on the ground along with
specific groupings of each item type and their modification effects
'''
self.draw_screen_header('Pickup Items')
for group, items in sort(items).items():
index, row = self.draw_item_grouping(group, items, index, row)
return index, row
def draw_screen_log(self, log):
strings = wrap(log, self.main_width)
for index, string in enumerate(strings):
term.puts(x=center(string, self.main_width) + self.main_x,
y=self.height + index - 5,
s=string)
def clear_screen_log(self):
term.clear_area(self.main_x, self.height - 5, self.main_width, 2)
def draw_item_border(self):
term.bkcolor('dark grey')
for y in (2, self.height - 5):
term.puts(
x=self.width // 2 + 1,
y=y,
s=' ' * (self.width // 2 - 2))
for x in (self.width // 2 + 1, self.width - 2):
for y in range(self.height - 7):
term.puts(x, y + 2, ' ')
term.bkcolor('black')
def draw_spells(self):
spells = [
('Fireball', "Cast a fireball at target area and deal aoe damage?"),
('Frost Bolt', "Cast a freezing missile at target and hit all enemies in its path?"),
('Lightning', "Cast a lightning bolt at target area and deal aoe damage?"),
]
log = ""
selected = None
update_status = False
spell_index = 0
self.clear_main()
self.draw_screen_header('Spells')
self.clear_screen_log()
self.draw_screen_log("What would you like to do?")
while True:
if log:
self.clear_screen_log()
self.draw_screen_log(log)
log = ""
for index, spell in enumerate(spells):
letter = chr(ord('a') + index) + '. '
term.puts(
x=self.screen_col + self.main_x,
y=self.screen_row + index * self.row_spacing,
s=letter + spell[0]
)
term.refresh()
code = term.read()
if code == term.TK_ESCAPE:
break
elif term.TK_A <= code < term.TK_A + len(spells):
selected = code - term.TK_A
log = spells[selected][1]
elif selected is not None and code in (term.TK_Y, term.TK_ENTER, selected + term.TK_A):
self.log.append("You cast {}".format(spells[selected][0]))
break
def draw_screens(self, key):
def unequip_item(code):
nonlocal log, update_status
try:
string = strings.cmd_unequip_confirm.format(item.name)
except AttributeError:
string = strings.cmd_unequip_confirm.format(item)
self.clear_screen_log()
self.draw_screen_log(string)
term.refresh()
confirm = term.read()
if confirm in (term.TK_Y, term.TK_ENTER, code):
self.player.unequip(part)
try:
log = strings.cmd_unequip.format(item.name)
except AttributeError:
log = strings.cmd_unequip.format(item)
update_status = True
else:
self.clear_screen_log()
def equip_item(part):
nonlocal log, update_status
if part in ('hand_left', 'hand_right'):
if self.player.holding_two_handed_weapon():
_, li = next(self.player.item_on('hand_left'))
_, ri = next(self.player.item_on('hand_right'))
print(li, ri)
log = strings.cmd_equip_two_hand.format(
part,
li if li else ri,
'left hand' if li else 'right hand',)
return
items = list(self.player.inventory_type(part))
if not items:
log = strings.cmd_equip_none
else:
while True:
self.clear_main()
self.draw_inventory(items)
self.clear_screen_log()
if log:
self.draw_screen_log(log)
else:
self.draw_screen_log(strings.cmd_equip_query)
term.refresh()
selection = term.read()
if selection == term.TK_ESCAPE:
self.clear_screen_log()
break
elif term.TK_A <= selection < term.TK_A + len(items):
item = items[selection - term.TK_A]
self.player.equip(part, item)
log = strings.cmd_equip.format(item)
update_status = True
break
else:
log = strings.cmd_equip_invalid
log = ""
current_screen = key
update_status = False
items = [item for _, inv in self.player.inventory for item in inv]
self.clear_main()
if current_screen == "q":
self.draw_equipment()
self.draw_screen_log(strings.cmd_switch_eq)
elif current_screen == "v":
self.draw_inventory(items)
self.clear_screen_log()
self.draw_screen_log(strings.cmd_switch_iv)
while True:
if log:
self.draw_log(log)
log = ""
if update_status:
self.clear_main()
self.clear_status()
self.draw_status()
update_status = False
if current_screen == "q":
self.draw_equipment()
self.draw_screen_log(strings.cmd_switch_eq)
elif current_screen == "v":
items = [item for _, inv in self.player.inventory
for item in inv]
self.draw_inventory(items)
self.draw_screen_log(strings.cmd_switch_iv)
term.refresh()
code = term.read()
if code in (term.TK_ESCAPE,):
break
elif code == term.TK_Q:
current_screen = 'q'
update_status = True
# log = ""
elif code == term.TK_V:
# V goes to inventory screen
current_screen = 'v'
update_status = True
elif current_screen == 'q' and term.TK_A <= code <= term.TK_L:
part, item = next(self.player.item_on(code - 4))
if item:
unequip_item(code)
else:
equip_item(part)
elif current_screen == 'v':
if term.TK_A <= code < term.TK_A + len(items):
item = items[code - 4]
self.clear_screen_log()
self.draw_screen_log(strings.cmd_inv_funcs.format(item))
# while True:
# term.refresh()
# selection = term.read()
# if selection == term.TK_U:
# if self.player.item_eat(item):
# self.draw_screen_log(
# strings.cmd_use_item.format(item))
# break
# else:
# self.draw_screen_log(
# strings.cmd_cannot_use_item)
#
# elif selection == term.TK_D:
# elif selection == term.TK_E:
# if self.player.item_eat(item):
# self.draw_screen_log(
# strings.cmd_eat_item.format(item))
# break
# else:
# self.draw_screen_log(
# strings.cmd_cannot_eat_item)
#
# elif selection == term.TK_Q:
# self.draw_screen_log('Equip it using the other way')
# else:
# self.draw_screen_log("Invalid instruction.")
# term.refresh()
# break
#
# self.draw_screen_log(strings.cmd_switch_iv)
else:
log = ""
# elif code == term.TK_2 and term.state(term.TK_SHIFT):
# @ goes to profile
# current_screen = '@'
# elif code == term.TK_UP:
# if current_range > 0: current_range -= 1
# elif code == term.TK_DOWN:
# if current_range < 10: current_range += 1
# term.clear()
def action_save(self):
'''Save command: checks save folder and saves the current game objects
to file before going back to the main menu
'''
self.draw_log(strings.cmd_save)
# User input -- confirm selection
code = term.read()
if code != term.TK_Y:
return
if not os.path.isdir('saves'):
os.makedirs('saves')
self.draw_log(strings.cmd_save_folder)
# prepare strings for file writing
# hash used for same name / different character saves
desc = self.player.desc
file_path = './spaceship/saves/{}'.format(desc)
with shelve.open(file_path, 'n') as save_file:
save_file['desc'] = desc
save_file['player'] = self.player
save_file['world'] = self.world
save_file['turns'] = self.turns
self.proceed = False
self.ret['scene'] = 'main_menu'
self.reset()
def change_map_location(self):
'''Given player coordinates, determines player current location and
adds player object to that location
'''
if self.player.height == Level.WORLD:
self.location = self.world
else:
self.location = self.world.location(*self.player.world)
if self.player.height > 1:
for i in range(self.player.height - 1):
self.location = self.location.sublevel
self.map_change = False
    def action_enter_map(self):
        # '>' on the world map: descend into the city/cave/wild under the player
        return actions.enter_map(self.player, self.location, enter_maps)
    def action_stairs_down(self):
        # '>' on a local map: go down one level (caves)
        return actions.go_down_stairs(self.player, self.location, Cave)
    def action_stairs_up(self):
        # '<' on a local map: go up one level (Maps tells actions which kind)
        return actions.go_up_stairs(self.player, self.location, Maps)
    def action_door_close(self):
        # 'c': close an adjacent door
        return actions.close_door(self.player, self.location, self.draw_log)
    def action_door_open(self):
        # 'o': open an adjacent door
        return actions.open_door(self.player, self.location, self.draw_log)
    def action_unit_talk(self):
        # 't': talk to an adjacent unit
        return actions.converse(self.player, self.location, self.draw_log)
    def action_item_pickup(self):
        # ',': pick up item(s) on the player's square via the pickup screen
        return actions.pickup_item(self.player,
                                   self.location,
                                   self.clear_main,
                                   self.draw_pickup,
                                   self.draw_log)
    def action_item_drop(self):
        # 'd': drop an inventory item via the inventory screen
        return actions.drop_item(self.player,
                                 self.location,
                                 self.clear_main,
                                 self.draw_inventory,
                                 self.draw_log,
                                 self.draw_screen_log)
def action_item_use(self):
return actions.use_item(self.player,
self.area,
self.clear_main,
self.draw_inventory,
self.draw_screen_log,
self.draw_status)
# def use_item(item):
# nonlocal log
# self.player.item_use(item)
# if hasattr(item, 'name'):
# item_name = item.name
# else:
# item_name = item
# log = strings.cmd_use_item.format(item_name)
# log = ""
# items = list(self.player.inventory_prop('use'))
# while True:
# self.clear_main()
# self.draw_inventory(items, strings.cmd_use_none)
# if items:
# self.draw_screen_log(strings.cmd_use_query)
# else:
# self.draw_screen_log(strings.cmd_use_none)
# if log:
# self.draw_screen_log(log)
# log = ""
# term.refresh()
# code = term.read()
# if code == term.TK_ESCAPE:
# break
# elif term.TK_A <= code < term.TK_A + len(items):
# use_item(items[code - 4])
# items = list(self.player.inventory_prop('use'))
# self.draw_status()
def action_item_eat(self):
def eat_item(item):
nonlocal log
self.player.item_eat(item)
if hasattr(item, 'name'):
item_name = item.name
else:
item_name = item
log = strings.cmd_eat_item.format(item_name)
log = ""
items = list(self.player.inventory_prop('eat'))
while True:
self.clear_main()
self.draw_inventory(items, strings.cmd_use_none)
if items:
self.draw_screen_log(strings.cmd_eat_query)
else:
self.draw_screen_log(strings.cmd_eat_none)
if log:
self.draw_log(log)
log = ""
term.refresh()
code = term.read()
if code == term.TK_ESCAPE:
break
elif term.TK_A <= code < term.TK_A + len(items):
eat_item(items[code - 4])
items = list(self.player.inventory_prop('eat'))
self.draw_status()
def actions_ranged(self, key):
def look():
nonlocal position, code, shifted
action = actions.commands_player[(code, shifted)]
new_pos = position + (action.x, action.y)
in_bounds = self.player.local.distance(new_pos) < sight
lighted = self.location.check_light_level(*new_pos) > 0
char, color = '', 'white'
if in_bounds and lighted:
if position == self.player.local:
char, color = '@', 'white'
else:
unit = self.location.unit_at(*position)
if unit:
char, color = unit.character, unit.foreground
square = self.location.square(*position)
if not char and square.items:
item = square.items[-1]
char, color = item.char, item.color
if not char and not square.items:
char, color = square.char, square.color
term.clear_area(*(position + (self.main_x, self.main_y)), 1, 1)
position = new_pos
def throw():
nonlocal position
points = tools.bresenhams(self.player.local, position)
term.layer(1)
for point in points:
translate = Point(self.main_x, self.main_y) + point
symbol = term.pick(*translate)
color = term.pick_color(*translate)
term.composition(False)
term.puts(*translate, "[c=red]/[/c]")
term.composition(True)
term.refresh()
term.clear_area(*translate, 1, 1)
term.refresh()
def zap():
nonlocal position
points = tools.bresenhams(self.player.local, position)
for point in points[1:]:
unit = self.location.unit_at(*point)
if unit:
unit.cur_hp -= 10
self.log.append(f"You zap the {unit.race} with lightning.")
if not unit.is_alive:
item = unit.drops()
self.location.unit_remove(unit)
self.log.append(f"The {unit.race} dies from shock.")
if item:
self.location.item_add(*point, item)
item_name = item.name if hasattr(item, "name") else item
self.log.append(f"The {unit.race} drops {item_name}.")
translate = Point(self.main_x, self.main_y) + point
term.puts(*translate, "[c=yellow]-[/c]")
term.refresh()
code, shifted = None, None
position, color, char = self.player.local, 'white', 'x'
proceed = True
if key == "T":
color = "red"
elif key == "z":
color = "yellow"
if (self.location, City):
sight = self.player.sight_city
else:
sight = self.player.sight_norm
term.layer(1)
# term.composition(False)
while proceed:
term.puts(*(position + (self.main_x, self.main_y)),
f'[c={color}]x[/c]')
term.refresh()
code = term.read()
shifted = term.state(term.TK_SHIFT)
if code == term.TK_ESCAPE:
proceed = False
elif term.TK_RIGHT <= code <= term.TK_UP:
look()
elif term.TK_KP_1 <= code <= term.TK_KP_9:
look()
elif code == term.TK_ENTER:
if key == "l":
self.draw_log("You look at the spot")
elif key == "T":
throw()
self.draw_log("You throw something")
proceed = False
else:
zap()
proceed = False
elif code == term.TK_T and key == "T":
throw()
self.draw_log("You throw something")
proceed = False
elif code == term.TK_Z and key == "z":
zap()
self.draw_log("You zap something")
proceed = False
# term.composition(True)
term.clear_area(self.main_x,
self.main_y,
self.width - self.main_x,
self.height - self.main_y)
term.layer(0)
print(self.log)
if __name__ == "__main__":
    # Manual smoke test: run character creation, then start the game scene.
    # NOTE(review): a relative import under __main__ only works when this
    # module is launched with `python -m` from the package -- confirm.
    from .make import Create
    term.open()
    c = Create()
    ret = c.run()
    ret['kwargs']['name'] = 'grey'
    print(ret)
    s = Start()
    s.add_args(**ret['kwargs'])
    s.run()
"content_hash": "251be27750b0478ab51561dfc5c51507",
"timestamp": "",
"source": "github",
"line_count": 1252,
"max_line_length": 109,
"avg_line_length": 36.3370607028754,
"alnum_prop": 0.4568514529388491,
"repo_name": "whitegreyblack/Spaceship",
"id": "b158cb30e708401ab0b90507dc0cab654e542745",
"size": "45494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spaceship/game.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1872"
},
{
"name": "Python",
"bytes": "398867"
}
],
"symlink_target": ""
} |
# Demo: draw a parametric line in a Cartesian coordinate system with mayavi.
from mayavi import mlab
from BDSpace.Coordinates import Cartesian
from BDSpace.Curve.Parametric import Line
import BDSpaceVis as Visual
# world coordinate system the line is defined in
coordinate_system = Cartesian()
fig = mlab.figure('CS demo', bgcolor=(0, 0, 0))
# parametric line with direction coefficients a=1, b=2, c=0 over t in [0, 1]
line = Line(name='Line', coordinate_system=coordinate_system, a=1, b=2, c=0, start=0, stop=1)
# red curve; thickness=None lets the view pick a default
line_visual = Visual.CurveView(fig, line, color=(1, 0, 0), thickness=None)
line_visual.draw()
# demonstrate updating the thickness after the initial draw
line_visual.set_thickness(line_visual.thickness / 2)
mlab.show()
| {
"content_hash": "6e99757f2f9ec92440273d74d9a8af6d",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 93,
"avg_line_length": 36.23076923076923,
"alnum_prop": 0.7494692144373672,
"repo_name": "bond-anton/Space",
"id": "3c11a12bbe1b989121b542cd98d8c5f872c9729e",
"size": "471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/07_lines_demo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "72731"
}
],
"symlink_target": ""
} |
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """
    def setUp(self):
        # reference file to compare against; drawing extents differ between
        # writers, so <xdr:ext> elements are excluded from the comparison
        self.set_filename('chart_blank05.xlsx')
        self.ignore_elements = {'xl/drawings/drawing1.xml': ['<xdr:ext']}
    def test_create_file(self):
        """Test the worksheet properties of an XlsxWriter chartsheet file."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chartsheet = workbook.add_chartsheet()
        chart = workbook.add_chart({'type': 'line'})
        # fixed axis ids so the generated XML matches the Excel reference
        chart.axis_ids = [57619968, 57621504]
        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]
        worksheet.write_column('A1', data[0])
        worksheet.write_column('B1', data[1])
        worksheet.write_column('C1', data[2])
        chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
        chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
        chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
        # the behavior under test: blank cells rendered as spanned gaps
        chart.show_blanks_as('span')
        chartsheet.set_chart(chart)
        workbook.close()
        self.assertExcelEqual()
| {
"content_hash": "21eb4f709df87eb3cb2f98e5f41fc663",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 77,
"avg_line_length": 25.84,
"alnum_prop": 0.5820433436532507,
"repo_name": "jmcnamara/XlsxWriter",
"id": "344f183fa7125ecf8dea2f21162d41513c1756a7",
"size": "1505",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "xlsxwriter/test/comparison/test_chart_blank05.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7748"
},
{
"name": "Perl",
"bytes": "3503"
},
{
"name": "Python",
"bytes": "2807230"
},
{
"name": "Shell",
"bytes": "7964"
}
],
"symlink_target": ""
} |
"""
FVCproductions
Print your name out using the print function.
The print function is called through print().
You will also need quotation marks around your name.
"""
name = "FVCproductions"
major = "Computer Science"
print (name, major) | {
"content_hash": "0ac6f1d4569776f2584aa37523dad161",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 53,
"avg_line_length": 15.75,
"alnum_prop": 0.7222222222222222,
"repo_name": "fvcproductions/BITS",
"id": "15550f67746147ab38a09c1259df4f9c548128e1",
"size": "252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/1.3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7861"
},
{
"name": "C++",
"bytes": "5703"
},
{
"name": "HTML",
"bytes": "42158"
},
{
"name": "Java",
"bytes": "18953"
},
{
"name": "JavaScript",
"bytes": "1921"
},
{
"name": "PHP",
"bytes": "6079"
},
{
"name": "Python",
"bytes": "14721"
}
],
"symlink_target": ""
} |
import argparse
import base64
import json
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
from keras.models import load_model
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
# Fix error with Keras and TensorFlow
# (works around a removed tf.python.control_flow_ops attribute that older
# Keras versions still reference -- TODO confirm still needed)
import tensorflow as tf
tf.python.control_flow_ops = tf
# Module-level state shared by the socketio handlers below.
sio = socketio.Server()
app = Flask(__name__)
model = None  # assigned in __main__ after load_model()
prev_image_array = None
@sio.on('telemetry')
def telemetry(sid, data):
    """Per-frame handler: decode the camera image, predict steering, reply."""
    # The current steering angle of the car
    steering_angle = data["steering_angle"]
    # The current throttle of the car
    throttle = data["throttle"]
    # The current speed of the car
    speed = data["speed"]
    # NOTE(review): the three telemetry values above are read but unused
    # (steering_angle/throttle are overwritten below, speed is never used).
    # The current image from the center camera of the car
    imgString = data["image"]
    image = Image.open(BytesIO(base64.b64decode(imgString)))
    # Drop the top 60 rows (presumably sky/horizon crop -- TODO confirm),
    # then add a leading batch axis for the model.
    image_array = np.asarray(image)[60:,:,:]
    transformed_image_array = image_array[None, :, :, :]
    # This model currently assumes that the features of the model are just the images. Feel free to change this.
    steering_angle = float(model.predict(transformed_image_array, batch_size=1))
    # The driving model currently just outputs a constant throttle. Feel free to edit this.
    throttle = 0.2
    print(steering_angle, throttle)
    send_control(steering_angle, throttle)
@sio.on('connect')
def connect(sid, environ):
    """On simulator connect, log the session id and send neutral controls."""
    print("connect ", sid)
    send_control(0, 0)
def send_control(steering_angle, throttle):
    """Broadcast a 'steer' event carrying the control values as strings."""
    payload = {
        'steering_angle': str(steering_angle),
        'throttle': str(throttle),
    }
    sio.emit("steer", data=payload, skip_sid=True)
# Entry point: load the trained Keras model and serve the control loop.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Remote Driving')
    parser.add_argument('model', type=str,
                        help='Path to model definition h5. Model should be on the same path.')
    args = parser.parse_args()
    model = load_model(args.model)
    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)
    # deploy as an eventlet WSGI server
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
| {
"content_hash": "c4c3b62c6d95997c70ccb002efc96c52",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 112,
"avg_line_length": 29.7027027027027,
"alnum_prop": 0.7056414922656961,
"repo_name": "brianz/udacity-sdc-p3",
"id": "a80f21ee84d5257b5b013f5509de92f0ac807157",
"size": "2198",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "248"
},
{
"name": "Python",
"bytes": "8762"
}
],
"symlink_target": ""
} |
"""
flask_security.views
~~~~~~~~~~~~~~~~~~~~
Flask-Security views module
:copyright: (c) 2012 by Matt Wright.
:license: MIT, see LICENSE for more details.
"""
from flask import Blueprint, after_this_request, current_app, redirect, request
from flask_login import current_user
from werkzeug.local import LocalProxy
from .changeable import change_user_password
from .confirmable import confirm_email_token_status, confirm_user, \
send_confirmation_instructions
from .decorators import anonymous_user_required, login_required
from .recoverable import reset_password_token_status, \
send_reset_password_instructions, update_password
from .registerable import register_user
from .utils import config_value, do_flash, get_message, \
get_post_login_redirect, get_post_logout_redirect, \
get_post_register_redirect, get_url, login_user, logout_user, \
slash_url_suffix
from .utils import url_for_security as url_for
# Convenient references
# LocalProxy defers resolution to request time, so this module can be
# imported before the Security extension is initialized on the app.
_security = LocalProxy(lambda: current_app.extensions['security'])
_datastore = LocalProxy(lambda: _security.datastore)
def _commit(response=None):
    """Flush pending datastore changes and pass the response through.

    Registered via after_this_request so writes commit once the response
    is ready.
    """
    _datastore.commit()
    return response
def _ctx(endpoint):
    """Return extra template context registered for the given endpoint."""
    return _security._run_ctx_processor(endpoint)
@anonymous_user_required
def login():
    """View function for login view"""

    form = _security.login_form(request.form)

    # Invalid (or GET) request: just render the login page again.
    if not form.validate_on_submit():
        return _security.render_template(config_value('LOGIN_USER_TEMPLATE'),
                                         login_user_form=form,
                                         **_ctx('login'))

    login_user(form.user)
    after_this_request(_commit)
    return redirect(get_post_login_redirect(form.next.data))
def logout():
    """Log the current user out (if any) and redirect to the post-logout view."""

    user_is_logged_in = current_user.is_authenticated
    if user_is_logged_in:
        logout_user()

    return redirect(get_post_logout_redirect())
@anonymous_user_required
def register():
    """View function which handles a registration request."""

    # When confirmation is enabled the registration form also carries the
    # confirmation fields.
    form_class = (_security.confirm_register_form
                  if _security.confirmable
                  else _security.register_form)
    form = form_class(request.form)

    if form.validate_on_submit():
        user = register_user(**form.to_dict())
        form.user = user

        # Log the user in right away unless confirmation must happen first.
        if not _security.confirmable or _security.login_without_confirmation:
            after_this_request(_commit)
            login_user(user)

        if not request.is_json:
            if 'next' in form:
                redirect_url = get_post_register_redirect(form.next.data)
            else:
                redirect_url = get_post_register_redirect()
            return redirect(redirect_url)

    return _security.render_template(config_value('REGISTER_USER_TEMPLATE'),
                                     register_user_form=form,
                                     **_ctx('register'))
def send_confirmation():
    """View function which sends confirmation instructions."""

    form = _security.send_confirmation_form()

    if form.validate_on_submit():
        send_confirmation_instructions(form.user)
        do_flash(*get_message('CONFIRMATION_REQUEST', email=form.user.email))

    return _security.render_template(
        config_value('SEND_CONFIRMATION_TEMPLATE'),
        send_confirmation_form=form,
        **_ctx('send_confirmation')
    )
def confirm_email(token):
    """View function which handles an email confirmation request."""
    expired, invalid, user = confirm_email_token_status(token)
    # A token that resolves to no user is treated the same as a bad token.
    if not user or invalid:
        invalid = True
        do_flash(*get_message('INVALID_CONFIRMATION_TOKEN'))
    already_confirmed = user is not None and user.confirmed_at is not None
    # Expired but not yet confirmed: re-send instructions before bouncing.
    if expired and not already_confirmed:
        send_confirmation_instructions(user)
        do_flash(*get_message('CONFIRMATION_EXPIRED', email=user.email,
                              within=_security.confirm_email_within))
    if invalid or (expired and not already_confirmed):
        return redirect(get_url(_security.confirm_error_view) or
                        url_for('send_confirmation'))
    # Confirming a link for a different account switches the session to it.
    if user != current_user:
        logout_user()
        login_user(user)
    if confirm_user(user):
        after_this_request(_commit)
        msg = 'EMAIL_CONFIRMED'
    else:
        msg = 'ALREADY_CONFIRMED'
    do_flash(*get_message(msg))
    return redirect(get_url(_security.post_confirm_view) or
                    get_url(_security.post_login_view))
@anonymous_user_required
def forgot_password():
    """View function that handles a forgotten password request."""

    form = _security.forgot_password_form()

    if form.validate_on_submit():
        send_reset_password_instructions(form.user)
        do_flash(*get_message('PASSWORD_RESET_REQUEST', email=form.user.email))

    return _security.render_template(config_value('FORGOT_PASSWORD_TEMPLATE'),
                                     forgot_password_form=form,
                                     **_ctx('forgot_password'))
@anonymous_user_required
def reset_password(token):
    """View function that handles a reset password request."""
    expired, invalid, user = reset_password_token_status(token)
    # A token that resolves to no user is treated the same as a bad token.
    if not user or invalid:
        invalid = True
        do_flash(*get_message('INVALID_RESET_PASSWORD_TOKEN'))
    if expired:
        # Token timed out: issue a fresh one so the user can retry.
        send_reset_password_instructions(user)
        do_flash(*get_message('PASSWORD_RESET_EXPIRED', email=user.email,
                              within=_security.reset_password_within))
    if invalid or expired:
        return redirect(url_for('forgot_password'))
    form = _security.reset_password_form()
    form.user = user
    if form.validate_on_submit():
        after_this_request(_commit)
        update_password(user, form.password.data)
        do_flash(*get_message('PASSWORD_RESET'))
        # Log the user straight in with the freshly set password.
        login_user(user)
        return redirect(get_url(_security.post_reset_view) or
                        get_url(_security.post_login_view))
    return _security.render_template(
        config_value('RESET_PASSWORD_TEMPLATE'),
        reset_password_form=form,
        reset_password_token=token,
        **_ctx('reset_password')
    )
@login_required
def change_password():
    """View function which handles a change password request."""

    form = _security.change_password_form()

    if form.validate_on_submit():
        after_this_request(_commit)
        change_user_password(current_user._get_current_object(),
                             form.new_password.data)
        do_flash(*get_message('PASSWORD_CHANGE'))
        return redirect(get_url(_security.post_change_view) or
                        get_url(_security.post_login_view))

    return _security.render_template(
        config_value('CHANGE_PASSWORD_TEMPLATE'),
        change_password_form=form,
        **_ctx('change_password')
    )
def create_blueprint(state, import_name):
    """Creates the security extension blueprint.

    Login/logout are always routed; register, password-recovery,
    password-change and confirmation routes are added only when the
    corresponding feature flag on *state* is enabled.
    """
    bp = Blueprint(state.blueprint_name, import_name,
                   url_prefix=state.url_prefix,
                   subdomain=state.subdomain,
                   template_folder='templates')
    bp.route(state.logout_url, endpoint='logout')(logout)
    bp.route(state.login_url,
             methods=['GET', 'POST'],
             endpoint='login')(login)
    if state.registerable:
        bp.route(state.register_url,
                 methods=['GET', 'POST'],
                 endpoint='register')(register)
    if state.recoverable:
        bp.route(state.reset_url,
                 methods=['GET', 'POST'],
                 endpoint='forgot_password')(forgot_password)
        bp.route(state.reset_url + slash_url_suffix(state.reset_url,
                                                    '<token>'),
                 methods=['GET', 'POST'],
                 endpoint='reset_password')(reset_password)
    if state.changeable:
        bp.route(state.change_url,
                 methods=['GET', 'POST'],
                 endpoint='change_password')(change_password)
    if state.confirmable:
        bp.route(state.confirm_url,
                 methods=['GET', 'POST'],
                 endpoint='send_confirmation')(send_confirmation)
        bp.route(state.confirm_url + slash_url_suffix(state.confirm_url,
                                                      '<token>'),
                 methods=['GET', 'POST'],
                 endpoint='confirm_email')(confirm_email)
    return bp
| {
"content_hash": "83322fa545b51848f22d90e195b55f38",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 79,
"avg_line_length": 31.14801444043321,
"alnum_prop": 0.613467779323134,
"repo_name": "inveniosoftware/flask-security-fork",
"id": "6ba8ada5f7ba789f052c0239cc35cca0ebfea968",
"size": "8652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask_security/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7385"
},
{
"name": "Python",
"bytes": "148805"
},
{
"name": "Shell",
"bytes": "475"
}
],
"symlink_target": ""
} |
import unittest
from conans.test.utils.tools import TestClient
class BasicTest(unittest.TestCase):
    """Smoke test: running the client with no arguments prints command help."""

    def help_test(self):
        client = TestClient()
        client.run("")
        self.assertIn('Conan commands. Type $conan "command" -h', client.user_io.out)
| {
"content_hash": "537b5e2930cc89eaf769f21aaaab3e30",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 84,
"avg_line_length": 26.3,
"alnum_prop": 0.6768060836501901,
"repo_name": "mropert/conan",
"id": "79f06fae072132960dc0df461295e8362a522193",
"size": "263",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/command/help_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "653"
},
{
"name": "Python",
"bytes": "1898890"
},
{
"name": "Shell",
"bytes": "1342"
}
],
"symlink_target": ""
} |
import sys, math, re, argparse
from xml.dom import minidom
from multiprocessing import Process, Queue
from graph import *
import shapely.ops
from shapely.geometry import LineString
import svg.path
import kdtree
# The hardware driver is mandatory; without it the script cannot run.
try:
    import silhouette
    units = silhouette.units
except ImportError:
    # NOTE(review): the message says "Warning" but the script exits -- this
    # is effectively a fatal error.
    sys.stderr.write("Warning: no silhouette module available\n")
    sys.exit(1)
# SVG user units: 72 pixels per inch.
units.define("pixel = inch / 72 = px")
def to_steps(thing):
    """Convert pixel coordinates to cutter step coordinates.

    Accepts a single numeric (x, y) pair, or a nested sequence of pairs
    which is converted element-wise.  Note the nested case returns the
    result of map(), which is lazy on Python 3 -- callers wrap it in
    list() where needed.
    """
    if type(thing) in (tuple, list) and len(thing) == 2 and type(thing[0]) in (int, float):
        (x, y) = thing
        x *= units["pixel"]
        y *= units["pixel"]
        # flip x
        # (mirrors horizontally across a 12 inch bed -- TODO confirm bed width)
        x = (12 * units["inch"]) - x
        x = x.to("steps").magnitude
        y = y.to("steps").magnitude
        return (x, y)
    return map(to_steps, thing)
def draw_rect(cutter, **kw):
    """Cut an axis-aligned rectangle given SVG-style x/y/width/height attributes."""
    left = float(kw["x"])
    top = float(kw["y"])
    right = left + float(kw["width"])
    bottom = top + float(kw["height"])
    # Move to the starting corner, then trace the remaining corners and close.
    cutter.position = to_steps((left, top))
    cutter.draw(to_steps([(right, top), (right, bottom), (left, bottom), (left, top)]))
def walk_graph(graph, node):
    """Depth-first walk over unvisited edges starting from *node*.

    Each traversed edge has its "visited" flag set.  When the walk has to
    backtrack, the retraced nodes are appended to the path only if the walk
    subsequently continues along a new edge, so trailing backtracks are
    dropped.  Returns the list of nodes in visiting order.
    """
    path = [node]
    stack = [node]
    backtrack = []
    while stack:
        current = stack[-1]
        unvisited = [nbr for nbr in graph[current] if not graph[current][nbr]["visited"]]
        if not unvisited:
            # Dead end: retreat one level and remember where we came back to.
            stack.pop()
            if stack:
                backtrack.append(stack[-1])
            continue
        nxt = unvisited[0]
        graph[current][nxt]["visited"] = True
        # Flush any pending backtrack steps into the path before advancing.
        if backtrack:
            path += backtrack
            backtrack = []
        path.append(nxt)
        stack.append(nxt)
    return path
def build_path_commands(tree, graph):
    """Yield lists of nodes (strokes) to draw, greedily chaining nearby points.

    Repeatedly picks the nearest unvisited point to the current pen
    position via the kd-tree, extends it into a stroke with walk_graph(),
    and starts a new stroke whenever the jump distance is large.
    """
    cursor = (0, 0)
    next_node = tree.search_nn(cursor)
    nodes = []
    culled = set()
    while next_node:
        (next_point, distance) = next_node
        next_point = next_point.data
        # search_nn's distance is treated as squared here (sqrt taken) --
        # TODO confirm against the kdtree library in use.
        distance = math.sqrt(distance)
        tree = tree.remove(next_point)
        culled.add(next_point)
        # A jump longer than 16 steps starts a fresh stroke.
        if nodes and distance > 16:
            yield nodes
            nodes = []
        nodes += walk_graph(graph, next_point)
        for node in nodes:
            if node in culled:
                continue
            # remove() may return a falsy tree; keep the previous one then.
            tree = tree.remove(node) or tree
            culled.add(node)
        next_node = tree.search_nn(nodes[-1])
    if nodes:
        yield nodes
def graph_lines(lines):
    """Build a BidirectedGraph whose nodes are coordinates and whose edges
    connect consecutive points of each line.

    Accepts a single LineString or an iterable of them.
    """
    graph = BidirectedGraph()
    if isinstance(lines, LineString):
        lines = [lines]
    for line in lines:
        last_coord = None
        for coord in line.coords:
            if coord not in graph:
                graph.add_node(coord)
            if last_coord:
                # One dict per edge: presumably both directions share the
                # same "visited" flag -- TODO confirm in BidirectedGraph.
                val = {"visited": False}
                graph.connect(coord, last_coord, val)
            last_coord = coord
    return graph
def simplify_path(path):
    """Turn an SVG path 'd' string of straight segments into draw commands.

    Raises NameError if the path contains anything other than Line segments.
    Returns the generator from build_path_commands().
    """
    lines = svg.path.parse_path(path)
    coords = [lines[0].start]
    for line in lines:
        if type(line) != svg.path.Line:
            raise NameError('The SVG file contains a path with crap: {}.'.format(type(line)))
        coords.append(line.end)
    # svg.path yields complex numbers; convert to (x, y) pixel tuples,
    # then to machine steps.
    coords = [(c.real, c.imag) for c in coords]
    lines = to_steps(coords)
    lines = [list(lines)]
    # Merge collinear/connected segments into LineStrings.
    result = shapely.ops.linemerge(lines)
    print("building graph")
    graph = graph_lines(result)
    print("building kdtree")
    tree = kdtree.create(list(graph.keys()))
    return build_path_commands(tree, graph)
def produce_paths(svgfn, path_queue):
    """Parse the SVG file and feed drawable paths and rects into the queue.

    A final "done" sentinel tells the consumer to stop.
    """
    doc = minidom.parse(open(svgfn))

    for path_node in doc.getElementsByTagName("path"):
        for command in simplify_path(path_node.getAttribute('d')):
            path_queue.put(command)

    for rect_node in doc.getElementsByTagName("rect"):
        path_queue.put(dict(rect_node.attributes.items()))

    path_queue.put("done")
def draw_svg(worker, path_queue, connect_kws=None):
    """Consume commands from the queue and drive the cutter until "done".

    Dict items are rectangles, anything else is a list of step coordinates.
    The cutter is always homed on exit, even on error.
    """
    if connect_kws is None: connect_kws = {}
    cutter = connect(**connect_kws)
    try:
        while 1:
            thing = path_queue.get()
            if thing == "done":
                break
            if type(thing) == dict:
                # Rects are traced three times -- presumably so the blade
                # fully separates the material. TODO confirm.
                for rpt in range(3):
                    draw_rect(cutter, **thing)
            else:
                cutter.position = thing[0]
                cutter.draw(thing)
        worker.join()
    finally:
        cutter.home()
def connect(**kw):
    """Connect to the Silhouette cutter and apply the standard cut settings."""
    cutter = silhouette.Silhouette(**kw)
    cutter.connect()
    # Each setting is echoed before it is applied, in this exact order.
    for setting, value in (("speed", 8),
                           ("pressure", 4),
                           ("media", 113),
                           ("offset", 0)):
        print(setting)
        setattr(cutter, setting, value)
    return cutter
if __name__ == "__main__":
path_queue = Queue()
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--output-file', help='Save commands to a file instead of sending to the cutter')
parser.add_argument('svg_img', metavar='SVG_IMG', help='Filename of the SVG image to be cut')
args = parser.parse_args()
worker = Process(target=produce_paths, args=(args.svg_img, path_queue))
worker.start()
if args.output_file:
with open(args.output_file, 'wb') as of:
draw_svg(worker, path_queue, connect_kws={'output_file': of})
else:
draw_svg(worker, path_queue)
| {
"content_hash": "5328470233e6214ad609fc69283d139d",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 105,
"avg_line_length": 30.056497175141242,
"alnum_prop": 0.5714285714285714,
"repo_name": "pklaus/silhouette",
"id": "91f84e7a22581febbbe38a712e7a128f894c79f9",
"size": "5343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "printsvg.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31536"
}
],
"symlink_target": ""
} |
"""A helper class for reading in and dealing with tests expectations
for layout tests.
"""
from collections import defaultdict
import logging
import re
from webkitpy.layout_tests.models.test_configuration import TestConfigurationConverter
_log = logging.getLogger(__name__)
# Test expectation and specifier constants.
#
# FIXME: range() starts with 0 which makes expectation checks harder
# as PASS is 0.
(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, TIMEOUT, CRASH, LEAK, SKIP, WONTFIX,
 SLOW, REBASELINE, NEEDS_REBASELINE, NEEDS_MANUAL_REBASELINE, MISSING, FLAKY, NOW, NONE) = range(19)
# FIXME: Perhaps these two routines should be part of the Port instead?
BASELINE_SUFFIX_LIST = ('png', 'wav', 'txt')
# Recognized bug-identifier prefixes for expectation lines.
WEBKIT_BUG_PREFIX = 'webkit.org/b/'
CHROMIUM_BUG_PREFIX = 'crbug.com/'
V8_BUG_PREFIX = 'code.google.com/p/v8/issues/detail?id='
NAMED_BUG_PREFIX = 'Bug('
MISSING_KEYWORD = 'Missing'
NEEDS_REBASELINE_KEYWORD = 'NeedsRebaseline'
NEEDS_MANUAL_REBASELINE_KEYWORD = 'NeedsManualRebaseline'
# TODO(ojan): Don't add new platforms here. New mac platforms
# should use the version number directly instead of the english
# language names throughout the code.
MAC_VERSION_MAPPING = {
    'mac10.6': 'snowleopard',
    'mac10.7': 'lion',
    'mac10.8': 'mountainlion',
    'mac10.9': 'mavericks',
}
INVERTED_MAC_VERSION_MAPPING = {value: name for name, value in MAC_VERSION_MAPPING.items()}
class ParseError(Exception):
    """Raised when a TestExpectations file cannot be parsed.

    Carries the full list of warnings collected during parsing.
    """

    def __init__(self, warnings):
        super(ParseError, self).__init__()
        self.warnings = warnings

    def __str__(self):
        return '\n'.join(str(warning) for warning in self.warnings)

    def __repr__(self):
        return 'ParseError(warnings=%s)' % self.warnings
class TestExpectationParser(object):
    """Provides parsing facilities for lines in the test_expectation.txt file."""
    # FIXME: Rename these to *_KEYWORD as in MISSING_KEYWORD above, but make the case studdly-caps to match the actual file contents.
    REBASELINE_MODIFIER = 'rebaseline'
    NEEDS_REBASELINE_MODIFIER = 'needsrebaseline'
    NEEDS_MANUAL_REBASELINE_MODIFIER = 'needsmanualrebaseline'
    PASS_EXPECTATION = 'pass'
    SKIP_MODIFIER = 'skip'
    SLOW_MODIFIER = 'slow'
    WONTFIX_MODIFIER = 'wontfix'
    TIMEOUT_EXPECTATION = 'timeout'
    MISSING_BUG_WARNING = 'Test lacks BUG specifier.'
    def __init__(self, port, all_tests, is_lint_mode):
        """Args:
            port: the Port providing test lookup and configuration macros.
            all_tests: iterable of every known test name (may be falsy).
            is_lint_mode: when True, extra lint warnings are attached to lines.
        """
        self._port = port
        self._test_configuration_converter = TestConfigurationConverter(set(port.all_test_configurations()), port.configuration_specifier_macros())
        if all_tests:
            self._all_tests = set(all_tests)
        else:
            self._all_tests = set()
        self._is_lint_mode = is_lint_mode
    def parse(self, filename, expectations_string):
        """Tokenize and parse every line of *expectations_string*; returns TestExpectationLines."""
        expectation_lines = []
        line_number = 0
        for line in expectations_string.split("\n"):
            line_number += 1
            test_expectation = self._tokenize_line(filename, line, line_number)
            self._parse_line(test_expectation)
            expectation_lines.append(test_expectation)
        return expectation_lines
    def _create_expectation_line(self, test_name, expectations, file_name):
        """Build an unparsed TestExpectationLine for a synthetic source."""
        expectation_line = TestExpectationLine()
        expectation_line.original_string = test_name
        expectation_line.name = test_name
        expectation_line.filename = file_name
        expectation_line.expectations = expectations
        return expectation_line
    def expectation_line_for_test(self, test_name, expectations):
        """Return a parsed line representing bot-supplied expectations for one test."""
        expectation_line = self._create_expectation_line(test_name, expectations, '<Bot TestExpectations>')
        self._parse_line(expectation_line)
        return expectation_line
    def expectation_for_skipped_test(self, test_name):
        """Return a parsed SKIP/WONTFIX line for a test from a Skipped list."""
        if not self._port.test_exists(test_name):
            _log.warning('The following test %s from the Skipped list doesn\'t exist' % test_name)
        expectation_line = self._create_expectation_line(test_name, [TestExpectationParser.PASS_EXPECTATION], '<Skipped file>')
        expectation_line.expectations = [TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER]
        expectation_line.is_skipped_outside_expectations_file = True
        self._parse_line(expectation_line)
        return expectation_line
    def _parse_line(self, expectation_line):
        """Resolve the line's path, matching tests, specifiers and expectations in place."""
        if not expectation_line.name:
            return
        if not self._check_test_exists(expectation_line):
            return
        expectation_line.is_file = self._port.test_isfile(expectation_line.name)
        if expectation_line.is_file:
            expectation_line.path = expectation_line.name
        else:
            expectation_line.path = self._port.normalize_test_name(expectation_line.name)
        self._collect_matching_tests(expectation_line)
        self._parse_specifiers(expectation_line)
        self._parse_expectations(expectation_line)
    def _parse_specifier(self, specifier):
        """Lower-case a specifier and map mac version numbers to code names."""
        specifier = specifier.lower()
        return MAC_VERSION_MAPPING.get(specifier, specifier)
    def _parse_specifiers(self, expectation_line):
        """Convert raw specifier tokens into the set of matching test configurations."""
        if self._is_lint_mode:
            self._lint_line(expectation_line)
        parsed_specifiers = set([self._parse_specifier(specifier) for specifier in expectation_line.specifiers])
        expectation_line.matching_configurations = self._test_configuration_converter.to_config_set(parsed_specifiers, expectation_line.warnings)
    def _lint_line(self, expectation_line):
        """Attach lint-only warnings (missing bug, illegal rebaseline usage) to the line."""
        expectations = [expectation.lower() for expectation in expectation_line.expectations]
        if not expectation_line.bugs and self.WONTFIX_MODIFIER not in expectations:
            expectation_line.warnings.append(self.MISSING_BUG_WARNING)
        if self.REBASELINE_MODIFIER in expectations:
            expectation_line.warnings.append('REBASELINE should only be used for running rebaseline.py. Cannot be checked in.')
        if self.NEEDS_REBASELINE_MODIFIER in expectations or self.NEEDS_MANUAL_REBASELINE_MODIFIER in expectations:
            for test in expectation_line.matching_tests:
                if self._port.reference_files(test):
                    expectation_line.warnings.append('A reftest cannot be marked as NeedsRebaseline/NeedsManualRebaseline')
        specifiers = [specifier.lower() for specifier in expectation_line.specifiers]
        if (self.REBASELINE_MODIFIER in expectations or self.NEEDS_REBASELINE_MODIFIER in expectations) and ('debug' in specifiers or 'release' in specifiers):
            expectation_line.warnings.append('A test cannot be rebaselined for Debug/Release.')
    def _parse_expectations(self, expectation_line):
        """Convert raw expectation tokens into the set of expectation constants."""
        result = set()
        for part in expectation_line.expectations:
            expectation = TestExpectations.expectation_from_string(part)
            if expectation is None:  # Careful, PASS is currently 0.
                expectation_line.warnings.append('Unsupported expectation: %s' % part)
                continue
            result.add(expectation)
        expectation_line.parsed_expectations = result
    def _check_test_exists(self, expectation_line):
        """Return True if the named test (or its -disabled variant) exists."""
        # WebKit's way of skipping tests is to add a -disabled suffix.
        # So we should consider the path existing if the path or the
        # -disabled version exists.
        if not self._port.test_exists(expectation_line.name) and not self._port.test_exists(expectation_line.name + '-disabled'):
            # Log a warning here since you hit this case any
            # time you update TestExpectations without syncing
            # the LayoutTests directory
            expectation_line.warnings.append('Path does not exist.')
            return False
        return True
    def _collect_matching_tests(self, expectation_line):
        """Convert the test specification to an absolute, normalized
        path and make sure directories end with the OS path separator."""
        if not self._all_tests:
            expectation_line.matching_tests = [expectation_line.path]
            return
        if not expectation_line.is_file:
            # this is a test category, return all the tests of the category.
            expectation_line.matching_tests = [test for test in self._all_tests if test.startswith(expectation_line.path)]
            return
        # this is a test file, do a quick check if it's in the
        # full test suite.
        if expectation_line.path in self._all_tests:
            expectation_line.matching_tests.append(expectation_line.path)
    # FIXME: Update the original specifiers and remove this once the old syntax is gone.
    _configuration_tokens_list = [
        'Mac', 'Mac10.6', 'Mac10.7', 'Mac10.8', 'Mac10.9', 'Mac10.10', 'Retina',
        'Win', 'XP', 'Win7', 'Win10',
        'Linux', 'Linux32', 'Precise', 'Trusty',
        'Android',
        'Release',
        'Debug',
    ]
    _configuration_tokens = dict((token, token.upper()) for token in _configuration_tokens_list)
    _inverted_configuration_tokens = dict((value, name) for name, value in _configuration_tokens.iteritems())
    # FIXME: Update the original specifiers list and remove this once the old syntax is gone.
    _expectation_tokens = {
        'Crash': 'CRASH',
        'Leak': 'LEAK',
        'Failure': 'FAIL',
        MISSING_KEYWORD: 'MISSING',
        'Pass': 'PASS',
        'Rebaseline': 'REBASELINE',
        NEEDS_REBASELINE_KEYWORD: 'NEEDSREBASELINE',
        NEEDS_MANUAL_REBASELINE_KEYWORD: 'NEEDSMANUALREBASELINE',
        'Skip': 'SKIP',
        'Slow': 'SLOW',
        'Timeout': 'TIMEOUT',
        'WontFix': 'WONTFIX',
    }
    # Several internal tokens all display as 'Failure' when inverted.
    _inverted_expectation_tokens = dict([(value, name) for name, value in _expectation_tokens.iteritems()] +
                                        [('TEXT', 'Failure'), ('IMAGE', 'Failure'), ('IMAGE+TEXT', 'Failure'), ('AUDIO', 'Failure')])
    # FIXME: Seems like these should be classmethods on TestExpectationLine instead of TestExpectationParser.
    @classmethod
    def _tokenize_line(cls, filename, expectation_string, line_number):
        """Tokenizes a line from TestExpectations and returns an unparsed TestExpectationLine instance using the old format.
        The new format for a test expectation line is:
        [[bugs] [ "[" <configuration specifiers> "]" <name> [ "[" <expectations> "]" ["#" <comment>]
        Any errant whitespace is not preserved.
        """
        expectation_line = TestExpectationLine()
        expectation_line.original_string = expectation_string
        expectation_line.filename = filename
        expectation_line.line_numbers = str(line_number)
        comment_index = expectation_string.find("#")
        if comment_index == -1:
            comment_index = len(expectation_string)
        else:
            expectation_line.comment = expectation_string[comment_index + 1:]
        remaining_string = re.sub(r"\s+", " ", expectation_string[:comment_index].strip())
        if len(remaining_string) == 0:
            return expectation_line
        # special-case parsing this so that we fail immediately instead of treating this as a test name
        if remaining_string.startswith('//'):
            expectation_line.warnings = ['use "#" instead of "//" for comments']
            return expectation_line
        bugs = []
        specifiers = []
        name = None
        expectations = []
        warnings = []
        has_unrecognized_expectation = False
        tokens = remaining_string.split()
        # State machine: start -> configuration -> name -> name_found -> expectations -> done.
        state = 'start'
        for token in tokens:
            if (token.startswith(WEBKIT_BUG_PREFIX) or
                    token.startswith(CHROMIUM_BUG_PREFIX) or
                    token.startswith(V8_BUG_PREFIX) or
                    token.startswith(NAMED_BUG_PREFIX)):
                if state != 'start':
                    warnings.append('"%s" is not at the start of the line.' % token)
                    break
                if token.startswith(WEBKIT_BUG_PREFIX):
                    bugs.append(token)
                elif token.startswith(CHROMIUM_BUG_PREFIX):
                    bugs.append(token)
                elif token.startswith(V8_BUG_PREFIX):
                    bugs.append(token)
                else:
                    match = re.match('Bug\((\w+)\)$', token)
                    if not match:
                        warnings.append('unrecognized bug identifier "%s"' % token)
                        break
                    else:
                        bugs.append(token)
            elif token == '[':
                if state == 'start':
                    state = 'configuration'
                elif state == 'name_found':
                    state = 'expectations'
                else:
                    warnings.append('unexpected "["')
                    break
            elif token == ']':
                if state == 'configuration':
                    state = 'name'
                elif state == 'expectations':
                    state = 'done'
                else:
                    warnings.append('unexpected "]"')
                    break
            elif token in ('//', ':', '='):
                warnings.append('"%s" is not legal in the new TestExpectations syntax.' % token)
                break
            elif state == 'configuration':
                specifiers.append(cls._configuration_tokens.get(token, token))
            elif state == 'expectations':
                if token not in cls._expectation_tokens:
                    has_unrecognized_expectation = True
                    warnings.append('Unrecognized expectation "%s"' % token)
                else:
                    expectations.append(cls._expectation_tokens.get(token, token))
            elif state == 'name_found':
                warnings.append('expecting "[", "#", or end of line instead of "%s"' % token)
                break
            else:
                name = token
                state = 'name_found'
        if not warnings:
            if not name:
                warnings.append('Did not find a test name.')
            elif state not in ('name_found', 'done'):
                warnings.append('Missing a "]"')
        # WONTFIX implies SKIP, and neither may be combined with other expectations.
        if 'WONTFIX' in expectations and 'SKIP' not in expectations:
            expectations.append('SKIP')
        if ('SKIP' in expectations or 'WONTFIX' in expectations) and len(set(expectations) - set(['SKIP', 'WONTFIX'])):
            warnings.append('A test marked Skip or WontFix must not have other expectations.')
        if not expectations and not has_unrecognized_expectation:
            warnings.append('Missing expectations.')
        expectation_line.bugs = bugs
        expectation_line.specifiers = specifiers
        expectation_line.expectations = expectations
        expectation_line.name = name
        expectation_line.warnings = warnings
        return expectation_line
    @classmethod
    def _split_space_separated(cls, space_separated_string):
        """Splits a space-separated string into an array."""
        return [part.strip() for part in space_separated_string.strip().split(' ')]
class TestExpectationLine(object):
"""Represents a line in test expectations file."""
    def __init__(self):
        """Initializes a blank-line equivalent of an expectation."""
        self.original_string = None
        self.filename = None  # this is the path to the expectations file for this line
        self.line_numbers = "0"
        self.name = None  # this is the path in the line itself
        self.path = None  # this is the normpath of self.name
        self.bugs = []
        self.specifiers = []  # raw specifier tokens as written in the file
        self.parsed_specifiers = []
        self.matching_configurations = set()
        self.expectations = []  # raw expectation tokens as written in the file
        self.parsed_expectations = set()
        self.comment = None
        self.matching_tests = []
        self.warnings = []
        self.is_skipped_outside_expectations_file = False
def __eq__(self, other):
return (self.original_string == other.original_string
and self.filename == other.filename
and self.line_numbers == other.line_numbers
and self.name == other.name
and self.path == other.path
and self.bugs == other.bugs
and self.specifiers == other.specifiers
and self.parsed_specifiers == other.parsed_specifiers
and self.matching_configurations == other.matching_configurations
and self.expectations == other.expectations
and self.parsed_expectations == other.parsed_expectations
and self.comment == other.comment
and self.matching_tests == other.matching_tests
and self.warnings == other.warnings
and self.is_skipped_outside_expectations_file == other.is_skipped_outside_expectations_file)
def is_invalid(self):
return bool(self.warnings and self.warnings != [TestExpectationParser.MISSING_BUG_WARNING])
def is_flaky(self):
return len(self.parsed_expectations) > 1
def is_whitespace_or_comment(self):
return bool(re.match("^\s*$", self.original_string.split('#')[0]))
@staticmethod
def create_passing_expectation(test):
expectation_line = TestExpectationLine()
expectation_line.name = test
expectation_line.path = test
expectation_line.parsed_expectations = set([PASS])
expectation_line.expectations = set(['PASS'])
expectation_line.matching_tests = [test]
return expectation_line
@staticmethod
def merge_expectation_lines(line1, line2, model_all_expectations):
"""Merges the expectations of line2 into line1 and returns a fresh object."""
if line1 is None:
return line2
if line2 is None:
return line1
if model_all_expectations and line1.filename != line2.filename:
return line2
# Don't merge original_string or comment.
result = TestExpectationLine()
# We only care about filenames when we're linting, in which case the filenames are the same.
# Not clear that there's anything better to do when not linting and the filenames are different.
if model_all_expectations:
result.filename = line2.filename
result.line_numbers = line1.line_numbers + "," + line2.line_numbers
result.name = line1.name
result.path = line1.path
result.parsed_expectations = set(line1.parsed_expectations) | set(line2.parsed_expectations)
result.expectations = list(set(line1.expectations) | set(line2.expectations))
result.bugs = list(set(line1.bugs) | set(line2.bugs))
result.specifiers = list(set(line1.specifiers) | set(line2.specifiers))
result.parsed_specifiers = list(set(line1.parsed_specifiers) | set(line2.parsed_specifiers))
result.matching_configurations = set(line1.matching_configurations) | set(line2.matching_configurations)
result.matching_tests = list(list(set(line1.matching_tests) | set(line2.matching_tests)))
result.warnings = list(set(line1.warnings) | set(line2.warnings))
result.is_skipped_outside_expectations_file = line1.is_skipped_outside_expectations_file or line2.is_skipped_outside_expectations_file
return result
def to_string(self, test_configuration_converter, include_specifiers=True, include_expectations=True, include_comment=True):
parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])
if self.is_invalid():
return self.original_string or ''
if self.name is None:
return '' if self.comment is None else "#%s" % self.comment
if test_configuration_converter and self.bugs:
specifiers_list = test_configuration_converter.to_specifiers_list(self.matching_configurations)
result = []
for specifiers in specifiers_list:
# FIXME: this is silly that we join the specifiers and then immediately split them.
specifiers = self._serialize_parsed_specifiers(test_configuration_converter, specifiers).split()
expectations = self._serialize_parsed_expectations(parsed_expectation_to_string).split()
result.append(self._format_line(self.bugs, specifiers, self.name, expectations, self.comment))
return "\n".join(result) if result else None
return self._format_line(self.bugs, self.specifiers, self.name, self.expectations, self.comment,
include_specifiers, include_expectations, include_comment)
def to_csv(self):
# Note that this doesn't include the comments.
return '%s,%s,%s,%s' % (self.name, ' '.join(self.bugs), ' '.join(self.specifiers), ' '.join(self.expectations))
def _serialize_parsed_expectations(self, parsed_expectation_to_string):
result = []
for index in TestExpectations.EXPECTATIONS.values():
if index in self.parsed_expectations:
result.append(parsed_expectation_to_string[index])
return ' '.join(result)
def _serialize_parsed_specifiers(self, test_configuration_converter, specifiers):
result = []
result.extend(sorted(self.parsed_specifiers))
result.extend(test_configuration_converter.specifier_sorter().sort_specifiers(specifiers))
result = [INVERTED_MAC_VERSION_MAPPING.get(specifier, specifier) for specifier in result]
return ' '.join(result)
@staticmethod
def _filter_redundant_expectations(expectations):
if set(expectations) == set(['Pass', 'Skip']):
return ['Skip']
if set(expectations) == set(['Pass', 'Slow']):
return ['Slow']
return expectations
@staticmethod
def _format_line(bugs, specifiers, name, expectations, comment, include_specifiers=True, include_expectations=True, include_comment=True):
new_specifiers = []
new_expectations = []
for specifier in specifiers:
# FIXME: Make this all work with the mixed-cased specifiers (e.g. WontFix, Slow, etc).
specifier = specifier.upper()
new_specifiers.append(TestExpectationParser._inverted_configuration_tokens.get(specifier, specifier))
for expectation in expectations:
expectation = expectation.upper()
new_expectations.append(TestExpectationParser._inverted_expectation_tokens.get(expectation, expectation))
result = ''
if include_specifiers and (bugs or new_specifiers):
if bugs:
result += ' '.join(bugs) + ' '
if new_specifiers:
result += '[ %s ] ' % ' '.join(new_specifiers)
result += name
if include_expectations and new_expectations:
new_expectations = TestExpectationLine._filter_redundant_expectations(new_expectations)
result += ' [ %s ]' % ' '.join(sorted(set(new_expectations)))
if include_comment and comment is not None:
result += " #%s" % comment
return result
# FIXME: Refactor API to be a proper CRUD.
class TestExpectationsModel(object):
    """Represents relational store of all expectations and provides CRUD semantics to manage it."""
    def __init__(self, shorten_filename=None):
        # Maps a test to its list of expectations.
        self._test_to_expectations = {}
        # Maps a test to list of its specifiers (string values)
        self._test_to_specifiers = {}
        # Maps a test to a TestExpectationLine instance.
        self._test_to_expectation_line = {}
        # Reverse indices: expectation / timeline / result-type constant -> set of tests.
        self._expectation_to_tests = self._dict_of_sets(TestExpectations.EXPECTATIONS)
        self._timeline_to_tests = self._dict_of_sets(TestExpectations.TIMELINES)
        self._result_type_to_tests = self._dict_of_sets(TestExpectations.RESULT_TYPES)
        # Callable used to abbreviate paths in warning messages; defaults to identity.
        self._shorten_filename = shorten_filename or (lambda x: x)
    def _merge_test_map(self, self_map, other_map):
        # Union the per-test values, preserving the container type (list vs set)
        # that other_map uses for each entry.
        for test in other_map:
            new_expectations = set(other_map[test])
            if test in self_map:
                new_expectations |= set(self_map[test])
            self_map[test] = list(new_expectations) if isinstance(other_map[test], list) else new_expectations
    def _merge_dict_of_sets(self, self_dict, other_dict):
        # In-place union of each keyed set; assumes both dicts share the same keys.
        for key in other_dict:
            self_dict[key] |= other_dict[key]
    def merge_model(self, other):
        """Folds every expectation in *other* into this model."""
        self._merge_test_map(self._test_to_expectations, other._test_to_expectations)
        # merge_expectation_lines is O(tests per line). Therefore, this loop
        # is O((tests per line)^2) which is really expensive when a line
        # contains a lot of tests. Cache the output of merge_expectation_lines
        # so that we only call that n^2 in the number of *lines*.
        merge_lines_cache = defaultdict(dict)
        for test, other_line in other._test_to_expectation_line.items():
            merged_line = None
            if test in self._test_to_expectation_line:
                self_line = self._test_to_expectation_line[test]
                if other_line not in merge_lines_cache[self_line]:
                    merge_lines_cache[self_line][other_line] = TestExpectationLine.merge_expectation_lines(
                        self_line, other_line, model_all_expectations=False)
                merged_line = merge_lines_cache[self_line][other_line]
            else:
                merged_line = other_line
            self._test_to_expectation_line[test] = merged_line
        self._merge_dict_of_sets(self._expectation_to_tests, other._expectation_to_tests)
        self._merge_dict_of_sets(self._timeline_to_tests, other._timeline_to_tests)
        self._merge_dict_of_sets(self._result_type_to_tests, other._result_type_to_tests)
    def _dict_of_sets(self, strings_to_constants):
        """Takes a dict of strings->constants and returns a dict mapping
        each constant to an empty set."""
        d = {}
        for c in strings_to_constants.values():
            d[c] = set()
        return d
    def get_test_set(self, expectation, include_skips=True):
        """Returns the set of tests carrying *expectation*, optionally minus skipped tests."""
        tests = self._expectation_to_tests[expectation]
        if not include_skips:
            tests = tests - self.get_test_set(SKIP)
        return tests
    def get_test_set_for_keyword(self, keyword):
        # First try the keyword as a known expectation; otherwise fall back to
        # matching it against each test's raw specifier strings.
        expectation_enum = TestExpectations.EXPECTATIONS.get(keyword.lower(), None)
        if expectation_enum is not None:
            return self._expectation_to_tests[expectation_enum]
        matching_tests = set()
        for test, specifiers in self._test_to_specifiers.iteritems():
            if keyword.lower() in specifiers:
                matching_tests.add(test)
        return matching_tests
    def get_tests_with_result_type(self, result_type):
        return self._result_type_to_tests[result_type]
    def get_tests_with_timeline(self, timeline):
        return self._timeline_to_tests[timeline]
    def has_test(self, test):
        return test in self._test_to_expectation_line
    def get_expectation_line(self, test):
        return self._test_to_expectation_line.get(test)
    def get_expectations(self, test):
        return self._test_to_expectations[test]
    def get_expectations_string(self, test):
        """Returns the expectatons for the given test as an uppercase string.
        If there are no expectations for the test, then "PASS" is returned."""
        if self.get_expectation_line(test).is_skipped_outside_expectations_file:
            return 'NOTRUN'
        expectations = self.get_expectations(test)
        retval = []
        # FIXME: WontFix should cause the test to get skipped without artificially adding SKIP to the expectations list.
        if WONTFIX in expectations and SKIP in expectations:
            expectations.remove(SKIP)
        for expectation in expectations:
            retval.append(self.expectation_to_string(expectation))
        return " ".join(retval)
    def expectation_to_string(self, expectation):
        """Return the uppercased string equivalent of a given expectation."""
        for item in TestExpectations.EXPECTATIONS.items():
            if item[1] == expectation:
                return item[0].upper()
        raise ValueError(expectation)
    def remove_expectation_line(self, test):
        """Deletes *test* from all maps and indices; no-op if absent."""
        if not self.has_test(test):
            return
        self._clear_expectations_for_test(test)
        del self._test_to_expectation_line[test]
    def add_expectation_line(self, expectation_line,
                             model_all_expectations=False):
        """Registers every test matched by *expectation_line*, resolving conflicts.

        More precise paths win; exact-path conflicts either merge (rebaseline
        lines or model_all_expectations) or keep the earlier line.
        """
        if expectation_line.is_invalid():
            return
        for test in expectation_line.matching_tests:
            lines_involve_rebaseline = False
            prev_expectation_line = self.get_expectation_line(test)
            if prev_expectation_line:
                # The previous path matched more of the test.
                if len(prev_expectation_line.path) > len(expectation_line.path):
                    continue
                if self._lines_conflict(prev_expectation_line, expectation_line):
                    continue
                lines_involve_rebaseline = self._expects_rebaseline(prev_expectation_line) or self._expects_rebaseline(expectation_line)
            # Exact path matches that conflict should be merged, e.g.
            # [ Pass Timeout ] + [ NeedsRebaseline ] ==> [ Pass Timeout NeedsRebaseline ].
            if model_all_expectations or lines_involve_rebaseline:
                expectation_line = TestExpectationLine.merge_expectation_lines(prev_expectation_line, expectation_line, model_all_expectations)
            self._clear_expectations_for_test(test)
            self._test_to_expectation_line[test] = expectation_line
            self._add_test(test, expectation_line)
    def _add_test(self, test, expectation_line):
        """Sets the expected state for a given test.
        This routine assumes the test has not been added before. If it has,
        use _clear_expectations_for_test() to reset the state prior to
        calling this."""
        self._test_to_expectations[test] = expectation_line.parsed_expectations
        for expectation in expectation_line.parsed_expectations:
            self._expectation_to_tests[expectation].add(test)
        self._test_to_specifiers[test] = expectation_line.specifiers
        # Timeline: WontFix tests live on their own timeline, everything else is NOW.
        if WONTFIX in expectation_line.parsed_expectations:
            self._timeline_to_tests[WONTFIX].add(test)
        else:
            self._timeline_to_tests[NOW].add(test)
        # Result-type bucket: skip > pure pass > flaky > fail.
        if SKIP in expectation_line.parsed_expectations:
            self._result_type_to_tests[SKIP].add(test)
        elif expectation_line.parsed_expectations == set([PASS]):
            self._result_type_to_tests[PASS].add(test)
        elif expectation_line.is_flaky():
            self._result_type_to_tests[FLAKY].add(test)
        else:
            # FIXME: What is this?
            self._result_type_to_tests[FAIL].add(test)
    def _clear_expectations_for_test(self, test):
        """Remove prexisting expectations for this test.
        This happens if we are seeing a more precise path
        than a previous listing.
        """
        if self.has_test(test):
            self._test_to_expectations.pop(test, '')
            self._remove_from_sets(test, self._expectation_to_tests)
            self._remove_from_sets(test, self._timeline_to_tests)
            self._remove_from_sets(test, self._result_type_to_tests)
    def _remove_from_sets(self, test, dict_of_sets_of_tests):
        """Removes the given test from the sets in the dictionary.
        Args:
          test: test to look for
          dict: dict of sets of files"""
        for set_of_tests in dict_of_sets_of_tests.itervalues():
            if test in set_of_tests:
                set_of_tests.remove(test)
    def _expects_rebaseline(self, expectation_line):
        # True for any of the three rebaseline-flavored expectations.
        expectations = expectation_line.parsed_expectations
        return REBASELINE in expectations or NEEDS_REBASELINE in expectations or NEEDS_MANUAL_REBASELINE in expectations
    def _lines_conflict(self, prev_expectation_line, expectation_line):
        """Returns True (and records a warning) when two same-path lines clash.

        Rebaseline-vs-PASS combinations are allowed; otherwise duplicate,
        subset/superset, or overlapping configuration sets are conflicts.
        """
        if prev_expectation_line.path != expectation_line.path:
            return False
        if PASS in expectation_line.parsed_expectations and self._expects_rebaseline(prev_expectation_line):
            return False
        if PASS in prev_expectation_line.parsed_expectations and self._expects_rebaseline(expectation_line):
            return False
        if prev_expectation_line.matching_configurations == expectation_line.matching_configurations:
            expectation_line.warnings.append('Duplicate or ambiguous entry lines %s:%s and %s:%s.' % (
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
                self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
            return True
        if prev_expectation_line.matching_configurations >= expectation_line.matching_configurations:
            expectation_line.warnings.append('More specific entry for %s on line %s:%s overrides line %s:%s.' % (expectation_line.name,
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
                self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
            # FIXME: return False if we want more specific to win.
            return True
        if prev_expectation_line.matching_configurations <= expectation_line.matching_configurations:
            expectation_line.warnings.append('More specific entry for %s on line %s:%s overrides line %s:%s.' % (expectation_line.name,
                self._shorten_filename(expectation_line.filename), expectation_line.line_numbers,
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers))
            return True
        if prev_expectation_line.matching_configurations & expectation_line.matching_configurations:
            expectation_line.warnings.append('Entries for %s on lines %s:%s and %s:%s match overlapping sets of configurations.' % (expectation_line.name,
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
                self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
            return True
        # Configuration sets are disjoint, then.
        return False
class TestExpectations(object):
    """Test expectations consist of lines with specifications of what
    to expect from layout test cases. The test cases can be directories
    in which case the expectations apply to all test cases in that
    directory and any subdirectory. The format is along the lines of:

      LayoutTests/fast/js/fixme.js [ Failure ]
      LayoutTests/fast/js/flaky.js [ Failure Pass ]
      LayoutTests/fast/js/crash.js [ Crash Failure Pass Timeout ]
      ...

    To add specifiers:
      LayoutTests/fast/js/no-good.js
      [ Debug ] LayoutTests/fast/js/no-good.js [ Pass Timeout ]
      [ Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
      [ Linux Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
      [ Linux Win ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]

    Skip: Doesn't run the test.
    Slow: The test takes a long time to run, but does not timeout indefinitely.
    WontFix: For tests that we never intend to pass on a given platform (treated like Skip).

    Notes:
      -A test cannot be both SLOW and TIMEOUT
      -A test can be included twice, but not via the same path.
      -If a test is included twice, then the more precise path wins.
      -CRASH tests cannot be WONTFIX
    """
    # FIXME: Update to new syntax once the old format is no longer supported.
    # Maps the lowercase keyword used in expectations files to the module-level constant.
    EXPECTATIONS = {'pass': PASS,
                    'audio': AUDIO,
                    'fail': FAIL,
                    'image': IMAGE,
                    'image+text': IMAGE_PLUS_TEXT,
                    'text': TEXT,
                    'timeout': TIMEOUT,
                    'crash': CRASH,
                    'leak': LEAK,
                    'missing': MISSING,
                    TestExpectationParser.SKIP_MODIFIER: SKIP,
                    TestExpectationParser.NEEDS_REBASELINE_MODIFIER: NEEDS_REBASELINE,
                    TestExpectationParser.NEEDS_MANUAL_REBASELINE_MODIFIER: NEEDS_MANUAL_REBASELINE,
                    TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
                    TestExpectationParser.SLOW_MODIFIER: SLOW,
                    TestExpectationParser.REBASELINE_MODIFIER: REBASELINE,
                    }
    # Inverse of EXPECTATIONS: constant -> keyword string.
    EXPECTATIONS_TO_STRING = dict((k, v) for (v, k) in EXPECTATIONS.iteritems())
    # (aggregated by category, pass/fail/skip, type)
    EXPECTATION_DESCRIPTIONS = {SKIP: 'skipped',
                                PASS: 'passes',
                                FAIL: 'failures',
                                IMAGE: 'image-only failures',
                                TEXT: 'text-only failures',
                                IMAGE_PLUS_TEXT: 'image and text failures',
                                AUDIO: 'audio failures',
                                CRASH: 'crashes',
                                LEAK: 'leaks',
                                TIMEOUT: 'timeouts',
                                MISSING: 'missing results'}
    # Expectations that describe handling rather than a test outcome.
    NON_TEST_OUTCOME_EXPECTATIONS = (REBASELINE, SKIP, SLOW, WONTFIX)
    BUILD_TYPES = ('debug', 'release')
    TIMELINES = {TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
                 'now': NOW}
    RESULT_TYPES = {'skip': SKIP,
                    'pass': PASS,
                    'fail': FAIL,
                    'flaky': FLAKY}
    @classmethod
    def expectation_from_string(cls, string):
        """Maps a single expectation keyword to its constant (None if unknown)."""
        assert(' ' not in string)  # This only handles one expectation at a time.
        return cls.EXPECTATIONS.get(string.lower())
    @staticmethod
    def result_was_expected(result, expected_results, test_needs_rebaselining):
        """Returns whether we got a result we were expecting.
        Args:
            result: actual result of a test execution
            expected_results: set of results listed in test_expectations
            test_needs_rebaselining: whether test was marked as REBASELINE"""
        # A line with only non-outcome expectations implicitly expects PASS.
        if not (set(expected_results) - (set(TestExpectations.NON_TEST_OUTCOME_EXPECTATIONS))):
            expected_results = set([PASS])
        if result in expected_results:
            return True
        if result in (PASS, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, MISSING) and (NEEDS_REBASELINE in expected_results or NEEDS_MANUAL_REBASELINE in expected_results):
            return True
        # FAIL is a catch-all for the specific content-mismatch failure types.
        if result in (TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO) and (FAIL in expected_results):
            return True
        if result == MISSING and test_needs_rebaselining:
            return True
        if result == SKIP:
            return True
        return False
    @staticmethod
    def remove_pixel_failures(expected_results):
        """Returns a copy of the expected results for a test, except that we
        drop any pixel failures and return the remaining expectations. For example,
        if we're not running pixel tests, then tests expected to fail as IMAGE
        will PASS."""
        expected_results = expected_results.copy()
        if IMAGE in expected_results:
            expected_results.remove(IMAGE)
            expected_results.add(PASS)
        return expected_results
    @staticmethod
    def remove_non_sanitizer_failures(expected_results):
        """Returns a copy of the expected results for a test, except that we
        drop any failures that the sanitizers don't care about."""
        expected_results = expected_results.copy()
        for result in (IMAGE, FAIL, IMAGE_PLUS_TEXT):
            if result in expected_results:
                expected_results.remove(result)
                expected_results.add(PASS)
        return expected_results
    @staticmethod
    def has_pixel_failures(actual_results):
        return IMAGE in actual_results or FAIL in actual_results
    @staticmethod
    def suffixes_for_expectations(expectations):
        """Returns the baseline-file suffixes a set of expectation constants implies."""
        suffixes = set()
        if IMAGE in expectations:
            suffixes.add('png')
        if FAIL in expectations:
            suffixes.add('txt')
            suffixes.add('png')
            suffixes.add('wav')
        return set(suffixes)
    @staticmethod
    def suffixes_for_actual_expectations_string(expectations):
        """Like suffixes_for_expectations but keyed on uppercase keyword substrings."""
        suffixes = set()
        if 'TEXT' in expectations:
            suffixes.add('txt')
        if 'IMAGE' in expectations:
            suffixes.add('png')
        if 'AUDIO' in expectations:
            suffixes.add('wav')
        if 'MISSING' in expectations:
            suffixes.add('txt')
            suffixes.add('png')
            suffixes.add('wav')
        return suffixes
    # FIXME: This constructor does too much work. We should move the actual parsing of
    # the expectations into separate routines so that linting and handling overrides
    # can be controlled separately, and the constructor can be more of a no-op.
    def __init__(self, port, tests=None, include_overrides=True, expectations_dict=None, model_all_expectations=False, is_lint_mode=False):
        self._full_test_list = tests
        self._test_config = port.test_configuration()
        self._is_lint_mode = is_lint_mode
        self._model_all_expectations = self._is_lint_mode or model_all_expectations
        self._model = TestExpectationsModel(self._shorten_filename)
        self._parser = TestExpectationParser(port, tests, self._is_lint_mode)
        self._port = port
        self._skipped_tests_warnings = []
        self._expectations = []
        if not expectations_dict:
            expectations_dict = port.expectations_dict()
        # Always parse the generic expectations (the generic file is required
        # to be the first one in the expectations_dict, which must be an OrderedDict).
        generic_path, generic_exps = expectations_dict.items()[0]
        expectations = self._parser.parse(generic_path, generic_exps)
        self._add_expectations(expectations, self._model)
        self._expectations += expectations
        # Now add the overrides if so requested.
        if include_overrides:
            for path, contents in expectations_dict.items()[1:]:
                expectations = self._parser.parse(path, contents)
                model = TestExpectationsModel(self._shorten_filename)
                self._add_expectations(expectations, model)
                self._expectations += expectations
                self._model.merge_model(model)
        # FIXME: move ignore_tests into port.skipped_layout_tests()
        self.add_extra_skipped_tests(port.skipped_layout_tests(tests).union(set(port.get_option('ignore_tests', []))))
        self.add_expectations_from_bot()
        self._has_warnings = False
        self._report_warnings()
        self._process_tests_without_expectations()
    # TODO(ojan): Allow for removing skipped tests when getting the list of
    # tests to run, but not when getting metrics.
    def model(self):
        return self._model
    def get_needs_rebaseline_failures(self):
        return self._model.get_test_set(NEEDS_REBASELINE)
    def get_rebaselining_failures(self):
        return self._model.get_test_set(REBASELINE)
    # FIXME: Change the callsites to use TestExpectationsModel and remove.
    def get_expectations(self, test):
        return self._model.get_expectations(test)
    # FIXME: Change the callsites to use TestExpectationsModel and remove.
    def get_tests_with_result_type(self, result_type):
        return self._model.get_tests_with_result_type(result_type)
    # FIXME: Change the callsites to use TestExpectationsModel and remove.
    def get_test_set(self, expectation, include_skips=True):
        return self._model.get_test_set(expectation, include_skips)
    # FIXME: Change the callsites to use TestExpectationsModel and remove.
    def get_tests_with_timeline(self, timeline):
        return self._model.get_tests_with_timeline(timeline)
    def get_expectations_string(self, test):
        return self._model.get_expectations_string(test)
    def expectation_to_string(self, expectation):
        return self._model.expectation_to_string(expectation)
    def matches_an_expected_result(self, test, result, pixel_tests_are_enabled, sanitizer_is_enabled):
        """Checks *result* against the test's expectations, adjusted for run mode."""
        expected_results = self._model.get_expectations(test)
        if sanitizer_is_enabled:
            expected_results = self.remove_non_sanitizer_failures(expected_results)
        elif not pixel_tests_are_enabled:
            expected_results = self.remove_pixel_failures(expected_results)
        return self.result_was_expected(result, expected_results, self.is_rebaselining(test))
    def is_rebaselining(self, test):
        return REBASELINE in self._model.get_expectations(test)
    def _shorten_filename(self, filename):
        # Report paths relative to the WebKit checkout when possible.
        if filename.startswith(self._port.path_from_webkit_base()):
            return self._port.host.filesystem.relpath(filename, self._port.path_from_webkit_base())
        return filename
    def _report_warnings(self):
        """Collects parse warnings; raises ParseError when linting, logs otherwise."""
        warnings = []
        for expectation in self._expectations:
            for warning in expectation.warnings:
                warnings.append('%s:%s %s %s' % (self._shorten_filename(expectation.filename), expectation.line_numbers,
                                warning, expectation.name if expectation.expectations else expectation.original_string))
        if warnings:
            self._has_warnings = True
            if self._is_lint_mode:
                raise ParseError(warnings)
            _log.warning('--lint-test-files warnings:')
            for warning in warnings:
                _log.warning(warning)
            _log.warning('')
    def _process_tests_without_expectations(self):
        # Any known test with no explicit line defaults to an expected PASS.
        if self._full_test_list:
            for test in self._full_test_list:
                if not self._model.has_test(test):
                    self._model.add_expectation_line(TestExpectationLine.create_passing_expectation(test))
    def has_warnings(self):
        return self._has_warnings
    def remove_configurations(self, removals):
        """Drops (test, configuration) pairs and reserializes the expectations text.

        Lines left with no matching configurations are removed entirely,
        along with any immediately preceding comment/blank lines.
        """
        expectations_to_remove = []
        modified_expectations = []
        for test, test_configuration in removals:
            for expectation in self._expectations:
                if expectation.name != test or not expectation.parsed_expectations:
                    continue
                if test_configuration not in expectation.matching_configurations:
                    continue
                expectation.matching_configurations.remove(test_configuration)
                if expectation.matching_configurations:
                    modified_expectations.append(expectation)
                else:
                    expectations_to_remove.append(expectation)
        for expectation in expectations_to_remove:
            index = self._expectations.index(expectation)
            self._expectations.remove(expectation)
            # Also drop the comment/whitespace block that introduced this line.
            if index == len(self._expectations) or self._expectations[index].is_whitespace_or_comment():
                while index and self._expectations[index - 1].is_whitespace_or_comment():
                    index = index - 1
                    self._expectations.pop(index)
        return self.list_to_string(self._expectations, self._parser._test_configuration_converter, modified_expectations)
    def _add_expectations(self, expectation_list, model):
        # Only lines matching the current configuration (or all lines, when
        # modeling everything) are added to the model.
        for expectation_line in expectation_list:
            if not expectation_line.expectations:
                continue
            if self._model_all_expectations or self._test_config in expectation_line.matching_configurations:
                model.add_expectation_line(expectation_line, model_all_expectations=self._model_all_expectations)
    def add_extra_skipped_tests(self, tests_to_skip):
        """Merges Skipped-file entries in as WontFix/Skip lines, warning on duplicates."""
        if not tests_to_skip:
            return
        for test in self._expectations:
            if test.name and test.name in tests_to_skip:
                test.warnings.append('%s:%s %s is also in a Skipped file.' % (test.filename, test.line_numbers, test.name))
        model = TestExpectationsModel(self._shorten_filename)
        for test_name in tests_to_skip:
            expectation_line = self._parser.expectation_for_skipped_test(test_name)
            model.add_expectation_line(expectation_line)
        self._model.merge_model(model)
    def add_expectations_from_bot(self):
        # FIXME: With mode 'very-flaky' and 'maybe-flaky', this will show the expectations entry in the flakiness
        # dashboard rows for each test to be whatever the bot thinks they should be. Is this a good thing?
        bot_expectations = self._port.bot_expectations()
        model = TestExpectationsModel(self._shorten_filename)
        for test_name in bot_expectations:
            expectation_line = self._parser.expectation_line_for_test(test_name, bot_expectations[test_name])
            # Unexpected results are merged into existing expectations.
            # NOTE(review): 'merge' is computed but never used below — presumably it
            # was meant to be passed to add_expectation_line; confirm intent.
            merge = self._port.get_option('ignore_flaky_tests') == 'unexpected'
            model.add_expectation_line(expectation_line)
        self._model.merge_model(model)
    def add_expectation_line(self, expectation_line):
        self._model.add_expectation_line(expectation_line)
        self._expectations += [expectation_line]
    def remove_expectation_line(self, test):
        if not self._model.has_test(test):
            return
        self._expectations.remove(self._model.get_expectation_line(test))
        self._model.remove_expectation_line(test)
    @staticmethod
    def list_to_string(expectation_lines, test_configuration_converter=None, reconstitute_only_these=None):
        """Serializes lines back to file text, reserializing only the requested ones."""
        def serialize(expectation_line):
            # If reconstitute_only_these is an empty list, we want to return original_string.
            # So we need to compare reconstitute_only_these to None, not just check if it's falsey.
            if reconstitute_only_these is None or expectation_line in reconstitute_only_these:
                return expectation_line.to_string(test_configuration_converter)
            return expectation_line.original_string
        def nones_out(expectation_line):
            return expectation_line is not None
        return "\n".join(filter(nones_out, map(serialize, expectation_lines)))
| {
"content_hash": "df4654ecdda9afa8698fa4022bc53a96",
"timestamp": "",
"source": "github",
"line_count": 1125,
"max_line_length": 171,
"avg_line_length": 45.04,
"alnum_prop": 0.6371817643576081,
"repo_name": "Workday/OpenFrame",
"id": "a3f161ed5d72b8917cafca6cbe1de62e11691e41",
"size": "52200",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/models/test_expectations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
'''
Helloworld Modular Input Script
Copyright (C) 2012 Splunk, Inc.
All Rights Reserved
'''
import sys,logging
import xml.dom.minidom, xml.sax.saxutils
# set up logging: route ERROR-and-above records from the root logger to stderr
logging.root.setLevel(logging.ERROR)
formatter = logging.Formatter('%(levelname)s %(message)s')
# with zero args, StreamHandler writes to STD ERR (stdout is reserved for event XML)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.root.addHandler(handler)
# Introspection scheme returned to splunkd for --scheme.
# (Fixed: the 'someprop' description was missing the '<' of its closing tag,
# which made the XML unparseable.)
SCHEME = """<scheme>
    <title>HelloWorld</title>
    <description>Helloworld Modular Input Example</description>
    <use_external_validation>true</use_external_validation>
    <streaming_mode>xml</streaming_mode>
    <use_single_instance>false</use_single_instance>

    <endpoint>
        <args>
            <arg name="name">
                <title>HelloWorld name</title>
                <description>Name of this input</description>
            </arg>

            <arg name="someprop">
                <title>Some Property</title>
                <description>Some HelloWorld Property</description>
                <required_on_edit>false</required_on_edit>
                <required_on_create>true</required_on_create>
            </arg>

            <arg name="anotherprop">
                <title>Another Property</title>
                <description>Another HelloWorld Property</description>
                <required_on_edit>false</required_on_edit>
                <required_on_create>true</required_on_create>
            </arg>
        </args>
    </endpoint>
</scheme>
"""
def do_validate():
    """Handle --validate-arguments: read the validation XML from splunkd."""
    validation_config = get_validation_config()
    # TODO: validate the values in validation_config;
    # on failure, print_validation_error(...) and sys.exit(2)
def do_run():
    """Stream sample events to splunkd on STDOUT using the input configuration."""
    config = get_input_config()
    # TODO: poll a real data source; this stub emits ten identical sample events
    message = config["someprop"] + " " + config["anotherprop"]
    for _ in range(10):
        print_xml_single_instance_mode(message)
    # on error: log via logging.error(...) and sys.exit(2)
# prints validation error data to be consumed by Splunk
def print_validation_error(s):
    """Emit an XML <error> document on STDOUT, XML-escaping *s*.

    Parenthesized print form behaves identically under Python 2 and 3.
    """
    print("<error><message>%s</message></error>" % xml.sax.saxutils.escape(s))
# prints XML stream
def print_xml_single_instance_mode(s):
    """Emit one event in Splunk's XML streaming format, XML-escaping *s*.

    Parenthesized print form behaves identically under Python 2 and 3.
    """
    print("<stream><event><data>%s</data></event></stream>" % xml.sax.saxutils.escape(s))
# prints XML stream
def print_xml_multi_instance_mode(s, stanza):
    """Emit one event tagged with its stanza in Splunk's XML streaming format.

    Fixes two bugs in the original: the '%' operator bound to *stanza* alone
    (raising TypeError, since the format string has two placeholders), and the
    adjacent ""%s"" string literals concatenated to an unquoted stanza
    attribute. Both format arguments are now passed as a tuple and the
    attribute value is properly quoted.
    """
    print('<stream><event stanza="%s"><data>%s</data></event></stream>'
          % (stanza, xml.sax.saxutils.escape(s)))
# prints simple stream
def print_simple(s):
    """Emit *s* followed by a blank line (plain, non-XML streaming mode).

    Parenthesized print form behaves identically under Python 2 and 3.
    """
    print("%s\n" % s)
def usage():
    """Print usage text, log the misuse, and exit with status 2.

    Fixes the original's unfilled '%s' placeholder by supplying the
    program name from sys.argv[0].
    """
    print("usage: %s [--scheme|--validate-arguments]" % sys.argv[0])
    logging.error("Incorrect Program Usage")
    sys.exit(2)
def do_scheme():
    """Handle --scheme: print the introspection scheme XML on STDOUT.

    Parenthesized print form behaves identically under Python 2 and 3.
    """
    print(SCHEME)
#read XML configuration passed from splunkd, need to refactor to support single instance mode
def get_input_config():
    """Parse the run-time input configuration that splunkd writes to STDIN.

    Returns a dict containing the stanza name under "name", one entry per
    <param> element, and the "checkpoint_dir" path.

    Raises:
        Exception: if STDIN holds no usable configuration, wrapping the
            underlying parse error in its message.

    Uses raise/except forms valid in both Python 2.6+ and Python 3; behavior
    is unchanged from the original Python-2-only syntax.
    """
    config = {}
    try:
        # read everything from stdin
        config_str = sys.stdin.read()
        # parse the config XML
        doc = xml.dom.minidom.parseString(config_str)
        root = doc.documentElement
        conf_node = root.getElementsByTagName("configuration")[0]
        if conf_node:
            logging.debug("XML: found configuration")
            stanza = conf_node.getElementsByTagName("stanza")[0]
            if stanza:
                stanza_name = stanza.getAttribute("name")
                if stanza_name:
                    logging.debug("XML: found stanza " + stanza_name)
                    config["name"] = stanza_name
                    params = stanza.getElementsByTagName("param")
                    for param in params:
                        param_name = param.getAttribute("name")
                        logging.debug("XML: found param '%s'" % param_name)
                        # only record params that carry a text payload
                        if param_name and param.firstChild and \
                           param.firstChild.nodeType == param.firstChild.TEXT_NODE:
                            data = param.firstChild.data
                            config[param_name] = data
                            logging.debug("XML: '%s' -> '%s'" % (param_name, data))
        checkpnt_node = root.getElementsByTagName("checkpoint_dir")[0]
        if checkpnt_node and checkpnt_node.firstChild and \
           checkpnt_node.firstChild.nodeType == checkpnt_node.firstChild.TEXT_NODE:
            config["checkpoint_dir"] = checkpnt_node.firstChild.data
        if not config:
            raise Exception("Invalid configuration received from Splunk.")
    except Exception as e:
        raise Exception("Error getting Splunk configuration via STDIN: %s" % str(e))
    return config
#read XML configuration passed from splunkd, need to refactor to support single instance mode
def get_validation_config():
    """Parse the validation XML that splunkd writes to STDIN.

    Returns a dict with the item name under "stanza" plus one entry per
    <param> element that carries a text payload.
    """
    val_data = {}
    # read everything from stdin and parse the validation XML
    doc = xml.dom.minidom.parseString(sys.stdin.read())
    root = doc.documentElement
    logging.debug("XML: found items")
    item_node = root.getElementsByTagName("item")[0]
    if item_node:
        logging.debug("XML: found item")
        val_data["stanza"] = item_node.getAttribute("name")
        for param in item_node.getElementsByTagName("param"):
            param_name = param.getAttribute("name")
            logging.debug("Found param %s" % param_name)
            has_text = (param_name and param.firstChild and
                        param.firstChild.nodeType == param.firstChild.TEXT_NODE)
            if has_text:
                val_data[param_name] = param.firstChild.data
    return val_data
if __name__ == '__main__':
    # Dispatch on the mode flag splunkd invokes this script with:
    # --scheme prints the introspection scheme, --validate-arguments checks
    # a proposed config, no flag means a normal data-collection run.
    # do_validate / do_run are defined elsewhere in this file.
    if len(sys.argv) > 1:
        if sys.argv[1] == "--scheme":
            do_scheme()
        elif sys.argv[1] == "--validate-arguments":
            do_validate()
        else:
            usage()
    else:
        do_run()
    sys.exit(0)
"content_hash": "b35a218da1bdb63d57521de0c838cfa8",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 109,
"avg_line_length": 32.7909604519774,
"alnum_prop": 0.5947622329427981,
"repo_name": "damiendallimore/SplunkModularInputsPythonFramework",
"id": "cc1bf82e46857de05b5fe0cbc8567a9b4fddd894",
"size": "5804",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "implementations/helloworld/bin/helloworld.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2583"
},
{
"name": "Python",
"bytes": "586914"
},
{
"name": "Ruby",
"bytes": "3243"
}
],
"symlink_target": ""
} |
from django import forms
from django.utils.translation import gettext as _
from dcim.models import DeviceRole, Platform, Region, Site, SiteGroup
from extras.forms import LocalConfigContextFilterForm
from ipam.models import VRF
from netbox.forms import NetBoxModelFilterSetForm
from tenancy.forms import ContactModelFilterForm, TenancyFilterForm
from utilities.forms import (
DynamicModelMultipleChoiceField, MultipleChoiceField, StaticSelect, TagFilterField, BOOLEAN_WITH_BLANK_CHOICES,
)
from virtualization.choices import *
from virtualization.models import *
__all__ = (
'ClusterFilterForm',
'ClusterGroupFilterForm',
'ClusterTypeFilterForm',
'VirtualMachineFilterForm',
'VMInterfaceFilterForm',
)
class ClusterTypeFilterForm(NetBoxModelFilterSetForm):
    # Filter form for ClusterType list views; only the base free-text
    # search (q) and tag filtering apply.
    model = ClusterType
    tag = TagFilterField(model)
class ClusterGroupFilterForm(ContactModelFilterForm, NetBoxModelFilterSetForm):
    # Filter form for ClusterGroup list views; contact filters come from
    # the ContactModelFilterForm mixin.
    model = ClusterGroup
    tag = TagFilterField(model)
class ClusterFilterForm(TenancyFilterForm, ContactModelFilterForm, NetBoxModelFilterSetForm):
    # Filter form for Cluster list views; tenancy and contact filters come
    # from the mixins.
    model = Cluster
    # Field grouping as rendered in the UI filter sidebar.
    fieldsets = (
        (None, ('q', 'tag')),
        ('Attributes', ('group_id', 'type_id')),
        ('Location', ('region_id', 'site_group_id', 'site_id')),
        ('Tenant', ('tenant_group_id', 'tenant_id')),
        ('Contacts', ('contact', 'contact_role')),
    )
    type_id = DynamicModelMultipleChoiceField(
        queryset=ClusterType.objects.all(),
        required=False,
        label=_('Type')
    )
    region_id = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        required=False,
        label=_('Region')
    )
    site_group_id = DynamicModelMultipleChoiceField(
        queryset=SiteGroup.objects.all(),
        required=False,
        label=_('Site group')
    )
    site_id = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        required=False,
        null_option='None',
        # Narrow the selectable sites by any chosen region / site group.
        query_params={
            'region_id': '$region_id',
            'site_group_id': '$site_group_id',
        },
        label=_('Site')
    )
    group_id = DynamicModelMultipleChoiceField(
        queryset=ClusterGroup.objects.all(),
        required=False,
        null_option='None',
        label=_('Group')
    )
    tag = TagFilterField(model)
class VirtualMachineFilterForm(
    LocalConfigContextFilterForm,
    TenancyFilterForm,
    ContactModelFilterForm,
    NetBoxModelFilterSetForm
):
    # Filter form for VirtualMachine list views; local-config-context,
    # tenancy and contact filters come from the mixins.
    model = VirtualMachine
    # Field grouping as rendered in the UI filter sidebar.
    # Fix: user-facing section label typo 'Attriubtes' -> 'Attributes'.
    fieldsets = (
        (None, ('q', 'tag')),
        ('Cluster', ('cluster_group_id', 'cluster_type_id', 'cluster_id')),
        ('Location', ('region_id', 'site_group_id', 'site_id')),
        ('Attributes', ('status', 'role_id', 'platform_id', 'mac_address', 'has_primary_ip', 'local_context_data')),
        ('Tenant', ('tenant_group_id', 'tenant_id')),
        ('Contacts', ('contact', 'contact_role')),
    )
    cluster_group_id = DynamicModelMultipleChoiceField(
        queryset=ClusterGroup.objects.all(),
        required=False,
        null_option='None',
        label=_('Cluster group')
    )
    cluster_type_id = DynamicModelMultipleChoiceField(
        queryset=ClusterType.objects.all(),
        required=False,
        null_option='None',
        label=_('Cluster type')
    )
    cluster_id = DynamicModelMultipleChoiceField(
        queryset=Cluster.objects.all(),
        required=False,
        label=_('Cluster')
    )
    region_id = DynamicModelMultipleChoiceField(
        queryset=Region.objects.all(),
        required=False,
        label=_('Region')
    )
    site_group_id = DynamicModelMultipleChoiceField(
        queryset=SiteGroup.objects.all(),
        required=False,
        label=_('Site group')
    )
    site_id = DynamicModelMultipleChoiceField(
        queryset=Site.objects.all(),
        required=False,
        null_option='None',
        # NOTE(review): the sites API filter here is 'group_id' while
        # ClusterFilterForm uses 'site_group_id' -- confirm which parameter
        # the Site list endpoint actually accepts.
        query_params={
            'region_id': '$region_id',
            'group_id': '$site_group_id',
        },
        label=_('Site')
    )
    role_id = DynamicModelMultipleChoiceField(
        queryset=DeviceRole.objects.all(),
        required=False,
        null_option='None',
        # Restrict to device roles flagged as usable for VMs.
        query_params={
            'vm_role': "True"
        },
        label=_('Role')
    )
    status = MultipleChoiceField(
        choices=VirtualMachineStatusChoices,
        required=False
    )
    platform_id = DynamicModelMultipleChoiceField(
        queryset=Platform.objects.all(),
        required=False,
        null_option='None',
        label=_('Platform')
    )
    mac_address = forms.CharField(
        required=False,
        label='MAC address'
    )
    has_primary_ip = forms.NullBooleanField(
        required=False,
        label='Has a primary IP',
        widget=StaticSelect(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    tag = TagFilterField(model)
class VMInterfaceFilterForm(NetBoxModelFilterSetForm):
    # Filter form for VMInterface list views.
    model = VMInterface
    # Field grouping as rendered in the UI filter sidebar.
    fieldsets = (
        (None, ('q', 'tag')),
        ('Virtual Machine', ('cluster_id', 'virtual_machine_id')),
        ('Attributes', ('enabled', 'mac_address', 'vrf_id')),
    )
    cluster_id = DynamicModelMultipleChoiceField(
        queryset=Cluster.objects.all(),
        required=False,
        label=_('Cluster')
    )
    virtual_machine_id = DynamicModelMultipleChoiceField(
        queryset=VirtualMachine.objects.all(),
        required=False,
        # Narrow the selectable VMs by any chosen cluster.
        query_params={
            'cluster_id': '$cluster_id'
        },
        label=_('Virtual machine')
    )
    enabled = forms.NullBooleanField(
        required=False,
        widget=StaticSelect(
            choices=BOOLEAN_WITH_BLANK_CHOICES
        )
    )
    mac_address = forms.CharField(
        required=False,
        label='MAC address'
    )
    vrf_id = DynamicModelMultipleChoiceField(
        queryset=VRF.objects.all(),
        required=False,
        label='VRF'
    )
    tag = TagFilterField(model)
| {
"content_hash": "eefa8442781a55ebc2a31e4151e560fc",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 116,
"avg_line_length": 30.03553299492386,
"alnum_prop": 0.6200777420990367,
"repo_name": "digitalocean/netbox",
"id": "2f386e8893a1287b56f224a5b374fa6112b9634e",
"size": "5917",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/virtualization/forms/filtersets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189339"
},
{
"name": "HTML",
"bytes": "570800"
},
{
"name": "JavaScript",
"bytes": "326125"
},
{
"name": "Python",
"bytes": "1815170"
},
{
"name": "Shell",
"bytes": "2786"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.25
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1StatefulSet(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared OpenAPI type.
    openapi_types = {
        'api_version': 'str',
        'kind': 'str',
        'metadata': 'V1ObjectMeta',
        'spec': 'V1StatefulSetSpec',
        'status': 'V1StatefulSetStatus'
    }
    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'api_version': 'apiVersion',
        'kind': 'kind',
        'metadata': 'metadata',
        'spec': 'spec',
        'status': 'status'
    }
    def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None):  # noqa: E501
        """V1StatefulSet - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._api_version = None
        self._kind = None
        self._metadata = None
        self._spec = None
        self._status = None
        self.discriminator = None
        # Only assign fields that were explicitly provided, so the property
        # setters run and unset fields stay None.
        if api_version is not None:
            self.api_version = api_version
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata
        if spec is not None:
            self.spec = spec
        if status is not None:
            self.status = status
    @property
    def api_version(self):
        """Gets the api_version of this V1StatefulSet.  # noqa: E501
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
        :return: The api_version of this V1StatefulSet.  # noqa: E501
        :rtype: str
        """
        return self._api_version
    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1StatefulSet.
        APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources  # noqa: E501
        :param api_version: The api_version of this V1StatefulSet.  # noqa: E501
        :type: str
        """
        self._api_version = api_version
    @property
    def kind(self):
        """Gets the kind of this V1StatefulSet.  # noqa: E501
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
        :return: The kind of this V1StatefulSet.  # noqa: E501
        :rtype: str
        """
        return self._kind
    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1StatefulSet.
        Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds  # noqa: E501
        :param kind: The kind of this V1StatefulSet.  # noqa: E501
        :type: str
        """
        self._kind = kind
    @property
    def metadata(self):
        """Gets the metadata of this V1StatefulSet.  # noqa: E501
        :return: The metadata of this V1StatefulSet.  # noqa: E501
        :rtype: V1ObjectMeta
        """
        return self._metadata
    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1StatefulSet.
        :param metadata: The metadata of this V1StatefulSet.  # noqa: E501
        :type: V1ObjectMeta
        """
        self._metadata = metadata
    @property
    def spec(self):
        """Gets the spec of this V1StatefulSet.  # noqa: E501
        :return: The spec of this V1StatefulSet.  # noqa: E501
        :rtype: V1StatefulSetSpec
        """
        return self._spec
    @spec.setter
    def spec(self, spec):
        """Sets the spec of this V1StatefulSet.
        :param spec: The spec of this V1StatefulSet.  # noqa: E501
        :type: V1StatefulSetSpec
        """
        self._spec = spec
    @property
    def status(self):
        """Gets the status of this V1StatefulSet.  # noqa: E501
        :return: The status of this V1StatefulSet.  # noqa: E501
        :rtype: V1StatefulSetStatus
        """
        return self._status
    @status.setter
    def status(self, status):
        """Sets the status of this V1StatefulSet.
        :param status: The status of this V1StatefulSet.  # noqa: E501
        :type: V1StatefulSetStatus
        """
        self._status = status
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively convert nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1StatefulSet):
            return False
        # Equality is structural: compare the full dict representations.
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, V1StatefulSet):
            return True
        return self.to_dict() != other.to_dict()
| {
"content_hash": "1f84bd06e3cb57ff873effc7025d7bab",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 312,
"avg_line_length": 31.898230088495577,
"alnum_prop": 0.6035511166597309,
"repo_name": "kubernetes-client/python",
"id": "1c7f957dfc5005233e681674283e3f80e13e632b",
"size": "7226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1_stateful_set.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "356"
},
{
"name": "Python",
"bytes": "11454299"
},
{
"name": "Shell",
"bytes": "43108"
}
],
"symlink_target": ""
} |
from abc import ABC, abstractmethod
import torch
# x is between 0 and 1
from hanlp_common.configurable import AutoConfigurable
def linear_growth_weight_scheduler(x):
    """Weight grows linearly with training progress ``x`` (0 to 1)."""
    return x
def linear_decay_weight_scheduler(x):
    """Weight decays linearly with training progress ``x`` (0 to 1)."""
    return 1 - x
def constant_temperature_scheduler(logits_S, logits_T, base_temperature):
    '''
    Remember to detach logits_S
    '''
    # Both logit tensors are ignored: the temperature never changes.
    return base_temperature
def flsw_temperature_scheduler_builder(beta, gamma, eps=1e-4, *args):
    '''
    adapted from arXiv:1911.07471
    '''
    def flsw_temperature_scheduler(logits_S, logits_T, base_temperature):
        # Per-sample temperature from student/teacher logit disagreement:
        # the less aligned the (unit-normalized) logits, the larger w.
        student = logits_S.detach()
        teacher = logits_T.detach()
        with torch.no_grad():
            student = student / (torch.norm(student, dim=-1, keepdim=True) + eps)
            teacher = teacher / (torch.norm(teacher, dim=-1, keepdim=True) + eps)
            cosine = (student * teacher).sum(dim=-1)
            w = (1 - cosine) ** gamma
            # Shift each sample's temperature around the batch mean.
            tau = base_temperature + (w.mean() - w) * beta
        return tau
    return flsw_temperature_scheduler
def cwsm_temperature_scheduler_builder(beta, *args):
    '''
    adapted from arXiv:1911.07471
    '''
    def cwsm_temperature_scheduler(logits_S, logits_T, base_temperature):
        # Per-sample temperature from student confidence: low max-probability
        # (uncertain samples) yields a larger w.  Teacher logits are unused.
        detached = logits_S.detach()
        with torch.no_grad():
            probs = torch.softmax(detached, dim=-1)
            confidence = probs.max(dim=-1)[0]
            w = 1 / (confidence + 1e-3)
            # Shift each sample's temperature around the batch mean.
            tau = base_temperature + (w.mean() - w) * beta
        return tau
    return cwsm_temperature_scheduler
class LinearTeacherAnnealingScheduler(object):
    """Tracks training progress as a fraction of the total step budget.

    ``float(scheduler)`` yields the completed fraction in ``[0, 1]``.
    """

    def __init__(self, num_training_steps: int) -> None:
        super().__init__()
        self._num_training_steps = num_training_steps
        self._current_training_steps = 0

    def step(self):
        """Record that one more training step has been taken."""
        self._current_training_steps += 1

    def __float__(self):
        # Completed fraction of the training schedule.
        return self._current_training_steps / self._num_training_steps
class TemperatureScheduler(ABC, AutoConfigurable):
    """Abstract distillation-temperature schedule.

    Subclasses implement :meth:`forward`, which maps student/teacher logits
    to a temperature (scalar or per-sample tensor).
    """

    def __init__(self, base_temperature) -> None:
        super().__init__()
        self.base_temperature = base_temperature

    def __call__(self, logits_S, logits_T):
        # Calling the scheduler simply delegates to forward().
        return self.forward(logits_S, logits_T)

    @abstractmethod
    def forward(self, logits_S, logits_T):
        raise NotImplementedError()

    @staticmethod
    def from_name(name):
        """Build a scheduler instance from its registered short name."""
        classes = {
            'constant': ConstantScheduler,
            'flsw': FlswScheduler,
            'cwsm': CwsmScheduler,
        }
        assert name in classes, f'Unsupported temperature scheduler {name}. Expect one from {list(classes.keys())}.'
        return classes[name]()
class FunctionalScheduler(TemperatureScheduler):
    """Adapter that wraps a plain scheduling function in the class API."""

    def __init__(self, scheduler_func, base_temperature) -> None:
        super().__init__(base_temperature)
        self._scheduler_func = scheduler_func

    def forward(self, logits_S, logits_T):
        # Delegate to the wrapped function, supplying the stored base temperature.
        func = self._scheduler_func
        return func(logits_S, logits_T, self.base_temperature)
class ConstantScheduler(TemperatureScheduler):
    """Schedule that always returns the configured base temperature."""

    def forward(self, logits_S, logits_T):
        # Logits are ignored; the temperature is fixed.
        return self.base_temperature
class FlswScheduler(FunctionalScheduler):
    """flsw temperature schedule (arXiv:1911.07471)."""

    def __init__(self, beta=1, gamma=1, eps=1e-4, base_temperature=8):
        super().__init__(flsw_temperature_scheduler_builder(beta, gamma, eps), base_temperature)
        # Hyper-parameters kept as attributes (AutoConfigurable serialization).
        self.beta = beta
        self.gamma = gamma
        self.eps = eps
class CwsmScheduler(FunctionalScheduler):
    """cwsm temperature schedule (arXiv:1911.07471)."""

    def __init__(self, beta=1, base_temperature=8):
        super().__init__(cwsm_temperature_scheduler_builder(beta), base_temperature)
        # Hyper-parameter kept as an attribute (AutoConfigurable serialization).
        self.beta = beta
| {
"content_hash": "5164a403226fe6a8a17c9acc55306de0",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 116,
"avg_line_length": 28.71311475409836,
"alnum_prop": 0.6257493576934057,
"repo_name": "hankcs/HanLP",
"id": "44c725f1adc20660176567841baa80899ee31d92",
"size": "3585",
"binary": false,
"copies": "1",
"ref": "refs/heads/doc-zh",
"path": "hanlp/components/distillation/schedulers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "40933"
},
{
"name": "Jupyter Notebook",
"bytes": "566269"
},
{
"name": "Python",
"bytes": "2196905"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django_bundles.conf import default_settings
class SettingsHelper(object):
    """Attribute proxy over Django settings with per-name fallbacks.

    Lookup order for any attribute:
    1. the user settings object (usually ``django.conf.settings``);
    2. the default settings module, but only for names listed in
       ``optional_defaults``;
    3. otherwise ``ImproperlyConfigured`` is raised (required setting).
    """

    def __init__(self, user_settings, default_settings, optional_defaults):
        self.user_settings = user_settings
        self.default_settings = default_settings
        self.optional_defaults = set(optional_defaults)

    def __getattr__(self, name):
        # Only called for names not found on the instance itself.
        if hasattr(self.user_settings, name):
            return getattr(self.user_settings, name)
        if name in self.optional_defaults:
            return getattr(self.default_settings, name)
        # Fix: call-form raise works on both Python 2 and 3
        # (was the Python-2-only ``raise ImproperlyConfigured, "..."``).
        raise ImproperlyConfigured("%s is a required setting for django_bundles" % name)
# Shared settings accessor for django_bundles: reads from the project's
# Django settings first and falls back to the bundled defaults for the
# names listed below; everything else is treated as required.
bundles_settings = SettingsHelper(settings, default_settings, [
    'USE_BUNDLES',
    'DEVELOPMENT_BUNDLES',
    'DEFAULT_PREPROCESSORS',
    'DEFAULT_POSTPROCESSORS',
    'BUNDLES_LINTING',
    'BUNDLES_LINT_SUCCESS_OK',
    'BUNDLES_SINGLE_FILES',
    'BUNDLES_TAG_HTML',
    'GLOBAL_PRECOMPILE_DISABLE',
])
| {
"content_hash": "23ae5f0b3e05455178dd7a2465999e4e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 88,
"avg_line_length": 30.352941176470587,
"alnum_prop": 0.6957364341085271,
"repo_name": "sdcooke/django_bundles",
"id": "ad23fdcd1592ab959fda7cb053ddadc7a1ed92ca",
"size": "1032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_bundles/conf/bundles_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56105"
}
],
"symlink_target": ""
} |
from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
#from tcga_encoder.data.pathway_data import Pathways
from tcga_encoder.data.hallmark_data import Pathways
from tcga_encoder.definitions.tcga import *
#from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
#from tcga_encoder.algorithms import *
import seaborn as sns
from sklearn.manifold import TSNE, locally_linear_embedding
from scipy import stats
from scipy.spatial.distance import pdist, squareform
from scipy.spatial.distance import squareform
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
import networkx as nx
import json
from networkx.readwrite import json_graph
size_per_unit=0.25
def process_all_sources( save_dir, source2w, prefix ):
sources = source2w.keys()
ws = source2w.values()
#pdb.set_trace()
shapes2use = ["circle","square","triangle-up"]
scores2use = [0,0.5,1.0]
colors2use = ["red","blue","green"]
counts = [len(w) for w in ws]
W = pd.concat(ws,0)
#W=W/np.sqrt( np.sum( np.square( W.values ),0 ))
#pdb.set_trace()
n_features = len(W)
shapes = []
scores = []
colors = []
for i in xrange(n_features):
if i < counts[0]:
shapes.append( shapes2use[0] )
scores.append( scores2use[0] )
colors.append( colors2use[0] )
elif i < counts[1]+counts[0]:
shapes.append( shapes2use[1] )
scores.append( scores2use[1] )
colors.append( colors2use[1] )
else:
shapes.append( shapes2use[2] )
scores.append( scores2use[2] )
colors.append( colors2use[2] )
shapes = np.array(shapes,dtype=str)
colors = np.array(colors,dtype=str)
scores = np.array(scores,dtype=float)
sizes = 10*np.ones(n_features)
w_corr = W.T.corr()
corr_v = w_corr.values
names = w_corr.columns
min_corr = 0.8
keep_ids = []
for i in xrange(n_features):
c = corr_v[i]
if sum( np.abs(c) > min_corr ) > 1:
keep_ids.append(i )
print "keeping %d of %d nodes"%(len(keep_ids),n_features)
keep_ids = np.array(keep_ids)
keep_names = names[keep_ids]
keep_shapes = shapes[keep_ids]
keep_sizes = sizes[keep_ids]
keep_scores = scores[keep_ids]
keep_colors = colors[keep_ids]
w_corr = w_corr.loc[ keep_names ][keep_names]
corr_v = w_corr.values
n_features = len(w_corr)
#pdb.set_trace()
#
tau = min_corr
G=nx.Graph()
i=0
nodes = []
links = []
nodes_ids=[]
node_ids = OrderedDict()
#flare = OrderedDict()
for i,c,name_i in zip( xrange( n_features ), corr_v, keep_names ):
for j,name_j in zip( xrange(n_features), keep_names ):
if j > i:
if np.abs( c[j] ) > tau:
if node_ids.has_key(name_i) is False:
nodes.append( {"id":name_i})
if node_ids.has_key(name_j) is False:
nodes.append( {"id":name_j})
links.append( {"source":i,"target":j,"w":c[j]} )
nodes_ids.append(i)
nodes_ids.append(j)
nodes_ids = np.unique( np.array(nodes_ids))
json_node = []
for i,name,size,score,shape,color in zip( xrange( n_features ), keep_names, keep_sizes, keep_scores, keep_shapes, keep_colors ):
# name = names[i]
# size = int(80*total_weights[i])
# score = 1
# type = "circle"
json_node.append( {"size":size,"score":score,"id":name,"type":shape})
G.add_node(name, color=color, size=size )
json.dump({"nodes":json_node,"links":links,"directed": False,
"multigraph": False,"graph": []}, open(save_dir+'/all_force%s3.json'%(prefix),'w'))
for link in links:
G.add_edge( keep_names[link["source"]], keep_names[link["source"]], weight = np.abs(link["source"]) )
from networkx.drawing.nx_agraph import graphviz_layout
layout=graphviz_layout
print "laying out graph"
pos=layout(G)
pp.figure(figsize=(45,45))
print "drawing graph"
nx.draw(G,pos,
with_labels=True, hold=False, alpha=0.25, font_size=12
)
# d = json_graph.node_link_data(G)
G.clear()
pp.savefig(save_dir + "/mwst%s.png"%(prefix), fmt='png',dpi=300)
def process_source( save_dir, source, w, percent_weights, prefix="" ):
    """Build a correlation graph for one data source's weight matrix.

    w is a features x units DataFrame.  Features whose weight profiles
    correlate (|r| > 0.5) become connected graph nodes; the result is
    written as a d3 force-layout JSON and a graphviz-laid-out PNG.
    """
    # Threshold on the |weight| distribution; currently computed but the
    # thresholding itself is disabled (see commented lines in history).
    sorted_flattened = np.sort( np.abs(w.values.flatten()) )
    n = len(sorted_flattened)
    threshold = sorted_flattened[ - int( float(n)*percent_weights) ]
    # Per-feature total absolute weight -> node size in the JSON output.
    total_weights = np.abs(w.values).sum(1)
    corr = w.T.corr()
    corr.sort_index(inplace=True)
    corr = corr[ corr.index.values ]
    corr_v = corr.values
    names = corr.columns
    n_source = len(names)
    # Figure sizes clamped to [12, 40]; size2 is currently unused.
    size1 = max( min( 40, int( w.values.shape[0]*size_per_unit ) ), 12 )
    size2 = max( min( 40, int( w.values.shape[0]*size_per_unit )), 12 )
    corr_v = corr.values
    names = corr.columns
    pp.figure(figsize=(45,45))
    tau = 0.5
    G=nx.Graph()
    i=0
    nodes = []
    links = []
    nodes_ids=[]
    node_ids = OrderedDict()
    # Upper triangle only: connect feature pairs with |r| above tau.
    for i in xrange( n_source ):
        x = corr_v[i]
        name_i = names[i]
        for j in xrange(n_source):
            if j > i:
                if np.abs( x[j] ) > tau:
                    name_j = names[j]
                    G.add_edge(name_i, name_j, weight = np.abs(x[j]) )
                    # NOTE(review): node_ids is never populated, so these
                    # checks always append; "nodes" may hold duplicates
                    # (it is not used in the JSON dump below).
                    if node_ids.has_key(name_i) is False:
                        nodes.append( {"id":name_i})
                    if node_ids.has_key(name_j) is False:
                        nodes.append( {"id":name_j})
                    links.append( {"source":i,"target":j} )
                    nodes_ids.append(i)
                    nodes_ids.append(j)
    nodes_ids = np.unique( np.array(nodes_ids))
    # One JSON node per feature, sized by its total absolute weight.
    json_node = []
    for i in xrange( n_source ):
        name = names[i]
        size = int(80*total_weights[i])
        score = 1
        type = "circle"
        json_node.append( {"size":size,"score":score,"id":name,"type":type})
    from networkx.drawing.nx_agraph import graphviz_layout
    layout=graphviz_layout
    pos=layout(G)
    nx.draw(G,pos,
        with_labels=True,
        node_size=20, hold=False, node_color='b', alpha=0.25, font_size=12
        )
    d = json_graph.node_link_data(G)
    json.dump({"nodes":json_node,"links":links,"directed": False,
        "multigraph": False,"graph": []}, open(save_dir+'/%s_force%s2.json'%(source,prefix),'w'))
    G.clear()
    pp.savefig(save_dir + "/%s_mwst%s.png"%(source,prefix), fmt='png',dpi=300)
# Developer note printed at import time -- presumably a reminder that only
# the combined-source path is exercised; confirm placement against history.
print " only doing one source now"
def join_weights( W_hidden2z, W_hidden ):
    """Compose per-source input->hidden weights with the hidden->z weights.

    W_hidden maps a source name to a (features x hidden) DataFrame;
    W_hidden2z is the (hidden x n_z) matrix.  Returns a dict of
    (features x n_z) DataFrames with columns z_0..z_{n_z-1}.

    Fix: ``dict.iteritems()`` is Python-2-only; ``items()`` yields the same
    pairs and also works on Python 3.
    """
    W = {}
    n_z = W_hidden2z.shape[1]
    columns = np.array( ["z_%d"%i for i in range(n_z)])
    for input_source, source_w in W_hidden.items():
        W[ input_source ] = pd.DataFrame( np.dot( source_w, W_hidden2z ), index = source_w.index, columns = columns )
    return W
def get_hidden2z_weights( model_store ):
    """Fetch the hidden-layer -> latent-space weight matrix from the model
    store, opening and closing the store around the read."""
    key = "%s/W/w%d" % ("rec_z_space", 0)  # i.e. "rec_z_space/W/w0"
    model_store.open()
    weights = model_store[key].values
    model_store.close()
    return weights
def get_hidden_weights( model_store, input_sources, data_store ):
    """Load the first-layer (input -> hidden) weight matrix for each source.

    Returns a dict mapping source name to a DataFrame whose rows are the
    source's feature names (genes / hsa ids) and whose columns are
    h_0..h_{k-1}.  Closes model_store before returning.
    """
    rna_genes = data_store["/RNA/FAIR"].columns
    meth_genes = data_store["/METH/FAIR"].columns
    mirna_hsas = data_store["/miRNA/FAIR"].columns
    post_fix = "_scaled"  # unused here
    idx=1  # unused here
    n_sources = len(input_sources)
    W = {}
    # Weight matrices are stored per source index: rec_hidden/W/w0, w1, ...
    for w_idx, input_source in zip( range(n_sources), input_sources ):
        w = model_store[ "rec_hidden" + "/W/w%d"%(w_idx)].values
        d,k = w.shape
        columns = np.array( ["h_%d"%i for i in range(k)])
        if input_source == "RNA":
            rows = rna_genes
            print input_source, w.shape, len(rows), len(columns)
            W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
        if input_source == "miRNA":
            rows = mirna_hsas
            print input_source, w.shape, len(rows), len(columns)
            W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
        if input_source == "METH":
            rows = meth_genes
            print input_source, w.shape, len(rows), len(columns)
            W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
        if input_source == "TISSUE":
            # NOTE(review): tissue_names is not defined in this function or
            # module scope shown here -- passing "TISSUE" would raise a
            # NameError; confirm where tissue_names should come from.
            rows = tissue_names
            print input_source, w.shape, len(rows), len(columns)
            W[ input_source ] = pd.DataFrame( w, index=rows, columns = columns )
    model_store.close()
    return W
def auc_standard_error( theta, nA, nN ):
# from: Hanley and McNeil (1982), The Meaning and Use of the Area under the ROC Curve
# theta: estimated AUC, can be 0.5 for a random test
# nA size of population A
# nN size of population N
Q1=theta/(2.0-theta); Q2=2*theta*theta/(1+theta)
SE = np.sqrt( (theta*(1-theta)+(nA-1)*(Q1-theta*theta) + (nN-1)*(Q2-theta*theta) )/(nA*nN) )
return SE
def auc_test( true_y, est_y ):
    """AUC of est_y against binary true_y, with a z-test versus AUC = 0.5.

    Returns (auc, standard error, z-value, p-value).  Degenerate labellings
    (all positive or all negative) return chance level with p = 1.
    """
    n = len(true_y)
    n_pos = true_y.sum()
    n_neg = n - n_pos
    if n_pos == 0 or n_pos == n:
        # AUC is undefined with a single class present.
        return 0.5, 0.0, 0.0, 1.0
    auc = roc_auc_score( true_y, est_y )
    difference = auc - 0.5
    # Population-size ordering mirrors which side of chance the AUC fell on.
    if difference < 0:
        se = auc_standard_error( auc, n_neg, n_pos )
        se_null = auc_standard_error( 0.5, n_neg, n_pos )
    else:
        se = auc_standard_error( 1 - auc, n_pos, n_neg )
        se_null = auc_standard_error( 0.5, n_pos, n_neg )
    se_combined = np.sqrt( se ** 2 + se_null ** 2 )
    z_value = np.abs( difference ) / se_combined
    p_value = 1.0 - stats.norm.cdf( np.abs( z_value ) )
    return auc, se, z_value, p_value
def find_keepers_over_groups( z, groups, name, nbr2keep, stats2use ):
    """Rank each group's features by association with the latent variable z.

    groups is a list of DataFrames (samples x features); stats2use picks the
    statistic per group: "pearson" (correlation) or "auc" (AUC - 0.5 of z
    against the binarized feature).  Returns, per group, a DataFrame with
    columns ["r", "p"] holding the nbr2keep most significant features.
    """
    correlations = []
    significances = []
    mx_inner = 0.0                      # max |r| seen; currently informational only
    norm_z = np.linalg.norm(z)          # currently unused below
    for X, stat in zip(groups, stats2use):
        n_cols = X.shape[1]
        pearsons = np.zeros(n_cols)
        pvalues = np.zeros(n_cols)
        for x_idx, x in enumerate(X.values.T):
            if stat == "pearson":
                pearsons[x_idx], pvalues[x_idx] = stats.pearsonr(z, x)
            elif stat == "auc":
                true_y = (x > 0).astype(int)
                auc, se, zvalue, pvalue = auc_test(true_y, z)
                pearsons[x_idx] = auc - 0.5
                pvalues[x_idx] = pvalue
        inner = pd.Series(pearsons, index=X.columns, name=name)
        p_inner = pd.Series(pvalues, index=X.columns, name=name)
        correlations.append(inner)
        significances.append(p_inner)
        this_mx = np.max(np.abs(inner))
        if this_mx > mx_inner:
            mx_inner = this_mx
    all_keepers = []
    for inner, p_inner in zip(correlations, significances):
        # Most significant first (smallest p-value).
        order = np.argsort(p_inner.values)
        ranked = pd.DataFrame(
            np.vstack((inner.values[order], p_inner.values[order])).T,
            index=inner.index[order],
            columns=["r", "p"],
        )
        all_keepers.append(ranked[:nbr2keep])
    return all_keepers
def find_keepers(z, X, name, nbr2keep):
    """Keep the nbr2keep features of X with the largest |projection| onto z.

    Projections are normalized by the maximum absolute value, so the
    returned Series (sorted ascending by signed value) lies in [-1, 1].
    """
    projection = pd.Series(np.dot(z, X), index=X.columns, name=name)
    projection.sort_values(inplace=True)
    projection = projection / np.max(np.abs(projection))
    strongest_first = np.argsort(-np.abs(projection.values))
    ranked = pd.Series(
        projection.values[strongest_first],
        index=projection.index[strongest_first],
        name=name,
    )
    return ranked[:nbr2keep].sort_values()
def main( data_location, results_location ):
    """Load the data / VAE-fill / VAE-model HDF stores and cluster weights.

    Reads the trained model's weight matrices, composes them into
    feature -> z weights, and writes correlation-graph visualizations into
    <results>/weight_clustering.
    """
    pathway_info = Pathways()  # constructed but unused below
    data_path = os.path.join( HOME_DIR ,data_location )
    results_path = os.path.join( HOME_DIR, results_location )
    data_filename = os.path.join( data_path, "data.h5")
    fill_filename = os.path.join( results_path, "full_vae_fill.h5" )
    model_filename = os.path.join( results_path, "full_vae_model.h5" )
    save_dir = os.path.join( results_path, "weight_clustering" )
    check_and_mkdir(save_dir)
    z_dir = os.path.join( save_dir, "z_pics" )
    check_and_mkdir(z_dir)
    h_dir = os.path.join( save_dir, "h_pics" )
    check_and_mkdir(h_dir)
    print "HOME_DIR: ", HOME_DIR
    print "data_filename: ", data_filename
    print "fill_filename: ", fill_filename
    print "LOADING stores"
    data_store = pd.HDFStore( data_filename, "r" )
    fill_store = pd.HDFStore( fill_filename, "r" )
    model_store = pd.HDFStore( model_filename, "r" )
    # Latent means for train/val cohorts (loaded but unused below).
    Z_train = fill_store["/Z/TRAIN/Z/mu"]
    Z_val = fill_store["/Z/VAL/Z/mu"]
    # Source order must match the w0/w1/w2 matrices stored in the model.
    input_sources = ["RNA","miRNA","METH"]
    W_hidden = get_hidden_weights( model_store, input_sources, data_store )
    W_hidden2z = get_hidden2z_weights( model_store )
    size_per_unit = 0.25  # shadows the module-level constant
    weighted_z = join_weights( W_hidden2z, W_hidden )
    # Patients observed on all four platforms.
    barcodes = data_store["/CLINICAL/observed"][ data_store["/CLINICAL/observed"][["RNA","miRNA","METH","DNA"]].sum(1)==4 ].index.values
    tissues = data_store["/CLINICAL/TISSUE"].loc[barcodes]
    tissue_names = tissues.columns
    tissue_idx = np.argmax( tissues.values, 1 )
    n_tissues = len(tissue_names)
    n_h = W_hidden2z.shape[0]
    print "+++++++++++++++++++++++++++"
    print " find weights that are significant together, not"
    percent_weights = 0.05  # unused since per-source processing is disabled
    # Graph the composed feature->z weights and the raw hidden weights.
    process_all_sources( save_dir, weighted_z, prefix="_all_Z" )
    process_all_sources( save_dir, W_hidden, prefix="_all" )
    pp.close('all')
if __name__ == "__main__":
data_location = sys.argv[1]
results_location = sys.argv[2]
main( data_location, results_location ) | {
"content_hash": "cb401fe13b161b4203b16f14aa0efb52",
"timestamp": "",
"source": "github",
"line_count": 497,
"max_line_length": 140,
"avg_line_length": 31.52112676056338,
"alnum_prop": 0.6123452061789864,
"repo_name": "tedmeeds/tcga_encoder",
"id": "0f5fdba6b57eb5aa017ee96bbbc7f660c18da7fb",
"size": "15666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tcga_encoder/analyses/old/weight_clustering2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2115"
},
{
"name": "Python",
"bytes": "2472857"
},
{
"name": "Shell",
"bytes": "1714"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard module metadata consumed by Ansible's documentation and CI tooling.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_datastore
short_description: Manage a datastore on ESXi host
description:
- This module can be used to mount/umount datastore on ESXi host.
- This module only supports NFS (NFS v3 or NFS v4.1) and VMFS datastores.
- For VMFS datastore, available device must already be connected on ESXi host.
- All parameters and VMware object names are case sensitive.
version_added: '2.5'
author:
- Ludovic Rivallain (@lrivallain) <ludovic.rivallain@gmail.com>
- Christian Kotte (@ckotte) <christian.kotte@gmx.de>
notes:
- Tested on vSphere 6.0 and 6.5
- NFS v4.1 tested on vSphere 6.5
- Kerberos authentication with NFS v4.1 isn't implemented
requirements:
- python >= 2.6
- PyVmomi
options:
datacenter_name:
description:
- Name of the datacenter to add the datastore.
- The datacenter isn't used by the API to create a datastore.
- Will be removed in 2.11.
required: false
datastore_name:
description:
- Name of the datastore to add/remove.
required: true
datastore_type:
description:
- Type of the datastore to configure (nfs/nfs41/vmfs).
required: true
choices: [ 'nfs', 'nfs41', 'vmfs' ]
nfs_server:
description:
- NFS host serving nfs datastore.
- Required if datastore type is set to C(nfs)/C(nfs41) and state is set to C(present), else unused.
- Two or more servers can be defined if datastore type is set to C(nfs41)
nfs_path:
description:
- Resource path on NFS host.
- Required if datastore type is set to C(nfs)/C(nfs41) and state is set to C(present), else unused.
nfs_ro:
description:
- ReadOnly or ReadWrite mount.
- Unused if datastore type is not set to C(nfs)/C(nfs41) and state is not set to C(present).
default: False
type: bool
vmfs_device_name:
description:
- Name of the device to be used as VMFS datastore.
- Required for VMFS datastore type and state is set to C(present), else unused.
vmfs_version:
description:
- VMFS version to use for datastore creation.
- Unused if datastore type is not set to C(vmfs) and state is not set to C(present).
esxi_hostname:
description:
- ESXi hostname to manage the datastore.
required: true
state:
description:
- "present: Mount datastore on host if datastore is absent else do nothing."
- "absent: Umount datastore if datastore is present else do nothing."
default: present
choices: [ present, absent ]
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Mount VMFS datastores to ESXi
vmware_host_datastore:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datastore_name: '{{ item.name }}'
datastore_type: '{{ item.type }}'
vmfs_device_name: 'naa.XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
vmfs_version: 6
esxi_hostname: '{{ inventory_hostname }}'
state: present
delegate_to: localhost
- name: Mount NFS datastores to ESXi
vmware_host_datastore:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datastore_name: '{{ item.name }}'
datastore_type: '{{ item.type }}'
nfs_server: '{{ item.server }}'
nfs_path: '{{ item.path }}'
nfs_ro: no
esxi_hostname: '{{ inventory_hostname }}'
state: present
delegate_to: localhost
loop:
- { 'name': 'NasDS_vol01', 'server': 'nas01', 'path': '/mnt/vol01', 'type': 'nfs'}
- { 'name': 'NasDS_vol02', 'server': 'nas01', 'path': '/mnt/vol02', 'type': 'nfs'}
- name: Mount NFS v4.1 datastores to ESXi
vmware_host_datastore:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datastore_name: '{{ item.name }}'
datastore_type: '{{ item.type }}'
nfs_server: '{{ item.server }}'
nfs_path: '{{ item.path }}'
nfs_ro: no
esxi_hostname: '{{ inventory_hostname }}'
state: present
delegate_to: localhost
loop:
- { 'name': 'NasDS_vol03', 'server': 'nas01,nas02', 'path': '/mnt/vol01', 'type': 'nfs41'}
- { 'name': 'NasDS_vol04', 'server': 'nas01,nas02', 'path': '/mnt/vol02', 'type': 'nfs41'}
- name: Remove/Umount Datastores from ESXi
vmware_host_datastore:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datastore_name: NasDS_vol01
esxi_hostname: '{{ inventory_hostname }}'
state: absent
delegate_to: localhost
'''
RETURN = r'''
'''
try:
from pyVmomi import vim, vmodl
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi, find_datastore_by_name
from ansible.module_utils._text import to_native
class VMwareHostDatastore(PyVmomi):
    """Mount and unmount NFS, NFS v4.1 and VMFS datastores on an ESXi host.

    Wraps the vSphere host storage/datastore system APIs. All outcomes are
    reported through the Ansible module's exit_json/fail_json interface.
    """

    def __init__(self, module):
        super(VMwareHostDatastore, self).__init__(module)
        # NOTE: The below parameter is deprecated starting from Ansible v2.11
        self.datacenter_name = module.params['datacenter_name']
        self.datastore_name = module.params['datastore_name']
        self.datastore_type = module.params['datastore_type']
        self.nfs_server = module.params['nfs_server']
        self.nfs_path = module.params['nfs_path']
        self.nfs_ro = module.params['nfs_ro']
        self.vmfs_device_name = module.params['vmfs_device_name']
        self.vmfs_version = module.params['vmfs_version']
        self.esxi_hostname = module.params['esxi_hostname']
        self.state = module.params['state']
        self.esxi = self.find_hostsystem_by_name(self.esxi_hostname)
        if self.esxi is None:
            self.module.fail_json(msg="Failed to find ESXi hostname %s " % self.esxi_hostname)

    def process_state(self):
        """Dispatch on (desired state, current state) to the right action."""
        ds_states = {
            'absent': {
                'present': self.umount_datastore_host,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'present': self.state_exit_unchanged,
                'absent': self.mount_datastore_host,
            }
        }
        try:
            ds_states[self.state][self.check_datastore_host_state()]()
        except (vmodl.RuntimeFault, vmodl.MethodFault) as vmodl_fault:
            self.module.fail_json(msg=to_native(vmodl_fault.msg))
        except Exception as e:
            self.module.fail_json(msg=to_native(e))

    def state_exit_unchanged(self):
        """Report that the datastore is already in the desired state."""
        self.module.exit_json(changed=False)

    def check_datastore_host_state(self):
        """Return 'present' if a mounted volume matches datastore_name, else 'absent'."""
        storage_system = self.esxi.configManager.storageSystem
        host_file_sys_vol_mount_info = storage_system.fileSystemVolumeInfo.mountInfo
        for host_mount_info in host_file_sys_vol_mount_info:
            if host_mount_info.volume.name == self.datastore_name:
                return 'present'
        return 'absent'

    def umount_datastore_host(self):
        """Remove (unmount) the datastore from the ESXi host."""
        ds = find_datastore_by_name(self.content, self.datastore_name)
        if not ds:
            self.module.fail_json(msg="No datastore found with name %s" % self.datastore_name)
        if self.module.check_mode is False:
            error_message_umount = "Cannot umount datastore %s from host %s" % (self.datastore_name, self.esxi_hostname)
            try:
                self.esxi.configManager.datastoreSystem.RemoveDatastore(ds)
            except (vim.fault.NotFound, vim.fault.HostConfigFault, vim.fault.ResourceInUse) as fault:
                self.module.fail_json(msg="%s: %s" % (error_message_umount, to_native(fault.msg)))
            except Exception as e:
                self.module.fail_json(msg="%s: %s" % (error_message_umount, to_native(e)))
        # In check mode we still report changed=True: a real run would unmount.
        self.module.exit_json(changed=True, result="Datastore %s on host %s" % (self.datastore_name, self.esxi_hostname))

    def mount_datastore_host(self):
        """Mount the datastore according to the configured datastore_type."""
        if self.datastore_type == 'nfs' or self.datastore_type == 'nfs41':
            self.mount_nfs_datastore_host()
        if self.datastore_type == 'vmfs':
            self.mount_vmfs_datastore_host()

    def mount_nfs_datastore_host(self):
        """Mount an NFS v3 or NFS v4.1 datastore on the host."""
        if self.module.check_mode is False:
            mnt_specs = vim.host.NasVolume.Specification()
            # NFS v3
            if self.datastore_type == 'nfs':
                mnt_specs.type = "NFS"
                mnt_specs.remoteHost = self.nfs_server
            # NFS v4.1
            if self.datastore_type == 'nfs41':
                mnt_specs.type = "NFS41"
                # remoteHost needs to be set to a non-empty string, but the value is not used
                mnt_specs.remoteHost = "something"
                # BUG FIX: nfs_server may hold several comma-separated hosts
                # (see the module examples, e.g. 'nas01,nas02'); the API expects
                # one list entry per host, not the raw string as one element.
                mnt_specs.remoteHostNames = [host.strip() for host in self.nfs_server.split(',')]
            mnt_specs.remotePath = self.nfs_path
            mnt_specs.localPath = self.datastore_name
            if self.nfs_ro:
                mnt_specs.accessMode = "readOnly"
            else:
                mnt_specs.accessMode = "readWrite"
            error_message_mount = "Cannot mount datastore %s on host %s" % (self.datastore_name, self.esxi_hostname)
            try:
                ds = self.esxi.configManager.datastoreSystem.CreateNasDatastore(mnt_specs)
                if not ds:
                    self.module.fail_json(msg=error_message_mount)
            except (vim.fault.NotFound, vim.fault.DuplicateName,
                    vim.fault.AlreadyExists, vim.fault.HostConfigFault,
                    vmodl.fault.InvalidArgument, vim.fault.NoVirtualNic,
                    vim.fault.NoGateway) as fault:
                self.module.fail_json(msg="%s: %s" % (error_message_mount, to_native(fault.msg)))
            except Exception as e:
                self.module.fail_json(msg="%s : %s" % (error_message_mount, to_native(e)))
        self.module.exit_json(changed=True, result="Datastore %s on host %s" % (self.datastore_name, self.esxi_hostname))

    def mount_vmfs_datastore_host(self):
        """Create and mount a VMFS datastore on the configured backing device."""
        if self.module.check_mode is False:
            ds_path = "/vmfs/devices/disks/" + str(self.vmfs_device_name)
            host_ds_system = self.esxi.configManager.datastoreSystem
            ds_system = vim.host.DatastoreSystem
            error_message_mount = "Cannot mount datastore %s on host %s" % (self.datastore_name, self.esxi_hostname)
            try:
                vmfs_ds_options = ds_system.QueryVmfsDatastoreCreateOptions(host_ds_system,
                                                                            ds_path,
                                                                            self.vmfs_version)
                vmfs_ds_options[0].spec.vmfs.volumeName = self.datastore_name
                ds = ds_system.CreateVmfsDatastore(host_ds_system,
                                                   vmfs_ds_options[0].spec)
            except (vim.fault.NotFound, vim.fault.DuplicateName,
                    vim.fault.HostConfigFault, vmodl.fault.InvalidArgument) as fault:
                self.module.fail_json(msg="%s : %s" % (error_message_mount, to_native(fault.msg)))
            except Exception as e:
                self.module.fail_json(msg="%s : %s" % (error_message_mount, to_native(e)))
        self.module.exit_json(changed=True, result="Datastore %s on host %s" % (self.datastore_name, self.esxi_hostname))
def main():
    """Build the argument spec, validate conditional parameters, run the module."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        datacenter_name=dict(type='str', required=False, removed_in_version=2.11),
        datastore_name=dict(type='str', required=True),
        datastore_type=dict(type='str', choices=['nfs', 'nfs41', 'vmfs']),
        nfs_server=dict(type='str'),
        nfs_path=dict(type='str'),
        nfs_ro=dict(type='bool', default=False),
        vmfs_device_name=dict(type='str'),
        vmfs_version=dict(type='int'),
        esxi_hostname=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['absent', 'present'])
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_together=[
            ['nfs_server', 'nfs_path']
        ],
    )
    # These dependencies are too complex for required_if: each datastore type
    # needs its own backing parameter, but only when state is 'present'.
    if module.params['state'] == 'present':
        type_requirements = (
            ('nfs', 'nfs_server'),
            ('nfs41', 'nfs_server'),
            ('vmfs', 'vmfs_device_name'),
        )
        for ds_type, required_param in type_requirements:
            if module.params['datastore_type'] == ds_type and not module.params[required_param]:
                module.fail_json(msg="Missing %s with datastore_type = %s" % (required_param, ds_type))
    vmware_host_datastore = VMwareHostDatastore(module)
    vmware_host_datastore.process_state()
# Entry point when Ansible executes this file as a module.
if __name__ == '__main__':
    main()
| {
"content_hash": "31cc15315d80bc6972716805dd2f79a8",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 121,
"avg_line_length": 41.33228840125392,
"alnum_prop": 0.6166856276071293,
"repo_name": "SergeyCherepanov/ansible",
"id": "1d15985e8b75f5fd134b3abd41a585b39ea5a483",
"size": "13361",
"binary": false,
"copies": "25",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/cloud/vmware/vmware_host_datastore.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
from . import gates
from .util import Pos, as_bin, from_bin
from .connectable import Connectable
from .connectable_registry import ConnectableRegistry
from .exceptions import NoSuchComponentType
@ConnectableRegistry.register
class HeaderConnectable(Connectable):
    """An n-bit header whose pins act as both inputs and outputs."""

    ttype = 'header'

    def __init__(self, name, args):
        # The first positional argument is the bit width; default is 8 bits.
        self.bits = int(args[0]) if args else 8
        pin_names = [self.get_pin_name(index) for index in range(self.bits)]
        self.valid_inputs = self.valid_outputs = pin_names
        super().__init__(name, args)

    @staticmethod
    def get_pin_name(i):
        return 'pin{}'.format(i)

    def get_pin(self, i):
        """Read the current state of pin *i*."""
        return self.state[self.get_pin_name(i)]

    def set_pin(self, i, state):
        """Drive pin *i* to 0 or 1."""
        assert state in {0, 1}
        self.set_plug(self.get_pin_name(i), state)

    def get_num(self):
        """Return the header value as an integer (outputs reversed for from_bin)."""
        return from_bin(self.get_outputs()[::-1])

    def set_num(self, num):
        """Set all pins from the binary expansion of *num*."""
        for position, bit in enumerate(as_bin(num)):
            self.set_pin(position, bit)
class HasDynamicStateMixin:
    """Mixin for gates whose output plug 'o' is derived from their inputs.

    Subclasses implement calc_state(); whenever setting an input plug changes
    the derived value, the new value is propagated to the 'o' plug.
    """

    def calc_state(self):
        raise NotImplementedError(self)

    def set_plug(self, plug, state):
        assert plug in self.valid_inputs
        before = self.calc_state()
        super().set_plug(plug, state)
        after = self.calc_state()
        assert isinstance(before, int) and isinstance(after, int)
        if before == after:
            return
        # Input change flipped the derived value: push it to the output plug.
        super().set_plug('o', after)
@ConnectableRegistry.register
class AndConnectable(HasDynamicStateMixin, Connectable):
    """Two-input AND gate."""

    ttype = 'and'
    valid_inputs = ['a', 'b']
    valid_outputs = ['o']

    def calc_state(self):
        left, right = self.state['a'], self.state['b']
        return int(left and right)

    def render_state(self):
        return '({a} & {b} = {o})'.format_map(self.state)

    def live(self):
        return self
@ConnectableRegistry.register
class XORConnectable(HasDynamicStateMixin, Connectable):
    """Two-input XOR gate, backed by the gates.xor helper."""

    ttype = 'xor'
    valid_inputs = ['a', 'b']
    valid_outputs = ['o']

    def calc_state(self):
        left, right = self.state['a'], self.state['b']
        return int(gates.xor(left, right))

    def live(self):
        return self
@ConnectableRegistry.register
class OrConnectable(HasDynamicStateMixin, Connectable):
    """Two-input OR gate."""

    ttype = 'or'
    valid_inputs = ['a', 'b']
    valid_outputs = ['o']

    def calc_state(self):
        left, right = self.state['a'], self.state['b']
        return int(left or right)

    def live(self):
        return self
@ConnectableRegistry.register
class NotConnectable(HasDynamicStateMixin, Connectable):
    """Single-input inverter."""

    ttype = 'not'
    valid_inputs = ['a']
    valid_outputs = ['o']

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        # Tracks whether the initial output has already been driven by live().
        self.lived = False

    def calc_state(self):
        return int(not self.state['a'])

    def render_state(self):
        return '(~{a} = {o})'.format_map(self.state)

    def live(self):
        if self.lived:
            return self
        self.lived = True
        # Bypass the dynamic-state mixin and push the initial output directly.
        Connectable.set_plug(self, 'o', self.calc_state())
        return self
@ConnectableRegistry.register
class NandConnectable(HasDynamicStateMixin, Connectable):
    """Two-input NAND gate."""

    ttype = 'nand'
    valid_inputs = ['a', 'b']
    valid_outputs = ['o']

    def calc_state(self):
        both_high = self.state['a'] and self.state['b']
        return int(not both_high)
class ComponentDeclaration():
    """A parsed component declaration: name, type, source position and args."""

    def __init__(self, name, ttype, pos, args):
        self.name = name
        self.ttype = ttype
        self.pos = pos
        self.args = args
        # Sanity-check the parser output after assignment.
        assert isinstance(name, str)
        assert isinstance(ttype, str)
        assert isinstance(pos, Pos)
        assert isinstance(args, list)

    def __repr__(self):
        return (
            '<ComponentDeclaration[{0}][{1}] on line {2}, column {3}>'
            .format(self.name, self.ttype, self.pos.line, self.pos.column)
        )

    def resolve_implementation(self):
        """Look up this declaration's type and build the concrete component."""
        registry = ConnectableRegistry.instance().registry
        if self.ttype not in registry:
            raise NoSuchComponentType(self.ttype, self.pos)
        implementation = registry[self.ttype]
        return implementation.build(self.name, self.args)
class CustomComponent():
    """A user-defined component template with named input and output plugs."""

    def __init__(self, name, inputs, outputs, contents):
        # A custom component's type is simply its name.
        self.ttype = self.name = name
        self.inputs = inputs
        self.outputs = outputs
        self.contents = contents

    def __repr__(self):
        return '<CustomComponent[{}] inputs=[{}] outputs=[{}]>'.format(
            self.name, ','.join(self.inputs), ','.join(self.outputs)
        )

    def build(self, name, args):
        """Instantiate this template as a live component implementation."""
        return CustomComponentImplementation(name, self, args)
class CustomComponentImplementation(Connectable):
    """A live instance of a CustomComponent template, with its own subgraph."""

    def __init__(self, name, reference, args):
        # NOTE(review): reference must be assigned before super().__init__ —
        # the valid_inputs/valid_outputs properties below read it.
        self.reference = reference
        super().__init__(name, args)
        # avoid import loop
        from .graph import build_graph
        declarations = self.reference.graph['componentdeclaration']
        connectors = self.reference.graph['connector']
        self.graph = build_graph(declarations, connectors, {'self': self})
        assert self.reference.inputs != self.reference.outputs
        self.lived = False

    def render_state(self):
        def render(keys):
            return ' '.join(
                '{}={}'.format(key, self.state[key]) for key in keys
            )
        return '({} = {})'.format(
            render(self.valid_inputs), render(self.valid_outputs)
        )

    @property
    def ttype(self):
        return self.reference.ttype

    @property
    def valid_inputs(self):
        return self.reference.inputs

    @property
    def valid_outputs(self):
        return self.reference.outputs

    def __repr__(self):
        # splice the ttype in
        base = super().__repr__()
        head, tail = base.split(self.name)
        return '{}{}][{}{}'.format(head, self.name, self.ttype, tail)

    def live(self):
        # Idempotent: wake the internal subgraph exactly once.
        if self.lived:
            return self
        self.lived = True
        assert self.graph
        for member in self.graph.values():
            member.live()
        return self
| {
"content_hash": "91dc25b18d0b3858e0d7c6ef863a6d39",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 77,
"avg_line_length": 25.48085106382979,
"alnum_prop": 0.5818303273213092,
"repo_name": "Mause/circuitry",
"id": "715e58ef89791cfd7b75a0e5726d7672473058e1",
"size": "5988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "circuitry/connectable_impls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29476"
}
],
"symlink_target": ""
} |
from setuptools import setup, Extension
import numpy as np
import platform
# Package version string used in the setup() metadata below.
version = '0.1.1'

# No native extensions are compiled for this package.
ext_modules = []

# NOTE(review): `numpy`, `platform` and `Extension` are imported above but
# unused here; ext_modules stays empty.
setup (name = 'droneapi',
       zip_safe=True,
       version = version,
       description = 'Python language bindings for the DroneApi',
       long_description = '''Python language bindings for the DroneApi (includes the droneapi MAVProxy module)''',
       url = 'https://github.com/diydrones/droneapi-python',
       author = '3D Robotics',
       install_requires = [ 'pymavlink',
                            'MAVProxy >= 1.3.1' ],
       author_email = 'kevinh@geeksville.com',
       classifiers=['Development Status :: 4 - Beta',
                    'Environment :: Console',
                    'Intended Audience :: Science/Research',
                    'License :: OSI Approved :: Apache Software License',
                    'Operating System :: OS Independent',
                    'Programming Language :: Python :: 2.7',
                    'Topic :: Scientific/Engineering'
                    ],
       license='apache',
       packages = ['droneapi', 'droneapi.module', 'droneapi.lib' ],
       # doesn't work: package_data={'droneapi': ['example/*']},
       ext_modules = ext_modules)
| {
"content_hash": "cbb3b8799ff26d28101cfb1191a87b6d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 114,
"avg_line_length": 40.733333333333334,
"alnum_prop": 0.5638297872340425,
"repo_name": "mrpollo/droneapi-python",
"id": "3910c9c9efe20030ca5b2d3446ede36938f25e2c",
"size": "1222",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from .cloudwatch_event import *
| {
"content_hash": "8458e22cf1c08ec752fb3608ac0c34e8",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 32,
"alnum_prop": 0.78125,
"repo_name": "gogoair/foremast",
"id": "9ac45dce626a4677a53a70a8667ad15a9cc28f5c",
"size": "660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/foremast/awslambda/cloudwatch_event/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7614"
},
{
"name": "Python",
"bytes": "484364"
},
{
"name": "Shell",
"bytes": "180"
}
],
"symlink_target": ""
} |
import random
import opties
def main():
    """Print a random entry from the option list defined in opties.py (Python 2)."""
    o = opties.list
    print random.choice(o)
if __name__ == '__main__':
main() | {
"content_hash": "8702c40af1c6f80995fd3b3e771ea9bf",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 26,
"avg_line_length": 9.384615384615385,
"alnum_prop": 0.6065573770491803,
"repo_name": "ArtezGDA/text-IO",
"id": "7a97ce16c82d74dcdce2d079e7a1be4dc8cbe857",
"size": "122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Nikki_L/TextlO/kopofmunt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "312828"
},
{
"name": "JavaScript",
"bytes": "6634"
},
{
"name": "Python",
"bytes": "2718648"
},
{
"name": "Ruby",
"bytes": "846066"
},
{
"name": "Shell",
"bytes": "613"
}
],
"symlink_target": ""
} |
from collections import namedtuple
import os
import posixpath
import errno
import math
try:
import urlparse as url_parser
except ImportError:
import urllib.parse as url_parser
from m3u8 import parser
class M3U8(object):
    '''
    Represents a single M3U8 playlist. Should be instantiated with
    the content as string.

    Parameters:

     `content`
       the m3u8 content as string

     `base_path`
       all urls (key and segments url) will be updated with this base_path,
       ex.:
           base_path = "http://videoserver.com/hls"

           /foo/bar/key.bin           -->  http://videoserver.com/hls/key.bin
           http://vid.com/segment1.ts -->  http://videoserver.com/hls/segment1.ts

       can be passed as parameter or setted as an attribute to ``M3U8`` object.

     `base_uri`
       uri the playlist comes from. it is propagated to SegmentList and Key
       ex.: http://example.com/path/to

    Attributes:

     `key`
       it's a `Key` object, the EXT-X-KEY from m3u8. Or None

     `segments`
       a `SegmentList` object, represents the list of `Segment`s from this playlist

     `is_variant`
       Returns true if this M3U8 is a variant playlist, with links to
       other M3U8s with different bitrates.
       If true, `playlists` is a list of the playlists available,
       and `iframe_playlists` is a list of the i-frame playlists available.

     `is_endlist`
       Returns true if EXT-X-ENDLIST tag present in M3U8.
       http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.8

     `playlists`
       If this is a variant playlist (`is_variant` is True), returns a list of
       Playlist objects

     `iframe_playlists`
       If this is a variant playlist (`is_variant` is True), returns a list of
       IFramePlaylist objects

     `playlist_type`
       A lower-case string representing the type of the playlist, which can be
       one of VOD (video on demand) or EVENT.

     `media`
       If this is a variant playlist (`is_variant` is True), returns a list of
       Media objects

     `target_duration`
       Returns the EXT-X-TARGETDURATION as an integer
       http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.2

     `media_sequence`
       Returns the EXT-X-MEDIA-SEQUENCE as an integer
       http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.3

     `program_date_time`
       Returns the EXT-X-PROGRAM-DATE-TIME as a string
       http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.5

     `version`
       Return the EXT-X-VERSION as is

     `allow_cache`
       Return the EXT-X-ALLOW-CACHE as is

     `files`
       Returns an iterable with all files from playlist, in order. This includes
       segments and key uri, if present.

     `base_uri`
       It is a property (getter and setter) used by
       SegmentList and Key to have absolute URIs.

     `is_i_frames_only`
       Returns true if EXT-X-I-FRAMES-ONLY tag present in M3U8.
       http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.12

     `is_independent_segments`
       Returns true if EXT-X-INDEPENDENT-SEGMENTS tag present in M3U8.
       https://tools.ietf.org/html/draft-pantos-http-live-streaming-13#section-3.4.16
    '''

    # (object attribute, parser data key) pairs copied verbatim onto the
    # instance by _initialize_attributes().
    simple_attributes = (
        # obj attribute              # parser attribute
        ('is_variant',               'is_variant'),
        ('is_endlist',               'is_endlist'),
        ('is_i_frames_only',         'is_i_frames_only'),
        ('target_duration',          'targetduration'),
        ('media_sequence',           'media_sequence'),
        ('program_date_time',        'program_date_time'),
        ('is_independent_segments',  'is_independent_segments'),
        ('version',                  'version'),
        ('allow_cache',              'allow_cache'),
        ('playlist_type',            'playlist_type')
    )

    def __init__(self, content=None, base_path=None, base_uri=None, strict=False):
        # Parse eagerly; an empty dict stands in for "no content".
        if content is not None:
            self.data = parser.parse(content, strict)
        else:
            self.data = {}
        self._base_uri = base_uri
        if self._base_uri:
            # Normalize so relative URIs can be appended directly.
            if not self._base_uri.endswith('/'):
                self._base_uri += '/'
        self._initialize_attributes()
        # Assigning base_path triggers _update_base_path via the setter.
        self.base_path = base_path

    def _initialize_attributes(self):
        """Build typed objects (Key, Segment, Media, Playlist...) from parsed data."""
        self.key = Key(base_uri=self.base_uri, **self.data['key']) if 'key' in self.data else None
        self.segments = SegmentList([ Segment(base_uri=self.base_uri, **params)
                                      for params in self.data.get('segments', []) ])
        # Copy the scalar attributes straight from the parser output.
        for attr, param in self.simple_attributes:
            setattr(self, attr, self.data.get(param))
        # `files` lists every URI referenced by the playlist (key first).
        self.files = []
        if self.key:
            self.files.append(self.key.uri)
        self.files.extend(self.segments.uri)
        self.media = MediaList([ Media(base_uri=self.base_uri,
                                       **media)
                                 for media in self.data.get('media', []) ])
        self.playlists = PlaylistList([ Playlist(base_uri=self.base_uri,
                                                 media=self.media,
                                                 **playlist)
                                        for playlist in self.data.get('playlists', []) ])
        self.iframe_playlists = PlaylistList()
        for ifr_pl in self.data.get('iframe_playlists', []):
            self.iframe_playlists.append(
                IFramePlaylist(base_uri=self.base_uri,
                               uri=ifr_pl['uri'],
                               iframe_stream_info=ifr_pl['iframe_stream_info'])
            )

    def __unicode__(self):
        # Python 2 text conversion delegates to dumps().
        return self.dumps()

    @property
    def base_uri(self):
        return self._base_uri

    @base_uri.setter
    def base_uri(self, new_base_uri):
        # Propagate to every child collection so their URIs stay absolute.
        self._base_uri = new_base_uri
        self.media.base_uri = new_base_uri
        self.playlists.base_uri = new_base_uri
        self.segments.base_uri = new_base_uri

    @property
    def base_path(self):
        return self._base_path

    @base_path.setter
    def base_path(self, newbase_path):
        self._base_path = newbase_path
        self._update_base_path()

    def _update_base_path(self):
        # No-op until a base_path has been provided.
        if self._base_path is None:
            return
        if self.key:
            self.key.base_path = self.base_path
        self.media.base_path = self.base_path
        self.segments.base_path = self.base_path
        self.playlists.base_path = self.base_path

    def add_playlist(self, playlist):
        """Append a variant playlist entry; marks this M3U8 as a variant."""
        self.is_variant = True
        self.playlists.append(playlist)

    def add_iframe_playlist(self, iframe_playlist):
        """Append an i-frame playlist entry; marks this M3U8 as a variant."""
        if iframe_playlist is not None:
            self.is_variant = True
            self.iframe_playlists.append(iframe_playlist)

    def add_media(self, media):
        self.media.append(media)

    def add_segment(self, segment):
        self.segments.append(segment)

    def dumps(self):
        '''
        Returns the current m3u8 as a string.
        You could also use unicode(<this obj>) or str(<this obj>)
        '''
        output = ['#EXTM3U']
        if self.is_independent_segments:
            output.append('#EXT-X-INDEPENDENT-SEGMENTS')
        # NOTE(review): assumes media_sequence is an int when set — a None
        # value would raise on Python 3 here; verify against the parser.
        if self.media_sequence > 0:
            output.append('#EXT-X-MEDIA-SEQUENCE:' + str(self.media_sequence))
        if self.allow_cache:
            output.append('#EXT-X-ALLOW-CACHE:' + self.allow_cache.upper())
        if self.version:
            output.append('#EXT-X-VERSION:' + self.version)
        if self.key:
            output.append(str(self.key))
        if self.target_duration:
            output.append('#EXT-X-TARGETDURATION:' + int_or_float_to_string(self.target_duration))
        if self.program_date_time is not None:
            output.append('#EXT-X-PROGRAM-DATE-TIME:' + parser.format_date_time(self.program_date_time))
        if not (self.playlist_type is None or self.playlist_type == ''):
            output.append(
                '#EXT-X-PLAYLIST-TYPE:%s' % str(self.playlist_type).upper())
        if self.is_i_frames_only:
            output.append('#EXT-X-I-FRAMES-ONLY')
        if self.is_variant:
            # Variant playlists carry media/stream entries instead of segments.
            if self.media:
                output.append(str(self.media))
            output.append(str(self.playlists))
            if self.iframe_playlists:
                output.append(str(self.iframe_playlists))
        output.append(str(self.segments))
        if self.is_endlist:
            output.append('#EXT-X-ENDLIST')
        return '\n'.join(output)

    def dump(self, filename):
        '''
        Saves the current m3u8 to ``filename``
        '''
        self._create_sub_directories(filename)
        with open(filename, 'w') as fileobj:
            fileobj.write(self.dumps())

    def _create_sub_directories(self, filename):
        # Best-effort mkdir -p for the target directory.
        basename = os.path.dirname(filename)
        try:
            os.makedirs(basename)
        except OSError as error:
            # Directory already existing is fine; re-raise anything else.
            if error.errno != errno.EEXIST:
                raise
class BasePathMixin(object):
    """URI helpers for objects exposing `uri` and `base_uri` attributes."""

    @property
    def absolute_uri(self):
        # Nothing to resolve.
        if self.uri is None:
            return None
        # Already absolute: return unchanged.
        if parser.is_url(self.uri):
            return self.uri
        if self.base_uri is None:
            raise ValueError('There can not be `absolute_uri` with no `base_uri` set')
        return _urijoin(self.base_uri, self.uri)

    @property
    def base_path(self):
        return os.path.dirname(self.uri)

    @base_path.setter
    def base_path(self, newbase_path):
        current = self.base_path
        if not current:
            # Bare filename: prepend the new base path first.
            self.uri = '%s/%s' % (newbase_path, self.uri)
            current = self.base_path
        self.uri = self.uri.replace(current, newbase_path)
class GroupedBasePathMixin(object):
    """Write-only base_uri / base_path that fan out to every contained item."""

    def _set_base_uri(self, new_base_uri):
        for member in self:
            member.base_uri = new_base_uri

    # Write-only property: reading raises AttributeError by design.
    base_uri = property(None, _set_base_uri)

    def _set_base_path(self, newbase_path):
        for member in self:
            member.base_path = newbase_path

    base_path = property(None, _set_base_path)
class Segment(BasePathMixin):
    '''
    A video segment from a M3U8 playlist

    `uri`
      a string with the segment uri

    `title`
      title attribute from EXTINF parameter

    `program_date_time`
      Returns the EXT-X-PROGRAM-DATE-TIME as a datetime
      http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.5

    `discontinuity`
      Returns a boolean indicating if a EXT-X-DISCONTINUITY tag exists
      http://tools.ietf.org/html/draft-pantos-http-live-streaming-13#section-3.4.11

    `cue_out`
      Returns a boolean indicating if a EXT-X-CUE-OUT-CONT tag exists

    `duration`
      duration attribute from EXTINF parameter

    `base_uri`
      uri the key comes from in URI hierarchy. ex.: http://example.com/path/to

    `byterange`
      byterange attribute from EXT-X-BYTERANGE parameter

    `key`
      Key used to encrypt the segment (EXT-X-KEY)
    '''

    def __init__(self, uri, base_uri, program_date_time=None, duration=None,
                 title=None, byterange=None, cue_out=False, discontinuity=False, key=None):
        self.uri = uri
        self.duration = duration
        self.title = title
        self.base_uri = base_uri
        self.byterange = byterange
        self.program_date_time = program_date_time
        self.discontinuity = discontinuity
        self.cue_out = cue_out
        # None means the segment is not encrypted.
        self.key = Key(base_uri=base_uri, **key) if key else None

    def dumps(self, last_segment):
        """Serialize this segment to playlist text.

        `last_segment` is the preceding segment (or None): the EXT-X-KEY tag
        is only emitted when this segment's key differs from its predecessor's.
        """
        output = []
        if last_segment and self.key != last_segment.key:
            output.append(str(self.key))
            output.append('\n')
        if self.discontinuity:
            output.append('#EXT-X-DISCONTINUITY\n')
        if self.program_date_time:
            output.append('#EXT-X-PROGRAM-DATE-TIME:%s\n' % parser.format_date_time(self.program_date_time))
        if self.cue_out:
            output.append('#EXT-X-CUE-OUT-CONT\n')
        output.append('#EXTINF:%s,' % int_or_float_to_string(self.duration))
        if self.title:
            output.append(quoted(self.title))
        output.append('\n')
        if self.byterange:
            output.append('#EXT-X-BYTERANGE:%s\n' % self.byterange)
        output.append(self.uri)
        return ''.join(output)

    def __str__(self):
        # BUG FIX: dumps() requires the previous segment as an argument;
        # calling self.dumps() with no argument raised TypeError whenever a
        # segment was converted to a string. A standalone segment has no
        # predecessor, so pass None.
        return self.dumps(None)
class SegmentList(list, GroupedBasePathMixin):
    """An ordered list of Segment objects that serializes to playlist text."""

    def __str__(self):
        chunks = []
        previous = None
        for segment in self:
            # Each segment needs its predecessor to decide whether to repeat
            # the key tag.
            chunks.append(segment.dumps(previous))
            previous = segment
        return '\n'.join(chunks)

    @property
    def uri(self):
        """The URIs of all segments, in playlist order."""
        return [segment.uri for segment in self]
class Key(BasePathMixin):
    '''
    Key used to encrypt the segments in a m3u8 playlist (EXT-X-KEY)

    `method`
      is a string. ex.: "AES-128"

    `uri`
      is a string. ex:: "https://priv.example.com/key.php?r=52"

    `base_uri`
      uri the key comes from in URI hierarchy. ex.: http://example.com/path/to

    `iv`
      initialization vector. a string representing a hexadecimal number. ex.: 0X12A
    '''

    def __init__(self, method, uri, base_uri, iv=None, keyformat=None, keyformatversions=None):
        self.method = method
        self.uri = uri
        self.iv = iv
        self.keyformat = keyformat
        self.keyformatversions = keyformatversions
        self.base_uri = base_uri

    def __str__(self):
        output = [
            'METHOD=%s' % self.method,
        ]
        if self.uri:
            output.append('URI="%s"' % self.uri)
        if self.iv:
            output.append('IV=%s' % self.iv)
        if self.keyformat:
            output.append('KEYFORMAT="%s"' % self.keyformat)
        if self.keyformatversions:
            output.append('KEYFORMATVERSIONS="%s"' % self.keyformatversions)
        return '#EXT-X-KEY:' + ','.join(output)

    def __eq__(self, other):
        # BUG FIX: comparing against None (e.g. Segment.dumps comparing a
        # keyed segment with an unencrypted predecessor) used to raise
        # AttributeError when dereferencing other.method. A missing key is
        # simply "not equal".
        if not other:
            return False
        return self.method == other.method and \
            self.uri == other.uri and \
            self.iv == other.iv and \
            self.base_uri == other.base_uri and \
            self.keyformat == other.keyformat and \
            self.keyformatversions == other.keyformatversions

    def __ne__(self, other):
        return not self.__eq__(other)
class Playlist(BasePathMixin):
    '''
    Playlist object representing a link to a variant M3U8 with a specific bitrate.

    Attributes:

    `stream_info` is a named tuple containing the attributes: `program_id`,
    `bandwidth`, `codecs` and `resolution`, which is a tuple (w, h) of integers

    `media` is a list of related Media entries.

    More info: http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.10
    '''

    def __init__(self, uri, stream_info, media, base_uri):
        self.uri = uri
        self.base_uri = base_uri

        # EXT-X-STREAM-INF carries resolution as "WxH"; parse it into an
        # integer pair when present.  (Idiom fix: `is not None` instead of
        # `!= None`.)
        resolution = stream_info.get('resolution')
        if resolution is not None:
            values = resolution.split('x')
            resolution_pair = (int(values[0]), int(values[1]))
        else:
            resolution_pair = None

        self.stream_info = StreamInfo(bandwidth=stream_info['bandwidth'],
                                      program_id=stream_info.get('program_id'),
                                      resolution=resolution_pair,
                                      codecs=stream_info.get('codecs'))

        # Collect the Media entries whose GROUP-ID matches this variant's
        # audio/video/subtitles group references.
        self.media = []
        for media_type in ('audio', 'video', 'subtitles'):
            group_id = stream_info.get(media_type)
            if not group_id:
                continue
            self.media += filter(lambda m: m.group_id == group_id, media)

    def __str__(self):
        """Render as an EXT-X-STREAM-INF tag followed by the variant URI."""
        stream_inf = []
        if self.stream_info.program_id:
            stream_inf.append('PROGRAM-ID=%d' % self.stream_info.program_id)
        if self.stream_info.bandwidth:
            stream_inf.append('BANDWIDTH=%d' % self.stream_info.bandwidth)
        if self.stream_info.resolution:
            res = str(self.stream_info.resolution[0]) + 'x' + str(self.stream_info.resolution[1])
            stream_inf.append('RESOLUTION=' + res)
        if self.stream_info.codecs:
            stream_inf.append('CODECS=' + quoted(self.stream_info.codecs))
        for media in self.media:
            media_type = media.type.upper()
            stream_inf.append('%s="%s"' % (media_type, media.group_id))
        return '#EXT-X-STREAM-INF:' + ','.join(stream_inf) + '\n' + self.uri
class IFramePlaylist(BasePathMixin):
    '''
    IFramePlaylist object representing a link to a
    variant M3U8 i-frame playlist with a specific bitrate.

    Attributes:

    `iframe_stream_info` is a named tuple containing the attributes:
    `program_id`, `bandwidth`, `codecs` and `resolution` which
    is a tuple (w, h) of integers

    More info: http://tools.ietf.org/html/draft-pantos-http-live-streaming-07#section-3.3.13
    '''

    def __init__(self, base_uri, uri, iframe_stream_info):
        self.uri = uri
        self.base_uri = base_uri

        # Resolution arrives as "WxH"; convert to an integer pair when set.
        res_text = iframe_stream_info.get('resolution')
        if res_text is None:
            res_pair = None
        else:
            dims = res_text.split('x')
            res_pair = (int(dims[0]), int(dims[1]))

        self.iframe_stream_info = StreamInfo(
            bandwidth=iframe_stream_info.get('bandwidth'),
            program_id=iframe_stream_info.get('program_id'),
            resolution=res_pair,
            codecs=iframe_stream_info.get('codecs')
        )

    def __str__(self):
        """Render as an EXT-X-I-FRAME-STREAM-INF tag."""
        attrs = []
        info = self.iframe_stream_info
        if info.program_id:
            attrs.append('PROGRAM-ID=%d' % info.program_id)
        if info.bandwidth:
            attrs.append('BANDWIDTH=%d' % info.bandwidth)
        if info.resolution:
            attrs.append('RESOLUTION=' +
                         str(info.resolution[0]) + 'x' + str(info.resolution[1]))
        if info.codecs:
            attrs.append('CODECS=' + quoted(info.codecs))
        if self.uri:
            attrs.append('URI=' + quoted(self.uri))
        return '#EXT-X-I-FRAME-STREAM-INF:' + ','.join(attrs)
# Immutable record describing one variant stream's EXT-X-STREAM-INF data.
StreamInfo = namedtuple('StreamInfo',
                        'bandwidth program_id resolution codecs')
class Media(BasePathMixin):
    '''
    A media object from a M3U8 playlist
    https://tools.ietf.org/html/draft-pantos-http-live-streaming-16#section-4.3.4.1

    `uri`
      a string with the media uri

    `type`
    `group_id`
    `language`
    `assoc-language`
    `name`
    `default`
    `autoselect`
    `forced`
    `instream_id`
    `characteristics`
      attributes in the EXT-MEDIA tag

    `base_uri`
      uri the media comes from in URI hierarchy. ex.: http://example.com/path/to
    '''

    def __init__(self, uri=None, type=None, group_id=None, language=None,
                 name=None, default=None, autoselect=None, forced=None,
                 characteristics=None, assoc_language=None,
                 instream_id=None, base_uri=None, **extras):
        self.base_uri = base_uri
        self.uri = uri
        self.type = type
        self.group_id = group_id
        self.language = language
        self.name = name
        self.default = default
        self.autoselect = autoselect
        self.forced = forced
        self.assoc_language = assoc_language
        self.instream_id = instream_id
        self.characteristics = characteristics
        # Unrecognized EXT-X-MEDIA attributes are retained verbatim.
        self.extras = extras

    def dumps(self):
        """Render this entry as an EXT-X-MEDIA tag."""
        attrs = []
        # Quoted vs. unquoted attributes follow the HLS draft; only
        # attributes with a value are emitted.
        if self.uri:
            attrs.append('URI=' + quoted(self.uri))
        if self.type:
            attrs.append('TYPE=' + self.type)
        if self.group_id:
            attrs.append('GROUP-ID=' + quoted(self.group_id))
        if self.language:
            attrs.append('LANGUAGE=' + quoted(self.language))
        if self.assoc_language:
            attrs.append('ASSOC-LANGUAGE=' + quoted(self.assoc_language))
        if self.name:
            attrs.append('NAME=' + quoted(self.name))
        if self.default:
            attrs.append('DEFAULT=' + self.default)
        if self.autoselect:
            attrs.append('AUTOSELECT=' + self.autoselect)
        if self.forced:
            attrs.append('FORCED=' + self.forced)
        if self.instream_id:
            attrs.append('INSTREAM-ID=' + self.instream_id)
        if self.characteristics:
            attrs.append('CHARACTERISTICS=' + quoted(self.characteristics))
        return ('#EXT-X-MEDIA:' + ','.join(attrs))

    def __str__(self):
        return self.dumps()
class MediaList(list, GroupedBasePathMixin):
    """List of Media entries; str() renders one EXT-X-MEDIA tag per line."""

    def __str__(self):
        return '\n'.join(str(entry) for entry in self)

    @property
    def uri(self):
        """URIs of all contained media entries, in order."""
        return [entry.uri for entry in self]
class PlaylistList(list, GroupedBasePathMixin):
    """List of variant Playlist entries; str() renders one per line."""

    def __str__(self):
        return '\n'.join(str(entry) for entry in self)
def denormalize_attribute(attribute):
    """Convert a python_style name to its M3U8 ATTRIBUTE-NAME form."""
    return attribute.upper().replace('_', '-')
def quoted(string):
    """Wrap *string* in double quotes (no escaping is performed)."""
    return '"{0}"'.format(string)
def _urijoin(base_uri, path):
    """Join *path* onto *base_uri*, handling both URLs and local paths."""
    if not parser.is_url(base_uri):
        # Filesystem base: strip slashes so the join stays relative,
        # then normalize separators.
        return os.path.normpath(os.path.join(base_uri, path.strip('/')))
    return url_parser.urljoin(base_uri, path)
def int_or_float_to_string(number):
    """Format *number*, dropping the fractional part for whole values."""
    if number == math.floor(number):
        return str(int(number))
    return str(number)
| {
"content_hash": "f7440cd981ff947ef21a07881e62c8cb",
"timestamp": "",
"source": "github",
"line_count": 658,
"max_line_length": 112,
"avg_line_length": 33.243161094224924,
"alnum_prop": 0.5876382920362073,
"repo_name": "cristina0botez/m3u8",
"id": "c30650578cdde3558dc1f5dba35f8c92f5d1a155",
"size": "22059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "m3u8/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "81847"
},
{
"name": "Shell",
"bytes": "646"
}
],
"symlink_target": ""
} |
"""
@author: letian
@homepage: http://www.letiantian.me
@github: https://github.com/someus/
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import networkx as nx
import numpy as np
from . import util
from .Segmentation import Segmentation
class TextRank4Sentence(object):
    """Extractive summarization via TextRank over sentence similarity."""

    def __init__(self, stop_words_file=None,
                 allow_speech_tags=util.allow_speech_tags,
                 delimiters=util.sentence_delimiters):
        """
        Keyword arguments:
        stop_words_file   --  path to a stop-words file (str); when it is
                              not a str the default stop-words file is used
        allow_speech_tags --  speech (POS) tags kept by the strongest filter
        delimiters        --  characters used to split text into sentences

        Object attributes (populated by analyze()):
        self.sentences            -- list of sentences
        self.words_no_filter      -- two-level list: each sentence split into words
        self.words_no_stop_words  -- words_no_filter with stop words removed
        self.words_all_filters    -- words_no_stop_words keeping only allowed speech tags
        """
        self.seg = Segmentation(stop_words_file=stop_words_file,
                                allow_speech_tags=allow_speech_tags,
                                delimiters=delimiters)

        self.sentences = None
        self.words_no_filter = None  # two-level list
        self.words_no_stop_words = None
        self.words_all_filters = None

        self.key_sentences = None

    def analyze(self, text, lower=False,
                source='no_stop_words',
                sim_func=util.get_similarity,
                pagerank_config=None):
        """
        Keyword arguments:
        text      --  the text to analyze (string)
        lower     --  whether to lower-case the text; default False
        source    --  which word list is used to compute sentence similarity:
                      'no_filter', 'no_stop_words' or 'all_filters';
                      default (and fallback for unknown values) is
                      'no_stop_words'
        sim_func  --  function computing the similarity of two sentences
        pagerank_config -- dict of pagerank options; None (the default)
                      means {'alpha': 0.85}

        Note: the default for pagerank_config was a mutable dict literal,
        which is a shared-state hazard; it is now built per call.
        """
        if pagerank_config is None:
            pagerank_config = {'alpha': 0.85}

        self.key_sentences = []

        result = self.seg.segment(text=text, lower=lower)
        self.sentences = result.sentences
        self.words_no_filter = result.words_no_filter
        self.words_no_stop_words = result.words_no_stop_words
        self.words_all_filters = result.words_all_filters

        options = ['no_filter', 'no_stop_words', 'all_filters']

        # Unknown source names silently fall back to 'no_stop_words'.
        if source in options:
            _source = result['words_' + source]
        else:
            _source = result['words_no_stop_words']

        self.key_sentences = util.sort_sentences(sentences=self.sentences,
                                                 words=_source,
                                                 sim_func=sim_func,
                                                 pagerank_config=pagerank_config)

    def get_key_sentences(self, num=6, sentence_min_len=6):
        """Return up to *num* top-ranked sentences of length >= *sentence_min_len*.

        Return:
        A list of sentence items (in rank order) suitable for a summary.
        """
        result = []
        count = 0
        for item in self.key_sentences:
            if count >= num:
                break
            if len(item['sentence']) >= sentence_min_len:
                result.append(item)
                count += 1
        return result
if __name__ == '__main__':
pass | {
"content_hash": "6c882773286b8e19ecc76419941f7aa9",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 109,
"avg_line_length": 35.58064516129032,
"alnum_prop": 0.5170746449078272,
"repo_name": "someus/TextRank4ZH",
"id": "6e14c2596d0571c66c41aa21f2dc6947b3880474",
"size": "3751",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "textrank4zh/TextRank4Sentence.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28341"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import re
import logging
from django.db import IntegrityError, transaction
from six import BytesIO
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.project import ProjectEndpoint, ProjectReleasePermission
from sentry.api.content_negotiation import ConditionalContentNegotiation
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.paginator import OffsetPaginator
from sentry.api.serializers import serialize
from sentry.api.endpoints.organization_release_files import load_dist
from sentry.constants import MAX_RELEASE_FILES_OFFSET
from sentry.models import File, Release, ReleaseFile
from sentry.utils.apidocs import scenario, attach_scenarios
ERR_FILE_EXISTS = "A file matching this name already exists for the given release"
_filename_re = re.compile(r"[\n\t\r\f\v\\]")
@scenario("UploadReleaseFile")
def upload_file_scenario(runner):
    """API-docs scenario: upload a file to the default release."""
    path = "/projects/%s/%s/releases/%s/files/" % (
        runner.org.slug,
        runner.default_project.slug,
        runner.default_release.version,
    )
    runner.request(
        method="POST",
        path=path,
        data={
            "header": "Content-Type:text/plain; encoding=utf-8",
            "name": "/demo/hello.py",
            "file": ("hello.py", BytesIO(b'print "Hello World!"')),
        },
        format="multipart",
    )
@scenario("ListReleaseFiles")
def list_files_scenario(runner):
    """API-docs scenario: create one release file, then list the files."""
    runner.utils.create_release_file(
        project=runner.default_project,
        release=runner.default_release,
        path="/demo/message-for-you.txt",
        contents="Hello World!",
    )
    path = "/projects/%s/%s/releases/%s/files/" % (
        runner.org.slug,
        runner.default_project.slug,
        runner.default_release.version,
    )
    runner.request(method="GET", path=path)
class ProjectReleaseFilesEndpoint(ProjectEndpoint):
    """List and upload artifact files attached to a project's release."""

    doc_section = DocSection.RELEASES
    # Uploads arrive as multipart/form-data rather than JSON.
    content_negotiation_class = ConditionalContentNegotiation
    permission_classes = (ProjectReleasePermission,)

    @attach_scenarios([list_files_scenario])
    def get(self, request, project, version):
        """
        List a Project Release's Files
        ``````````````````````````````

        Retrieve a list of files for a given release.

        :pparam string organization_slug: the slug of the organization the
                                          release belongs to.
        :pparam string project_slug: the slug of the project to list the
                                     release files of.
        :pparam string version: the version identifier of the release.
        :auth: required
        """
        # Resolve the release within this project's organization; 404 when
        # it does not exist or is not attached to this project.
        try:
            release = Release.objects.get(
                organization_id=project.organization_id, projects=project, version=version
            )
        except Release.DoesNotExist:
            raise ResourceDoesNotExist

        file_list = (
            ReleaseFile.objects.filter(release=release).select_related("file").order_by("name")
        )

        # Paginated by name; load_dist decorates each page of results
        # before serialization.
        return self.paginate(
            request=request,
            queryset=file_list,
            order_by="name",
            paginator_cls=OffsetPaginator,
            max_offset=MAX_RELEASE_FILES_OFFSET,
            on_results=lambda r: serialize(load_dist(r), request.user),
        )

    @attach_scenarios([upload_file_scenario])
    def post(self, request, project, version):
        """
        Upload a New Project Release File
        `````````````````````````````````

        Upload a new file for the given release.

        Unlike other API requests, files must be uploaded using the
        traditional multipart/form-data content-type.

        The optional 'name' attribute should reflect the absolute path
        that this file will be referenced as. For example, in the case of
        JavaScript you might specify the full web URI.

        :pparam string organization_slug: the slug of the organization the
                                          release belongs to.
        :pparam string project_slug: the slug of the project to change the
                                     release of.
        :pparam string version: the version identifier of the release.
        :param string name: the name (full path) of the file.
        :param string dist: the name of the dist.
        :param file file: the multipart encoded file.
        :param string header: this parameter can be supplied multiple times
                              to attach headers to the file.  Each header
                              is a string in the format ``key:value``.  For
                              instance it can be used to define a content
                              type.
        :auth: required
        """
        try:
            release = Release.objects.get(
                organization_id=project.organization_id, projects=project, version=version
            )
        except Release.DoesNotExist:
            raise ResourceDoesNotExist

        logger = logging.getLogger("sentry.files")
        logger.info("projectreleasefile.start")

        if "file" not in request.data:
            return Response({"detail": "Missing uploaded file"}, status=400)

        fileobj = request.data["file"]

        # "file" is the fallback filename multipart parsers use when no
        # name was given, so it is rejected as unspecified.
        full_name = request.data.get("name", fileobj.name)
        if not full_name or full_name == "file":
            return Response({"detail": "File name must be specified"}, status=400)

        # Store only the basename on the File row; the full path is kept
        # on the ReleaseFile below.
        name = full_name.rsplit("/", 1)[-1]

        if _filename_re.search(name):
            return Response(
                {"detail": "File name must not contain special whitespace characters"}, status=400
            )

        dist_name = request.data.get("dist")
        dist = None
        if dist_name:
            dist = release.add_dist(dist_name)

        # Each repeated "header" form field is a "key:value" string.
        headers = {"Content-Type": fileobj.content_type}
        for headerval in request.data.getlist("header") or ():
            try:
                k, v = headerval.split(":", 1)
            except ValueError:
                return Response({"detail": "header value was not formatted correctly"}, status=400)
            else:
                if _filename_re.search(v):
                    return Response(
                        {"detail": "header value must not contain special whitespace characters"},
                        status=400,
                    )
                headers[k] = v.strip()

        # The blob is written before the ReleaseFile row; on a duplicate
        # name the orphaned blob is deleted again below.
        file = File.objects.create(name=name, type="release.file", headers=headers)
        file.putfile(fileobj, logger=logger)

        try:
            with transaction.atomic():
                releasefile = ReleaseFile.objects.create(
                    organization_id=release.organization_id,
                    release=release,
                    file=file,
                    name=full_name,
                    dist=dist,
                )
        except IntegrityError:
            # A file with this name already exists for the release.
            file.delete()
            return Response({"detail": ERR_FILE_EXISTS}, status=409)

        return Response(serialize(releasefile, request.user), status=201)
| {
"content_hash": "23928825b21b664a1387e8bff9d7f2f1",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 99,
"avg_line_length": 38.108108108108105,
"alnum_prop": 0.6009929078014185,
"repo_name": "mvaled/sentry",
"id": "2c5640ab24aa81f5bfe759d8fd88bd5b4079a407",
"size": "7050",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/project_release_files.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.codegen.thrift import thrift_parser
from pants.backend.codegen.thrift.target_types import ThriftSourceField
from pants.backend.codegen.thrift.thrift_parser import ParsedThrift, ParsedThriftRequest
from pants.engine.addresses import Address
from pants.testutil.rule_runner import QueryRule, RuleRunner
def parse(content: str) -> ParsedThrift:
    """Write *content* to f.thrift and run the thrift parser rules on it."""
    runner = RuleRunner(
        rules=[*thrift_parser.rules(), QueryRule(ParsedThrift, [ParsedThriftRequest])]
    )
    runner.write_files({"f.thrift": content})
    request = ParsedThriftRequest(
        ThriftSourceField("f.thrift", Address("", target_name="t"))
    )
    return runner.request(ParsedThrift, [request])
def test_parse_thrift_imports() -> None:
    """Well-formed `include` statements are extracted; malformed ones ignored."""
    parsed = parse(
        dedent(
            """\
            include "double_quotes.thrift"
            include 'single_quotes.thrift'
            include 'mixed_quotes.thrift"
            include\t"tab.thrift"\t

            # Complex paths
            include "path/to_dir/f.thrift"
            include "path\\to_dir\\f.thrift"
            include "âčĘï.thrift"
            include "123.thrift"

            # Invalid includes
            include invalid.thrift
            ilude "invalid.thrift"
            include "invalid.trft"
            """
        )
    )
    expected = {
        "double_quotes.thrift",
        "single_quotes.thrift",
        "mixed_quotes.thrift",
        "tab.thrift",
        "path/to_dir/f.thrift",
        "path\\to_dir\\f.thrift",
        "âčĘï.thrift",
        "123.thrift",
    }
    assert set(parsed.imports) == expected
    assert not parsed.namespaces
@pytest.mark.parametrize("namespace", ["my_mod", "path.to.my_mod", "path.to.âčĘï"])
def test_namespaces_valid(namespace: str) -> None:
    """`namespace <lang> <ident>` lines map each language to its namespace."""
    parsed = parse(
        dedent(
            f"""\
            namespace py {namespace}
            namespace java {namespace}
            """
        )
    )
    assert not parsed.imports
    assert dict(parsed.namespaces) == {"py": namespace, "java": namespace}
def test_namespaces_invalid() -> None:
    """Misspelled keywords and non-ASCII language names are not parsed."""
    parsed = parse(
        dedent(
            """\
            namspc py invalid
            namespace âčĘï invalid
            """
        )
    )
    assert not parsed.imports
    assert not parsed.namespaces
| {
"content_hash": "61ba60588ab39f98719d4e9a4b683654",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 91,
"avg_line_length": 27.892857142857142,
"alnum_prop": 0.5953905249679897,
"repo_name": "benjyw/pants",
"id": "25e2b2b4d7b26b2b478b520d8a46baf1869ad8f1",
"size": "2491",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/codegen/thrift/thrift_parser_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "10690"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3595"
},
{
"name": "Python",
"bytes": "7135320"
},
{
"name": "Rust",
"bytes": "1601736"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31723"
},
{
"name": "Starlark",
"bytes": "72809"
}
],
"symlink_target": ""
} |
from messente.api.sms import api
from messente.api.sms.constants import VERSION
from messente.api.sms.messente import Messente
__all__ = ["api", "VERSION", "Messente"]
| {
"content_hash": "f18b472009fea18ead57d65e48e6516f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 46,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.7529411764705882,
"repo_name": "messente/messente-python",
"id": "6cf74495d13a31c74f334d23f6982b0912dd5018",
"size": "787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "messente/api/sms/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "252"
},
{
"name": "Python",
"bytes": "63470"
}
],
"symlink_target": ""
} |
import uuid
import json
import logging
import unittest
import bus
import server
import repository
import rangevoting
class SpyBus:
    """Test double for the command bus: records the last executed command."""

    def __init__(self):
        self.last_command = None

    def execute(self, command):
        """Remember *command* and report success with an empty Result."""
        self.last_command = command
        return bus.Result()
class SpyQueryDispatcher:
    """Test double for the query dispatcher: records the last query and
    returns canned results."""

    def __init__(self, results=None):
        self.results = results
        self.last_query = None

    def execute(self, query):
        """Remember *query* and return the preconfigured results."""
        self.last_query = query
        return self.results
class ServerTestCase(unittest.TestCase):
    """End-to-end tests of the Flask server through its test client.

    The real repository is replaced by an in-memory mock in setUp; several
    tests additionally swap server.app.bus / server.app.query_dispatcher
    for spies to inspect the command or query the route builds.
    """

    def setUp(self):
        # Mock persistence, enable Flask testing mode, rebuild handlers,
        # and silence logging for the duration of the tests.
        server.app.repository = repository.MockRepository()
        server.app.config['TESTING'] = True
        server.app.configure_handlers()
        self.app = server.app.test_client()
        root = logging.getLogger()
        root.setLevel(logging.CRITICAL)

    def test_server_register_handlers(self):
        self.assertGreater(len(server.app.bus.handlers), 0)

    def test_register_handler_has_repository(self):
        handler = next(iter(server.app.bus.handlers.values()))
        self.assertIsNotNone(handler.repository)

    def test_create_rangevotes_respond_201_created(self):
        response = self.app.post('/rangevotes',
                                 data=json.dumps({'question': 'test question ?', 'choices': ['c1', 'c2', 'c3']}),
                                 content_type='application/json')
        self.assertEqual(201, response.status_code)

    def test_create_bad_rangevotes_respond_400_bad_request(self):
        # Empty question/choices must be rejected by validation.
        response = self.app.post('/rangevotes',
                                 data=json.dumps({'question': '', 'choices': []}),
                                 content_type='application/json')
        self.assertEqual(400, response.status_code)

    def test_command_is_properly_created(self):
        # Spy on the bus to inspect the command built by the route.
        server.app.bus = SpyBus()
        self.app.post('/rangevotes',
                      data=json.dumps({'question': 'test question ?', 'choices': ['c1', 'c2', 'c3']}),
                      content_type='application/json')
        self.assertEqual(type(uuid.uuid4()), type(server.app.bus.last_command.uuid))
        self.assertEqual('test question ?', server.app.bus.last_command.question)
        self.assertEqual(['c1', 'c2', 'c3'], server.app.bus.last_command.choices)

    def test_return_uuid_of_rangevote(self):
        # The Location header must point at the newly created resource.
        server.app.bus = SpyBus()
        response = self.app.post('/rangevotes',
                                 data=json.dumps({'question': 'test question ?', 'choices': ['c1', 'c2', 'c3']}),
                                 content_type='application/json')
        location = '/rangevotes/' + str(server.app.bus.last_command.uuid)
        self.assertTrue(location in response.headers['Location'])

    def test_get_rangevote(self):
        server.app.query_dispatcher = SpyQueryDispatcher(rangevoting.RangeVote(uuid=1, question='q?', choices=['c1', 'c2']).serialize())
        response = self.app.get('/rangevotes/375ce742-495f-4b0c-b831-3fb0dcc61b17', content_type='application/json')
        self.assertEqual(200, response.status_code)
        self.assertEqual('375ce742-495f-4b0c-b831-3fb0dcc61b17', server.app.query_dispatcher.last_query.uuid)

    def test_get_rangevote_404(self):
        # No spy installed: the mock repository has no such vote.
        response = self.app.get('/rangevotes/375ce742-495f-4b0c-b831-3fb0dcc61b17', content_type='application/json')
        self.assertEqual(404, response.status_code)

    def test_update_rangevote(self):
        server.app.bus = SpyBus()
        response = self.app.put('/rangevotes/375ce742-495f-4b0c-b831-3fb0dcc61b17',
                                data=json.dumps({'question': 'test question ?', 'choices': ['c1', 'c2', 'c3'], 'votes': []}),
                                content_type='application/json')
        self.assertEqual(200, response.status_code)
        self.assertEqual('375ce742-495f-4b0c-b831-3fb0dcc61b17', server.app.bus.last_command.uuid)
        self.assertEqual('test question ?', server.app.bus.last_command.question)
        self.assertListEqual(['c1', 'c2', 'c3'], server.app.bus.last_command.choices)

    def test_create_vote(self):
        server.app.bus = SpyBus()
        response = self.app.post('/rangevotes/375ce742-495f-4b0c-b831-3fb0dcc61b17/votes',
                                 data=json.dumps({'elector': 'Guillaume Vincent', 'opinions': {'a': 1, 'b': -1}}),
                                 content_type='application/json')
        self.assertEqual(201, response.status_code)
        self.assertEqual('375ce742-495f-4b0c-b831-3fb0dcc61b17', server.app.bus.last_command.rangevote_id)
        self.assertEqual('Guillaume Vincent', server.app.bus.last_command.elector)
        self.assertDictEqual({'a': 1, 'b': -1}, server.app.bus.last_command.opinions)

    def test_get_results(self):
        server.app.query_dispatcher = SpyQueryDispatcher(
            {'choices': ['c1', 'c2'], 'id': '1', 'randomized_choices': ['c2', 'c1'], 'votes': [], 'question': 'q?'}
        )
        response = self.app.get('/rangevotes/375ce742-495f-4b0c-b831-3fb0dcc61b17/results', content_type='application/json')
        self.assertEqual('375ce742-495f-4b0c-b831-3fb0dcc61b17', server.app.query_dispatcher.last_query.uuid)
        self.assertEqual(200, response.status_code)

    def test_get_votes(self):
        # The listing route queries the 20 most recent votes.
        server.app.query_dispatcher = SpyQueryDispatcher([])
        response = self.app.get('/rangevotes', content_type='application/json')
        self.assertEqual(20, server.app.query_dispatcher.last_query.count)
        self.assertEqual(200, response.status_code)
| {
"content_hash": "4a615bcc14345c850cd289cd67e0cb96",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 136,
"avg_line_length": 40.794117647058826,
"alnum_prop": 0.6250901225666907,
"repo_name": "guillaumevincent/rangevoting",
"id": "e8205d74861d98128a8555105d07277edbdefc9b",
"size": "5548",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12535"
},
{
"name": "HTML",
"bytes": "18111"
},
{
"name": "JavaScript",
"bytes": "19557"
},
{
"name": "Python",
"bytes": "38836"
}
],
"symlink_target": ""
} |
from Connector import CoreConnector
from m5.params import *
from m5.proxy import *
class X86Connector(CoreConnector):
    """gem5 SimObject config for the x86 flavour of the DTU core connector.

    `type` and `cxx_header` bind this Python object to its C++
    implementation; the names must match the C++ side.
    """
    type = 'X86Connector'
    cxx_header = "mem/dtu/connector/x86.hh"

    # Master port used to send IRQs (per the port description string).
    irq_master_port = MasterPort("Port to send the IRQs to")
| {
"content_hash": "b52488256c7be85feeb174705dcc7242",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 60,
"avg_line_length": 31.25,
"alnum_prop": 0.736,
"repo_name": "TUD-OS/gem5-dtu",
"id": "f57bbd3f11e0ec4d5841d9c044e96bcc7b136f6e",
"size": "1811",
"binary": false,
"copies": "1",
"ref": "refs/heads/dtu-mmu",
"path": "src/mem/dtu/connector/X86Connector.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "648342"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "C",
"bytes": "1717604"
},
{
"name": "C++",
"bytes": "35149040"
},
{
"name": "CMake",
"bytes": "79529"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Forth",
"bytes": "15790"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Java",
"bytes": "3179"
},
{
"name": "M4",
"bytes": "75007"
},
{
"name": "Makefile",
"bytes": "68265"
},
{
"name": "Objective-C",
"bytes": "24714"
},
{
"name": "Perl",
"bytes": "33696"
},
{
"name": "Python",
"bytes": "6073714"
},
{
"name": "Roff",
"bytes": "8783"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "14236"
},
{
"name": "Shell",
"bytes": "101649"
},
{
"name": "VBA",
"bytes": "2884"
},
{
"name": "Vim Script",
"bytes": "4335"
},
{
"name": "sed",
"bytes": "3927"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.utils.translation import ugettext_noop as _
from transurlvania.defaults import *
admin.autodiscover()

# Patterns registered via lang_prefixed_patterns -- presumably served
# under a per-language URL prefix (transurlvania); confirm against the
# transurlvania defaults module.
urlpatterns = lang_prefixed_patterns('garfield.views',
    url(r'^$', 'home'),
    url(r'^admin/', include(admin.site.urls)),
    (r'^garfield/', include('garfield.urls')),
    # ugettext_noop marks the about-us pattern as translatable.
    url(_(r'^about-us/$'), 'about_us', name='about_us'),
)
# Un-prefixed root URL handled by the language-detecting redirect view.
urlpatterns += patterns('transurlvania.views',
    (r'^$', 'detect_language_and_redirect'),
)
| {
"content_hash": "9af49c026125ea5e7660a1c654871b84",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 56,
"avg_line_length": 26.77777777777778,
"alnum_prop": 0.6701244813278008,
"repo_name": "trapeze/transurlvania",
"id": "c143f1e512e62a50c3a4c52aa2300f55f97fccec",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "47954"
}
],
"symlink_target": ""
} |
import sys
import os
import unittest
import json
sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
from testlib import get_fixture, random_string, function, random_int
from testlib import EapiConfigUnitTest
import pyeapi.api.interfaces
INTERFACES = ['Ethernet1', 'Ethernet1/1', 'Vlan1234', 'Management1',
'Port-Channel1', 'Vxlan1']
class TestFunctions(unittest.TestCase):
    """Unit tests for module-level helpers in pyeapi.api.interfaces."""

    def test_isvalidinterface_returns_true(self):
        validator = pyeapi.api.interfaces.isvalidinterface
        for name in INTERFACES:
            self.assertTrue(validator(name))

    def test_isvalidinterface_returns_false(self):
        validator = pyeapi.api.interfaces.isvalidinterface
        for name in ['Et1', 'Ma1', 'Po1', 'Vl1', random_string()]:
            self.assertFalse(validator(name))

    def test_instance(self):
        created = pyeapi.api.interfaces.instance(None)
        self.assertIsInstance(created, pyeapi.api.interfaces.Interfaces)
class TestApiInterfaces(EapiConfigUnitTest):
    """Tests for the Interfaces facade over the running-config fixture."""

    def __init__(self, *args, **kwargs):
        super(TestApiInterfaces, self).__init__(*args, **kwargs)
        self.instance = pyeapi.api.interfaces.Interfaces(None)
        self.config = open(get_fixture('running_config.text')).read()

    def test_get_interface_generic(self):
        for name in ['Management1', 'Loopback0']:
            self.assertEqual(self.instance.get(name)['type'], 'generic')

    def test_get_interface_ethernet(self):
        self.assertEqual(self.instance.get('Ethernet1')['type'], 'ethernet')

    def test_proxy_method_success(self):
        self.assertTrue(self.instance.set_sflow('Ethernet1', True))

    def test_proxy_method_raises_attribute_error(self):
        # sflow is not supported on Management interfaces.
        with self.assertRaises(AttributeError):
            self.instance.set_sflow('Management1', True)
class TestApiBaseInterface(EapiConfigUnitTest):
    """Tests for BaseInterface: get(), description and shutdown setters."""

    def __init__(self, *args, **kwargs):
        super(TestApiBaseInterface, self).__init__(*args, **kwargs)
        self.instance = pyeapi.api.interfaces.BaseInterface(None)
        self.config = open(get_fixture('running_config.text')).read()

    def test_get(self):
        result = self.instance.get('Loopback0')
        values = dict(name='Loopback0', type='generic',
                      shutdown=False, description=None)
        self.assertEqual(result, values)

    def test_set_description_with_value(self):
        # Expect "description <value>" under the interface context.
        for intf in INTERFACES:
            value = random_string()
            cmds = ['interface %s' % intf, 'description %s' % value]
            func = function('set_description', intf, value)
            self.eapi_positive_config_test(func, cmds)

    def test_set_description_with_no_value(self):
        # No value negates the command.
        for intf in INTERFACES:
            cmds = ['interface %s' % intf, 'no description']
            func = function('set_description', intf)
            self.eapi_positive_config_test(func, cmds)

    def test_set_description_with_default(self):
        for intf in INTERFACES:
            cmds = ['interface %s' % intf, 'default description']
            func = function('set_description', intf, default=True)
            self.eapi_positive_config_test(func, cmds)

    def test_set_shutdown_with_value(self):
        # True emits "shutdown", False emits "no shutdown".
        for intf in INTERFACES:
            for value in [True, False]:
                cmds = ['interface %s' % intf]
                if value:
                    cmds.append('shutdown')
                else:
                    cmds.append('no shutdown')
                func = function('set_shutdown', intf, value)
                self.eapi_positive_config_test(func, cmds)

    def test_set_shutdown_with_no_value(self):
        for intf in INTERFACES:
            cmds = ['interface %s' % intf, 'no shutdown']
            func = function('set_shutdown', intf)
            self.eapi_positive_config_test(func, cmds)

    def test_set_shutdown_with_default(self):
        for intf in INTERFACES:
            cmds = ['interface %s' % intf, 'default shutdown']
            func = function('set_shutdown', intf, default=True)
            self.eapi_positive_config_test(func, cmds)

    def test_set_shutdown_invalid_value_raises_value_error(self):
        # Non-boolean values must be rejected.
        for intf in INTERFACES:
            func = function('set_shutdown', intf, random_string())
            self.eapi_exception_config_test(func, ValueError)
class TestApiEthernetInterface(EapiConfigUnitTest):
INTERFACES = ['Ethernet1', 'Ethernet1/1']
    def __init__(self, *args, **kwargs):
        """Bind the tests to an EthernetInterface over the fixture config."""
        super(TestApiEthernetInterface, self).__init__(*args, **kwargs)
        self.instance = pyeapi.api.interfaces.EthernetInterface(None)
        self.config = open(get_fixture('running_config.text')).read()
    def test_get(self):
        # Ethernet1 from the fixture config: defaults with sflow enabled
        # and flow control off in both directions.
        result = self.instance.get('Ethernet1')
        values = dict(name='Ethernet1', type='ethernet',
                      description=None, shutdown=False,
                      sflow=True, flowcontrol_send='off',
                      flowcontrol_receive='off')
        self.assertEqual(values, result)
    def test_instance_functions(self):
        # NOTE(review): self.INTERFACES contains only EthernetX names, so
        # the `intf[0:2] not in ['Et', 'Ma']` guard means the create and
        # delete branches never run an assertion here -- confirm whether
        # that is intentional.
        for intf in self.INTERFACES:
            for name in ['create', 'delete', 'default']:
                if name == 'create':
                    if intf[0:2] not in ['Et', 'Ma']:
                        cmds = 'interface %s' % intf
                        func = function(name, intf)
                        self.eapi_positive_config_test(func, cmds)
                elif name == 'delete':
                    if intf[0:2] not in ['Et', 'Ma']:
                        cmds = 'no interface %s' % intf
                        func = function(name, intf)
                        self.eapi_positive_config_test(func, cmds)
                elif name == 'default':
                    cmds = 'default interface %s' % intf
                    func = function(name, intf)
                    self.eapi_positive_config_test(func, cmds)
def test_set_flowcontrol_with_value(self):
for intf in self.INTERFACES:
for direction in ['send', 'receive']:
for value in ['on', 'off']:
cmds = ['interface %s' % intf,
'flowcontrol %s %s' % (direction, value)]
func = function('set_flowcontrol', intf, direction, value)
self.eapi_positive_config_test(func, cmds)
def test_set_flowcontrol_with_invalid_direction_raises_value_error(self):
for intf in self.INTERFACES:
func = function('set_flowcontrol', intf, 'invalid', None)
self.eapi_exception_config_test(func, ValueError)
def test_set_flowcontrol_with_invalid_value_raises_value_error(self):
for intf in self.INTERFACES:
for direction in ['send', 'receive']:
func = function('set_flowcontrol', intf, direction, 'invalid')
self.eapi_exception_config_test(func, ValueError)
def test_set_flowcontrol_with_no_value(self):
for intf in self.INTERFACES:
for direction in ['send', 'receive']:
cmds = ['interface %s' % intf, 'no flowcontrol %s' % direction]
func = function('set_flowcontrol', intf, direction)
self.eapi_positive_config_test(func, cmds)
def test_set_flowcontrol_with_default(self):
for intf in self.INTERFACES:
for direction in ['send', 'receive']:
cmds = ['interface %s' % intf,
'default flowcontrol %s' % direction]
func = function('set_flowcontrol', intf, direction,
default=True)
self.eapi_positive_config_test(func, cmds)
def test_set_sflow_with_value(self):
for intf in self.INTERFACES:
for value in [True, False]:
cmds = ['interface %s' % intf]
if value:
cmds.append('sflow enable')
else:
cmds.append('no sflow enable')
func = function('set_sflow', intf, value)
self.eapi_positive_config_test(func, cmds)
def test_set_sflow_with_no_value(self):
for intf in INTERFACES:
cmds = ['interface %s' % intf, 'no sflow enable']
func = function('set_sflow', intf)
self.eapi_positive_config_test(func, cmds)
def test_set_sflow_with_default(self):
for intf in INTERFACES:
cmds = ['interface %s' % intf, 'default sflow']
func = function('set_sflow', intf, default=True)
self.eapi_positive_config_test(func, cmds)
def test_set_shutdown_invalid_value_raises_value_error(self):
for intf in INTERFACES:
func = function('set_sflow', intf, random_string())
self.eapi_exception_config_test(func, ValueError)
class TestApiPortchannelInterface(EapiConfigUnitTest):
    """Unit tests for pyeapi.api.interfaces.PortchannelInterface."""

    def __init__(self, *args, **kwargs):
        super(TestApiPortchannelInterface, self).__init__(*args, **kwargs)
        self.instance = pyeapi.api.interfaces.PortchannelInterface(None)
        # Close the fixture handle instead of leaking it.
        with open(get_fixture('running_config.portchannel')) as fixture:
            self.config = fixture.read()

    def setUp(self):
        super(TestApiPortchannelInterface, self).setUp()
        # Mock the node's enable() call with canned 'show port-channel'
        # output; 'with' guarantees the handle closes even on error.
        with open(get_fixture('show_portchannel.json')) as response:
            self.node.enable.return_value = json.load(response)

    def test_get(self):
        """get() returns the expected resource dict for Port-Channel1."""
        result = self.instance.get('Port-Channel1')
        values = dict(name='Port-Channel1', type='portchannel',
                      description=None, shutdown=False,
                      lacp_mode='on', minimum_links=0,
                      members=['Ethernet5', 'Ethernet6'])
        self.assertEqual(values, result)

    def test_set_minimum_links_with_value(self):
        """set_minimum_links emits 'port-channel min-links <n>'."""
        minlinks = random_int(0, 16)
        cmds = ['interface Port-Channel1',
                'port-channel min-links %s' % minlinks]
        func = function('set_minimum_links', 'Port-Channel1', minlinks)
        self.eapi_positive_config_test(func, cmds)

    def test_set_minimum_links_with_no_value(self):
        """Omitting the value negates min-links."""
        cmds = ['interface Port-Channel1', 'no port-channel min-links']
        func = function('set_minimum_links', 'Port-Channel1')
        self.eapi_positive_config_test(func, cmds)

    def test_set_minimum_links_with_default(self):
        """default=True emits 'default port-channel min-links'."""
        cmds = ['interface Port-Channel1', 'default port-channel min-links']
        func = function('set_minimum_links', 'Port-Channel1', default=True)
        self.eapi_positive_config_test(func, cmds)

    def test_get_lacp_mode(self):
        """get_lacp_mode reads the configured LACP mode."""
        result = self.instance.get_lacp_mode('Port-Channel1')
        self.assertEqual(result, 'on')

    def test_get_members(self):
        """get_members lists the channel-group member interfaces."""
        result = self.instance.get_members('Port-Channel1')
        self.assertEqual(result, ['Ethernet5', 'Ethernet6'])

    def test_set_members(self):
        """set_members removes stale members and adds new ones."""
        cmds = ['interface Ethernet6', 'no channel-group 1',
                'interface Ethernet7', 'channel-group 1 mode on']
        func = function('set_members', 'Port-Channel1',
                        ['Ethernet5', 'Ethernet7'])
        self.eapi_positive_config_test(func, cmds)

    def test_set_members_no_changes(self):
        """set_members with the current membership emits no commands."""
        func = function('set_members', 'Port-Channel1',
                        ['Ethernet5', 'Ethernet6'])
        self.eapi_positive_config_test(func)

    def test_set_lacp_mode(self):
        """set_lacp_mode re-adds every member with the new mode."""
        cmds = ['interface Ethernet5', 'no channel-group 1',
                'interface Ethernet6', 'no channel-group 1',
                'interface Ethernet5', 'channel-group 1 mode active',
                'interface Ethernet6', 'channel-group 1 mode active']
        func = function('set_lacp_mode', 'Port-Channel1', 'active')
        self.eapi_positive_config_test(func, cmds)

    def test_set_lacp_mode_invalid_mode(self):
        """An unknown LACP mode is rejected."""
        func = function('set_lacp_mode', 'Port-Channel1', random_string())
        self.eapi_negative_config_test(func)
class TestApiVxlanInterface(EapiConfigUnitTest):
    """Unit tests for pyeapi.api.interfaces.VxlanInterface."""

    def __init__(self, *args, **kwargs):
        super(TestApiVxlanInterface, self).__init__(*args, **kwargs)
        self.instance = pyeapi.api.interfaces.VxlanInterface(None)
        # Close the fixture handle instead of leaking it.
        with open(get_fixture('running_config.vxlan')) as fixture:
            self.config = fixture.read()

    def test_get(self):
        """get() exposes exactly the documented vxlan resource keys."""
        keys = ['name', 'type', 'description', 'shutdown', 'source_interface',
                'multicast_group', 'udp_port', 'vlans', 'flood_list']
        result = self.instance.get('Vxlan1')
        self.assertEqual(sorted(keys), sorted(result.keys()))

    def test_set_source_interface_with_value(self):
        """set_source_interface emits 'vxlan source-interface <intf>'."""
        cmds = ['interface Vxlan1', 'vxlan source-interface Loopback0']
        func = function('set_source_interface', 'Vxlan1', 'Loopback0')
        self.eapi_positive_config_test(func, cmds)

    def test_set_source_interface_with_no_value(self):
        """Omitting the value negates the source-interface."""
        cmds = ['interface Vxlan1', 'no vxlan source-interface']
        func = function('set_source_interface', 'Vxlan1')
        self.eapi_positive_config_test(func, cmds)

    def test_set_source_interface_with_default(self):
        """default=True emits 'default vxlan source-interface'."""
        cmds = ['interface Vxlan1', 'default vxlan source-interface']
        func = function('set_source_interface', 'Vxlan1', default=True)
        self.eapi_positive_config_test(func, cmds)

    def test_set_multicast_group_with_value(self):
        """set_multicast_group emits 'vxlan multicast-group <addr>'."""
        cmds = ['interface Vxlan1', 'vxlan multicast-group 239.10.10.10']
        func = function('set_multicast_group', 'Vxlan1', '239.10.10.10')
        self.eapi_positive_config_test(func, cmds)

    def test_set_multicast_group_with_no_value(self):
        """Omitting the value negates the multicast-group."""
        cmds = ['interface Vxlan1', 'no vxlan multicast-group']
        func = function('set_multicast_group', 'Vxlan1')
        self.eapi_positive_config_test(func, cmds)

    def test_set_multicast_group_with_default(self):
        """default=True emits 'default vxlan multicast-group'."""
        cmds = ['interface Vxlan1', 'default vxlan multicast-group']
        func = function('set_multicast_group', 'Vxlan1', default=True)
        self.eapi_positive_config_test(func, cmds)

    def test_set_udp_port_with_value(self):
        """set_udp_port emits 'vxlan udp-port <port>'."""
        cmds = ['interface Vxlan1', 'vxlan udp-port 1024']
        func = function('set_udp_port', 'Vxlan1', '1024')
        self.eapi_positive_config_test(func, cmds)

    def test_set_udp_port_with_no_value(self):
        """Omitting the value negates the udp-port."""
        cmds = ['interface Vxlan1', 'no vxlan udp-port']
        func = function('set_udp_port', 'Vxlan1')
        self.eapi_positive_config_test(func, cmds)

    def test_set_udp_port_with_default(self):
        """default=True emits 'default vxlan udp-port'."""
        cmds = ['interface Vxlan1', 'default vxlan udp-port']
        func = function('set_udp_port', 'Vxlan1', default=True)
        self.eapi_positive_config_test(func, cmds)

    def test_update_vlan(self):
        """update_vlan maps a VLAN to a VNI."""
        cmds = ['interface Vxlan1', 'vxlan vlan 10 vni 10']
        func = function('update_vlan', 'Vxlan1', 10, 10)
        self.eapi_positive_config_test(func, cmds)

    def test_remove_vlan(self):
        """remove_vlan removes the VLAN to VNI mapping."""
        cmds = ['interface Vxlan1', 'no vxlan vlan 10 vni']
        func = function('remove_vlan', 'Vxlan1', 10)
        self.eapi_positive_config_test(func, cmds)

    def test_add_vtep(self):
        """add_vtep adds an address to the global flood list."""
        cmds = ['interface Vxlan1', 'vxlan flood vtep add 1.1.1.1']
        func = function('add_vtep', 'Vxlan1', '1.1.1.1')
        self.eapi_positive_config_test(func, cmds)

    def test_add_vtep_to_vlan(self):
        """add_vtep with vlan= scopes the flood entry to that VLAN."""
        cmds = ['interface Vxlan1', 'vxlan vlan 10 flood vtep add 1.1.1.1']
        func = function('add_vtep', 'Vxlan1', '1.1.1.1', vlan='10')
        self.eapi_positive_config_test(func, cmds)

    def test_remove_vtep(self):
        """remove_vtep removes an address from the global flood list."""
        cmds = ['interface Vxlan1', 'vxlan flood vtep remove 1.1.1.1']
        func = function('remove_vtep', 'Vxlan1', '1.1.1.1')
        self.eapi_positive_config_test(func, cmds)

    def test_remove_vtep_from_vlan(self):
        """remove_vtep with vlan= removes the VLAN-scoped flood entry."""
        cmds = ['interface Vxlan1', 'vxlan vlan 10 flood vtep remove 1.1.1.1']
        func = function('remove_vtep', 'Vxlan1', '1.1.1.1', vlan='10')
        self.eapi_positive_config_test(func, cmds)
# Allow this test module to be executed directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "0f48bc00a118d31650efdd94efe217ab",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 79,
"avg_line_length": 41.6710182767624,
"alnum_prop": 0.6025062656641604,
"repo_name": "SivagnanamCiena/pyeapi",
"id": "e7d8cbe3b9183c11bf2432e2901614dc63c7bd93",
"size": "17490",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "test/unit/test_api_interfaces.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1521"
},
{
"name": "Python",
"bytes": "363606"
}
],
"symlink_target": ""
} |
"""
Policy engine for neutron. Largely copied from nova.
"""
import itertools
import re
from oslo.config import cfg
from neutron.api.v2 import attributes
from neutron.common import exceptions
import neutron.common.utils as utils
from neutron import manager
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import policy
LOG = logging.getLogger(__name__)

# Cached policy file location and read_cached_file cache; populated by init()
# and cleared by reset().
_POLICY_PATH = None
_POLICY_CACHE = {}
# Name of the policy rule that grants admin rights to a request context.
ADMIN_CTX_POLICY = 'context_is_admin'
# Maps deprecated 'extension' policies to new-style policies
DEPRECATED_POLICY_MAP = {
    'extension:provider_network':
    ['network:provider:network_type',
     'network:provider:physical_network',
     'network:provider:segmentation_id'],
    'extension:router':
    ['network:router:external'],
    'extension:port_binding':
    ['port:binding:vif_type', 'port:binding:capabilities',
     'port:binding:profile', 'port:binding:host_id']
}
# Maps deprecated 'view'/'set' verbs onto the CRUD-style action verbs.
DEPRECATED_ACTION_MAP = {
    'view': ['get'],
    'set': ['create', 'update']
}

# Register the policy_file option before it is read below.
cfg.CONF.import_opt('policy_file', 'neutron.common.config')
def reset():
    """Drop all cached policy state; the next init() re-reads the file."""
    global _POLICY_PATH, _POLICY_CACHE
    _POLICY_PATH = None
    _POLICY_CACHE = {}
    policy.reset()
def init():
    """Locate the policy file and (re)load the rules when it has changed.

    :raises exceptions.PolicyFileNotFound: if no policy file can be found.
    """
    global _POLICY_PATH
    global _POLICY_CACHE
    if not _POLICY_PATH:
        _POLICY_PATH = utils.find_config_file({}, cfg.CONF.policy_file)
        if not _POLICY_PATH:
            raise exceptions.PolicyFileNotFound(path=cfg.CONF.policy_file)
    # pass _set_rules to read_cached_file so that the policy brain
    # is reset only if the file has changed
    utils.read_cached_file(_POLICY_PATH, _POLICY_CACHE,
                           reload_func=_set_rules)
def get_resource_and_action(action):
    """Extract resource and action (write, read) from an api operation.

    'create_network:shared' -> ('networks', True); anything whose verb is
    'get' is a read (False).
    """
    base = action.split(':', 1)[0]
    verb, sep, rest = base.partition('_')
    # No underscore means the whole token doubles as the resource name.
    resource = rest if sep else verb
    return ('%ss' % resource, verb != 'get')
def _set_rules(data):
    """Parse policy *data* (JSON text) and install it as the active rules.

    Deprecated folsom/grizzly 'extension:*' policies are rewritten into
    their per-attribute equivalents before the rules are handed to the
    policy engine.
    """
    default_rule = 'default'
    LOG.debug(_("Loading policies from file: %s"), _POLICY_PATH)
    # Ensure backward compatibility with folsom/grizzly convention
    # for extension rules
    policies = policy.Rules.load_json(data, default_rule)
    # NOTE: entries are deleted inside the loop; under Python 2 .keys()
    # returns a list snapshot, which makes that safe.
    for pol in policies.keys():
        if any([pol.startswith(depr_pol) for depr_pol in
                DEPRECATED_POLICY_MAP.keys()]):
            LOG.warn(_("Found deprecated policy rule:%s. Please consider "
                       "upgrading your policy configuration file"), pol)
            pol_name, action = pol.rsplit(':', 1)
            try:
                new_actions = DEPRECATED_ACTION_MAP[action]
                new_policies = DEPRECATED_POLICY_MAP[pol_name]
                # bind new actions and policies together
                for actual_policy in ['_'.join(item) for item in
                                      itertools.product(new_actions,
                                                        new_policies)]:
                    if actual_policy not in policies:
                        # New policy, same rule
                        LOG.info(_("Inserting policy:%(new_policy)s in place "
                                   "of deprecated policy:%(old_policy)s"),
                                 {'new_policy': actual_policy,
                                  'old_policy': pol})
                        policies[actual_policy] = policies[pol]
                # Remove old-style policy
                del policies[pol]
            except KeyError:
                LOG.error(_("Backward compatibility unavailable for "
                            "deprecated policy %s. The policy will "
                            "not be enforced"), pol)
    policy.set_rules(policies)
def _is_attribute_explicitly_set(attribute_name, resource, target):
    """Verify that an attribute is present and has a non-default value."""
    spec = resource[attribute_name]
    # Attributes without a declared default are never "explicitly set".
    if 'default' not in spec or attribute_name not in target:
        return False
    value = target[attribute_name]
    return (value is not attributes.ATTR_NOT_SPECIFIED
            and value != spec['default'])
def _build_subattr_match_rule(attr_name, attr, action, target):
    """Create the rule to match for sub-attribute policy checks.

    Returns an AndCheck over one RuleCheck per sub-attribute present in
    the request, or None when no usable 'type:dict' descriptor exists.
    """
    # TODO(salv-orlando): Instead of relying on validator info, introduce
    # typing for API attributes
    # Expect a dict as type descriptor
    validate = attr['validate']
    # Python 2: filter() returns a list here; key[0] below relies on that.
    key = filter(lambda k: k.startswith('type:dict'), validate.keys())
    if not key:
        LOG.warn(_("Unable to find data type descriptor for attribute %s"),
                 attr_name)
        return
    data = validate[key[0]]
    if not isinstance(data, dict):
        LOG.debug(_("Attribute type descriptor is not a dict. Unable to "
                    "generate any sub-attr policy rule for %s."),
                  attr_name)
        return
    # One rule per sub-attribute actually supplied in the request, e.g.
    # 'create_router:external_gateway_info:network_id'.
    sub_attr_rules = [policy.RuleCheck('rule', '%s:%s:%s' %
                                       (action, attr_name,
                                        sub_attr_name)) for
                      sub_attr_name in data if sub_attr_name in
                      target[attr_name]]
    return policy.AndCheck(sub_attr_rules)
def _build_match_rule(action, target):
    """Create the rule to match for a given action.

    The policy rule to be matched is built in the following way:
    1) add entries for matching permission on objects
    2) add an entry for the specific action (e.g.: create_network)
    3) add an entry for attributes of a resource for which the action
       is being executed (e.g.: create_network:shared)
    4) add an entry for sub-attributes of a resource for which the
       action is being executed
       (e.g.: create_router:external_gateway_info:network_id)
    """
    match_rule = policy.RuleCheck('rule', action)
    resource, is_write = get_resource_and_action(action)
    # Attribute-based checks shall not be enforced on GETs
    if is_write:
        # assigning to variable with short name for improving readability
        res_map = attributes.RESOURCE_ATTRIBUTE_MAP
        if resource in res_map:
            for attribute_name in res_map[resource]:
                # Only attributes the request explicitly sets (to a
                # non-default value) can trigger extra policy rules.
                if _is_attribute_explicitly_set(attribute_name,
                                                res_map[resource],
                                                target):
                    attribute = res_map[resource][attribute_name]
                    if 'enforce_policy' in attribute:
                        attr_rule = policy.RuleCheck('rule', '%s:%s' %
                                                     (action, attribute_name))
                        # Build match entries for sub-attributes, if present
                        validate = attribute.get('validate')
                        if (validate and any([k.startswith('type:dict') and v
                                              for (k, v) in
                                              validate.iteritems()])):
                            attr_rule = policy.AndCheck(
                                [attr_rule, _build_subattr_match_rule(
                                    attribute_name, attribute,
                                    action, target)])
                        # All accumulated rules must pass together.
                        match_rule = policy.AndCheck([match_rule, attr_rule])
    return match_rule
# This check is registered as 'tenant_id' so that it can override
# GenericCheck which was used for validating parent resource ownership.
# This will prevent us from having to handling backward compatibility
# for policy.json
# TODO(salv-orlando): Reinstate GenericCheck for simple tenant_id checks
@policy.register('tenant_id')
class OwnerCheck(policy.Check):
    """Resource ownership check.

    This check verifies the owner of the current resource, or of another
    resource referenced by the one under analysis.
    In the former case it falls back to a regular GenericCheck, whereas
    in the latter case it leverages the plugin to load the referenced
    resource and perform the check.
    """
    def __init__(self, kind, match):
        """Extract the target field name from a '%(<field_name>)s' match.

        :raises exceptions.PolicyInitError: if *match* is not in the
            expected substitution form.
        """
        # Process the match
        try:
            self.target_field = re.findall('^\%\((.*)\)s$',
                                           match)[0]
        except IndexError:
            err_reason = (_("Unable to identify a target field from:%s."
                            "match should be in the form %%(<field_name>)s") %
                          match)
            LOG.exception(err_reason)
            raise exceptions.PolicyInitError(
                policy="%s:%s" % (kind, match),
                reason=err_reason)
        super(OwnerCheck, self).__init__(kind, match)

    def __call__(self, target, creds):
        """Evaluate ownership against the credentials.

        When the field names another resource (e.g. 'network:tenant_id')
        the parent resource is fetched through the core plugin with an
        admin context and its field value is cached into *target*.
        """
        if self.target_field not in target:
            # policy needs a plugin check
            # target field is in the form resource:field
            # however if they're not separated by a colon, use an underscore
            # as a separator for backward compatibility
            def do_split(separator):
                parent_res, parent_field = self.target_field.split(
                    separator, 1)
                return parent_res, parent_field

            for separator in (':', '_'):
                try:
                    parent_res, parent_field = do_split(separator)
                    break
                except ValueError:
                    LOG.debug(_("Unable to find ':' as separator in %s."),
                              self.target_field)
            else:
                # If we are here split failed with both separators
                err_reason = (_("Unable to find resource name in %s") %
                              self.target_field)
                LOG.exception(err_reason)
                raise exceptions.PolicyCheckError(
                    policy="%s:%s" % (self.kind, self.match),
                    reason=err_reason)
            parent_foreign_key = attributes.RESOURCE_FOREIGN_KEYS.get(
                "%ss" % parent_res, None)
            if not parent_foreign_key:
                err_reason = (_("Unable to verify match:%(match)s as the "
                                "parent resource: %(res)s was not found") %
                              {'match': self.match, 'res': parent_res})
                LOG.exception(err_reason)
                raise exceptions.PolicyCheckError(
                    policy="%s:%s" % (self.kind, self.match),
                    reason=err_reason)
            # NOTE(salv-orlando): This check currently assumes the parent
            # resource is handled by the core plugin. It might be worth
            # having a way to map resources to plugins so to make this
            # check more general
            f = getattr(manager.NeutronManager.get_instance().plugin,
                        'get_%s' % parent_res)
            # f *must* exist, if not found it is better to let neutron
            # explode. Check will be performed with admin context
            context = importutils.import_module('neutron.context')
            try:
                data = f(context.get_admin_context(),
                         target[parent_foreign_key],
                         fields=[parent_field])
                target[self.target_field] = data[parent_field]
            except Exception:
                LOG.exception(_('Policy check error while calling %s!'), f)
                raise
        match = self.match % target
        # Python 2: compare against the unicode form of the credential.
        if self.kind in creds:
            return match == unicode(creds[self.kind])
        return False
@policy.register('field')
class FieldCheck(policy.Check):
    """Compare one field of the target against a literal value.

    The match is expressed as '<resource>:<field>=<value>'.
    """
    def __init__(self, kind, match):
        # Split '<resource>:<field>=<value>' into its three parts.
        resource, field_value = match.split(':', 1)
        field, value = field_value.split('=', 1)
        super(FieldCheck, self).__init__(
            kind, '%s:%s:%s' % (resource, field, value))
        # Convert the literal with the attribute map's converter when one
        # is declared; otherwise keep the raw string.
        try:
            attr = attributes.RESOURCE_ATTRIBUTE_MAP[resource][field]
            conv_func = attr['convert_to']
        except KeyError:
            def conv_func(x):
                return x
        self.field = field
        self.value = conv_func(value)

    def __call__(self, target_dict, cred_dict):
        """Return True when the target carries the expected field value."""
        target_value = target_dict.get(self.field)
        # target_value might be a boolean, explicitly compare with None
        if target_value is None:
            LOG.debug(_("Unable to find requested field: %(field)s in "
                        "target: %(target_dict)s"),
                      {'field': self.field,
                       'target_dict': target_dict})
            return False
        return target_value == self.value
def _prepare_check(context, action, target):
    """Prepare rule, target, and credentials for the policy engine."""
    init()
    # {} is a valid target, so only replace an explicit None.
    target = {} if target is None else target
    match_rule = _build_match_rule(action, target)
    return match_rule, target, context.to_dict()
def check(context, action, target, plugin=None):
    """Verify that *action* may be performed on *target* in this context.

    :param context: neutron context
    :param action: colon-separated string naming the action to check
    :param target: dict describing the object of the action; for object
        creation this describes where the object lives, e.g.
        ``{'project_id': context.project_id}``
    :param plugin: unused and deprecated, kept for backward compatibility
    :return: True if access is permitted else False.
    """
    match_rule, target, credentials = _prepare_check(context, action, target)
    return policy.check(match_rule, target, credentials)
def check_if_exists(context, action, target):
    """Authorize *action*, raising when the rule is unknown.

    Behaves like check() except that a PolicyRuleNotFound is raised when
    *action* has no entry in the policy engine at all.
    """
    # TODO(salvatore-orlando): Consider modifying oslo policy engine in
    # order to allow to raise distinct exception when check fails and
    # when policy is missing
    # Raise if there's no match for requested action in the policy engine
    if not policy._rules or action not in policy._rules:
        raise exceptions.PolicyRuleNotFound(rule=action)
    rule, target, credentials = _prepare_check(context, action, target)
    return policy.check(rule, target, credentials)
def enforce(context, action, target, plugin=None):
    """Verify *action* on *target*, raising on authorization failure.

    :param context: neutron context
    :param action: colon-separated string naming the action to check
    :param target: dict describing the object of the action; for object
        creation this describes where the object lives, e.g.
        ``{'project_id': context.project_id}``
    :param plugin: unused and deprecated, kept for backward compatibility
    :raises exceptions.PolicyNotAuthorized: if verification fails.
    """
    init()
    match_rule, target, credentials = _prepare_check(context, action, target)
    return policy.check(match_rule, target, credentials,
                        exc=exceptions.PolicyNotAuthorized, action=action)
def check_is_admin(context):
    """Verify context has admin rights according to policy settings.

    :param context: neutron context whose credentials are inspected
    :return: True when the context passes the admin policy rule.
    """
    init()
    # the target of an admin check is the user itself
    credentials = context.to_dict()
    target = credentials
    # Backward compatibility: if ADMIN_CTX_POLICY is not
    # found, default to validating role:admin.
    # FIX: the old `cond and a or b` idiom silently picks `b` whenever `a`
    # is falsy; a conditional expression states the intent safely.
    admin_policy = (ADMIN_CTX_POLICY if ADMIN_CTX_POLICY in policy._rules
                    else 'role:admin')
    return policy.check(admin_policy, target, credentials)
def _extract_roles(rule, roles):
    """Recursively append role names referenced by *rule* to *roles*."""
    if isinstance(rule, policy.RoleCheck):
        roles.append(rule.match.lower())
        return
    if isinstance(rule, policy.RuleCheck):
        # Follow the indirection to the named rule.
        _extract_roles(policy._rules[rule.match], roles)
        return
    # Composite checks expose their children via a 'rules' attribute.
    for sub_rule in getattr(rule, 'rules', ()):
        _extract_roles(sub_rule, roles)
def get_admin_roles():
    """Return a list of roles which are granted admin rights according
    to policy settings.
    """
    # NOTE(salvatore-orlando): This function provides a solution for
    # populating implicit contexts with the appropriate roles so that
    # they correctly pass policy checks, and will become superseded
    # once all explicit policy checks are removed from db logic and
    # plugin modules. For backward compatibility it returns the literal
    # admin if ADMIN_CTX_POLICY is not defined
    init()
    if not policy._rules or ADMIN_CTX_POLICY not in policy._rules:
        return ['admin']
    try:
        admin_ctx_rule = policy._rules[ADMIN_CTX_POLICY]
    except (KeyError, TypeError):
        # NOTE(review): this path returns None while every other path
        # returns a list -- callers must tolerate None; confirm before
        # changing. (KeyError also looks unreachable after the membership
        # test above unless _rules mutates concurrently.)
        return
    roles = []
    _extract_roles(admin_ctx_rule, roles)
    return roles
| {
"content_hash": "c9209a19aba731fd3c99f1d675c539b4",
"timestamp": "",
"source": "github",
"line_count": 407,
"max_line_length": 78,
"avg_line_length": 41.75921375921376,
"alnum_prop": 0.5951988703224288,
"repo_name": "citrix-openstack-build/neutron",
"id": "a129813a2139dad48e17633ce25e791c51effe5c",
"size": "17682",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Python",
"bytes": "6817315"
},
{
"name": "Shell",
"bytes": "8983"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class ForgottenPasswordInformation(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared swagger type.
    swagger_types = {
        'forgotten_password_answer1': 'str',
        'forgotten_password_answer2': 'str',
        'forgotten_password_answer3': 'str',
        'forgotten_password_answer4': 'str',
        'forgotten_password_question1': 'str',
        'forgotten_password_question2': 'str',
        'forgotten_password_question3': 'str',
        'forgotten_password_question4': 'str'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'forgotten_password_answer1': 'forgottenPasswordAnswer1',
        'forgotten_password_answer2': 'forgottenPasswordAnswer2',
        'forgotten_password_answer3': 'forgottenPasswordAnswer3',
        'forgotten_password_answer4': 'forgottenPasswordAnswer4',
        'forgotten_password_question1': 'forgottenPasswordQuestion1',
        'forgotten_password_question2': 'forgottenPasswordQuestion2',
        'forgotten_password_question3': 'forgottenPasswordQuestion3',
        'forgotten_password_question4': 'forgottenPasswordQuestion4'
    }
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""ForgottenPasswordInformation - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._forgotten_password_answer1 = None
self._forgotten_password_answer2 = None
self._forgotten_password_answer3 = None
self._forgotten_password_answer4 = None
self._forgotten_password_question1 = None
self._forgotten_password_question2 = None
self._forgotten_password_question3 = None
self._forgotten_password_question4 = None
self.discriminator = None
setattr(self, "_{}".format('forgotten_password_answer1'), kwargs.get('forgotten_password_answer1', None))
setattr(self, "_{}".format('forgotten_password_answer2'), kwargs.get('forgotten_password_answer2', None))
setattr(self, "_{}".format('forgotten_password_answer3'), kwargs.get('forgotten_password_answer3', None))
setattr(self, "_{}".format('forgotten_password_answer4'), kwargs.get('forgotten_password_answer4', None))
setattr(self, "_{}".format('forgotten_password_question1'), kwargs.get('forgotten_password_question1', None))
setattr(self, "_{}".format('forgotten_password_question2'), kwargs.get('forgotten_password_question2', None))
setattr(self, "_{}".format('forgotten_password_question3'), kwargs.get('forgotten_password_question3', None))
setattr(self, "_{}".format('forgotten_password_question4'), kwargs.get('forgotten_password_question4', None))
@property
def forgotten_password_answer1(self):
    """str: The answer to the first forgotten-password challenge question."""
    return self._forgotten_password_answer1

@forgotten_password_answer1.setter
def forgotten_password_answer1(self, forgotten_password_answer1):
    """Set the answer to the first forgotten-password challenge question."""
    self._forgotten_password_answer1 = forgotten_password_answer1

@property
def forgotten_password_answer2(self):
    """str: The answer to the second forgotten-password challenge question."""
    return self._forgotten_password_answer2

@forgotten_password_answer2.setter
def forgotten_password_answer2(self, forgotten_password_answer2):
    """Set the answer to the second forgotten-password challenge question."""
    self._forgotten_password_answer2 = forgotten_password_answer2

@property
def forgotten_password_answer3(self):
    """str: The answer to the third forgotten-password challenge question."""
    return self._forgotten_password_answer3

@forgotten_password_answer3.setter
def forgotten_password_answer3(self, forgotten_password_answer3):
    """Set the answer to the third forgotten-password challenge question."""
    self._forgotten_password_answer3 = forgotten_password_answer3

@property
def forgotten_password_answer4(self):
    """str: The answer to the fourth forgotten-password challenge question."""
    return self._forgotten_password_answer4

@forgotten_password_answer4.setter
def forgotten_password_answer4(self, forgotten_password_answer4):
    """Set the answer to the fourth forgotten-password challenge question."""
    self._forgotten_password_answer4 = forgotten_password_answer4

@property
def forgotten_password_question1(self):
    """str: The first challenge question presented to a user who has
    forgotten their password."""
    return self._forgotten_password_question1

@forgotten_password_question1.setter
def forgotten_password_question1(self, forgotten_password_question1):
    """Set the first forgotten-password challenge question."""
    self._forgotten_password_question1 = forgotten_password_question1

@property
def forgotten_password_question2(self):
    """str: The second challenge question presented to a user who has
    forgotten their password."""
    return self._forgotten_password_question2

@forgotten_password_question2.setter
def forgotten_password_question2(self, forgotten_password_question2):
    """Set the second forgotten-password challenge question."""
    self._forgotten_password_question2 = forgotten_password_question2

@property
def forgotten_password_question3(self):
    """str: The third challenge question presented to a user who has
    forgotten their password."""
    return self._forgotten_password_question3

@forgotten_password_question3.setter
def forgotten_password_question3(self, forgotten_password_question3):
    """Set the third forgotten-password challenge question."""
    self._forgotten_password_question3 = forgotten_password_question3

@property
def forgotten_password_question4(self):
    """str: The fourth challenge question presented to a user who has
    forgotten their password."""
    return self._forgotten_password_question4

@forgotten_password_question4.setter
def forgotten_password_question4(self, forgotten_password_question4):
    """Set the fourth forgotten-password challenge question."""
    self._forgotten_password_question4 = forgotten_password_question4
def to_dict(self):
    """Return the model's properties as a plain ``dict``.

    Any nested value exposing a ``to_dict`` method (i.e. another
    generated model) is converted recursively, whether it appears
    directly, inside a list, or as a dict value.
    """
    def _convert(obj):
        # Recurse into nested swagger models; pass plain values through.
        return obj.to_dict() if hasattr(obj, "to_dict") else obj

    result = {}
    for attr, _ in six.iteritems(self.swagger_types):
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [_convert(element) for element in value]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {key: _convert(val) for key, val in value.items()}
        else:
            result[attr] = value
    # If the generated model ever subclasses dict, fold its own items in.
    if issubclass(ForgottenPasswordInformation, dict):
        for key, value in self.items():
            result[key] = value
    return result
def to_str(self):
    """Return a pretty-printed string representation of the model."""
    model_dict = self.to_dict()
    return pprint.pformat(model_dict)
def __repr__(self):
    """For `print` and `pprint` — delegate to :meth:`to_str`."""
    return self.to_str()
def __eq__(self, other):
    """Return True when *other* is the same model type with equal properties."""
    if isinstance(other, ForgottenPasswordInformation):
        return self.to_dict() == other.to_dict()
    return False
def __ne__(self, other):
    """Return True when the two objects are not equal (inverse of __eq__)."""
    if isinstance(other, ForgottenPasswordInformation):
        return self.to_dict() != other.to_dict()
    return True
| {
"content_hash": "2c9b63b17a33370e69eab897ca15072d",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 140,
"avg_line_length": 41.266881028938904,
"alnum_prop": 0.6749259778712794,
"repo_name": "docusign/docusign-python-client",
"id": "f3b6a25abe6c7f2f0788baac964f78f2ad5d0d72",
"size": "12851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docusign_esign/models/forgotten_password_information.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9687716"
}
],
"symlink_target": ""
} |
"""
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from nipype.interfaces.base import CommandLineInputSpec, CommandLine, traits, TraitedSpec, File
from nipype.utils.filemanip import split_filename
import os, os.path as op
class Tracks2ProbInputSpec(CommandLineInputSpec):
    """Input spec for the MRtrix ``tracks2prob`` command line."""

    # Positional tract (.tck) input; -2 keeps it just before the output.
    in_file = File(exists=True, argstr='%s', mandatory=True, position=-2,
        desc='tract file')
    template_file = File(exists=True, argstr='-template %s', position=1,
        desc='an image file to be used as a template for the output (the output image wil have the same transform and field of view)')
    voxel_dims = traits.List(traits.Float, argstr='-vox %s', sep=',', position=2, minlen=3, maxlen=3,
        desc='Three comma-separated numbers giving the size of each voxel in mm.')
    colour = traits.Bool(argstr='-colour', position=3, desc="add colour to the output image according to the direction of the tracks.")
    fraction = traits.Bool(argstr='-fraction', position=3, desc="produce an image of the fraction of fibres through each voxel (as a proportion of the total number in the file), rather than the count.")
    output_datatype = traits.Enum("nii", "float", "char", "short", "int", "long", "double", argstr='-datatype %s', position=2,
        desc='"i.e. Bfloat". Can be "char", "short", "int", "long", "float" or "double"') #, usedefault=True)
    # NOTE(review): argstr uses %d although the trait is Float — tracks2prob
    # appears to expect an integer resample factor; confirm against MRtrix.
    resample = traits.Float(argstr='-resample %d', position=3,
        units='mm', desc='resample the tracks at regular intervals using Hermite interpolation. If omitted, the program will select an appropriate interpolation factor automatically.')
    # Auto-generated via Tracks2Prob._gen_filename when not set.
    out_filename = File(genfile=True, argstr='%s', position= -1, desc='output data file')
class Tracks2ProbOutputSpec(TraitedSpec):
    """Output spec for :class:`Tracks2Prob`."""

    tract_image = File(exists=True, desc='Output tract count or track density image')
class Tracks2Prob(CommandLine):
    """
    Convert a tract file into a map of the fraction of tracks to enter
    each voxel - also known as a tract density image (TDI) - in MRtrix's
    image format (.mif). This can be viewed using MRview or converted to
    Nifti using MRconvert.

    Example
    -------

    >>> import nipype.interfaces.mrtrix as mrt
    >>> tdi = mrt.Tracks2Prob()
    >>> tdi.inputs.in_file = 'dwi_CSD_tracked.tck'
    >>> tdi.inputs.colour = True
    >>> tdi.run()                                 # doctest: +SKIP
    """

    _cmd = 'tracks2prob'
    input_spec = Tracks2ProbInputSpec
    output_spec = Tracks2ProbOutputSpec

    def _list_outputs(self):
        """Map the generated TDI file to the ``tract_image`` output."""
        outputs = self.output_spec().get()
        outputs['tract_image'] = op.abspath(self._gen_outfilename())
        return outputs

    def _gen_filename(self, name):
        """Auto-generate a value for ``out_filename``; None for other traits."""
        # BUG FIX: the original compared with `name is 'out_filename'`;
        # identity comparison against a str literal is implementation
        # dependent — use equality.
        if name == 'out_filename':
            return self._gen_outfilename()
        else:
            return None

    def _gen_outfilename(self):
        """Return '<input basename>_TDI.mif'."""
        _, name, _ = split_filename(self.inputs.in_file)
        return name + '_TDI.mif'
class StreamlineTrackInputSpec(CommandLineInputSpec):
    """Input spec for the MRtrix ``streamtrack`` command line.

    BUG FIXES versus the original: the ``include_spec``, ``exclude_spec``
    and ``mask_spec`` traits all emitted ``-seed`` (copy-paste error),
    ``mask_file`` emitted ``-exclude``, and ``stop`` emitted ``-gzip``.
    Each now emits the MRtrix option its name and description declare
    (``-include`` / ``-exclude`` / ``-mask`` / ``-stop``).
    """

    in_file = File(exists=True, argstr='%s', mandatory=True, position=-2, desc='the image containing the source data.' \
        'The type of data required depends on the type of tracking as set in the preceeding argument. For DT methods, ' \
        'the base DWI are needed. For SD methods, the SH harmonic coefficients of the FOD are needed.')
    seed_file = File(exists=True, argstr='-seed %s', mandatory=False, position=2, desc='seed file')
    seed_spec = traits.List(traits.Int, desc='seed specification in voxels and radius (x y z r)', position=2,
        argstr='-seed %s', minlen=4, maxlen=4, sep=',', units='voxels')
    include_file = File(exists=True, argstr='-include %s', mandatory=False, position=2, desc='inclusion file')
    # FIXED: was argstr='-seed %s'
    include_spec = traits.List(traits.Int, desc='inclusion specification in voxels and radius (x y z r)', position=2,
        argstr='-include %s', minlen=4, maxlen=4, sep=',', units='voxels')
    exclude_file = File(exists=True, argstr='-exclude %s', mandatory=False, position=2, desc='exclusion file')
    # FIXED: was argstr='-seed %s'
    exclude_spec = traits.List(traits.Int, desc='exclusion specification in voxels and radius (x y z r)', position=2,
        argstr='-exclude %s', minlen=4, maxlen=4, sep=',', units='voxels')
    # FIXED: was argstr='-exclude %s'
    mask_file = File(exists=True, argstr='-mask %s', mandatory=False, position=2, desc='mask file. Only tracks within mask.')
    # FIXED: was argstr='-seed %s'
    mask_spec = traits.List(traits.Int, desc='Mask specification in voxels and radius (x y z r). Tracks will be terminated when they leave the ROI.', position=2,
        argstr='-mask %s', minlen=4, maxlen=4, sep=',', units='voxels')
    inputmodel = traits.Enum('DT_STREAM', 'SD_PROB', 'SD_STREAM',
        argstr='%s', desc='input model type', usedefault=True, position=-3)
    # FIXED: was argstr='-gzip'
    stop = traits.Bool(argstr='-stop', desc="stop track as soon as it enters any of the include regions.")
    do_not_precompute = traits.Bool(argstr='-noprecomputed', desc="Turns off precomputation of the legendre polynomial values. Warning: this will slow down the algorithm by a factor of approximately 4.")
    unidirectional = traits.Bool(argstr='-unidirectional', desc="Track from the seed point in one direction only (default is to track in both directions).")
    no_mask_interpolation = traits.Bool(argstr='-nomaskinterp', desc="Turns off trilinear interpolation of mask images.")
    step_size = traits.Float(argstr='-step %s', units='mm',
        desc="Set the step size of the algorithm in mm (default is 0.2).")
    minimum_radius_of_curvature = traits.Float(argstr='-curvature %s', units='mm',
        desc="Set the minimum radius of curvature (default is 2 mm for DT_STREAM, 0 for SD_STREAM, 1 mm for SD_PROB and DT_PROB)")
    desired_number_of_tracks = traits.Int(argstr='-number %d', desc='Sets the desired number of tracks.' \
        'The program will continue to generate tracks until this number of tracks have been selected and written to the output file' \
        '(default is 100 for *_STREAM methods, 1000 for *_PROB methods).')
    maximum_number_of_tracks = traits.Int(argstr='-maxnum %d', desc='Sets the maximum number of tracks to generate.' \
        "The program will not generate more tracks than this number, even if the desired number of tracks hasn't yet been reached" \
        '(default is 100 x number).')
    minimum_tract_length = traits.Float(argstr='-minlength %s', units='mm',
        desc="Sets the minimum length of any track in millimeters (default is 10 mm).")
    maximum_tract_length = traits.Float(argstr='-length %s', units='mm',
        desc="Sets the maximum length of any track in millimeters (default is 200 mm).")
    cutoff_value = traits.Float(argstr='-cutoff %s', units='NA',
        desc="Set the FA or FOD amplitude cutoff for terminating tracks (default is 0.1).")
    initial_cutoff_value = traits.Float(argstr='-initcutoff %s', units='NA',
        desc="Sets the minimum FA or FOD amplitude for initiating tracks (default is twice the normal cutoff).")
    initial_direction = traits.List(traits.Int, desc='Specify the initial tracking direction as a vector',
        argstr='-initdirection %s', minlen=2, maxlen=2, units='voxels')
    # Auto-generated via StreamlineTrack._gen_filename when not set.
    out_file = File(argstr='%s', position= -1, genfile=True, desc='output data file')
class StreamlineTrackOutputSpec(TraitedSpec):
    """Output spec for :class:`StreamlineTrack` and its subclasses."""

    tracked = File(exists=True, desc='output file containing reconstructed tracts')
class StreamlineTrack(CommandLine):
    """
    Performs tractography using one of the following models:
    'dt_prob', 'dt_stream', 'sd_prob', 'sd_stream',
    Where 'dt' stands for diffusion tensor, 'sd' stands for spherical
    deconvolution, and 'prob' stands for probabilistic.

    Example
    -------

    >>> import nipype.interfaces.mrtrix as mrt
    >>> strack = mrt.StreamlineTrack()
    >>> strack.inputs.inputmodel = 'SD_PROB'
    >>> strack.inputs.in_file = 'data.Bfloat'
    >>> strack.inputs.seed_file = 'seed_mask.nii'
    >>> strack.run()                                    # doctest: +SKIP
    """

    _cmd = 'streamtrack'
    input_spec = StreamlineTrackInputSpec
    output_spec = StreamlineTrackOutputSpec

    def _list_outputs(self):
        """Map the generated tract file to the ``tracked`` output."""
        outputs = self.output_spec().get()
        outputs['tracked'] = op.abspath(self._gen_outfilename())
        return outputs

    def _gen_filename(self, name):
        """Auto-generate a value for ``out_file``; None for other traits."""
        # BUG FIX: the original compared with `name is 'out_file'`;
        # identity comparison against a str literal is implementation
        # dependent — use equality.
        if name == 'out_file':
            return self._gen_outfilename()
        else:
            return None

    def _gen_outfilename(self):
        """Return '<input basename>_tracked'."""
        _, name, _ = split_filename(self.inputs.in_file)
        return name + '_tracked'
class DiffusionTensorStreamlineTrackInputSpec(StreamlineTrackInputSpec):
    """Adds the gradient table required when tracking from raw DWI data."""

    # NOTE(review): position=-2 collides with in_file's position=-2 in the
    # parent spec — confirm the intended argument ordering.
    gradient_encoding_file = File(exists=True, argstr='-grad %s', mandatory=True, position=-2,
        desc='Gradient encoding, supplied as a 4xN text file with each line is in the format [ X Y Z b ], where [ X Y Z ] describe the direction of the applied gradient, and b gives the b-value in units (1000 s/mm^2). See FSL2MRTrix')
class DiffusionTensorStreamlineTrack(StreamlineTrack):
    """
    Specialized interface to StreamlineTrack. This interface is used for
    streamline tracking from diffusion tensor data, and calls the MRtrix
    function 'streamtrack' with the option 'DT_STREAM'

    Example
    -------

    >>> import nipype.interfaces.mrtrix as mrt
    >>> dtstrack = mrt.DiffusionTensorStreamlineTrack()
    >>> dtstrack.inputs.in_file = 'data.Bfloat'
    >>> dtstrack.inputs.seed_file = 'seed_mask.nii'
    >>> dtstrack.run()                                  # doctest: +SKIP
    """

    input_spec = DiffusionTensorStreamlineTrackInputSpec

    def __init__(self, command=None, **inputs):
        # Pin the tracking model this subclass exists for.
        inputs["inputmodel"] = "DT_STREAM"
        # FIX: __init__ must not return a value; call super() plainly
        # (the original `return super(...).__init__(...)` relied on the
        # call returning None).
        super(DiffusionTensorStreamlineTrack, self).__init__(command, **inputs)
class ProbabilisticSphericallyDeconvolutedStreamlineTrackInputSpec(StreamlineTrackInputSpec):
    """Adds the probabilistic-tracking-only ``-trials`` option."""

    # NOTE(review): units='mm' looks wrong for a trial count — confirm.
    maximum_number_of_trials = traits.Int(argstr='-trials %s', units='mm',
        desc="Set the maximum number of sampling trials at each point (only used for probabilistic tracking).")
class ProbabilisticSphericallyDeconvolutedStreamlineTrack(StreamlineTrack):
    """
    Performs probabilistic tracking using spherically deconvolved data

    Specialized interface to StreamlineTrack. This interface is used for
    probabilistic tracking from spherically deconvolved data, and calls
    the MRtrix function 'streamtrack' with the option 'SD_PROB'

    Example
    -------

    >>> import nipype.interfaces.mrtrix as mrt
    >>> sdprobtrack = mrt.ProbabilisticSphericallyDeconvolutedStreamlineTrack()
    >>> sdprobtrack.inputs.in_file = 'data.Bfloat'
    >>> sdprobtrack.inputs.seed_file = 'seed_mask.nii'
    >>> sdprobtrack.run()                               # doctest: +SKIP
    """

    input_spec = ProbabilisticSphericallyDeconvolutedStreamlineTrackInputSpec

    def __init__(self, command=None, **inputs):
        # Pin the tracking model this subclass exists for.
        inputs["inputmodel"] = "SD_PROB"
        # FIX: __init__ must not return a value; call super() plainly.
        super(ProbabilisticSphericallyDeconvolutedStreamlineTrack, self).__init__(command, **inputs)
class SphericallyDeconvolutedStreamlineTrack(StreamlineTrack):
    """
    Performs streamline tracking using spherically deconvolved data

    Specialized interface to StreamlineTrack. This interface is used for
    streamline tracking from spherically deconvolved data, and calls
    the MRtrix function 'streamtrack' with the option 'SD_STREAM'

    Example
    -------

    >>> import nipype.interfaces.mrtrix as mrt
    >>> sdtrack = mrt.SphericallyDeconvolutedStreamlineTrack()
    >>> sdtrack.inputs.in_file = 'data.Bfloat'
    >>> sdtrack.inputs.seed_file = 'seed_mask.nii'
    >>> sdtrack.run()                                   # doctest: +SKIP
    """

    input_spec = StreamlineTrackInputSpec

    def __init__(self, command=None, **inputs):
        # Pin the tracking model this subclass exists for.
        inputs["inputmodel"] = "SD_STREAM"
        # FIX: __init__ must not return a value; call super() plainly.
        super(SphericallyDeconvolutedStreamlineTrack, self).__init__(command, **inputs)
| {
"content_hash": "f19a425856e5d7b654459944aa22b390",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 230,
"avg_line_length": 52.32900432900433,
"alnum_prop": 0.6780277961614825,
"repo_name": "christianbrodbeck/nipype",
"id": "580cdaae1aeed6fd0cd4dab01c1b2abf03ff0d19",
"size": "12202",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nipype/interfaces/mrtrix/tracking.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Matlab",
"bytes": "282"
},
{
"name": "Objective-C",
"bytes": "4736"
},
{
"name": "Python",
"bytes": "2537426"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
import paho.mqtt.client as mqtt
import ssl
# TLS credentials for mutual authentication with AWS IoT Core
# (placeholders — fill in before running).
ROOT_CA = "<PATH_TO_ROOT_CERT>"        # Amazon root CA certificate (PEM)
CERTIFICATE = "<PATH_TO_YOUR_CERT>"    # device certificate (PEM)
PRIVATE_KEY = "<PATH_TO_PRIVATE_KEY>"  # device private key (PEM)

# MQTT topic to subscribe to, and the account-specific IoT endpoint host.
AWS_IOT_TOPIC = "<YOUR_IOT_TOPIC>"
AWS_IOT_ENDPOINT = "<YOUR_IOT_ENDPOINT>"
def on_connect(client, userdata, flags, rc):
    """Paho callback fired when the broker answers CONNECT.

    Subscribing here (rather than after ``connect()``) means the
    subscription is re-established automatically on reconnect.
    ``rc`` is the connection result code (0 == success).
    """
    print("Connected with result code "+str(rc))
    client.subscribe(AWS_IOT_TOPIC)
def on_message(client, userdata, msg):
    """Paho callback fired for each message received on a subscribed topic."""
    payload_text = str(msg.payload)
    print(msg.topic + " " + payload_text)
# Build the client and register callbacks before connecting so that no
# events are missed.
client = mqtt.Client()
client.on_connect = on_connect
# Mutual TLS: AWS IoT requires TLS 1.2 with the device certificate/key.
client.tls_set(ROOT_CA,
               CERTIFICATE,
               PRIVATE_KEY,
               tls_version=ssl.PROTOCOL_TLSv1_2)
client.on_message = on_message
# 8883 is the MQTT-over-TLS port; 10 s keepalive.
client.connect(AWS_IOT_ENDPOINT, 8883, 10)
client.loop_forever()  # blocks forever, dispatching events to the callbacks
| {
"content_hash": "0b398da62ba5cbe8e2199a30b0aa4899",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 45,
"avg_line_length": 24.428571428571427,
"alnum_prop": 0.7090643274853801,
"repo_name": "awslabs/simplerobotservice",
"id": "33312659c48ca2f5fd883c4371b74d885b18dffd",
"size": "707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "device/src/test_subscriber.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4136"
},
{
"name": "HTML",
"bytes": "4757"
},
{
"name": "JavaScript",
"bytes": "21552"
},
{
"name": "Python",
"bytes": "8820"
},
{
"name": "Shell",
"bytes": "726"
}
],
"symlink_target": ""
} |
"""
gh_lists.py MILESTONE
Functions for Github API requests.
"""
from __future__ import print_function, division, absolute_import
import os
import re
import sys
import json
import collections
import argparse
from urllib2 import urlopen
Issue = collections.namedtuple('Issue', ('id', 'title', 'url'))
def main():
    """Print RST-formatted lists of closed issues and PRs for a milestone.

    Returns 0 on success; aborts via argparse.error() when the requested
    milestone does not exist.
    """
    p = argparse.ArgumentParser(usage=__doc__.lstrip())
    p.add_argument('--project', default='holgern/pyedflib')
    p.add_argument('milestone')
    args = p.parse_args()
    # Cache API responses on disk so re-runs don't hit GitHub rate limits.
    getter = CachedGet('gh_cache.json')
    try:
        milestones = get_milestones(getter, args.project)
        if args.milestone not in milestones:
            msg = "Milestone {0} not available. Available milestones: {1}"
            msg = msg.format(args.milestone, u", ".join(sorted(milestones)))
            p.error(msg)
        issues = get_issues(getter, args.project, args.milestone)
        issues.sort()
    finally:
        # Persist whatever was fetched, even on error.
        getter.save()
    # The issues endpoint returns PRs too; split them apart by URL shape.
    prs = [x for x in issues if u'/pull/' in x.url]
    issues = [x for x in issues if x not in prs]

    def print_list(title, items):
        # Print an RST section header followed by one bullet per item.
        print()
        print(title)
        print("-"*len(title))
        print()
        for issue in items:
            msg = u"- `#{0} <{1}>`__: {2}"
            # Collapse internal whitespace; `title` shadows the parameter
            # from here on.
            title = re.sub(u"\s+", u" ", issue.title.strip())
            if len(title) > 60:
                remainder = re.sub(u"\s.*$", u"...", title[60:])
                if len(remainder) > 20:
                    # NOTE(review): `remainder` is assigned here but `title`
                    # is left untouched, so such titles print in full — this
                    # looks like it was meant to assign to `title`; confirm.
                    remainder = title[:80] + u"..."
                else:
                    title = title[:60] + remainder
            msg = msg.format(issue.id, issue.url, title)
            print(msg)
        print()

    msg = u"Issues closed for {0}".format(args.milestone)
    print_list(msg, issues)
    msg = u"Pull requests for {0}".format(args.milestone)
    print_list(msg, prs)
    return 0
def get_milestones(getter, project):
    """Return a mapping of milestone title -> milestone number for *project*."""
    url = "https://api.github.com/repos/{project}/milestones".format(project=project)
    raw_data, info = getter.get(url)
    return {ms[u'title']: ms[u'number'] for ms in json.loads(raw_data)}
def get_issues(getter, project, milestone):
    """Return :class:`Issue` tuples for every closed issue/PR in *milestone*."""
    mid = get_milestones(getter, project)[milestone]
    url = "https://api.github.com/repos/{project}/issues?milestone={mid}&state=closed&sort=created&direction=asc"
    url = url.format(project=project, mid=mid)
    # Follow RFC-5988 pagination links until there is no rel="next".
    raw_datas = []
    while True:
        raw_data, info = getter.get(url)
        raw_datas.append(raw_data)
        if 'link' not in info:
            break
        nxt = re.search('<(.*?)>; rel="next"', info['link'])
        if not nxt:
            break
        url = nxt.group(1)
    issues = []
    for raw_data in raw_datas:
        for issue_data in json.loads(raw_data):
            issues.append(Issue(issue_data[u'number'],
                                issue_data[u'title'],
                                issue_data[u'html_url']))
    return issues
class CachedGet(object):
    """Minimal on-disk cache for HTTP GETs.

    The cache file is JSON mapping url -> (body, header-dict).
    NOTE(review): uses ``unicode`` and ``urlopen`` from ``urllib2`` —
    this module is Python 2 only as written.
    """

    def __init__(self, filename):
        self.filename = filename
        if os.path.isfile(filename):
            print("[gh_lists] using {0} as cache (remove it if you want fresh data)".format(filename),
                  file=sys.stderr)
            with open(filename, 'rb') as f:
                self.cache = json.load(f)
        else:
            self.cache = {}

    def get(self, url):
        """Return (body, header-dict) for *url*, fetching on a cache miss."""
        # Normalise the key so str/unicode URLs hit the same entry (py2).
        url = unicode(url)
        if url not in self.cache:
            print("[gh_lists] get:", url, file=sys.stderr)
            req = urlopen(url)
            if req.getcode() != 200:
                raise RuntimeError()
            data = req.read()
            info = dict(req.info())
            self.cache[url] = (data, info)
            req.close()
        else:
            print("[gh_lists] get (cached):", url, file=sys.stderr)
        return self.cache[url]

    def save(self):
        """Atomically rewrite the cache file (write to temp, then rename)."""
        tmp = self.filename + ".new"
        with open(tmp, 'wb') as f:
            json.dump(self.cache, f)
        os.rename(tmp, self.filename)
# Exit with main()'s return code when run as a script.
if __name__ == "__main__":
    sys.exit(main())
| {
"content_hash": "47eabb0c5692184264d2d732f3cee7d1",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 113,
"avg_line_length": 28.613793103448277,
"alnum_prop": 0.5473608098336948,
"repo_name": "holgern/pyyawt",
"id": "1a4ef35c706acb82c5c6b8c3a11ce6c999eb7248",
"size": "4196",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "util/gh_lists.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2447"
},
{
"name": "C",
"bytes": "634296"
},
{
"name": "PowerShell",
"bytes": "3310"
},
{
"name": "Python",
"bytes": "268418"
}
],
"symlink_target": ""
} |
import os
import sys
import RTIMU
import schedule
import GeneralSettings
from Utility.abstract_process import processAbstract
import time
class imuHandler(processAbstract):
    """Base process that initialises an RTIMU inertial measurement unit
    and then repeatedly executes :meth:`job` until the process is killed.

    Subclasses must provide ``SETTINGS_FILE`` (path without the ``.ini``
    extension) and override :meth:`job`.
    """

    #SETTINGS_FILE = GeneralSettings.IMU_SETTINGS_FILE

    def __init__(self, antenna_data):
        processAbstract.__init__(self)
        self.antenna_data = antenna_data  # shared state consumed by job()
        self.ready = False                # set True once the IMU initialises
        self.first = True

    def job(self):
        """Abstract hook executed once per polling iteration."""
        # BUG FIX: the original used a bare `raise`, which with no active
        # exception raises "RuntimeError: No active exception to re-raise".
        # Make the abstract-method intent explicit instead.
        raise NotImplementedError("subclasses must implement job()")

    def process(self):
        """Initialise the IMU, then run job() roughly every 10 ms until killed."""
        print("Using settings file " + self.SETTINGS_FILE + ".ini")
        s = RTIMU.Settings(self.SETTINGS_FILE)
        self.imu = RTIMU.RTIMU(s)
        if (not self.imu.IMUInit()):
            print("IMU Init Failed", self.SETTINGS_FILE)
            sys.exit(1)
        else:
            print("IMU Init Succeeded")
            self.ready = True
        # Sensor-fusion parameters.
        self.imu.setSlerpPower(0.02)
        self.imu.setGyroEnable(True)
        self.imu.setAccelEnable(True)
        self.imu.setCompassEnable(True)
        # NOTE(review): poll_interval is stored but never used for the
        # sleep below — confirm whether it was meant to drive the delay.
        self.poll_interval = self.imu.IMUGetPollInterval()
        while self.kill_pill.empty():
            self.job()
            # BUG FIX: `10 / 1000` is integer division (== 0) under
            # Python 2, turning this into a busy loop; use an explicit
            # float so the sleep is always 10 ms.
            time.sleep(10 / 1000.0)
| {
"content_hash": "e925537210ff483f638e637c27bb6654",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 67,
"avg_line_length": 28.695652173913043,
"alnum_prop": 0.6098484848484849,
"repo_name": "Dronolab/antenna-tracking",
"id": "815f0ccb3127d24cbf98ae82ab925cb70c580cf9",
"size": "1320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Sensors/imuAbstract.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59079"
}
],
"symlink_target": ""
} |
import numpy as np
import six
from six.moves import range
from ..core.utils import make_optional_arg_into_id_array
from .structured_quad import links as squad_links
def neighbor_active_link_at_cell(grid, inds, *args):
    """neighbor_active_link_at_cell(grid, link_ids [, cell_ids])

    Return an array of the active link ids for neighbors of *cell_id* cells.

    *link_ids* is an index into the links of a cell as measured
    clockwise starting from the south.

    If *cell_ids* is not given, return neighbors for all cells in the grid.

    Parameters
    ----------
    grid : RasterModelGrid
        Source grid.
    link_inds : array_like
        IDs of links
    cell_ids : array_like, optional
        IDs of cells for which to get links
    """
    cell_ids = make_optional_arg_into_id_array(grid.number_of_cells, *args)
    nodes = grid.node_at_cell[cell_ids]
    links_per_cell = grid._active_links_at_node(nodes).T
    inds = np.asarray(inds)
    return links_per_cell[range(len(cell_ids)), inds]
def neighbor_node_at_cell(grid, inds, *args):
    """ node_id_of_cell_neighbor(grid, neighbor_ids [, cell_ids])

    Return an array of the node ids for neighbors of *cell_id* cells.
    *neighbor_ids* is an index into the neighbors of a cell as measured
    clockwise starting from the south.

    If *cell_ids* is not given, return neighbors for all cells in the grid.

    Parameters
    ----------
    grid : RasterModelGrid
        Input grid.
    neighbor_ids : array_like
        IDs of the neighbor nodes.
    cell_ids : array_like, optional
        IDs of cell about which to get neighbors.

    Returns
    -------
    ndarray
        Node IDs for given neighbors of cells.

    Examples
    --------
    >>> from landlab import RasterModelGrid
    >>> from landlab.grid.raster_funcs import neighbor_node_at_cell
    >>> grid = RasterModelGrid(4, 5, 1.0)
    >>> neighbor_node_at_cell(grid, 0, 0)
    array([1])

    Get the lower and the the upper neighbors for all the cells.

    >>> neighbor_node_at_cell(grid, 0)
    array([1, 2, 3, 6, 7, 8])
    >>> neighbor_node_at_cell(grid, 2)
    array([11, 12, 13, 16, 17, 18])

    As an alternative to the above, use fancy-indexing to get both sets of
    neighbors with one call.

    >>> neighbor_node_at_cell(grid, np.array([0, 2]), [1, 4])
    array([[ 2, 12],
           [ 7, 17]])
    """
    cell_ids = make_optional_arg_into_id_array(grid.number_of_cells, *args)
    node_ids = grid.node_at_cell[cell_ids]
    neighbors = grid.active_neighbors_at_node[node_ids]
    if not isinstance(inds, np.ndarray):
        inds = np.array(inds)
    # `3 - inds` presumably remaps the caller's clockwise-from-south index
    # onto the ordering of active_neighbors_at_node — confirm against grid.
    # return neighbors[range(len(cell_ids)), 3 - inds]
    # NOTE: the commented line above would pair each cell with one index;
    # the nested np.take form instead returns the full (cells x inds)
    # cross product, which is what the last doctest expects.
    return (
        np.take(np.take(neighbors, range(len(cell_ids)), axis=0),
                3 - inds, axis=1))
def corner_node_at_cell(grid, inds, *args):
    """node_id_of_cell_corner(grid, corner_ids [, cell_ids])

    Return an array of the node ids for diagonal neighbors of *cell_id* cells.
    *corner_ids* is an index into the corners of a cell as measured
    clockwise starting from the southeast.

    If *cell_ids* is not given, return neighbors for all cells in the grid.

    Parameters
    ----------
    grid : RasterModelGrid
        Input grid.
    corner_ids : array_like
        IDs of the corner nodes.
    cell_ids : array_like, optional
        IDs of cell about which to get corners

    Examples
    --------
    >>> from landlab import RasterModelGrid
    >>> from landlab.grid.raster_funcs import corner_node_at_cell
    >>> grid = RasterModelGrid(4, 5, 1.0)
    >>> corner_node_at_cell(grid, 0, 0)
    array([2])

    Get the lower-right and the the upper-left corners for all the cells.

    >>> corner_node_at_cell(grid, 0)
    array([2, 3, 4, 7, 8, 9])
    >>> corner_node_at_cell(grid, 2)
    array([10, 11, 12, 15, 16, 17])

    As an alternative to the above, use fancy-indexing to get both sets of
    corners with one call.

    >>> corner_node_at_cell(grid, np.array([0, 2]), [1, 4])
    array([[ 3, 11],
           [ 8, 16]])
    """
    cell_ids = make_optional_arg_into_id_array(grid.number_of_cells, *args)
    node_ids = grid.node_at_cell[cell_ids]
    diagonals = grid._get_diagonal_list(node_ids)
    if not isinstance(inds, np.ndarray):
        inds = np.array(inds)
    # `3 - inds` presumably remaps the caller's clockwise-from-southeast
    # index onto the diagonal-list ordering — confirm against grid.
    # NOTE: the nested np.take form returns the full (cells x inds) cross
    # product (see doctests), unlike the commented fancy-index line below.
    return (
        np.take(np.take(diagonals, range(len(cell_ids)), axis=0),
                3 - inds, axis=1))
    # return diagonals[range(len(cell_ids)), 3 - inds]
def calculate_flux_divergence_at_nodes(grid, active_link_flux, out=None):
    """Net flux into or out of nodes.

    Same as calculate_flux_divergence_at_core_cells, but works with and
    returns a list of net unit fluxes that corresponds to all nodes, rather
    than just core nodes.

    Parameters
    ----------
    grid : RasterModelGrid
        Input grid.
    active_link_flux : array_like
        Flux values at links.
    out : ndarray, optional
        Alternative output array in which to place the result. Must
        be of the same shape and buffer length as the expected output.
        Note that it is zeroed before use even when provided.

    See Also
    --------
    calculate_flux_divergence_at_active_cells

    Notes
    -----
    Note that we DO compute net unit fluxes at boundary nodes (even though
    these don't have active cells associated with them, and often don't have
    cells of any kind, because they are on the perimeter). It's up to the
    user to decide what to do with these boundary values.

    Examples
    --------
    Calculate the gradient of values at a grid's nodes.

    >>> from landlab import RasterModelGrid
    >>> rmg = RasterModelGrid((4, 5), spacing=1.0)
    >>> u = np.array([0., 1., 2., 3., 0.,
    ...               1., 2., 3., 2., 3.,
    ...               0., 1., 2., 1., 2.,
    ...               0., 0., 2., 2., 0.])
    >>> grad = rmg.calc_grad_at_link(u)[rmg.active_links]
    >>> grad # doctest: +NORMALIZE_WHITESPACE
    array([ 1.,  1., -1.,
            1.,  1., -1.,  1.,
           -1., -1., -1.,
            1.,  1., -1.,  1.,
           -1.,  0.,  1.])

    Calculate the divergence of the gradients at each node.

    >>> flux = - grad    # downhill flux proportional to gradient
    >>> rmg.calculate_flux_divergence_at_nodes(flux)
    ... # doctest: +NORMALIZE_WHITESPACE
    array([ 0., -1., -1.,  1.,  0.,
           -1.,  2.,  4., -2.,  1.,
           -1.,  0.,  1., -4.,  1.,
            0., -1.,  0.,  1.,  0.])

    If calculate_gradients_at_nodes is called inside a loop, you can
    improve speed by creating an array outside the loop. For example, do
    this once, before the loop:

    >>> df = rmg.zeros(centering='node') # outside loop
    >>> rmg.number_of_nodes
    20

    Then do this inside the loop so that the function will not have to create
    the df array but instead puts values into the *df* array.

    >>> df = rmg.calculate_flux_divergence_at_nodes(flux, out=df)

    >>> grid = RasterModelGrid((4, 5), spacing=(1, 2))
    >>> u = np.array([0., 1., 2., 3., 0.,
    ...               1., 2., 3., 2., 3.,
    ...               0., 1., 2., 1., 2.,
    ...               0., 0., 2., 2., 0.])
    >>> grad = grid.calc_grad_at_link(2 * u)[grid.active_links]
    >>> grad # doctest: +NORMALIZE_WHITESPACE
    array([ 2.,  2., -2.,
            1.,  1., -1.,  1.,
           -2., -2., -2.,
            1.,  1., -1.,  1.,
           -2.,  0.,  2.])
    >>> grid.calculate_flux_divergence_at_nodes(- grad)
    ... # doctest: +NORMALIZE_WHITESPACE
    array([ 0., -1., -1.,  1.,  0.,
           -1.,  2.,  4., -2.,  1.,
           -1.,  0.,  1., -4.,  1.,
            0., -1.,  0.,  1.,  0.])
    """
    assert len(active_link_flux) == grid.number_of_active_links, \
        "incorrect length of active_link_flux array"
    # If needed, create net_unit_flux array; a caller-supplied `out` is
    # zeroed before use.
    if out is None:
        out = grid.empty(centering='node')
    out.fill(0.)
    net_unit_flux = out
    assert len(net_unit_flux) == grid.number_of_nodes
    # Split the active links by orientation so each unit flux can be scaled
    # by the appropriate grid spacing (dy for vertical links, dx for
    # horizontal — presumably the width of the face crossed; confirm).
    is_vert_link = squad_links.is_vertical_link(grid.shape, grid.active_links)
    vert_links = grid.active_links[is_vert_link]
    horiz_links = grid.active_links[~ is_vert_link]
    # One extra slot so inactive-link sentinel indices read a zero flux.
    flux = np.zeros(grid.number_of_links + 1)
    flux[vert_links] = active_link_flux[is_vert_link] * grid.dy
    flux[horiz_links] = active_link_flux[~ is_vert_link] * grid.dx
    # Divergence = (sum of outgoing fluxes - sum of incoming fluxes) / area.
    net_unit_flux[:] = (
        (flux[grid._node_active_outlink_matrix2[0][:]] +
         flux[grid._node_active_outlink_matrix2[1][:]]) -
        (flux[grid._node_active_inlink_matrix2[0][:]] +
         flux[grid._node_active_inlink_matrix2[1][:]])) / grid.cellarea
    return net_unit_flux
def calculate_slope_aspect_bfp(xs, ys, zs):
    """Calculate slope and aspect.

    .. codeauthor:: Katy Barnhart <katherine.barnhart@colorado.edu>

    Fits a plane to the given N points with given *xs*, *ys*, and *zs* values
    using single value decomposition.

    Returns a tuple of (*slope*, *aspect*) based on the normal vector to the
    best fit plane.

    .. note::
        This function does not check if the points fall on a line, rather
        than a plane.
    """
    if not len(xs) == len(ys) == len(zs):
        raise ValueError('array must be the same length')

    # Center the points on their centroid and stack them into a 3xN matrix;
    # the unit normal of the best-fit plane is the third left-singular
    # vector of that matrix.
    centered = np.vstack((xs - np.mean(xs), ys - np.mean(ys), zs - np.mean(zs)))
    left_vectors, _, _ = np.linalg.svd(centered)
    normal = left_vectors[:, 2]

    # Aspect: compass bearing of the normal's horizontal projection.
    aspect = (90.0 - np.degrees(np.arctan2(normal[1], normal[0]))) % 360.0
    # Slope: angle between the normal and the vertical.
    slope = 90.0 - np.degrees(np.arcsin(normal[2]))
    return slope, aspect
def find_nearest_node(rmg, coords, mode='raise'):
    """Find the node nearest a point.

    Find the index to the node nearest the given x, y coordinates.
    Coordinates are provided as numpy arrays in the *coords* tuple.
    *coords* is tuple of coordinates, one for each dimension.

    The *mode* keyword to indicate what to do if a point is outside of the
    grid. Valid choices are the same as that used with the numpy function
    `ravel_multi_index`.

    A point is considered to be outside of the grid if it is outside the
    perimeter of the grid by one half of the grid spacing.

    Parameters
    ----------
    rmg : RasterModelGrid
        The source grid.
    coords : tuple
        Coordinates of point as (x, y)
    mode : {'raise', 'wrap', 'clip'}, optional
        Action to take if a point is outside of the grid.

    Returns
    -------
    array_like :
        Indices of the nodes nearest the given coordinates.

    Examples
    --------
    Create a grid of 4 by 5 nodes with unit spacing.

    >>> import landlab
    >>> from landlab.grid.raster_funcs import find_nearest_node
    >>> rmg = landlab.RasterModelGrid(4, 5)

    The points can be either a tuple of scalars or of arrays.

    >>> find_nearest_node(rmg, (0.2, 0.6))
    5
    >>> find_nearest_node(rmg, (np.array([1.6, 3.6]), np.array([2.3, .7])))
    array([12,  9])

    The *mode* keyword indicates what to do if a point is outside of the
    grid.

    >>> find_nearest_node(rmg, (-0.6, 0.6), mode='raise')
    Traceback (most recent call last):
        ...
    ValueError: invalid entry in coordinates array
    >>> find_nearest_node(rmg, (-0.6, 0.6), mode='clip')
    5
    >>> find_nearest_node(rmg, (-0.6, 0.6), mode='wrap')
    9
    """
    # Scalars are promoted to arrays and routed through the ndarray path.
    if isinstance(coords[0], np.ndarray):
        return _find_nearest_node_ndarray(rmg, coords, mode=mode)
    return find_nearest_node(
        rmg, (np.array(coords[0]), np.array(coords[1])), mode=mode)
def _find_nearest_node_ndarray(rmg, coords, mode='raise'):
    """Find the node nearest to a point.

    Parameters
    ----------
    rmg : RasterModelGrid
        A RasterModelGrid.
    coords : tuple of float
        Coordinates of test points as *x*, then *y*.
    mode : {'raise', 'wrap', 'clip'}, optional
        What to do with out-of-bounds indices (as with
        numpy.ravel_multi_index).

    Returns
    -------
    ndarray
        Nodes that are closest to the points.

    Examples
    --------
    >>> from landlab.grid.raster_funcs import _find_nearest_node_ndarray
    >>> from landlab import RasterModelGrid
    >>> import numpy as np
    >>> grid = RasterModelGrid((4, 5))
    >>> _find_nearest_node_ndarray(grid, (.25, 1.25))
    5
    >>> _find_nearest_node_ndarray(grid, (.75, 2.25))
    11

    >>> grid = RasterModelGrid((4, 5), spacing=(3, 4))
    >>> _find_nearest_node_ndarray(grid, (3.1, 4.1))
    6
    """
    # Round each coordinate to the nearest whole column/row offset from
    # the grid origin, then convert (row, col) to a node id.
    col = np.int_(np.around((coords[0] - rmg.node_x[0]) / rmg.dx))
    row = np.int_(np.around((coords[1] - rmg.node_y[0]) / rmg.dy))
    return rmg.grid_coords_to_node_id(row, col, mode=mode)
def _value_is_in_bounds(value, bounds):
"""Check if a value is within bounds.
Parameters
----------
value : float or ndarray
The test value.
bounds : (lower, upper)
The lower and upper bounds.
Returns
-------
bool
``True`` if the value is within the bounds. Otherwise, ``False``.
Examples
--------
>>> from landlab.grid.raster_funcs import _value_is_in_bounds
>>> import numpy as np
>>> _value_is_in_bounds(.5, (0, 1))
True
>>> _value_is_in_bounds(1, (0, 1))
False
>>> _value_is_in_bounds(0, (0, 1))
True
>>> _value_is_in_bounds(np.array((0, 1)), (0, 1))
array([ True, False], dtype=bool)
"""
dummy = value >= bounds[0]
dummy &= value < bounds[1]
return dummy
def _value_is_within_axis_bounds(rmg, value, axis):
    """Check whether *value* falls inside a grid axis' coordinate range.

    Parameters
    ----------
    rmg : RasterModelGrid
        A RasterModelGrid.
    value : float
        The test value.
    axis : int
        The axis.

    Returns
    -------
    bool
        ``True`` if the value is within the axis bounds. Otherwise, ``False``.

    Examples
    --------
    >>> from landlab.grid.raster_funcs import _value_is_within_axis_bounds
    >>> from landlab import RasterModelGrid
    >>> rmg = RasterModelGrid((4, 5))
    >>> _value_is_within_axis_bounds(rmg, 3.1, 0)
    False
    >>> _value_is_within_axis_bounds(rmg, 2.9, 0)
    True
    >>> _value_is_within_axis_bounds(rmg, 4.1, 1)
    False
    >>> _value_is_within_axis_bounds(rmg, 3.9, 1)
    True
    """
    coords_along_axis = rmg.node_axis_coordinates(axis)
    # First and last node coordinates bound the axis (half-open interval).
    axis_bounds = (coords_along_axis[0], coords_along_axis[-1])
    return _value_is_in_bounds(value, axis_bounds)
def is_coord_on_grid(rmg, coords, axes=(0, 1)):
    """Check if coordinates are contained on a grid.

    Parameters
    ----------
    rmg : RasterModelGrid
        Source grid.
    coords : tuple
        Coordinates of point as (x, y)
    axes : tuple, optional
        Check bounds only on a particular axis

    Examples
    --------
    Create a grid that ranges from x=0 to x=4, and y=0 to y=3.

    >>> from landlab import RasterModelGrid
    >>> from landlab.grid.raster_funcs import is_coord_on_grid
    >>> grid = RasterModelGrid(4, 5)
    >>> is_coord_on_grid(grid, (3.999, 2.999))
    True

    Check two points with one call. Numpy broadcasting rules apply for the
    point coordinates.

    >>> is_coord_on_grid(grid, ([3.9, 4.1], 2.9))
    array([ True, False], dtype=bool)
    >>> is_coord_on_grid(grid, ([3.9, 4.1], 2.9), axes=(0, ))
    array([ True,  True], dtype=bool)
    """
    broadcast = np.broadcast_arrays(*coords)
    # coords are (x, y) while axis 0 is y and axis 1 is x, hence the
    # ``1 - axis`` index flip below.
    first_axis = axes[0]
    result = _value_is_within_axis_bounds(rmg, broadcast[1 - first_axis],
                                          first_axis)
    for axis in axes[1:]:
        result &= _value_is_within_axis_bounds(rmg, broadcast[1 - axis], axis)
    return result
| {
"content_hash": "45ccba05dc900dc606c5262395bc825b",
"timestamp": "",
"source": "github",
"line_count": 513,
"max_line_length": 78,
"avg_line_length": 31.019493177387915,
"alnum_prop": 0.5888895871300195,
"repo_name": "csherwood-usgs/landlab",
"id": "00f9066fb39a57da260a08039df05018796b7b51",
"size": "15913",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "landlab/grid/raster_funcs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1359"
},
{
"name": "PowerShell",
"bytes": "6112"
},
{
"name": "Python",
"bytes": "2844194"
},
{
"name": "Shell",
"bytes": "2773"
}
],
"symlink_target": ""
} |
import string
def ispangram(sentence, alphabet=string.ascii_lowercase):
    """Return True if *sentence* uses every character of *alphabet*.

    Comparison is case-insensitive: the sentence is lower-cased before
    checking that the alphabet is a subset of its characters.
    """
    return set(alphabet).issubset(set(sentence.lower()))
| {
"content_hash": "3656527f04e6d054ac49486ce10c21ff",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 57,
"avg_line_length": 29.4,
"alnum_prop": 0.7414965986394558,
"repo_name": "serge-sans-paille/pythran",
"id": "cb5aa5a632136c34347c1b46d569b657259a791d",
"size": "358",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pythran/tests/rosetta/pangram.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "2074873"
},
{
"name": "Cython",
"bytes": "1701"
},
{
"name": "Jupyter Notebook",
"bytes": "27461"
},
{
"name": "Makefile",
"bytes": "1162"
},
{
"name": "Python",
"bytes": "2025760"
}
],
"symlink_target": ""
} |
"""
keychain
==============
"""
from setuptools import setup, find_packages
# Build/packaging metadata for the ``keychain`` distribution (setuptools).
setup(
    name='keychain',
    version='0.14.2.0',
    url='https://github.com/blockstack/keychain-manager-py',
    license='MIT',
    author='Blockstack Developers',
    author_email='support@blockstack.com',
    description="""Library for BIP32 hierarchical deterministic keychains / wallets.""",
    keywords='bitcoin blockchain bip32 HD hierarchical deterministic keychain wallet',
    # Include every package found under the project root.
    packages=find_packages(),
    # Installed unzipped so package data is accessible as regular files.
    zip_safe=False,
    install_requires=[
        'bitmerchant>=0.1.8',
        'keylib>=0.1.0',
    ],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet',
        'Topic :: Security :: Cryptography',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| {
"content_hash": "ccef062f5fbbc8a0e39e47d78c737a6d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 88,
"avg_line_length": 28.939393939393938,
"alnum_prop": 0.6230366492146597,
"repo_name": "blockstack/keychain-manager-py",
"id": "81d84a44a69d96d7594d4086d2f0516c1410bf3e",
"size": "977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16052"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from cms.admin.forms import PageUserForm, PageUserGroupForm
from cms.admin.permissionadmin import GenericCmsPermissionAdmin
from cms.exceptions import NoPermissionsException
from cms.models import PageUser, PageUserGroup
from cms.utils.compat.dj import get_user_model
from cms.utils.compat.forms import UserAdmin
from cms.utils.conf import get_cms_setting
from cms.utils.permissions import get_subordinate_users
from django.contrib.admin import site
user_model = get_user_model()
# Base class for PageUserAdmin: default to the stock UserAdmin, but if the
# project has registered its own admin for the user model, inherit from that
# class instead so its customisations carry over.
admin_class = UserAdmin
for model, admin_instance in site._registry.items():
    if model == user_model:
        admin_class = admin_instance.__class__
class PageUserAdmin(admin_class, GenericCmsPermissionAdmin):
    """Admin for PageUser, restricted to the current user's subordinates."""
    form = PageUserForm
    add_form = PageUserForm
    model = PageUser
    def get_queryset(self, request):
        qs = super(PageUserAdmin, self).get_queryset(request)
        try:
            # Only show users subordinate to the requesting user.
            user_id_set = get_subordinate_users(request.user).values_list('id', flat=True)
            return qs.filter(pk__in=user_id_set)
        except NoPermissionsException:
            # No permission to see any users: show an empty changelist.
            return self.model.objects.get_empty_query_set()
class PageUserGroupAdmin(admin.ModelAdmin, GenericCmsPermissionAdmin):
    """Admin for PageUserGroup with CMS permission fieldsets appended."""
    form = PageUserGroupForm
    list_display = ('name', 'created_by')
    fieldsets = [
        (None, {'fields': ('name',)}),
    ]
    def get_fieldsets(self, request, obj=None):
        # Delegate to the permission mixin so permission fields are added.
        return self.update_permission_fieldsets(request, obj)
# These admins are only relevant when CMS permission handling is enabled.
if get_cms_setting('PERMISSION'):
    admin.site.register(PageUser, PageUserAdmin)
    admin.site.register(PageUserGroup, PageUserGroupAdmin)
| {
"content_hash": "b5e2461c64c7466a1b84ede214cf20da",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 90,
"avg_line_length": 35.47826086956522,
"alnum_prop": 0.7236519607843137,
"repo_name": "astagi/django-cms",
"id": "501ca0ae3f68ed7e44984e655fda371285929ea4",
"size": "1656",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "cms/admin/useradmin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "89894"
},
{
"name": "JavaScript",
"bytes": "431099"
},
{
"name": "Makefile",
"bytes": "2973"
},
{
"name": "Python",
"bytes": "3210523"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
} |
"""The test for sensor device automation."""
from datetime import timedelta
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.sensor import DOMAIN, SensorDeviceClass
from homeassistant.components.sensor.device_trigger import ENTITY_TRIGGERS
from homeassistant.const import CONF_PLATFORM, PERCENTAGE, STATE_UNKNOWN
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import (
MockConfigEntry,
async_fire_time_changed,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
from tests.testing_config.custom_components.test.sensor import UNITS_OF_MEASUREMENT
@pytest.fixture
def device_reg(hass):
    """Return an empty, loaded, device registry."""
    return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
    """Return an empty, loaded, entity registry."""
    return mock_registry(hass)
@pytest.fixture
def calls(hass):
    """Track calls to the mock ``test.automation`` service."""
    return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg, enable_custom_integrations):
    """Test we get the expected triggers from a sensor."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    # Register one test sensor entity per device class on the same device.
    for device_class in SensorDeviceClass:
        entity_reg.async_get_or_create(
            DOMAIN,
            "test",
            platform.ENTITIES[device_class].unique_id,
            device_id=device_entry.id,
        )
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    # Expected: one trigger entry per (device_class, trigger type) pair for
    # every class that has a unit of measurement defined, excluding "none".
    expected_triggers = [
        {
            "platform": "device",
            "domain": DOMAIN,
            "type": trigger["type"],
            "device_id": device_entry.id,
            "entity_id": platform.ENTITIES[device_class].entity_id,
        }
        for device_class in SensorDeviceClass
        if device_class in UNITS_OF_MEASUREMENT
        for trigger in ENTITY_TRIGGERS[device_class]
        if device_class != "none"
    ]
    triggers = await async_get_device_automations(
        hass, DeviceAutomationType.TRIGGER, device_entry.id
    )
    # 26 is the current count of unit-bearing device-class triggers; update
    # this if ENTITY_TRIGGERS / UNITS_OF_MEASUREMENT grow.
    assert len(triggers) == 26
    assert triggers == expected_triggers
# Parametrized to cover the device class/unit coming either from the entity
# registry entry (set_state=False) or from the live state (set_state=True).
@pytest.mark.parametrize(
    "set_state,device_class_reg,device_class_state,unit_reg,unit_state",
    [
        (False, SensorDeviceClass.BATTERY, None, PERCENTAGE, None),
        (True, None, SensorDeviceClass.BATTERY, None, PERCENTAGE),
    ],
)
async def test_get_trigger_capabilities(
    hass,
    device_reg,
    entity_reg,
    set_state,
    device_class_reg,
    device_class_state,
    unit_reg,
    unit_state,
):
    """Test we get the expected capabilities from a sensor trigger."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_id = entity_reg.async_get_or_create(
        DOMAIN,
        "test",
        platform.ENTITIES["battery"].unique_id,
        device_id=device_entry.id,
        original_device_class=device_class_reg,
        unit_of_measurement=unit_reg,
    ).entity_id
    if set_state:
        hass.states.async_set(
            entity_id,
            None,
            {"device_class": device_class_state, "unit_of_measurement": unit_state},
        )
    # Capabilities should expose above/below thresholds (suffixed with the
    # sensor's unit) plus an optional "for" duration.
    expected_capabilities = {
        "extra_fields": [
            {
                "description": {"suffix": PERCENTAGE},
                "name": "above",
                "optional": True,
                "type": "float",
            },
            {
                "description": {"suffix": PERCENTAGE},
                "name": "below",
                "optional": True,
                "type": "float",
            },
            {"name": "for", "optional": True, "type": "positive_time_period_dict"},
        ]
    }
    triggers = await async_get_device_automations(
        hass, DeviceAutomationType.TRIGGER, device_entry.id
    )
    assert len(triggers) == 1
    for trigger in triggers:
        capabilities = await async_get_device_automation_capabilities(
            hass, DeviceAutomationType.TRIGGER, trigger
        )
        assert capabilities == expected_capabilities
async def test_get_trigger_capabilities_none(
    hass, device_reg, entity_reg, enable_custom_integrations
):
    """Test we get no capabilities for unknown or unit-less sensor entities."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    # First trigger points at a non-existent entity; second at the "none"
    # device-class entity. Both should yield empty capabilities.
    triggers = [
        {
            "platform": "device",
            "device_id": "8770c43885354d5fa27604db6817f63f",
            "domain": "sensor",
            "entity_id": "sensor.beer",
            "type": "is_battery_level",
        },
        {
            "platform": "device",
            "device_id": "8770c43885354d5fa27604db6817f63f",
            "domain": "sensor",
            "entity_id": platform.ENTITIES["none"].entity_id,
            "type": "is_battery_level",
        },
    ]
    expected_capabilities = {}
    for trigger in triggers:
        capabilities = await async_get_device_automation_capabilities(
            hass, DeviceAutomationType.TRIGGER, trigger
        )
        assert capabilities == expected_capabilities
async def test_if_fires_not_on_above_below(
    hass, calls, caplog, enable_custom_integrations
):
    """Test a numeric trigger without above/below is rejected with an error."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    sensor1 = platform.ENTITIES["battery"]
    # Trigger config deliberately omits both "above" and "below".
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": sensor1.entity_id,
                        "type": "battery_level",
                    },
                    "action": {"service": "test.automation"},
                }
            ]
        },
    )
    # Validation failure is reported via the log, not an exception.
    assert "must contain at least one of below, above" in caplog.text
async def test_if_fires_on_state_above(hass, calls, enable_custom_integrations):
    """Test the battery_level trigger fires only when crossing above the threshold."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    sensor1 = platform.ENTITIES["battery"]
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": sensor1.entity_id,
                        "type": "battery_level",
                        "above": 10,
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            # Renders "bat_low <platform> - <entity_id> - ..."
                            # so the assertion below can check all trigger vars.
                            "some": "bat_low {{ trigger.%s }}"
                            % "}} - {{ trigger.".join(
                                (
                                    "platform",
                                    "entity_id",
                                    "from_state.state",
                                    "to_state.state",
                                    "for",
                                )
                            )
                        },
                    },
                }
            ]
        },
    )
    await hass.async_block_till_done()
    assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN
    assert len(calls) == 0
    # 9 is below the threshold: no trigger.
    hass.states.async_set(sensor1.entity_id, 9)
    await hass.async_block_till_done()
    assert len(calls) == 0
    # Crossing above 10 fires the automation once.
    hass.states.async_set(sensor1.entity_id, 11)
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["some"] == "bat_low device - {} - 9 - 11 - None".format(
        sensor1.entity_id
    )
async def test_if_fires_on_state_below(hass, calls, enable_custom_integrations):
    """Test the battery_level trigger fires only when crossing below the threshold."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    sensor1 = platform.ENTITIES["battery"]
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": sensor1.entity_id,
                        "type": "battery_level",
                        "below": 10,
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            # Renders all trigger variables for the assertion.
                            "some": "bat_low {{ trigger.%s }}"
                            % "}} - {{ trigger.".join(
                                (
                                    "platform",
                                    "entity_id",
                                    "from_state.state",
                                    "to_state.state",
                                    "for",
                                )
                            )
                        },
                    },
                }
            ]
        },
    )
    await hass.async_block_till_done()
    assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN
    assert len(calls) == 0
    # 11 is above the threshold: no trigger.
    hass.states.async_set(sensor1.entity_id, 11)
    await hass.async_block_till_done()
    assert len(calls) == 0
    # Crossing below 10 fires the automation once.
    hass.states.async_set(sensor1.entity_id, 9)
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["some"] == "bat_low device - {} - 11 - 9 - None".format(
        sensor1.entity_id
    )
async def test_if_fires_on_state_between(hass, calls, enable_custom_integrations):
    """Test the battery_level trigger fires when entering the (above, below) band."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    sensor1 = platform.ENTITIES["battery"]
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": sensor1.entity_id,
                        "type": "battery_level",
                        "above": 10,
                        "below": 20,
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            # Renders all trigger variables for the assertions.
                            "some": "bat_low {{ trigger.%s }}"
                            % "}} - {{ trigger.".join(
                                (
                                    "platform",
                                    "entity_id",
                                    "from_state.state",
                                    "to_state.state",
                                    "for",
                                )
                            )
                        },
                    },
                }
            ]
        },
    )
    await hass.async_block_till_done()
    assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN
    assert len(calls) == 0
    # Below the band: no trigger.
    hass.states.async_set(sensor1.entity_id, 9)
    await hass.async_block_till_done()
    assert len(calls) == 0
    # Entering the band from below fires once.
    hass.states.async_set(sensor1.entity_id, 11)
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["some"] == "bat_low device - {} - 9 - 11 - None".format(
        sensor1.entity_id
    )
    # Leaving the band upward does not fire.
    hass.states.async_set(sensor1.entity_id, 21)
    await hass.async_block_till_done()
    assert len(calls) == 1
    # Re-entering the band from above fires again.
    hass.states.async_set(sensor1.entity_id, 19)
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[1].data["some"] == "bat_low device - {} - 21 - 19 - None".format(
        sensor1.entity_id
    )
async def test_if_fires_on_state_change_with_for(
    hass, calls, enable_custom_integrations
):
    """Test the trigger only fires after the "for" duration has elapsed."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    sensor1 = platform.ENTITIES["battery"]
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": "",
                        "entity_id": sensor1.entity_id,
                        "type": "battery_level",
                        "above": 10,
                        "for": {"seconds": 5},
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            # Renders all trigger variables for the assertion.
                            "some": "turn_off {{ trigger.%s }}"
                            % "}} - {{ trigger.".join(
                                (
                                    "platform",
                                    "entity_id",
                                    "from_state.state",
                                    "to_state.state",
                                    "for",
                                )
                            )
                        },
                    },
                }
            ]
        },
    )
    await hass.async_block_till_done()
    assert hass.states.get(sensor1.entity_id).state == STATE_UNKNOWN
    assert len(calls) == 0
    hass.states.async_set(sensor1.entity_id, 10)
    hass.states.async_set(sensor1.entity_id, 11)
    await hass.async_block_till_done()
    # Threshold crossed, but the 5 second hold has not elapsed yet.
    assert len(calls) == 0
    # Advance time past the hold period; the trigger should now fire.
    async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=10))
    await hass.async_block_till_done()
    assert len(calls) == 1
    await hass.async_block_till_done()
    assert (
        calls[0].data["some"]
        == f"turn_off device - {sensor1.entity_id} - 10 - 11 - 0:00:05"
    )
| {
"content_hash": "3da62d4cdaf6ea519de663095e01f929",
"timestamp": "",
"source": "github",
"line_count": 475,
"max_line_length": 87,
"avg_line_length": 33.58526315789474,
"alnum_prop": 0.5106876449570613,
"repo_name": "rohitranjan1991/home-assistant",
"id": "e37b0e9470fb97f10017b6d77db254c14183cc66",
"size": "15953",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/sensor/test_device_trigger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
} |
"""
This module implements the functionality to take any Python expression as a
string and fix all numbers and other things before evaluating it,
thus
1/2
returns
Integer(1)/Integer(2)
We use the Python ast module for that, which is in python2.6 and later. It is
well documented at docs.python.org.
Some tips to understand how this works: use dump() to get a nice representation
of any node. Then write a string of what you want to get, e.g.
"Integer(1)", parse it, dump it and you'll see that you need to do
"Call(Name('Integer', Load()), [node], [], None, None)". You don't need to
bother with lineno and col_offset, just call fix_missing_locations() before
returning the node.
If the ast module is not available (python2.4 and 2.5), we use the old compiler
module.
"""
from sympy import Basic
# The ast module only exists on Python 2.6+; on 2.4/2.5 parse_expr falls
# back to the old compiler-module based implementation (see parse_expr).
try:
    import ast
    ast_enabled = True
except ImportError:
    ast_enabled = False
if ast_enabled:
    from ast import parse, NodeTransformer, Call, Name, Load, \
        fix_missing_locations, Str
    class Transform(NodeTransformer):
        """AST transformer that sympifies a parsed Python expression.

        Wraps numeric literals in Integer()/Real() calls, turns unknown
        names into Symbol() calls, and rewrites lambdas as Lambda() calls.
        """
        def __init__(self, local_dict, global_dict):
            NodeTransformer.__init__(self)
            self.local_dict = local_dict
            self.global_dict = global_dict
        def visit_Num(self, node):
            # Wrap int literals as Integer(...) and float literals as
            # Real(...) so e.g. 1/2 becomes Integer(1)/Integer(2).
            # NOTE: uses the 5-argument ast.Call form of this Python era.
            if isinstance(node.n, int):
                return fix_missing_locations(Call(Name('Integer', Load()),
                    [node], [], None, None))
            elif isinstance(node.n, float):
                return fix_missing_locations(Call(Name('Real', Load()),
                    [node], [], None, None))
            return node
        def visit_Name(self, node):
            # Names already known locally, or known globally as SymPy
            # objects/types/callables, pass through untouched; everything
            # else (except True/False) becomes Symbol('<name>').
            if node.id in self.local_dict:
                return node
            elif node.id in self.global_dict:
                name_obj = self.global_dict[node.id]
                if isinstance(name_obj, (Basic, type)) or callable(name_obj):
                    return node
            elif node.id in ['True', 'False']:
                return node
            return fix_missing_locations(Call(Name('Symbol', Load()),
                [Str(node.id)], [], None, None))
        def visit_Lambda(self, node):
            # Rewrite ``lambda args: body`` as Lambda(args..., body);
            # a zero-argument lambda gets a default "x" argument.
            if len(node.args.args) == 0:
                args = [Str("x")]
            else:
                args = node.args.args
            args = [self.visit(arg) for arg in args]
            body = self.visit(node.body)
            n = Call(Name('Lambda', Load()), args + [body], [], None, None)
            return fix_missing_locations(n)
def parse_expr(s, local_dict):
    """
    Converts the string "s" to a SymPy expression, in local_dict.

    It converts all numbers to Integers before feeding it to Python and
    automatically creates Symbols.

    Raises SympifyError if the string cannot be parsed.
    """
    from sympify import SympifyError
    if ast_enabled:
        # Populate the evaluation namespace with all SymPy names (Python 2
        # exec-statement syntax), then parse, transform and evaluate.
        global_dict = {}
        exec 'from sympy import *' in global_dict
        try:
            a = parse(s.strip(), mode="eval")
        except SyntaxError:
            raise SympifyError("Cannot parse.")
        a = Transform(local_dict, global_dict).visit(a)
        e = compile(a, "<string>", "eval")
        return eval(e, local_dict, global_dict)
    else:
        # in python2.4 and 2.5, the "ast" module is not available, so we need
        # to use our old implementation:
        from ast_parser_python24 import SymPyParser
        try:
            return SymPyParser(local_dict=local_dict).parse_expr(s)
        except SyntaxError:
            raise SympifyError("sorry")
| {
"content_hash": "5895db40ad91ff825d449317c847303b",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 79,
"avg_line_length": 34.13725490196079,
"alnum_prop": 0.5924755887421023,
"repo_name": "KevinGoodsell/sympy",
"id": "aab972f9f6547a03ba7024ded6b390ebb6e1fcc0",
"size": "3482",
"binary": false,
"copies": "4",
"ref": "refs/heads/cache",
"path": "sympy/core/ast_parser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6800097"
},
{
"name": "Scheme",
"bytes": "125"
}
],
"symlink_target": ""
} |
from MidiOutStream import MidiOutStream
class MidiOutPassThrough(MidiOutStream):
    """
    This class is mainly used for testing the event dispatcher. The
    methods just return the passed parameters as a tuple.
    """
    #####################
    ## Midi channel events
    def note_on(self, channel, note, velocity, time=None):
        return channel, note, velocity, time
    def note_off(self, channel, note, velocity, time=None):
        return channel, note, velocity, time
    def aftertouch(self, channel, note, velocity, time=None):
        return channel, note, velocity, time
    def continuous_controller(self, channel, controller, value, time=None):
        return channel, controller, value, time
    def patch_change(self, channel, patch, time=None):
        return channel, patch, time
    def channel_pressure(self, channel, pressure, time=None):
        return channel, pressure, time
    #####################
    ## defined continuous controller events
    # def cc_
    #####################
    ## Common events
    def system_exclusive(self, data, time=None):
        return data, time
    def song_position_pointer(self, hiPos, loPos, time=None):
        return hiPos, loPos, time
    def song_select(self, songNumber, time=None):
        return songNumber, time
    def tuning_request(self, time=None):
        return time
    #########################
    # header does not really belong here. But anyhoo!!!
    def header(self, format, nTracks, division):
        return format, nTracks, division
    def eof(self):
        # Sentinel value marking end of stream.
        return 'eof'
    #####################
    ## meta events
    def start_of_track(self, n_track=0):
        return n_track
    def end_of_track(self, n_track=0, time=None):
        return n_track, time
    def sequence_number(self, hiVal, loVal, time=None):
        return hiVal, loVal, time
    def text(self, text, time=None):
        return text, time
    def copyright(self, text, time=None):
        return text, time
    def sequence_name(self, text, time=None):
        return text, time
    def instrument_name(self, text, time=None):
        return text, time
    def lyric(self, text, time=None):
        return text, time
    def marker(self, text, time=None):
        return text, time
    def cuepoint(self, text, time=None):
        return text, time
    def midi_port(self, value, time=None):
        return value, time
    def tempo(self, value, time=None):
        return value, time
    def smtp_offset(self, hour, minute, second, frame, framePart, time=None):
        return hour, minute, second, frame, framePart, time
    def time_signature(self, nn, dd, cc, bb, time=None):
        return nn, dd, cc, bb, time
    def key_signature(self, sf, mi, time=None):
        return sf, mi, time
    def sequencer_specific(self, data, time=None):
        return data, time
    #####################
    ## realtime events
    def timing_clock(self, time=None):
        return time
    def song_start(self, time=None):
        return time
    def song_stop(self, time=None):
        return time
    def song_continue(self, time=None):
        return time
    def active_sensing(self, time=None):
        return time
    def system_reset(self, time=None):
        return time
if __name__ == '__main__':
    # Demo/smoke test. The original instantiated the MidiOutStream base
    # class, which does not echo its arguments back; exercise the
    # pass-through class this module actually defines instead.
    midiOut = MidiOutPassThrough()
    midiOut.note_on(0, 63, 127, 0)
    midiOut.note_off(0, 63, 127, 384)
| {
"content_hash": "b60fadbc47ebc0c6eeb035ffdc3d69a9",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 77,
"avg_line_length": 19.13186813186813,
"alnum_prop": 0.5921883974727168,
"repo_name": "pymir3/pymir3",
"id": "39f4f553c6ea93711dcaa8e831c4cdf48397a9bb",
"size": "3482",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mir3/lib/midi/experimental/MidiOutPassThrough.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "535278"
},
{
"name": "Shell",
"bytes": "70718"
}
],
"symlink_target": ""
} |
import os
import sys
# Make the application package importable when the tests are run directly:
# append <repo_root>/app to sys.path. (Local renamed from ``dir`` to avoid
# shadowing the ``dir`` builtin.)
_repo_root = os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(os.path.join(_repo_root, 'app'))
# testing
import mock
import unittest
from mock import patch
# program
import app.scrape.scrape as Scraper
class TestScraperFunctions(unittest.TestCase):
    '''Unit tests for testing if the scraper is working as expected.'''

    def test_scrape_urls_returns_array(self):
        """ScrapeURLs should return a list of scraped URL records."""
        d = Scraper.ScrapeURLs(0, verbose=True)
        # assertIsInstance replaces the ``type(d) == type([])`` anti-pattern
        # and gives a useful failure message.
        self.assertIsInstance(d, list)

    def test_scrape_content_returns_dictionary(self):
        """ScrapeContent should return a dict for a scraped URL."""
        u = Scraper.ScrapeURLs(0, verbose=True)
        d = Scraper.ScrapeContent(u[0]['url'], verbose=True)
        self.assertIsInstance(d, dict)
| {
"content_hash": "bc2df6f79b6babf8ac3bdd67a76e30db",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 69,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.7078313253012049,
"repo_name": "luiscape/hdxscraper-opennepal",
"id": "92cd27c7bf1578a1e6e0744369b20261e8d0ab08",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_scraper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "141"
},
{
"name": "Python",
"bytes": "33824"
},
{
"name": "Shell",
"bytes": "563"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf import settings
# Fallback realm name; presumably used as the HTTP digest realm when the
# project defines none — confirm at call sites.
DEFAULT_REALM = 'DJANGO'
def get_setting(setting_name, default=None):
    """Return the named Django setting, or *default* if it is not defined."""
    # getattr with a default is equivalent to the hasattr/getattr pair.
    return getattr(settings, setting_name, default)
def get_backend(setting_name, default_backend_class_path):
    """Instantiate and return the backend class configured by a setting.

    The setting named *setting_name* (falling back to
    *default_backend_class_path*) must hold a dotted ``module.ClassName``
    path. Raises ImproperlyConfigured if the path is malformed, the module
    cannot be imported, or the class is missing.
    """
    path = get_setting(setting_name, default_backend_class_path)
    from django.core import exceptions
    from importlib import import_module
    pieces = path.rsplit('.', 1)
    if len(pieces) != 2:
        raise exceptions.ImproperlyConfigured('%s isn\'t a classname' % path)
    module_path, class_name = pieces
    try:
        module = import_module(module_path)
    except ImportError as e:
        raise exceptions.ImproperlyConfigured('Error importing module %s: "%s"' %
                                              (module_path, e))
    try:
        backend_class = getattr(module, class_name)
    except AttributeError:
        raise exceptions.ImproperlyConfigured('module "%s" does not define a "%s" class' %
                                              (module_path, class_name))
    return backend_class()
| {
"content_hash": "e9c264ed23d37889ef742a012953b4be",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 90,
"avg_line_length": 34.94117647058823,
"alnum_prop": 0.6355218855218855,
"repo_name": "dimagi/django-digest",
"id": "1611c7e5050a61d2fd4c79cfb3758ff8cdb755b7",
"size": "1188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_digest/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "65961"
}
],
"symlink_target": ""
} |
# Extension display name.
MRINAME = 'MRI'
# Project home page URL.
MRI_HOME = 'http://extensions.services.openoffice.org/project/MRI'
# Extension identifier, used to resolve the install directory below.
MRI_ID = 'mytools.mri'
# Install directory URL; filled in lazily by set_mri_dir().
MRI_DIR = None
def set_mri_dir(ctx):
    """Resolve the MRI extension's install dir URL and cache it in MRI_DIR."""
    global MRI_DIR
    # Imported lazily to avoid a circular import at module load time —
    # presumably; confirm against mytools_Mri package layout.
    import mytools_Mri.tools
    MRI_DIR = mytools_Mri.tools.get_extension_dirurl(ctx, MRI_ID)
class ConfigNames(object):
    """Names of the configuration node and its setting keys.

    These strings must match the keys stored under the configuration
    node, so do not rename the values.
    """
    # Registry path of the settings node.
    config_node = '/mytools.Mri.Configuration/Settings'
    sdk_path = 'SDKDirectory'
    browser = 'Browser'
    pos_size = 'WindowPosSize'
    font_name = 'CharFontName'
    char_size = 'CharHeight'
    sorted = 'Sorted'
    abbrev = 'Abbreviated'
    detailed = 'Detailed'
    show_labels = 'ShowLabels'
    origin = 'MRIOrigin'
    show_code = 'ShowCode'
    code_type = 'CodeType'
    use_pseud_props = 'UsePseudProperty'
    grid = 'UseGrid'
    use_tab = 'UseTab'
    macros = 'Macros'
    ref_by_doxygen = "DoxygenRef"
# Interfaces and properties deliberately skipped during inspection.
IGNORED_INTERFACES = {'com.sun.star.script.browse.XBrowseNode'}
IGNORED_PROPERTIES = {'ActiveLayer', 'AsProperty', 'ClientMap', 'FontSlant',
    'LayoutSize', 'Modified', 'PropertyToDefault', 'UIConfigurationManager',
    'ParaIsNumberingRestart', 'NumberingLevel', 'NumberingStartValue', 'NumberingStartLevel', 'DataArray', 'FormulaArray', 'Printer', 'Material'}
# value descriptions
EMPTYSTR = '""'
VOIDVAL = '-void-'
NONSTRVAL = '-%s-' # values can not be converted to strings
VALUEERROR = '-Error-'
# additional informations
PSEUDPORP = 'Pseud ' # pseud property
IGNORED = 'Ignored' # listed in IGNORED_PROPERTIES
NOTACCESSED = '-----'
WRITEONLY = 'WriteOnly'
ATTRIBUTE = 'Attr.'
# abbreviated string
ABBROLD = 'com.sun.star.'
ABBRNEW = '.'
| {
"content_hash": "c30d635d2991a4d1a56e1437dc058e36",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 145,
"avg_line_length": 30.21153846153846,
"alnum_prop": 0.6817313812858052,
"repo_name": "hanya/MRI",
"id": "19b17e4112ee97c8b25fae19b3d541a795ba8217",
"size": "2160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pythonpath/mytools_Mri/values.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4191"
},
{
"name": "Makefile",
"bytes": "364"
},
{
"name": "Python",
"bytes": "496584"
},
{
"name": "Shell",
"bytes": "366"
}
],
"symlink_target": ""
} |
"""State recorders for BPN states."""
import collections
import chains
import numpy
import logging
logger = logging.getLogger('bpn.mcmcbpn.recorders')
from bpn import structures
from defaults import (SUPERDEBUG, SUPERDEBUG_MODE, OUTPUT_BUFFER_SIZE,
TRANSITIONS_BUFFER_SIZE)
# This is a bit of a hack to reduce memory usage of the records. Rather
# than create a new string for every transition detailing the type, this
# will allow just creating a reference to a string.
TRANSITION_TYPES = {
        'link_prior': 'link_prior',
        'link_false_pos': 'link_false_pos',
        'link_false_neg': 'link_false_neg',
        'term_prior': 'term_prior',
        'term_false_pos': 'term_false_pos',
        'term_false_neg': 'term_false_neg',
        'link_selection': 'link_selection',
        'link_unselection': 'link_unselection',
        'link_swap': 'link_swap',
        'term_selection': 'term_selection',
        'term_unselection': 'term_unselection',
}
# Shared constants for the two selection directions.
SELECTION = 'selection'
UNSELECTION = 'unselection'
class OverallStateRecord(object):
    """Summary record of a single Markov chain transition.

    Captures the transition's type, its log ratios, and whether it
    was accepted.

    """
    # __slots__ keeps per-record memory low; one record is created for
    # every recorded step of the chain.
    __slots__ = (
            'transition_type',
            'log_transition_ratio',
            'log_state_likelihood',
            'accepted',
            'log_rejection_prob'
    )
    def __init__(
            self,
            transition_type,
            log_transition_ratio,
            log_state_likelihood,
            accepted,
            log_rejection_prob
        ):
        """Create a new record.

        :Parameters:
        - `transition_type`: name of the attempted transition (must be
          a key of `TRANSITION_TYPES`)
        - `log_transition_ratio`: log of the transition ratio
        - `log_state_likelihood`: log-likelihood of the resulting state
        - `accepted`: whether the transition was accepted
        - `log_rejection_prob`: log of the rejection probability

        """
        # Look the name up so every record shares one canonical string
        # object per transition type (memory-usage hack).
        self.transition_type = TRANSITION_TYPES[transition_type]
        self.log_transition_ratio = log_transition_ratio
        self.log_state_likelihood = log_state_likelihood
        self.accepted = accepted
        self.log_rejection_prob = log_rejection_prob
    def to_dict(self):
        """Converts the record to a dictionary for output."""
        # The output field names are exactly this class's slot names.
        return dict(
                (attribute, getattr(self, attribute))
                for attribute in OverallStateRecord.__slots__
        )
class DetailedStateRecord(OverallStateRecord):
    """Transition record extended with the link-parameter values and
    the interaction selection tallies of the state.

    """
    # NOTE: these slots are declared in the same order as the extra
    # constructor parameters; __init__ relies on that correspondence.
    __slots__ = (
            'link_false_pos',
            'link_false_neg',
            'link_prior',
            'num_selected_links',
            'num_unselected_links',
            'num_selected_active_interactions',
            'num_selected_inactive_interactions',
            'num_unselected_active_interactions',
            'num_unselected_inactive_interactions'
    )
    def __init__(
            self,
            transition_type,
            log_transition_ratio,
            log_state_likelihood,
            accepted,
            log_rejection_prob,
            link_false_pos,
            link_false_neg,
            link_prior,
            num_selected_links,
            num_unselected_links,
            num_selected_active_interactions,
            num_selected_inactive_interactions,
            num_unselected_active_interactions,
            num_unselected_inactive_interactions
        ):
        """Create a new record; the first five parameters are handed to
        `OverallStateRecord`, the rest describe the state in detail.

        """
        super(DetailedStateRecord, self).__init__(
                transition_type, log_transition_ratio,
                log_state_likelihood, accepted, log_rejection_prob)
        # The extra values line up, in order, with this class's
        # __slots__ declaration, so assign them in bulk.
        extra_values = (
                link_false_pos,
                link_false_neg,
                link_prior,
                num_selected_links,
                num_unselected_links,
                num_selected_active_interactions,
                num_selected_inactive_interactions,
                num_unselected_active_interactions,
                num_unselected_inactive_interactions,
        )
        for attribute, value in zip(
                DetailedStateRecord.__slots__, extra_values):
            setattr(self, attribute, value)
    def to_dict(self):
        """Converts the record to a dictionary for output."""
        d = super(DetailedStateRecord, self).to_dict()
        # Output field names are exactly this class's slot names.
        d.update(
                (attribute, getattr(self, attribute))
                for attribute in DetailedStateRecord.__slots__
        )
        return d
class TermsStateRecord(DetailedStateRecord):
    """Detailed transition record extended with tallies of selected
    and unselected terms.

    """
    __slots__ = ('num_selected_terms', 'num_unselected_terms')
    def __init__(
            self,
            transition_type,
            log_transition_ratio,
            log_state_likelihood,
            accepted,
            log_rejection_prob,
            link_false_pos,
            link_false_neg,
            link_prior,
            num_selected_links,
            num_unselected_links,
            num_selected_active_interactions,
            num_selected_inactive_interactions,
            num_unselected_active_interactions,
            num_unselected_inactive_interactions,
            num_selected_terms,
            num_unselected_terms
        ):
        """Create a new record; all but the last two parameters are
        handed straight to `DetailedStateRecord`.

        """
        super(TermsStateRecord, self).__init__(
                transition_type,
                log_transition_ratio,
                log_state_likelihood,
                accepted,
                log_rejection_prob,
                link_false_pos,
                link_false_neg,
                link_prior,
                num_selected_links,
                num_unselected_links,
                num_selected_active_interactions,
                num_selected_inactive_interactions,
                num_unselected_active_interactions,
                num_unselected_inactive_interactions
        )
        self.num_selected_terms = num_selected_terms
        self.num_unselected_terms = num_unselected_terms
    def to_dict(self):
        """Converts the record to a dictionary for output."""
        d = super(TermsStateRecord, self).to_dict()
        d['num_selected_terms'] = self.num_selected_terms
        d['num_unselected_terms'] = self.num_unselected_terms
        return d
class IndependentTermsStateRecord(TermsStateRecord):
    """Terms-based transition record that additionally tracks the
    term prior parameter.

    """
    __slots__ = ('term_prior',)
    def __init__(
            self,
            transition_type,
            log_transition_ratio,
            log_state_likelihood,
            accepted,
            log_rejection_prob,
            link_false_pos,
            link_false_neg,
            link_prior,
            num_selected_links,
            num_unselected_links,
            num_selected_active_interactions,
            num_selected_inactive_interactions,
            num_unselected_active_interactions,
            num_unselected_inactive_interactions,
            num_selected_terms,
            num_unselected_terms,
            term_prior
        ):
        """Create a new record; everything except `term_prior` is
        handed straight to `TermsStateRecord`.

        """
        super(IndependentTermsStateRecord, self).__init__(
                transition_type,
                log_transition_ratio,
                log_state_likelihood,
                accepted,
                log_rejection_prob,
                link_false_pos,
                link_false_neg,
                link_prior,
                num_selected_links,
                num_unselected_links,
                num_selected_active_interactions,
                num_selected_inactive_interactions,
                num_unselected_active_interactions,
                num_unselected_inactive_interactions,
                num_selected_terms,
                num_unselected_terms,
        )
        self.term_prior = term_prior
    def to_dict(self):
        """Converts the record to a dictionary for output."""
        d = super(IndependentTermsStateRecord, self).to_dict()
        d.update(term_prior=self.term_prior)
        return d
class GenesBasedStateRecord(IndependentTermsStateRecord):
    """Transition record extended with term-parameter values and
    per-gene selection tallies.

    """
    # NOTE: these slots are declared in the same order as the extra
    # constructor parameters; __init__ relies on that correspondence.
    __slots__ = (
            'term_false_pos',
            'term_false_neg',
            'num_selected_active_genes',
            'num_selected_inactive_genes',
            'num_unselected_active_genes',
            'num_unselected_inactive_genes'
    )
    def __init__(
            self,
            transition_type,
            log_transition_ratio,
            log_state_likelihood,
            accepted,
            log_rejection_prob,
            link_false_pos,
            link_false_neg,
            link_prior,
            num_selected_links,
            num_unselected_links,
            num_selected_active_interactions,
            num_selected_inactive_interactions,
            num_unselected_active_interactions,
            num_unselected_inactive_interactions,
            num_selected_terms,
            num_unselected_terms,
            term_prior,
            term_false_pos,
            term_false_neg,
            num_selected_active_genes,
            num_selected_inactive_genes,
            num_unselected_active_genes,
            num_unselected_inactive_genes
        ):
        """Create a new record; the leading parameters are handed to
        `IndependentTermsStateRecord`, the rest are gene statistics.

        """
        super(GenesBasedStateRecord, self).__init__(
                transition_type,
                log_transition_ratio,
                log_state_likelihood,
                accepted,
                log_rejection_prob,
                link_false_pos,
                link_false_neg,
                link_prior,
                num_selected_links,
                num_unselected_links,
                num_selected_active_interactions,
                num_selected_inactive_interactions,
                num_unselected_active_interactions,
                num_unselected_inactive_interactions,
                num_selected_terms,
                num_unselected_terms,
                term_prior,
        )
        # The extra values line up, in order, with this class's
        # __slots__ declaration, so assign them in bulk.
        extra_values = (
                term_false_pos,
                term_false_neg,
                num_selected_active_genes,
                num_selected_inactive_genes,
                num_unselected_active_genes,
                num_unselected_inactive_genes,
        )
        for attribute, value in zip(
                GenesBasedStateRecord.__slots__, extra_values):
            setattr(self, attribute, value)
    def to_dict(self):
        """Converts the record to a dictionary for output."""
        d = super(GenesBasedStateRecord, self).to_dict()
        # Output field names are exactly this class's slot names.
        d.update(
                (attribute, getattr(self, attribute))
                for attribute in GenesBasedStateRecord.__slots__
        )
        return d
class StateRecorder(object):
    """Abstract base class for objects that record Markov chain
    states; concrete recorders subclass this."""
class PLNStateRecorder(StateRecorder):
    """A class to record the states of a `PLNMarkovChain` instance."""
    def __init__(
            self,
            annotated_interactions,
            parameters_csvwriter,
            links_csvwriter,
            transitions_csvwriter,
            transitions_buffer_size=TRANSITIONS_BUFFER_SIZE
        ):
        """Create a new instance.

        :Parameters:
        - `annotated_interactions`: an `AnnotatedInteractionsGraph`
          instance
        - `parameters_csvwriter`: a `csv.DictWriter` instance for
          outputting parameter results with these fields:
          ``'parameter'``, ``'value'``, ``'probability'``
        - `links_csvwriter`: a `csv.DictWriter` instance for
          outputting links results ``'term1'``, ``'term2'``,
          ``'probability'``
        - `transitions_csvwriter`: a `csv.DictWriter` instance for
          outputting transitions data
        - `transitions_buffer_size`: number of records to maintain
          before outputting transitions information [default: ``10000``]

        """
        self.records_made = 0
        # Per-link count of the recorded steps in which the link was
        # selected.
        self.selected_links_tallies = dict.fromkeys(
                annotated_interactions.get_all_links(), 0)
        # We'll use nested defaultdicts to track the values for each
        # parameter that we see. In this way, we will not have to know
        # ahead of time what parameters are contained in the parameter
        # state and what values they may take.
        self.parameter_tallies = collections.defaultdict(
                lambda : collections.defaultdict(int))
        self._transitions_data = []
        self.parameters_csvwriter = parameters_csvwriter
        self.links_csvwriter = links_csvwriter
        self.transitions_csvwriter = transitions_csvwriter
        self._transitions_buffer_size = transitions_buffer_size
    def record_links_state(self, links_state):
        """Record the links selected in this state.

        :Parameters:
        - `links_state`: a `PLNLinksState` instance

        """
        for selected_link in links_state.selected_links:
            self.selected_links_tallies[selected_link] += 1
    def record_parameters_state(self, parameters_state):
        """Record the parameters in this state.

        :Parameters:
        - `parameters_state`: a `PLNParametersState` instance

        """
        for param_name in parameters_state.parameter_names:
            param_value = getattr(parameters_state, param_name)
            self.parameter_tallies[param_name][param_value] += 1
    def record_transition(self, markov_chain):
        """Records the information about the previous transition that
        led to this state.

        :Parameters:
        - `markov_chain`: a `PLNMarkovChain` instance

        """
        record = OverallStateRecord(*markov_chain.last_transition_info)
        self._transitions_data.append(record)
    def record_state(self, markov_chain):
        """Record the features of the current state.

        :Parameters:
        - `markov_chain`: a `PLNMarkovChain` instance

        """
        self.record_links_state(markov_chain.current_state.links_state)
        self.record_parameters_state(
                markov_chain.current_state.parameters_state)
        self.record_transition(markov_chain)
        self.records_made += 1
        # Output the transitions data if we've progressed through enough
        # steps.
        if not (self.records_made % self._transitions_buffer_size):
            logger.info("Writing and flushing transitions data.")
            self.write_transition_states()
            self._flush_transitions_data()
    def write_links_probabilities(self, buffer_size=OUTPUT_BUFFER_SIZE):
        """Output the final probabilities for the links.

        :Parameters:
        - `buffer_size`: the number of records to write to disk at once

        """
        num_total_steps = float(self.records_made)
        output_records = []
        # NOTE: .items() (rather than the Python 2-only .iteritems())
        # keeps this compatible with both Python 2 and Python 3.
        for i, (link, link_selection_count) in enumerate(
                self.selected_links_tallies.items()):
            term1, term2 = link
            link_probability = link_selection_count / num_total_steps
            output_records.append(
                    {
                        'term1': term1,
                        'term2': term2,
                        'probability': link_probability
                    }
            )
            # Periodically flush results to disk
            if not ((i + 1) % buffer_size):
                self.links_csvwriter.writerows(output_records)
                # Flush the scores
                output_records = []
        # Flush any remaining records
        if output_records:
            self.links_csvwriter.writerows(output_records)
    def write_parameters_probabilities(self):
        """Output the final probabilities for the parameters."""
        num_total_steps = float(self.records_made)
        output_records = []
        # sorted() works on both dicts and dict views, unlike the
        # Python 2-only idiom of .keys() followed by .sort().
        for param_name in sorted(self.parameter_tallies):
            distribution = self.parameter_tallies[param_name]
            for param_value in sorted(distribution):
                value_probability = (distribution[param_value] /
                        num_total_steps)
                output_records.append(
                        {
                            'parameter': param_name,
                            'value': param_value,
                            'probability': value_probability
                        }
                )
        self.parameters_csvwriter.writerows(output_records)
    def _flush_transitions_data(self):
        """Flushes the transitions data that is stored.

        NOTE: Should only be done after outputting transition data
        (e.g., writing to file)

        """
        self._transitions_data = []
    def write_transition_states(self, buffer_size=OUTPUT_BUFFER_SIZE):
        """Writes the transition state information for the Markov chain
        to CSV files.

        :Parameters:
        - `buffer_size`: the number of records to write to disk at once

        """
        output_records = []
        for i, transition_info in enumerate(self._transitions_data):
            record = transition_info.to_dict()
            output_records.append(record)
            # Periodically flush results to disk
            if not ((i + 1) % buffer_size):
                self.transitions_csvwriter.writerows(output_records)
                # Flush the scores
                output_records = []
        # Flush any remaining records
        if output_records:
            self.transitions_csvwriter.writerows(output_records)
class ArrayStateRecorder(PLNStateRecorder):
    """Similar to `PLNStateRecorder`, however, adapted to record the
    state of `ArrayLinksState` instances.

    """
    def __init__(
            self,
            annotated_interactions,
            parameters_csvwriter,
            links_csvwriter,
            transitions_csvwriter,
            transitions_buffer_size=TRANSITIONS_BUFFER_SIZE
        ):
        """Create a new instance.

        :Parameters:
        - `annotated_interactions`: an `AnnotatedInteractionsGraph`
          instance
        - `parameters_csvwriter`: a `csv.DictWriter` instance for
          outputting parameter results with these fields:
          ``'parameter'``, ``'value'``, ``'probability'``
        - `links_csvwriter`: a `csv.DictWriter` instance for
          outputting links results ``'term1'``, ``'term2'``,
          ``'probability'``
        - `transitions_csvwriter`: a `csv.DictWriter` instance for
          outputting transitions data
        - `transitions_buffer_size`: number of records to maintain
          before outputting transitions information [default: ``10000``]

        """
        self.records_made = 0
        self.links = annotated_interactions.get_all_links()
        # Use the builtin int as the dtype: the numpy.int alias was
        # deprecated in NumPy 1.20 and removed in NumPy 1.24; the
        # resulting dtype is identical.
        self.selected_links_tallies = numpy.zeros(len(self.links), int)
        # We'll use nested defaultdicts to track the values for each
        # parameter that we see. In this way, we will not have to know
        # ahead of time what parameters are contained in the parameter
        # state and what values they may take.
        self.parameter_tallies = collections.defaultdict(
                lambda : collections.defaultdict(int))
        self._transitions_data = []
        self.parameters_csvwriter = parameters_csvwriter
        self.links_csvwriter = links_csvwriter
        self.transitions_csvwriter = transitions_csvwriter
        self._transitions_buffer_size = transitions_buffer_size
    def record_links_state(self, links_state):
        """Record the links selected in this state.

        :Parameters:
        - `links_state`: an `ArrayLinksState` instance

        """
        # link_selections is elementwise-added into the tally array.
        self.selected_links_tallies += links_state.link_selections
    def write_links_probabilities(self, buffer_size=OUTPUT_BUFFER_SIZE):
        """Output the final probabilities for the links.

        :Parameters:
        - `buffer_size`: the number of records to write to disk at once

        """
        num_total_steps = float(self.records_made)
        link_probabilities = (self.selected_links_tallies /
                num_total_steps)
        output_records = []
        for i, link in enumerate(self.links):
            term1, term2 = link
            link_probability = link_probabilities[i]
            output_records.append(
                    {
                        'term1': term1,
                        'term2': term2,
                        'probability': link_probability
                    }
            )
            # Periodically flush results to disk
            if not ((i + 1) % buffer_size):
                self.links_csvwriter.writerows(output_records)
                # Flush the scores
                output_records = []
        # Flush any remaining records
        if output_records:
            self.links_csvwriter.writerows(output_records)
class DetailedArrayStateRecorder(ArrayStateRecorder):
    """Similar to `ArrayStateRecorder`, only it records more
    information about each step.

    """
    def record_transition(self, markov_chain):
        """Record all the numbers of the current state.

        :Parameters:
        - `markov_chain`: a `PLNMarkovChain` instance

        """
        overall_state = markov_chain.current_state
        parameters_state = overall_state.parameters_state
        links_state = overall_state.links_state
        # The first five items of the transition information populate
        # the base record fields directly.
        (transition_type, log_transition_ratio, log_state_likelihood,
                accepted, log_rejection_prob) = (
                        markov_chain.last_transition_info[:5])
        self._transitions_data.append(DetailedStateRecord(
                transition_type,
                log_transition_ratio,
                log_state_likelihood,
                accepted,
                log_rejection_prob,
                parameters_state.link_false_pos,
                parameters_state.link_false_neg,
                parameters_state.link_prior,
                links_state.calc_num_selected_links(),
                links_state.calc_num_unselected_links(),
                links_state.calc_num_selected_active_interactions(),
                links_state.calc_num_selected_inactive_interactions(),
                links_state.calc_num_unselected_active_interactions(),
                links_state.calc_num_unselected_inactive_interactions(),
        ))
class TermsBasedStateRecorder(ArrayStateRecorder):
    """Similar to `PLNStateRecorder`, however, adapted to record the
    state of `TermsBasedOverallState` instances.

    """
    def __init__(
            self,
            annotated_interactions,
            parameters_csvwriter,
            links_csvwriter,
            terms_csvwriter,
            transitions_csvwriter,
            transitions_buffer_size=TRANSITIONS_BUFFER_SIZE
        ):
        """Create a new instance.

        :Parameters:
        - `annotated_interactions`: an `AnnotatedInteractionsGraph`
          instance
        - `parameters_csvwriter`: a `csv.DictWriter` instance for
          outputting parameter results with these fields:
          ``'parameter'``, ``'value'``, ``'probability'``
        - `links_csvwriter`: a `csv.DictWriter` instance for
          outputting links results ``'term1'``, ``'term2'``,
          ``'probability'``
        - `terms_csvwriter`: a `csv.DictWriter` instance for
          outputting terms results with these fields: ``'term'``,
          ``'probability'``
        - `transitions_csvwriter`: a `csv.DictWriter` instance for
          outputting transitions data
        - `transitions_buffer_size`: number of records to maintain
          before outputting transitions information [default: ``10000``]

        """
        self._annotated_interactions = annotated_interactions
        self.records_made = 0
        num_terms = self._annotated_interactions.calc_num_terms()
        # Per-term selection counts, and a symmetric matrix of
        # per-link (term-pair) selection counts.
        self.selected_terms_tallies = numpy.zeros(num_terms, int)
        self.selected_links_tallies = structures.symzeros(num_terms,
                int)
        # Nested defaultdicts track every parameter value seen, so we
        # need not know the parameter names or value ranges in advance.
        self.parameter_tallies = collections.defaultdict(
                lambda : collections.defaultdict(int))
        self._transitions_data = []
        self.parameters_csvwriter = parameters_csvwriter
        self.links_csvwriter = links_csvwriter
        self.terms_csvwriter = terms_csvwriter
        self.transitions_csvwriter = transitions_csvwriter
        self._transitions_buffer_size = transitions_buffer_size
    def record_links_state(self, links_state):
        """Record the links selected in this state.

        :Parameters:
        - `links_state`: a `PLNLinksState` instance

        """
        # Tally the links through the superclass, then the terms.
        super(TermsBasedStateRecorder, self).record_links_state(
                links_state)
        self.selected_terms_tallies += links_state.term_selections
    def write_terms_probabilities(self, buffer_size=OUTPUT_BUFFER_SIZE):
        """Output the final probabilities for the terms.

        :Parameters:
        - `buffer_size`: the number of records to write to disk at once

        """
        total_steps = float(self.records_made)
        term_probabilities = self.selected_terms_tallies / total_steps
        # Indices of terms which were marked as selected one or more
        # times throughout all recorded steps of the Markov chain.
        ever_selected_indices = self.selected_terms_tallies.nonzero()[0]
        buffered_rows = []
        for position, term_index in enumerate(
                ever_selected_indices, start=1):
            # Translate the index back into the actual term.
            term = self._annotated_interactions.get_term_from_index(
                    term_index)
            buffered_rows.append(
                    {
                        'term': term,
                        'probability': term_probabilities[term_index]
                    }
            )
            # Periodically flush results to disk.
            if position % buffer_size == 0:
                self.terms_csvwriter.writerows(buffered_rows)
                buffered_rows = []
        # Flush any remaining records.
        if buffered_rows:
            self.terms_csvwriter.writerows(buffered_rows)
    def write_links_probabilities(self, buffer_size=OUTPUT_BUFFER_SIZE):
        """Output the final probabilities for the links.

        :Parameters:
        - `buffer_size`: the number of records to write to disk at once

        """
        total_steps = float(self.records_made)
        link_probabilities = self.selected_links_tallies / total_steps
        # Pairwise ``(term1_index, term2_index)`` indices of any links
        # marked as selected 1 or more times over all recorded steps;
        # the upper triangle avoids double-counting the symmetric
        # tallies matrix.
        upper_triangle = numpy.triu(self.selected_links_tallies)
        ever_selected_indices = zip(*upper_triangle.nonzero())
        buffered_rows = []
        for position, link_index in enumerate(
                ever_selected_indices, start=1):
            # Translate the index pair back into the actual terms.
            term1, term2 = self._annotated_interactions.get_termed_link(
                    link_index)
            buffered_rows.append(
                    {
                        'term1': term1,
                        'term2': term2,
                        'probability': link_probabilities[link_index]
                    }
            )
            # Periodically flush results to disk.
            if position % buffer_size == 0:
                self.links_csvwriter.writerows(buffered_rows)
                buffered_rows = []
        # Flush any remaining records.
        if buffered_rows:
            self.links_csvwriter.writerows(buffered_rows)
class DetailedTermsBasedStateRecorder(TermsBasedStateRecorder):
    """Similar to `TermsBasedStateRecorder`, but records more
    information about each state.

    """
    def record_transition(self, markov_chain):
        """Record all the numbers of the current state.

        :Parameters:
        - `markov_chain`: a `PLNMarkovChain` instance

        """
        overall_state = markov_chain.current_state
        parameters_state = overall_state.parameters_state
        links_state = overall_state.links_state
        # Start from the five base transition values and append the
        # detailed state statistics in constructor order.
        record_args = list(markov_chain.last_transition_info[:5])
        record_args.extend([
                parameters_state.link_false_pos,
                parameters_state.link_false_neg,
                parameters_state.link_prior,
                links_state.calc_num_selected_links(),
                links_state.calc_num_unselected_links(),
                links_state.calc_num_selected_active_interactions(),
                links_state.calc_num_selected_inactive_interactions(),
                links_state.calc_num_unselected_active_interactions(),
                links_state.calc_num_unselected_inactive_interactions(),
                links_state.calc_num_selected_terms(),
                links_state.calc_num_unselected_terms(),
        ])
        self._transitions_data.append(TermsStateRecord(*record_args))
class DetailedIndependentTermsBasedStateRecorder(
        DetailedTermsBasedStateRecorder):
    """Similar to `DetailedTermsBasedStateRecorder`, however, adapted to
    record the state of `IndependentTermsBasedOverallState` instances.

    """
    def record_transition(self, markov_chain):
        """Record all the numbers of the current state.

        :Parameters:
        - `markov_chain`: a `PLNMarkovChain` instance

        """
        overall_state = markov_chain.current_state
        parameters_state = overall_state.parameters_state
        links_state = overall_state.links_state
        # Start from the five base transition values and append the
        # detailed state statistics in constructor order.
        record_args = list(markov_chain.last_transition_info[:5])
        record_args.extend([
                parameters_state.link_false_pos,
                parameters_state.link_false_neg,
                parameters_state.link_prior,
                links_state.calc_num_selected_links(),
                links_state.calc_num_unselected_links(),
                links_state.calc_num_selected_active_interactions(),
                links_state.calc_num_selected_inactive_interactions(),
                links_state.calc_num_unselected_active_interactions(),
                links_state.calc_num_unselected_inactive_interactions(),
                links_state.calc_num_selected_terms(),
                links_state.calc_num_unselected_terms(),
                parameters_state.term_prior,
        ])
        self._transitions_data.append(
                IndependentTermsStateRecord(*record_args))
class DetailedGenesBasedStateRecorder(
        DetailedIndependentTermsBasedStateRecorder):
    """Similar to `DetailedIndependentTermsBasedStateRecorder`, however,
    adapted to record the state of `GenesBasedOverallState` instances.

    """
    def record_transition(self, markov_chain):
        """Record all the numbers of the current state.

        :Parameters:
        - `markov_chain`: a `PLNMarkovChain` instance

        """
        overall_state = markov_chain.current_state
        parameters_state = overall_state.parameters_state
        links_state = overall_state.links_state
        # Start from the five base transition values and append the
        # detailed state statistics in constructor order.
        record_args = list(markov_chain.last_transition_info[:5])
        record_args.extend([
                parameters_state.link_false_pos,
                parameters_state.link_false_neg,
                parameters_state.link_prior,
                links_state.calc_num_selected_links(),
                links_state.calc_num_unselected_links(),
                links_state.calc_num_selected_active_interactions(),
                links_state.calc_num_selected_inactive_interactions(),
                links_state.calc_num_unselected_active_interactions(),
                links_state.calc_num_unselected_inactive_interactions(),
                links_state.calc_num_selected_terms(),
                links_state.calc_num_unselected_terms(),
                parameters_state.term_prior,
                parameters_state.term_false_pos,
                parameters_state.term_false_neg,
                links_state.calc_num_selected_active_genes(),
                links_state.calc_num_selected_inactive_genes(),
                links_state.calc_num_unselected_active_genes(),
                links_state.calc_num_unselected_inactive_genes(),
        ])
        self._transitions_data.append(GenesBasedStateRecord(*record_args))
class FrequencyDetailedArrayStateRecorder(DetailedArrayStateRecorder):
    """Similar to `DetailedArrayStateRecorder`, only that it records
    information regarding the frequency of each and every state
    visited. More specifically, if the newly suggested state is the
    same as the old state, the state is still counted as visited for
    that step.

    """
    def __init__(
            self,
            annotated_interactions,
            parameters_csvwriter,
            links_csvwriter,
            transitions_csvwriter,
            transitions_buffer_size=TRANSITIONS_BUFFER_SIZE
        ):
        """Create a new instance.

        :Parameters:
        - `annotated_interactions`: an `AnnotatedInteractionsArray`
          instance
        - `parameters_csvwriter`: a `csv.DictWriter` instance for
          outputting parameter results
        - `links_csvwriter`: a `csv.DictWriter` instance for
          outputting links results
        - `transitions_csvwriter`: a `csv.DictWriter` instance for
          outputting transitions data
        - `transitions_buffer_size`: number of records to maintain
          before outputting transitions information [default: ``10000``]

        """
        super(FrequencyDetailedArrayStateRecorder, self).__init__(
                annotated_interactions, parameters_csvwriter,
                links_csvwriter, transitions_csvwriter,
                transitions_buffer_size
        )
        self.annotated_interactions = annotated_interactions
        # state_frequencies stores the number of steps in which each
        # state has been observed, keyed by the state's serialized
        # form; each value is a ``[log_likelihood, count]`` pair.
        self.state_frequencies = {}
        # state_arrival_frequencies stores for each state the number of
        # steps in which it was a state in the current step but not in
        # the previous step (the number of steps the chain "arrived" at
        # that state).
        self.state_arrival_frequencies = collections.defaultdict(int)
    def record_state_frequency(self, markov_chain):
        """Record the number of times the chain's current state has
        been visited, and the number of times it was newly arrived at.

        :Parameters:
        - `markov_chain`: a `PLNMarkovChain` instance

        """
        state_key = markov_chain.current_state.serialize_state()
        state_log_likelihood = markov_chain.last_transition_info[2]
        # If we didn't see this state in the previous step, increment
        # the arrival count.
        # NOTE: the fourth item in the transition information tells
        # whether or not the last transition was accepted.
        if markov_chain.last_transition_info[3]:
            self.state_arrival_frequencies[state_key] += 1
        # Record every state that is visited regardless of previous state
        if state_key in self.state_frequencies:
            self.state_frequencies[state_key][1] += 1
        else:
            self.state_frequencies[state_key] = [state_log_likelihood,
                    1]
    def record_state(self, markov_chain):
        """Record the features of the current state.

        :Parameters:
        - `markov_chain`: a `PLNMarkovChain` instance

        """
        super(FrequencyDetailedArrayStateRecorder, self).record_state(
                markov_chain)
        self.record_state_frequency(markov_chain)
    def write_state_frequencies(
            self,
            file_handler,
            activity_threshold,
            transition_ratio,
            link_false_pos,
            link_false_neg,
            link_prior,
            parameters_state_class,
            links_state_class
        ):
        """Outputs the frequencies and likelihoods of visited states.

        Outputs a tab delimited file containing the likelihood of each
        state visited, the number of times each state was visited, and
        the probability of visiting a state. The probability of
        visiting a state is calculated by dividing the number of times
        a state is visited by the total number of recorded steps.

        NOTE: `activity_threshold` and the remaining parameters after
        `file_handler` are not used by this implementation; they are
        accepted to keep the interface compatible with callers.

        :Parameters:
        - `file_handler`: a writable file-like object for the
          tab-delimited output

        """
        total_arrivals = float(
                sum(self.state_arrival_frequencies.values()))
        total_states = float(self.records_made)
        file_handler.write("log_likelihood\tfrequency\t"
                "frequency_proportion\tarrival_frequency\t"
                "arrival_frequency_proportion\n")
        # NOTE: .items() (rather than the Python 2-only .iterkeys())
        # keeps this compatible with both Python 2 and Python 3, and
        # avoids a second lookup per state.
        for state_key, (log_likelihood, frequency) in (
                self.state_frequencies.items()):
            arrival_frequency = self.state_arrival_frequencies[state_key]
            record = {
                    'log_likelihood': log_likelihood,
                    'frequency': frequency,
                    'frequency_proportion': (frequency /
                            total_states),
                    'arrival_frequency': arrival_frequency,
                    'arrival_frequency_proportion': (arrival_frequency /
                            total_arrivals)
            }
            outstr = ("{log_likelihood}\t{frequency}\t"
                    "{frequency_proportion}\t{arrival_frequency}\t"
                    "{arrival_frequency_proportion}\n").format(
                            **record)
            file_handler.write(outstr)
#class TermsBasedFrequencyStateRecorder(TermsBasedStateRecorder,
#FrequencyDetailedArrayStateRecorder):
#"""Similar to 'FrequencyDetailedArrayStateRecorder', only that it
#records information regarding the frequency the markov chain visits
#each state.
#"""
#def __init__(
#self,
#annotated_interactions,
#parameters_csvwriter,
#links_csvwriter,
#terms_csvwriter,
#transitions_csvwriter,
#transitions_buffer_size=TRANSITIONS_BUFFER_SIZE
#):
#"""Create a new instance.
#:Parameters:
#- `annotated_interactions`: an `AnnotatedInteractionsGraph`
#instance
#- `parameters_csvwriter`: a `csv.DictWriter` instance for
#outputting parameter results with these fields:
#``'parameter'``, ``'value'``, ``'probability'``
#- `links_csvwriter`: a `csv.DictWriter` instance for
#outputting links results ``'term1'``, ``'term2'``,
#``'probability'``
#- `terms_csvwriter`: a `csv.DictWriter` instance for
#outputting terms results with these fields: ``'term'``,
#``'probability'``
#- `transitions_csvwriter`: a `csv.DictWriter` instance for
#outputting transitions data
#- `transitions_buffer_size`: number of records to maintain
#"""
#TermsBasedStateRecorder.__init__(
#self,
#annotated_interactions,
#parameters_csvwriter,
#links_csvwriter,
#terms_csvwriter,
#transitions_csvwriter,
#transitions_buffer_size
#)
#self.state_frequencies = collections.defaultdict(int)
#self.state_arrival_frequencies = collections.defaultdict(int)
#self.previous_link_selection = None
#def record_links_state(self, links_state):
#"""Record the links selected in this state through the
#super and also record the number of times this state
#has been selected.
#:Parameters:
#- `links_state`: a `PLNLinksState` instance
#"""
#super(FrequencyDetailedArrayStateRecorder, self).record_links_state(
#links_state)
## Only record those states that are not the same as the previous state
#if self.previous_link_selection == None:
#self.previous_link_selection = links_state.link_selections
#elif (cmp(self.previous_link_selection.all(), links_state.link_selections.all())) == 0:
#self.state_arrival_frequencies[tuple(links_state.link_selections)] += 1
#self.previous_link_selection = links_state.link_selections
## Record every state that is visited regardless of previous state
#self.state_frequencies[tuple(links_state.link_selections)] += 1
#def write_state_frequencies(self, file_handler, activity_threshold,
#transition_ratio, link_false_pos, link_false_neg,
#link_prior, term_prior, parameters_state_class, links_state_class):
#"""Calculates the likelihood from the visited states. A tab
#delimited file is output containing the likelihood of each
#state visited, the number of times each state was visited,
#and the probability visiting a state. Probability of visiting a
#state is calculated by dividing the number of times of a state
#is visited by the total number of states visited or steps.
#"""
## The following code traverses the entire hash map of
## states(state_frequencies) that have been visited. To
## calculate the likelihood from each state that is visited,
## a markov_chain is created. However, the markov_chain accepts
## an array of indices as seed links and the hash map has the
## indices stored as an array of booleans. Therefore, we convert
## the array of booleans into an array of indices that correspond to
## the links that are selected for the given state.
#markov_chain = None
#for tupled_state_key, visitation_count in self.state_frequencies.items():
#seed_links = []
#for index, link in enumerate(tupled_state_key):
#if link == True:
#seed_links.append(index)
#markov_chain = chains.TermsBasedMarkovChain(
#self,
#1,
#1,
#self.annotated_interactions,
#activity_threshold,
#transition_ratio,
#seed_links_indices=seed_links,
#link_false_pos=link_false_pos,
#link_false_neg=link_false_neg,
#link_prior=link_prior,
#term_prior=term_prior,
#parameters_state_class=parameters_state_class,
#links_state_class=links_state_class
#)
#file_handler.write(str(markov_chain.current_state.calc_log_likelihood())
#+ '\t' + str(visitation_count) + '\t' +
#str(self.state_arrival_frequencies[tupled_state_key])
#+ '\t' + str(visitation_count / float(self.records_made))
#+ '\n')
| {
"content_hash": "9fc7b929720b6b03bb679df42ce9fc17",
"timestamp": "",
"source": "github",
"line_count": 1169,
"max_line_length": 96,
"avg_line_length": 37.885372112917025,
"alnum_prop": 0.587518063583815,
"repo_name": "gotgenes/BiologicalProcessNetworks",
"id": "9f14480a7dabc5d08ef3cd09be943bf145dff209",
"size": "44471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bpn/mcmc/recorders.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "480176"
}
],
"symlink_target": ""
} |
"""A cache for immutable values (functions, constants, and classes).
These are immutable for a given interpreter, but may possibly change
if we run multiple Interpreters in sequence. This logic allows the
JIT-compiled machine code to quickly check if we're in the common
case of seeing the same value as previously.
"""
from rpython.rlib import jit
class ImmutCell(object):
    """A cache cell for one name whose value is immutable within a single
    interpreter run.

    ``constant_value`` never changes for the lifetime of the cell; whether
    that value is *currently declared* can change between runs (see
    GlobalImmutCache.reset() and declare_new()).
    """
    # RPython/JIT hint: these fields never change after construction.
    _immutable_fields_ = ['constant_value', 'is_builtin']
    # Sentinel class key; overwritten per-cell by
    # GlobalImmutCache.create_class() when a class value is cached.
    _class_key = (None, None, None)

    def __init__(self, constant_value, is_builtin=False):
        assert constant_value is not None
        self.constant_value = constant_value
        # Value visible in the current run; starts out as the constant itself.
        self.currently_declared = constant_value
        self.constant_value_is_currently_declared = True
        self.is_builtin = is_builtin

    def get_current_value(self):
        """Return the value currently bound in this cell."""
        if self.is_builtin or self.constant_value_is_currently_declared:
            return self.constant_value # constant-folded
        else:
            return self.currently_declared
# A fresh instance of this marker class is allocated whenever the cache's
# contents change; the instance's *identity* serves as the cache version.
class GlobalImmutCacheVersion(object): pass


class GlobalImmutCache(object):
    """Name -> ImmutCell mapping with a version tag for JIT constant-folding.

    get_cell() is decorated with @jit.elidable_promote and takes the current
    version as an argument, so JIT-compiled code may constant-fold a lookup
    for as long as self.version is unchanged; set_cell() replaces the version
    object, invalidating previously folded lookups.
    """
    # RPython hint: 'version' is quasi-immutable (read often, changed rarely).
    _immutable_fields_ = ['version?']

    def __init__(self, space, initdict={}, force_lowcase=True):
        # NOTE(review): mutable default argument 'initdict={}' -- harmless
        # here because it is only iterated, never mutated, but confirm
        # before adding any mutation of it.
        self.space = space
        self.all_cells = {}  # name -> ImmutCell
        self.force_lowcase = force_lowcase
        for key, value in initdict.items():
            self.set_builtin(key, value)
        self.version = GlobalImmutCacheVersion()

    def set_builtin(self, name, value):
        """Install *value* as a builtin: permanently declared, never reset."""
        self.set_cell(name, ImmutCell(value, is_builtin=True))

    def reset(self):
        # un-declare every non-builtin value
        # Note: we don't need to reset the cache version here, as we're not
        # deleting cell objects, merely emptying their contents.
        for cell in self.all_cells.itervalues():
            if not cell.is_builtin:
                cell.currently_declared = None
                cell.constant_value_is_currently_declared = False

    @jit.elidable_promote()
    def get_cell(self, name, version):
        # 'version' is deliberately unused in the body: it is part of the
        # memoization key so elidable results become stale when set_cell()
        # bumps self.version (callers always pass self.version).
        if self.force_lowcase:
            name = name.lower()
        try:
            return self.all_cells[name]
        except KeyError:
            return None

    def set_cell(self, name, newcell):
        """Add a new cell under *name* and bump the cache version."""
        if self.force_lowcase:
            name = name.lower()
        assert name not in self.all_cells
        self.all_cells[name] = newcell
        self.version = GlobalImmutCacheVersion()

    def has_definition(self, name):
        """Return True if *name* currently has a declared value."""
        cell = self.get_cell(name, self.version)
        if cell is None:
            return False
        return cell.currently_declared is not None

    def locate(self, name):
        """Return the value currently bound to *name*, or None."""
        cell = self.get_cell(name, self.version)
        if cell is None:
            return None
        return cell.get_current_value()

    def declare_new(self, name, value):
        """Declare *value* under *name*, creating or refilling its cell."""
        assert value is not None
        cell = self.get_cell(name, self.version)
        if cell is None:
            cell = ImmutCell(value)
            self.set_cell(name, cell)
        else:
            # Cell already exists: it must be empty (i.e. after a reset()).
            assert cell.currently_declared is None
            assert not cell.constant_value_is_currently_declared
            cell.currently_declared = value
            cell.constant_value_is_currently_declared = (
                value is cell.constant_value)
        return cell

    def create_class(self, interp, name, decl, key):
        "Special case for classes"
        cell = self.get_cell(name, self.version)
        if cell is not None:
            # Same class key as a previous definition: re-declare the cached
            # class object instead of building a new one.
            if cell._class_key == key:
                cell.currently_declared = cell.constant_value
                cell.constant_value_is_currently_declared = True
                decl.redefine_old_class(interp, cell.constant_value)
                return
        kls = decl.define_new_class(interp)
        cell = self.declare_new(name, kls)
        decl._immut_cell = cell
        if cell.constant_value_is_currently_declared:
            cell._class_key = key
| {
"content_hash": "738d47360ddc5e2fb0eab148d3b3580c",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 75,
"avg_line_length": 34.990990990990994,
"alnum_prop": 0.6207518022657055,
"repo_name": "xhava/hippyvm",
"id": "78eb466be036c8c6f146a234c8f344fc16d54469",
"size": "3884",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hippy/immut_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1559"
},
{
"name": "C",
"bytes": "2544055"
},
{
"name": "C++",
"bytes": "255972"
},
{
"name": "HTML",
"bytes": "415"
},
{
"name": "JavaScript",
"bytes": "453641"
},
{
"name": "Makefile",
"bytes": "4793"
},
{
"name": "PHP",
"bytes": "15041037"
},
{
"name": "Python",
"bytes": "2503719"
},
{
"name": "Shell",
"bytes": "15527"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import sys
from django import forms
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import get_resolver
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, render
from django.template import Context, RequestContext, TemplateDoesNotExist
from django.views.debug import technical_500_response, SafeExceptionReporterFilter
from django.views.decorators.debug import (sensitive_post_parameters,
sensitive_variables)
from django.utils.log import getLogger
from . import BrokenException, except_args
from .models import Article
def index_page(request):
    """Dummy index page"""
    return HttpResponse('<html><body>Dummy page</body></html>')


def custom_create(request):
    """
    Calls create_object generic view with a custom form class.
    """
    class SlugChangingArticleForm(forms.ModelForm):
        """Custom form class to overwrite the slug."""
        class Meta:
            model = Article

        def save(self, *args, **kwargs):
            # Ignore any submitted slug and always store a fixed one.
            self.instance.slug = 'some-other-slug'
            return super(SlugChangingArticleForm, self).save(*args, **kwargs)

    # Local import: only this view needs the create_update generic view.
    from django.views.generic.create_update import create_object
    return create_object(request,
        post_save_redirect='/create_update/view/article/%(slug)s/',
        form_class=SlugChangingArticleForm)


def raises(request):
    # Make sure that a callable that raises an exception in the stack frame's
    # local vars won't hijack the technical 500 response. See:
    # http://code.djangoproject.com/ticket/15025
    def callable():  # deliberately shadows the builtin for this regression test
        raise Exception
    try:
        raise Exception
    except Exception:
        return technical_500_response(request, *sys.exc_info())


def raises404(request):
    """Force a URL-resolution failure by resolving the empty path
    (presumably unmatched by the urlconf -- confirm against the test urls)."""
    resolver = get_resolver(None)
    resolver.resolve('')


def raises403(request):
    """View that always raises PermissionDenied."""
    raise PermissionDenied


def redirect(request):
    """
    Forces an HTTP redirect.
    """
    return HttpResponseRedirect("target/")


def view_exception(request, n):
    """Raise one of the pre-defined BrokenException args, selected by index n."""
    raise BrokenException(except_args[int(n)])


def template_exception(request, n):
    """Render a template, passing the selected exception arg in the context."""
    return render_to_response('debug/template_exception.html',
        {'arg': except_args[int(n)]})
# Some views to exercise the shortcuts

def render_to_response_view(request):
    """render_to_response() with a plain dict context."""
    return render_to_response('debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    })


def render_to_response_view_with_request_context(request):
    """render_to_response() with an explicit RequestContext."""
    return render_to_response('debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, context_instance=RequestContext(request))


def render_to_response_view_with_mimetype(request):
    """render_to_response() with a custom mimetype."""
    return render_to_response('debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, mimetype='application/x-rendertest')


def render_view(request):
    """render() shortcut with a plain dict context."""
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    })


def render_view_with_base_context(request):
    """render() with a plain (non-request) Context instance."""
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, context_instance=Context())


def render_view_with_content_type(request):
    """render() with a custom content_type."""
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, content_type='application/x-rendertest')


def render_view_with_status(request):
    """render() with a non-default HTTP status code (403)."""
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, status=403)


def render_view_with_current_app(request):
    """render() with a current_app override."""
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, current_app="foobar_app")


def render_view_with_current_app_conflict(request):
    # This should fail because we're passing both current_app and
    # context_instance, which conflict:
    return render(request, 'debug/render_test.html', {
        'foo': 'FOO',
        'bar': 'BAR',
    }, current_app="foobar_app", context_instance=RequestContext(request))


def raises_template_does_not_exist(request):
    # We need to inspect the HTML generated by the fancy 500 debug view but
    # the test client ignores it, so we send it explicitly.
    try:
        return render_to_response('i_dont_exist.html')
    except TemplateDoesNotExist:
        return technical_500_response(request, *sys.exc_info())
def send_log(request, exc_info):
    """Log an internal server error with the AdminEmailHandler's filters
    temporarily removed.

    The default logging config has a logging filter to ensure admin emails
    are only sent with DEBUG=False, but since someone might choose to remove
    that filter, we still want to be able to test the behavior of error
    emails with DEBUG=True.  So we remove the filter temporarily and restore
    it afterwards.
    """
    logger = getLogger('django.request')
    # Raises IndexError if no AdminEmailHandler is configured -- intentional:
    # these tests require it to be present.
    admin_email_handler = [
        h for h in logger.handlers
        if h.__class__.__name__ == "AdminEmailHandler"
    ][0]
    orig_filters = admin_email_handler.filters
    admin_email_handler.filters = []
    try:
        logger.error('Internal Server Error: %s' % request.path,
            exc_info=exc_info,
            extra={
                'status_code': 500,
                'request': request
            }
        )
    finally:
        # Fixed: restore the filters even if logging itself raises, so one
        # failing test cannot leak an unfiltered handler into later tests.
        admin_email_handler.filters = orig_filters
def non_sensitive_view(request):
    """View with no sensitivity decorators: the debug report may show all
    local variables and POST parameters."""
    # Do not just use plain strings for the variables' values in the code
    # so that the tests don't return false positives when the function's source
    # is displayed in the exception report.
    cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
    sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
    try:
        raise Exception
    except Exception:
        exc_info = sys.exc_info()
        send_log(request, exc_info)
        return technical_500_response(request, *exc_info)


@sensitive_variables('sauce')
@sensitive_post_parameters('bacon-key', 'sausage-key')
def sensitive_view(request):
    """View marking the 'sauce' local and two POST keys as sensitive."""
    # Do not just use plain strings for the variables' values in the code
    # so that the tests don't return false positives when the function's source
    # is displayed in the exception report.
    cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
    sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
    try:
        raise Exception
    except Exception:
        exc_info = sys.exc_info()
        send_log(request, exc_info)
        return technical_500_response(request, *exc_info)


@sensitive_variables()
@sensitive_post_parameters()
def paranoid_view(request):
    """View marking *all* locals and POST parameters as sensitive
    (decorators called with no arguments)."""
    # Do not just use plain strings for the variables' values in the code
    # so that the tests don't return false positives when the function's source
    # is displayed in the exception report.
    cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
    sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
    try:
        raise Exception
    except Exception:
        exc_info = sys.exc_info()
        send_log(request, exc_info)
        return technical_500_response(request, *exc_info)
class UnsafeExceptionReporterFilter(SafeExceptionReporterFilter):
    """
    Ignores all the filtering done by its parent class.
    """
    def get_post_parameters(self, request):
        # Return POST data unfiltered (the parent class would sanitize it).
        return request.POST

    def get_traceback_frame_variables(self, request, tb_frame):
        # Expose every local variable of the frame unfiltered.
        return tb_frame.f_locals.items()
@sensitive_variables()
@sensitive_post_parameters()
def custom_exception_reporter_filter_view(request):
    """Like paranoid_view, but installs UnsafeExceptionReporterFilter on the
    request -- presumably overriding the decorators so everything is shown;
    confirm against the corresponding assertions in the test suite."""
    # Do not just use plain strings for the variables' values in the code
    # so that the tests don't return false positives when the function's source
    # is displayed in the exception report.
    cooked_eggs = ''.join(['s', 'c', 'r', 'a', 'm', 'b', 'l', 'e', 'd'])
    sauce = ''.join(['w', 'o', 'r', 'c', 'e', 's', 't', 'e', 'r', 's', 'h', 'i', 'r', 'e'])
    request.exception_reporter_filter = UnsafeExceptionReporterFilter()
    try:
        raise Exception
    except Exception:
        exc_info = sys.exc_info()
        send_log(request, exc_info)
        return technical_500_response(request, *exc_info)
| {
"content_hash": "082eb6c1cd0db84ec70c5bebda5507c3",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 91,
"avg_line_length": 35.37826086956522,
"alnum_prop": 0.6410224898611282,
"repo_name": "mixman/djangodev",
"id": "f2b4e79322cd66cad8004a8a98176956f9fa160d",
"size": "8137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regressiontests/views/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "88362"
},
{
"name": "Python",
"bytes": "7834206"
},
{
"name": "Shell",
"bytes": "9076"
}
],
"symlink_target": ""
} |
"""
Copyright (C) 2017 SunSpec Alliance
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import os
import time
import sunspec.core.modbus.client as modbus
import sunspec.core.device as device
import sunspec.core.util as util
import sunspec.core.suns as suns
from sunspec.core.util import SunSpecError
# Modbus transport types supported by ClientDevice / SunSpecClientDevice.
RTU = 'RTU'
TCP = 'TCP'
MAPPED = 'Mapped'

# Serial parity settings, re-exported from the modbus layer for convenience.
PARITY_NONE = modbus.PARITY_NONE
PARITY_EVEN = modbus.PARITY_EVEN


class SunSpecClientError(SunSpecError):
    """Raised for any sunspec module error surfaced through the client API."""
    pass
class ClientDevice(device.Device):
    """A SunSpec device accessed over Modbus.

    Derived from :const:`sunspec.core.device.Device`; adds Modbus device
    access capability to the device base class.

    Parameters:
        device_type :
            Device type. Possible values: :const:`RTU`, :const:`TCP`,
            :const:`MAPPED`.
        slave_id :
            Modbus slave id.
        name :
            For :const:`RTU` devices, the name of the serial port such as
            'com4' or '/dev/tty2'. For :const:`MAPPED` devices, the name of
            the modbus map file.
        pathlist :
            Pathlist object containing alternate paths to support files.
        baudrate :
            For :const:`RTU` devices, baud rate such as 9600 or 19200.
            Defaulted by modbus module to 9600.
        parity :
            For :const:`RTU` devices, parity. Possible values:
            :const:`PARITY_NONE`, :const:`PARITY_EVEN`.
            Defaulted by modbus module to :const:`PARITY_NONE`.
        ipaddr :
            For :const:`TCP` devices, device IP address.
        ipport :
            For :const:`TCP` devices, device IP port. Defaulted by modbus
            module to 502.
        timeout :
            Modbus request timeout in seconds. Fractional seconds are
            permitted such as .5.
        trace :
            Enable low level trace.

    Raises:
        SunSpecClientError: Raised for any sunspec module error.

    Attributes:
        type            Device type (:const:`RTU`, :const:`TCP`, :const:`MAPPED`).
        name            Serial port or map file name.
        pathlist        Pathlist object containing alternate support-file paths.
        slave_id        Modbus slave id.
        modbus_device   Modbus device object; concrete type depends on device type.
        retry_count     Request retry count. Currently not used.
        base_addr_list  Modbus base addresses to probe when scanning a device
                        for the first time.
    """

    def __init__(self, device_type, slave_id=None, name=None, pathlist=None,
                 baudrate=None, parity=None, ipaddr=None, ipport=None,
                 timeout=None, trace=False):
        device.Device.__init__(self, addr=None)
        self.type = device_type
        self.name = name
        self.pathlist = pathlist
        self.slave_id = slave_id
        self.modbus_device = None
        self.retry_count = 2
        self.base_addr_list = [40000, 0, 50000]
        try:
            if device_type == RTU:
                self.modbus_device = modbus.ModbusClientDeviceRTU(slave_id, name, baudrate, parity, timeout, self, trace)
            elif device_type == TCP:
                self.modbus_device = modbus.ModbusClientDeviceTCP(slave_id, ipaddr, ipport, timeout, self, trace)
            elif device_type == MAPPED:
                if name is not None:
                    self.modbus_device = modbus.ModbusClientDeviceMapped(slave_id, name, pathlist, self)
                else:
                    if self.modbus_device is not None:
                        self.modbus_device.close()
                    raise SunSpecClientError('Map file required for mapped device')
        # Fixed: 'except X as e' replaces the Python-2-only 'except X, e'
        # form (a syntax error under Python 3; valid on Python 2.6+).
        except modbus.ModbusClientError as e:
            if self.modbus_device is not None:
                self.modbus_device.close()
            raise SunSpecClientError('Modbus error: %s' % str(e))

    def close(self):
        """Close the underlying Modbus device if one was created."""
        if self.modbus_device is not None:
            self.modbus_device.close()

    def read(self, addr, count):
        """Read Modbus device registers.

        Parameters:
            addr :
                Starting Modbus address.
            count :
                Register count.

        Returns:
            Byte string containing register contents.

        Raises:
            SunSpecClientError: No modbus device set, or Modbus error.
        """
        try:
            if self.modbus_device is not None:
                return self.modbus_device.read(addr, count)
            else:
                raise SunSpecClientError('No modbus device set for SunSpec device')
        except modbus.ModbusClientError as e:
            raise SunSpecClientError('Modbus read error: %s' % str(e))

    def write(self, addr, data):
        """Write Modbus device registers.

        Parameters:
            addr :
                Starting Modbus address.
            data :
                Byte string containing register contents.

        Raises:
            SunSpecClientError: No modbus device set, or Modbus error.
        """
        try:
            if self.modbus_device is not None:
                return self.modbus_device.write(addr, data)
            else:
                raise SunSpecClientError('No modbus device set for SunSpec device')
        except modbus.ModbusClientError as e:
            raise SunSpecClientError('Modbus write error: %s' % str(e))

    def read_points(self):
        """Read the points for all models in the device from the physical
        device.
        """
        for model in self.models_list:
            model.read_points()

    def scan(self, progress=None, delay=None):
        """Scan all the models of the physical device and create the
        corresponding model objects within the device object based on the
        SunSpec model definitions.

        Parameters:
            progress :
                Optional callback invoked per model with a status string;
                returning a falsy value aborts the scan.
            delay :
                Optional delay in seconds inserted between Modbus requests.
        """
        error = ''
        connect = False
        # TCP devices are connected for the duration of the scan and
        # disconnected again at the end.
        if self.modbus_device and type(self.modbus_device) == modbus.ModbusClientDeviceTCP:
            self.modbus_device.connect()
            connect = True
            if delay is not None:
                time.sleep(delay)
        if self.base_addr is None:
            # Probe the well-known base addresses for the 'SunS' marker.
            for addr in self.base_addr_list:
                try:
                    data = self.read(addr, 3)
                    if data[:4] == 'SunS':
                        self.base_addr = addr
                        break
                    else:
                        error = 'Device responded - not SunSpec register map'
                except SunSpecClientError as e:
                    if not error:
                        error = str(e)
                if delay is not None:
                    time.sleep(delay)
        else:
            # Fixed: when the base address is already known the probe loop
            # above never runs, leaving 'data' unbound for the header parse
            # below (NameError in the original). Read the device header
            # ('SunS' marker + first model id) explicitly.
            data = self.read(self.base_addr, 3)
        if self.base_addr is not None:
            model_id = util.data_to_u16(data[4:6])
            addr = self.base_addr + 2
            while model_id != suns.SUNS_END_MODEL_ID:
                # read model and model len separately due to some devices not
                # supplying count for the end model id
                data = self.read(addr + 1, 1)
                if data and len(data) == 2:
                    if progress is not None:
                        cont = progress('Scanning model %s' % (model_id))
                        if not cont:
                            raise SunSpecClientError('Device scan terminated')
                    model_len = util.data_to_u16(data)
                    # move address past model id and length
                    model = ClientModel(self, model_id, addr + 2, model_len)
                    try:
                        model.load()
                    except Exception as e:
                        # Record the failure but keep scanning other models.
                        model.load_error = str(e)
                    self.add_model(model)
                    addr += model_len + 2
                    data = self.read(addr, 1)
                    if data and len(data) == 2:
                        model_id = util.data_to_u16(data)
                    else:
                        break
                else:
                    break
                if delay is not None:
                    time.sleep(delay)
        else:
            if not error:
                error = 'Unknown error'
            raise SunSpecClientError(error)
        if connect:
            self.modbus_device.disconnect()
class ClientModel(device.Model):
    """A derived class based on :const:`sunspec.core.device.Model`. It adds
    Modbus device access capability to the model base class.

    Parameters:
        dev :
            Device object associated with the model.
        mid :
            Model id.
        addr :
            Starting Modbus address of the model.
        mlen :
            Model length in Modbus registers.
        index :
            Model index.

    Raises:
        SunSpecClientError: Raised for any sunspec module error.
    """

    def __init__(self, dev=None, mid=None, addr=0, mlen=None, index=1):
        device.Model.__init__(self, device=dev, mid=mid, addr=addr, mlen=mlen, index=index)

    def load(self):
        """Create the block and point objects within the model object based on
        the corresponding SunSpec model definition.
        """
        device.Model.load(self, block_class=ClientBlock, point_class=ClientPoint)

    def read_points(self):
        """Read all points in the model from the physical device."""
        if self.model_type is not None:
            # read current model
            try:
                end_index = len(self.read_blocks)
                if end_index == 1:
                    # Contiguous model: a single Modbus read covers it.
                    data = self.device.read(self.addr, self.len)
                else:
                    # Model must be read in chunks; concatenate the results.
                    data = ''
                    index = 0
                    while index < end_index:
                        addr = self.read_blocks[index]
                        index += 1
                        if index < end_index:
                            read_len = self.read_blocks[index] - addr
                        else:
                            read_len = self.addr + self.len - addr
                        data += self.device.read(addr, read_len)
                if data:
                    # Fixed: floor division keeps the register count an int
                    # under Python 3 ('/' yields a float there); result is
                    # identical under Python 2.
                    data_len = len(data) // 2
                    if data_len != self.len:
                        raise SunSpecClientError('Error reading model %s' % self.model_type)
                    # for each repeating block
                    for block in self.blocks:
                        # scale factor points first, so value points can pick
                        # up their scale factors below
                        # (.items() used for Python 2/3 compatibility; the
                        # iterated content is the same as iteritems())
                        for pname, point in block.points_sf.items():
                            offset = int(point.addr) - int(self.addr)
                            if point.point_type.data_to is not None:
                                byte_offset = offset * 2
                                point.value_base = point.point_type.data_to(data[byte_offset:byte_offset + (int(point.point_type.len) * 2)])
                                if not point.point_type.is_impl(point.value_base):
                                    point.value_base = None
                            else:
                                raise SunSpecClientError('No data_to function set for %s : %s' % (pname, point.point_type))
                        # non-scale factor points
                        for pname, point in block.points.items():
                            offset = int(point.addr) - int(self.addr)
                            if point.point_type.data_to is not None:
                                byte_offset = offset * 2
                                point.value_base = point.point_type.data_to(data[byte_offset:byte_offset + (int(point.point_type.len) * 2)])
                                if point.point_type.is_impl(point.value_base):
                                    if point.sf_point is not None:
                                        point.value_sf = point.sf_point.value_base
                                else:
                                    point.value_base = None
                                    point.value_sf = None
                            else:
                                raise SunSpecClientError('No data_to function set for %s : %s' % (pname, point.point_type))
            # Fixed: Python 3 compatible 'except ... as e' syntax throughout
            # (the 'except X, e' form is a syntax error on Python 3).
            except SunSpecError as e:
                raise SunSpecClientError(e)
            except modbus.ModbusClientError as e:
                raise SunSpecClientError('Modbus error: %s' % str(e))
            except:
                # Anything else is unexpected; propagate unchanged.
                raise

    def write_points(self):
        """Write all points that have been modified since the last write
        operation to the physical device.

        Dirty points that are contiguous in the register map are coalesced
        into a single Modbus write.
        """
        addr = None
        next_addr = None
        data = ''
        for block in self.blocks:
            for point in block.points_list:
                if point.dirty:
                    point_addr = int(point.addr)
                    point_len = int(point.point_type.len)
                    point_data = point.point_type.to_data(point.value_base, (point_len * 2))
                    if addr is None:
                        addr = point_addr
                        data = ''
                    else:
                        if point_addr != next_addr:
                            # Address gap: flush the pending run first.
                            block.model.device.write(addr, data)
                            addr = point_addr
                            data = ''
                    next_addr = point_addr + point_len
                    data += point_data
                    point.dirty = False
            if addr is not None:
                # Flush the last pending run for this block.
                block.model.device.write(addr, data)
                addr = None
class ClientBlock(device.Block):
    """A derived class based on :const:`sunspec.core.device.Block`. It adds
    Modbus device access capability to the block base class.

    Parameters:
        model :
            Model object associated with the block.
        addr :
            Starting Modbus address of the block.
        blen :
            Block length in Modbus registers.
        block_type :
            The block type object associated with block in the model
            definition.
        index :
            Block index.
    """
    def __init__(self, model, addr, blen, block_type, index=1):
        # Pure pass-through; all behavior currently lives in device.Block.
        device.Block.__init__(self, model, addr, blen, block_type, index)
class ClientPoint(device.Point):
    """A derived class based on :const:`sunspec.core.device.Point`. It adds
    Modbus device access capability to the point base class.

    Parameters:
        block :
            Block object associated with the point.
        point_type :
            The point type object associated with point in the model
            definition.
        addr :
            Starting Modbus address of the point.
        sf_point :
            Point object associated with the point scale factor if present.
        value :
            Point value.

    Raises:
        SunSpecClientError: Raised for any sunspec module error.
    """
    def __init__(self, block=None, point_type=None, addr=None, sf_point=None, value=None):
        device.Point.__init__(self, block, point_type, addr, sf_point, value)

    def write(self):
        """Write the point to the physical device.
        """
        # Encode the base (unscaled) value into its registers, write it in
        # place, then clear the dirty flag.
        data = self.point_type.to_data(self.value_base, (int(self.point_type.len) * 2))
        self.block.model.device.write(int(self.addr), data)
        self.dirty = False
class SunSpecClientModelBase(object):
    """Base class of the model classes generated dynamically during
    SunSpecClientDevice initialization.  In addition to the attributes listed
    below, the model (fixed block) points are placed as attributes on the
    model.

    Parameters:
        model :
            The :const:`sunspec.core.device.Model` associated with the model.
        name :
            Model name as specified in the model definition.

    Raises:
        SunSpecClientError : Raised for any sunspec module error.

    Attributes:
        model           The associated :const:`sunspec.core.device.Model`.
        name            Model name as specified in the model definition.
        repeating       Repeating block instances if the model contains one.
        repeating_name  Repeating block name.
        points          Names of the point attributes added to the model
                        (set on the generated subclass by model_class_get()).
    """
    def __init__(self, model, name):
        self.model = model
        self.name = name
        # Element 0 is None so repeating block instances are addressed
        # starting at index 1.
        self.repeating = [None]
        self.repeating_name = 'repeating'
        if len(model.blocks) > 1:
            # The companion repeating-block class is generated under the
            # name '<ModelClass>Repeating' (see model_class_get()).
            block_class_name = self.__class__.__name__ + 'Repeating'
            for block in model.blocks[1:]:
                # set repeating block name and attribute if present
                if block.block_type.name != self.repeating_name:
                    self.repeating_name = block.block_type.name
                    setattr(self, self.repeating_name, self.repeating)
                block_class = globals().get(block_class_name)
                c = block_class(block, self.repeating_name)
                self.repeating.append(c)

    def _get_property(self, name):
        # Returns the point's value, or None when no such point exists.
        point = self.model.points.get(name)
        if point:
            return point.value

    def _set_property(self, name, value):
        # Silently ignores unknown point names.
        point = self.model.points.get(name)
        if point:
            point.value = value

    def __getitem__(self, name):
        return self._get_property(name)

    def __setitem__(self, name, item):
        return self._set_property(name, item)

    def read(self):
        """Read all points in the model from the physical device."""
        self.model.read_points()

    def write(self):
        """Write all points that have been modified since the last write
        operation to the physical device."""
        self.model.write_points()

    def __str__(self):
        # Header, then one 'name: value' line per implemented point, then
        # the repeating blocks (skipping the None placeholder at index 0).
        s = '\n%s (%s):\n' % (self.name, self.model.id)
        for name in self.points:
            value = getattr(self, name)
            if value is not None:
                s += '%s: %s\n' % (name, str(value))
        for block in self.repeating[1:]:
            s += str(block)
        return s
class SunSpecClientBlockBase(object):
    """SunSpecClientBlockBase

    Base class of the dynamically generated repeating block classes created
    during SunSpecClientDevice initialization. In addition to the attributes
    listed below, the repeating block points are placed as attributes on the
    repeating block.

    Parameters:
        block :
            The :const:`sunspec.core.device.Block` object associated with the
            block.
        name :
            Repeating block name as specified in the model definition.

    Attributes:
        block   The associated :const:`sunspec.core.device.Block` object.
        name    Block name as specified in the model definition.
        points  Names of the point attributes added to the block (set on the
                generated subclass by model_class_get()).
    """
    def __init__(self, block, name):
        self.block = block
        self.name = name

    def _get_property(self, name):
        # Returns the point's value, or None when no such point exists.
        point = self.block.points.get(name)
        if point:
            return point.value

    def _set_property(self, name, value):
        # Silently ignores unknown point names.
        point = self.block.points.get(name)
        if point:
            point.value = value

    def __getitem__(self, name):
        return self._get_property(name)

    def __setitem__(self, key, item):
        # Bug fix: the original forwarded the undefined name 'name' instead
        # of the 'key' parameter, raising NameError on every item assignment.
        return self._set_property(key, item)

    def __str__(self):
        # Header with the block's instance index, then one 'name: value'
        # line per implemented point.
        s = '\n%s[%d]:\n' % (self.name, self.block.index)
        for name in self.points:
            value = getattr(self, name)
            if value is not None:
                s += '%s: %s\n' % (name, str(value))
        return s
def model_class_get(model_id):
    """Return the generated client model class for *model_id*, creating and
    caching it on first use.

    The generated class derives from SunSpecClientModelBase and gets a
    property for every non-scale-factor, non-pad point of the model's fixed
    block.  If the model has a repeating block, a companion
    'Model<id>Repeating' class deriving from SunSpecClientBlockBase is
    generated the same way.  Generated classes are stored in this module's
    globals(), so each model id is only processed once.
    """
    def add_property(self, name, value):
        # Attach a property named *name* that forwards to the instance's
        # _get_property/_set_property accessors.
        fget = lambda self: self._get_property(name)
        fset = lambda self, value: self._set_property(name, value)
        setattr(self, name, property(fget, fset))

    def class_init(self, model, name):
        SunSpecClientModelBase.__init__(self, model, name)

    def block_class_init(self, block, name):
        SunSpecClientBlockBase.__init__(self, block, name)

    class_name = 'Model' + str(model_id)
    class_ = globals().get(class_name)
    if class_ is None:
        class_ = type(class_name, (SunSpecClientModelBase,), {'__init__': class_init})
        globals()[class_name] = class_
        setattr(class_, 'points', [])
        model_type = None
        try:
            model_type = device.model_type_get(model_id)
        # Fixed: 'except ... as e' replaces the Python-2-only 'except X, e'
        # form (a syntax error on Python 3; valid on Python 2.6+).
        except Exception as e:
            # Record the failure on the class instead of raising so a device
            # containing an unknown model can still be scanned.
            setattr(class_, 'load_error', str(e))
        if model_type is not None:
            for point_type in model_type.fixed_block.points_list:
                if point_type.type != suns.SUNS_TYPE_SUNSSF and point_type.type != suns.SUNS_TYPE_PAD:
                    add_property(class_, point_type.id, None)
                    class_.points.append(point_type.id)
            ### check for writable point?
            block_type = model_type.repeating_block
            if block_type is not None:
                block_class_name = class_name + 'Repeating'
                block_class = type(block_class_name, (SunSpecClientBlockBase,), {'__init__': block_class_init})
                globals()[block_class_name] = block_class
                setattr(block_class, 'points', [])
                for point_type in block_type.points_list:
                    if point_type.type != suns.SUNS_TYPE_SUNSSF and point_type.type != suns.SUNS_TYPE_PAD:
                        add_property(block_class, point_type.id, None)
                        block_class.points.append(point_type.id)
    return class_
class SunSpecClientDevice(object):
    """This class wraps the sunspec.core.client.ClientDevice class to provide
    an alternate syntax for scripting. By placing the model (fixed block)
    points and repeating block points directly on the model and repeating
    block objects as attributes, the syntax for accessing them is simplified.

    The model and block classes within the device are dynamically generated
    based on the model type with the appropriate attributes being added
    during creation.

    Parameters:
        device_type :
            Device type. Possible values: :const:`RTU`, :const:`TCP`,
            :const:`MAPPED`.
        slave_id :
            Modbus slave id.
        name :
            For :const:`RTU` devices, the name of the serial port such as
            'com4' or '/dev/ttyUSB0'. For :const:`MAPPED` devices, the name
            of the modbus map file.
        pathlist :
            Pathlist object containing alternate paths to support files.
        baudrate :
            For :const:`RTU` devices, baud rate. Defaulted by modbus module
            to 9600.
        parity :
            For :const:`RTU` devices, parity (:const:`PARITY_NONE` or
            :const:`PARITY_EVEN`). Defaulted by modbus module to
            :const:`PARITY_NONE`.
        ipaddr :
            For :const:`TCP` devices, device IP address.
        ipport :
            For :const:`TCP` devices, device IP port. Defaulted by modbus
            module to 502.
        timeout :
            Modbus request timeout in seconds. Fractional seconds permitted.
        trace :
            Enable low level trace.
        scan_progress :
            Optional progress callback forwarded to the initial device scan.
        scan_delay :
            Optional inter-request delay forwarded to the initial device scan.

    Raises:
        SunSpecClientError: Raised for any sunspec module error.

    Attributes:
        device
            The :const:`sunspec.core.client.ClientDevice` associated with
            this object.
        models
            List of model names present in the device in discovery order.
            If a model id occurs more than once, the attribute with that
            name is a list whose element 0 is None (instance indexing
            starts at 1).
    """

    def __init__(self, device_type, slave_id=None, name=None, pathlist=None,
                 baudrate=None, parity=None, ipaddr=None, ipport=None,
                 timeout=None, trace=False, scan_progress=None, scan_delay=None):
        self.device = ClientDevice(device_type, slave_id, name, pathlist, baudrate, parity, ipaddr, ipport, timeout, trace)
        self.models = []
        try:
            # scan device models
            self.device.scan(progress=scan_progress, delay=scan_delay)
            # create named attributes for each model
            for model in self.device.models_list:
                model_id = str(model.id)
                c = model_class_get(model_id)
                if model.model_type is not None:
                    name = model.model_type.name
                else:
                    name = 'model_' + model_id
                model_class = c(model, name)
                existing = getattr(self, name, None)
                # if model id already defined
                if existing:
                    # if model id definition is not a list, turn it into a
                    # list and add the existing model
                    if type(self[name]) is not list:
                        # model instance index starts at 1 so the first list
                        # element is None
                        setattr(self, name, [None])
                        self[name].append(existing)
                    # add new model to the list
                    self[name].append(model_class)
                # if first model id instance, set attribute as model
                else:
                    setattr(self, name, model_class)
                    self.models.append(name)
        # Fixed: Python 3 compatible syntax; the exception value was unused,
        # and the exception is re-raised unchanged after cleanup.
        except Exception:
            if self.device is not None:
                self.device.close()
            raise

    def close(self):
        """Release resources associated with the device. Should be called when
        the device object is no longer in use.
        """
        self.device.close()

    def read(self):
        """Read the points for all models in the device from the physical
        device.
        """
        self.device.read_points()

    def __getitem__(self, key):
        # Attribute-style item access: missing keys yield None, not KeyError.
        return self.__dict__.get(key, None)

    def __setitem__(self, key, item):
        # Bug fix: dict has no .set() method -- the original
        # 'self.__dict__.set(key, item)' raised AttributeError on every item
        # assignment. Assign directly instead.
        self.__dict__[key] = item

    def __str__(self):
        # Concatenate the string forms of all discovered models.
        s = ''
        for model in self.models:
            s += str(self[model])
        return s
if __name__ == "__main__":
    # This module is a library; there is nothing to run directly.
    pass
| {
"content_hash": "9f149731517cc7342b65c2d15e5e7531",
"timestamp": "",
"source": "github",
"line_count": 830,
"max_line_length": 144,
"avg_line_length": 34.42409638554217,
"alnum_prop": 0.5532689346213076,
"repo_name": "jbm950/pysunspec",
"id": "d8fe710f796c1c0cefc1338c32227edfdc68a29d",
"size": "28573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sunspec/core/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "204862"
}
],
"symlink_target": ""
} |
from .dump import dump
from .toBytes import toBytes
#=============================================================================
| {
"content_hash": "d044019e418da9a2631fde7adde3d221",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 78,
"avg_line_length": 33,
"alnum_prop": 0.3181818181818182,
"repo_name": "TD22057/T-Home",
"id": "f8205b845bfb773fbab42cc3071f32aefc265223",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/tHome/util/hex/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "337261"
},
{
"name": "Shell",
"bytes": "5082"
}
],
"symlink_target": ""
} |
import FWCore.ParameterSet.Config as cms
import pickle

# NOTE(review): unpickling executes arbitrary code from the file, so
# PSet.pkl must come from a trusted CRAB/CMSSW workflow.
# Use a context manager so the file handle is closed promptly instead of
# being left to the garbage collector.
with open('PSet.pkl', 'rb') as _pset_file:
    process = pickle.load(_pset_file)
| {
"content_hash": "d468f5cc90ff3211c3acab4bb5a2abc3",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 45,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.7623762376237624,
"repo_name": "avkhadiev/bbtoDijet",
"id": "5fd923dd7eea775860f317b4b06d7c24d971e59e",
"size": "101",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "bbtoDijetAnalyzer/test/crab_test_bbtoDijetAnalyzer_test/crab_bbtoDijetAnalyzer_test/inputs/PSet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "22726"
},
{
"name": "C++",
"bytes": "46033"
},
{
"name": "Python",
"bytes": "4558815"
}
],
"symlink_target": ""
} |
"""Test the listtransactions API."""
from decimal import Decimal
from io import BytesIO
from test_framework.messages import COIN, CTransaction
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
hex_str_to_bytes,
)
def tx_from_hex(hexstring):
    """Deserialize a CTransaction from its hex encoding."""
    stream = BytesIO(hex_str_to_bytes(hexstring))
    transaction = CTransaction()
    transaction.deserialize(stream)
    return transaction
class ListTransactionsTest(BitcoinTestFramework):
    """Functional test of the listtransactions RPC, including the
    bip125-replaceable field for sent and received transactions."""

    def set_test_params(self):
        self.num_nodes = 2

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        self.nodes[0].generate(1)  # Get out of IBD
        self.sync_all()

        # Simple send, 0 to 1:
        txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 100)
        self.sync_all()
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid": txid},
                            {"category": "send", "amount": Decimal("-100"), "confirmations": 0})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"txid": txid},
                            {"category": "receive", "amount": Decimal("100"), "confirmations": 0})
        # mine a block, confirmations should change:
        blockhash = self.nodes[0].generate(1)[0]
        blockheight = self.nodes[0].getblockheader(blockhash)['height']
        self.sync_all()
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid": txid},
                            {"category": "send", "amount": Decimal("-100"), "confirmations": 1, "blockhash": blockhash, "blockheight": blockheight})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"txid": txid},
                            {"category": "receive", "amount": Decimal("100"), "confirmations": 1, "blockhash": blockhash, "blockheight": blockheight})

        # send-to-self:
        txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 200)
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid": txid, "category": "send"},
                            {"amount": Decimal("-200")})
        assert_array_result(self.nodes[0].listtransactions(),
                            {"txid": txid, "category": "receive"},
                            {"amount": Decimal("200")})

        # sendmany from node1: twice to self, twice to node2:
        send_to = {self.nodes[0].getnewaddress(): 110,
                   self.nodes[1].getnewaddress(): 220,
                   self.nodes[0].getnewaddress(): 330,
                   self.nodes[1].getnewaddress(): 440}
        txid = self.nodes[1].sendmany("", send_to)
        self.sync_all()
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category": "send", "amount": Decimal("-110")},
                            {"txid": txid})
        assert_array_result(self.nodes[0].listtransactions(),
                            {"category": "receive", "amount": Decimal("110")},
                            {"txid": txid})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category": "send", "amount": Decimal("-220")},
                            {"txid": txid})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category": "receive", "amount": Decimal("220")},
                            {"txid": txid})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category": "send", "amount": Decimal("-330")},
                            {"txid": txid})
        assert_array_result(self.nodes[0].listtransactions(),
                            {"category": "receive", "amount": Decimal("330")},
                            {"txid": txid})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category": "send", "amount": Decimal("-440")},
                            {"txid": txid})
        assert_array_result(self.nodes[1].listtransactions(),
                            {"category": "receive", "amount": Decimal("440")},
                            {"txid": txid})

        if not self.options.descriptors:
            # include_watchonly is a legacy wallet feature, so don't test it for descriptor wallets
            pubkey = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
            multisig = self.nodes[1].createmultisig(1, [pubkey])
            self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
            txid = self.nodes[1].sendtoaddress(multisig["address"], 100)
            self.nodes[1].generate(1)
            self.sync_all()
            assert_equal(len(self.nodes[0].listtransactions(label="watchonly", include_watchonly=True)), 1)
            assert_equal(len(self.nodes[0].listtransactions(dummy="watchonly", include_watchonly=True)), 1)
            assert len(self.nodes[0].listtransactions(label="watchonly", count=100, include_watchonly=False)) == 0
            assert_array_result(self.nodes[0].listtransactions(label="watchonly", count=100, include_watchonly=True),
                                {"category": "receive", "amount": Decimal("100")},
                                {"txid": txid, "label": "watchonly"})

        self.run_rbf_opt_in_test()

    # Check that the opt-in-rbf flag works properly, for sent and received
    # transactions.
    def run_rbf_opt_in_test(self):
        # Check whether a transaction signals opt-in RBF itself
        def is_opt_in(node, txid):
            rawtx = node.getrawtransaction(txid, 1)
            for x in rawtx["vin"]:
                if x["sequence"] < 0xfffffffe:
                    return True
            return False

        # Find an unconfirmed output matching a certain txid
        def get_unconfirmed_utxo_entry(node, txid_to_match):
            utxo = node.listunspent(0, 0)
            for i in utxo:
                if i["txid"] == txid_to_match:
                    return i
            return None

        # 1. Chain a few transactions that don't opt-in.
        txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 10000)
        assert not is_opt_in(self.nodes[0], txid_1)
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
        self.sync_mempools()
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})

        # Tx2 will build off txid_1, still not opting in to RBF.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1)
        assert_equal(utxo_to_use["safe"], True)
        # Fix: the original fetched node 1's utxo twice in a row; the
        # duplicate call was redundant.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
        assert_equal(utxo_to_use["safe"], False)

        # Create tx2 using createrawtransaction
        inputs = [{"txid": utxo_to_use["txid"], "vout": utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 9999.999}
        tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx2_signed = self.nodes[1].signrawtransactionwithwallet(tx2)["hex"]
        txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)

        # ...and check the result
        assert not is_opt_in(self.nodes[1], txid_2)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
        self.sync_mempools()
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})

        # Tx3 will opt-in to RBF
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
        inputs = [{"txid": txid_2, "vout": utxo_to_use["vout"]}]
        outputs = {self.nodes[1].getnewaddress(): 9999.998}
        tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
        tx3_modified = tx_from_hex(tx3)
        tx3_modified.vin[0].nSequence = 0
        tx3 = tx3_modified.serialize().hex()
        tx3_signed = self.nodes[0].signrawtransactionwithwallet(tx3)['hex']
        txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)

        assert is_opt_in(self.nodes[0], txid_3)
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})
        self.sync_mempools()
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})

        # Tx4 will chain off tx3. Doesn't signal itself, but depends on one
        # that does.
        utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
        inputs = [{"txid": txid_3, "vout": utxo_to_use["vout"]}]
        outputs = {self.nodes[0].getnewaddress(): 9999.997}
        tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
        tx4_signed = self.nodes[1].signrawtransactionwithwallet(tx4)["hex"]
        txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)

        assert not is_opt_in(self.nodes[1], txid_4)
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
        self.sync_mempools()
        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})

        # Replace tx3, and check that tx4 becomes unknown
        tx3_b = tx3_modified
        tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN)  # bump the fee
        tx3_b = tx3_b.serialize().hex()
        tx3_b_signed = self.nodes[0].signrawtransactionwithwallet(tx3_b)['hex']
        txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, 0)
        assert is_opt_in(self.nodes[0], txid_3b)

        assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
        self.sync_mempools()
        assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})

        # Check gettransaction as well:
        for n in self.nodes[0:2]:
            assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
            assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
            assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")

        # After mining a transaction, it's no longer BIP125-replaceable
        self.nodes[0].generate(1)
        assert txid_3b not in self.nodes[0].getrawmempool()
        assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
        assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
    # Standard test-framework entry point.
    ListTransactionsTest().main()
| {
"content_hash": "5c85c2c0f9cb0394e1dc89718afe46e9",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 150,
"avg_line_length": 52.02857142857143,
"alnum_prop": 0.5797181036060772,
"repo_name": "rnicoll/dogecoin",
"id": "45e7bd69f8cfc6b79d099f3bbf41f88039dc7445",
"size": "11140",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/functional/wallet_listtransactions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28173"
},
{
"name": "C",
"bytes": "1064604"
},
{
"name": "C++",
"bytes": "8101614"
},
{
"name": "CMake",
"bytes": "28560"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "M4",
"bytes": "215256"
},
{
"name": "Makefile",
"bytes": "117017"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "2237402"
},
{
"name": "QMake",
"bytes": "798"
},
{
"name": "Sage",
"bytes": "35184"
},
{
"name": "Scheme",
"bytes": "7554"
},
{
"name": "Shell",
"bytes": "153769"
}
],
"symlink_target": ""
} |
from tests import TestBase, db_config, app_config
from tests import dbhelper
from test_models.core import Partner, Customer, User, Language
from hoops.utils import OutputFormat
from hoops import create_api
class ModelsTestBase(TestBase):
    """Base class for model tests: builds the hoops app and seeds fixtures."""

    def populate_helper(self, lang=True, partner='test', customers=('test',),
                        users=('test',)):
        """Create and persist the standard fixture graph.

        :param lang: when truthy, create an English Language record
        :param partner: partner name, or falsy to skip partner creation
        :param customers: iterable of customer identifiers to create
        :param users: iterable of user identifiers, all attached to the
            first customer
        :return: dict with keys lang, partner, customers, users

        Note: the defaults are tuples rather than lists to avoid the shared
        mutable-default-argument pitfall; callers may still pass lists.
        """
        if lang:
            lang = self._add(Language(lang='en', name='English'))
        if partner:
            partner = self._add(Partner(language=lang,
                                        output_format=OutputFormat.JSON,
                                        name=partner))
        customers = [self._add(Customer(partner=partner, name=ident,
                                        my_identifier=ident))
                     for ident in customers]
        users = [self._add(User(partner=partner, customer=customers[0],
                                my_identifier=ident))
                 for ident in users]
        return dict(lang=lang, partner=partner, customers=customers,
                    users=users)

    def _add(self, obj):
        """Persist *obj* through the shared dbhelper and return it."""
        return dbhelper.add(obj, self.db)

    @classmethod
    def get_app(cls):
        """Create the hoops API app (binding cls.db) for the test run."""
        app, cls.db = create_api(db_config=db_config,
                                 app_config=app_config,
                                 app_name='hoops',
                                 flask_conf={'DEBUG': True,
                                             'ENVIRONMENT_NAME': 'test'})
        return app
| {
"content_hash": "fe91941900a2c13e3dbfb66c3dc9be22",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 114,
"avg_line_length": 42.5,
"alnum_prop": 0.6086274509803922,
"repo_name": "jfillmore/hoops",
"id": "2e522d87a2c744c96cb4ba2fa2ebbc82bac53077",
"size": "1275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/models_tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "223116"
},
{
"name": "Shell",
"bytes": "1051"
}
],
"symlink_target": ""
} |
import unittest
import warnings
from twilio.base.obsolete import deprecated_method
class DeprecatedMethodTest(unittest.TestCase):
    """Unit tests for the @deprecated_method decorator."""

    def test_deprecation_decorator(self):
        @deprecated_method
        def old_method():
            return True

        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter("always")
            # The decorated function must still execute and return its value
            # while emitting a DeprecationWarning.
            self.assertTrue(old_method())
            self.assertTrue(len(recorded))
            self.assertEqual(
                str(recorded[0].message),
                'Function method .old_method() is deprecated'
            )
            assert issubclass(recorded[0].category, DeprecationWarning)

    def test_deprecation_decorator_with_new_method(self):
        @deprecated_method('new_method')
        def old_method():
            return True

        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter("always")
            # Same behavior, but the warning names the replacement method.
            self.assertTrue(old_method())
            self.assertTrue(len(recorded))
            self.assertEqual(
                str(recorded[0].message),
                'Function method .old_method() is deprecated in favor of .new_method()'
            )
            assert issubclass(recorded[0].category, DeprecationWarning)
| {
"content_hash": "784e5ddbc0be4b274abb0c4519ced1f8",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 87,
"avg_line_length": 34.857142857142854,
"alnum_prop": 0.6263661202185792,
"repo_name": "twilio/twilio-python",
"id": "96927e13fb95cd25c5cc180f7fdaf02f53b274df",
"size": "1464",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/base/test_deprecation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
} |
import ldap
import ldap.modlist
from keystone import exception
from oslo_log import log as logging
from oslo_config import cfg
# Logger namespaced under "nova." to match the legacy nova logging hierarchy.
LOG = logging.getLogger('nova.%s' % __name__)
def _getLdapInfo(attr, conffile="/etc/ldap.conf"):
    """Return the value for *attr* from an ldap.conf-style file.

    :param attr: configuration keyword to look up (case-insensitive)
    :param conffile: path of the file to read; when the default path cannot
        be opened, falls back to /etc/ldap/ldap.conf
    :return: the remainder of the first matching line, whitespace-stripped,
        or None when the keyword is absent
    :raises IOError: when a non-default *conffile* cannot be opened

    Fixes vs. the original: the unreachable `break` after `return` is
    removed, the file handle is closed via a context manager, and a missing
    non-default conffile now raises IOError instead of a confusing
    NameError from an unbound file variable.
    """
    try:
        f = open(conffile)
    except IOError:
        if conffile != "/etc/ldap.conf":
            raise
        # fallback to /etc/ldap/ldap.conf, which will likely
        # have less information
        f = open("/etc/ldap/ldap.conf")
    with f:
        for line in f:
            if line.strip() == "":
                continue
            if line.split()[0].lower() == attr.lower():
                # Everything after the keyword is the value.
                return line.split(None, 1)[1].strip()
    return None
def _open_ldap():
    """Open and bind an LDAP connection.

    The server URI and TLS setting come from ldap.conf; the bind credentials
    come from oslo config. Returns the bound connection, or None when the
    bind fails (the specific failure is logged at debug level).
    """
    server_uri = _getLdapInfo("uri")
    ssl_type = _getLdapInfo("ssl")

    ds = ldap.initialize(server_uri)
    ds.protocol_version = ldap.VERSION3
    if ssl_type == "start_tls":
        ds.start_tls_s()

    binddn = cfg.CONF.ldap.user
    bindpw = cfg.CONF.ldap.password
    try:
        ds.simple_bind_s(binddn, bindpw)
        return ds
    except ldap.CONSTRAINT_VIOLATION:
        LOG.debug("LDAP bind failure: Too many failed attempts.\n")
    except ldap.INVALID_DN_SYNTAX:
        LOG.debug("LDAP bind failure: The bind DN is incorrect... \n")
    except ldap.NO_SUCH_OBJECT:
        LOG.debug("LDAP bind failure: "
                  "Unable to locate the bind DN account.\n")
    except ldap.UNWILLING_TO_PERFORM as msg:
        LOG.debug("LDAP bind failure: "
                  "The LDAP server was unwilling to perform the action"
                  " requested.\nError was: %s\n" % msg[0]["info"])
    except ldap.INVALID_CREDENTIALS:
        LOG.debug("LDAP bind failure: Password incorrect.\n")
    return None
def _all_groups(ds):
    """Return every entry directly under the configured group base DN.

    ds is presumed to be an already-open ldap connection.
    """
    group_base = cfg.CONF.wmfhooks.ldap_group_base_dn
    return ds.search_s(group_base, ldap.SCOPE_ONELEVEL)
def _get_next_gid_number(ds):
    """Return one greater than the highest gidNumber currently in use.

    ds is presumed to be an already-open ldap connection. Never returns
    less than minimum_gid_number + 1.
    """
    floor = cfg.CONF.wmfhooks.minimum_gid_number
    gids = [int(attrs['gidNumber'][0])
            for _dn, attrs in _all_groups(ds)
            if 'gidNumber' in attrs]
    # Fixme: Check against a hard max gid number limit?
    return max(gids + [floor]) + 1
def _get_ldap_group(ds, groupname):
    """Look up a group entry by cn under the group base DN.

    ds should be an already-open ldap connection.
    groupname is the name of the group, probably project-<projectname>.

    Returns the search result, or None when the lookup fails.
    """
    dn = "cn=%s,%s" % (groupname, cfg.CONF.wmfhooks.ldap_group_base_dn)
    try:
        return ds.search_s(dn, ldap.SCOPE_BASE)
    except ldap.LDAPError:
        return None
def delete_ldap_project_group(project_id):
    """Delete a project's posix group and its entire ldap subtree.

    :param project_id: keystone project id
    :raises exception.ValidationError: when the ldap connection cannot be
        opened; individual delete failures are logged and skipped
    """
    basedn = cfg.CONF.wmfhooks.ldap_group_base_dn
    groupname = "project-%s" % project_id.encode('utf-8')
    dn = "cn=%s,%s" % (groupname, basedn)
    ds = _open_ldap()
    if not ds:
        LOG.error("Failed to connect to ldap; Leak a project group.")
        raise exception.ValidationError()
    try:
        ds.delete_s(dn)
    except ldap.LDAPError as e:
        LOG.warning("Failed to delete %s from ldap: %s" % (dn, e))

    # delete everything under the project subtree
    basedn = cfg.CONF.wmfhooks.ldap_project_base_dn
    projectbase = "cn=%s,%s" % (project_id, basedn)
    search = ds.search_s(projectbase, ldap.SCOPE_SUBTREE)
    # Reverse the subtree search order so leaf entries are deleted before
    # their parents.
    delete_list = [record for record, _ in search]
    delete_list.reverse()
    for record in delete_list:
        try:
            ds.delete_s(record)
        except ldap.LDAPError as e:
            # Bug fix: the original format string had only one %s for two
            # arguments, raising TypeError inside this handler.
            LOG.warning("Failed to delete %s from ldap: %s" % (record, e))
def sync_ldap_project_group(project_id, keystone_assignments):
    """Mirror keystone project membership into the project's ldap posix group.

    Rewrites the member list of an existing group to match keystone, or
    creates the group (allocating a fresh gidNumber) when it does not exist.

    :param project_id: keystone project id
    :param keystone_assignments: mapping of role name -> list of user names
    :raises exception.ValidationError: when the ldap connection fails
    """
    groupname = "project-%s" % project_id.encode('utf-8')
    LOG.info("Syncing keystone project membership with ldap group %s"
             % groupname)
    ds = _open_ldap()
    if not ds:
        LOG.error("Failed to connect to ldap; cannot set up new project.")
        raise exception.ValidationError()

    # Union of the members across every keystone role.
    allusers = set()
    for key in keystone_assignments:
        allusers |= set(keystone_assignments[key])
    # novaobserver is excluded from the posix group by design.
    if 'novaobserver' in allusers:
        allusers.remove('novaobserver')
    basedn = cfg.CONF.wmfhooks.ldap_user_base_dn
    members = ["uid=%s,%s" % (user.encode('utf-8'), basedn)
               for user in allusers]
    basedn = cfg.CONF.wmfhooks.ldap_group_base_dn
    dn = "cn=%s,%s" % (groupname, basedn)
    existingEntry = _get_ldap_group(ds, groupname)
    if existingEntry:
        # We're modifying an existing group: compute a minimal modlist from
        # the old entry and only touch ldap when something changed.
        oldEntry = existingEntry[0][1]
        newEntry = oldEntry.copy()
        newEntry['member'] = members
        modlist = ldap.modlist.modifyModlist(oldEntry, newEntry)
        if modlist:
            ds.modify_s(dn, modlist)
    else:
        # We're creating a new group from scratch.
        # There is a potential race between _get_next_gid_number()
        # and ds.add_s, so we make a few attempts with a freshly
        # allocated gid each time.
        groupEntry = {}
        groupEntry['member'] = members
        groupEntry['objectClass'] = ['groupOfNames', 'posixGroup', 'top']
        groupEntry['cn'] = [groupname]
        for i in range(0, 4):
            groupEntry['gidNumber'] = [str(_get_next_gid_number(ds))]
            modlist = ldap.modlist.addModlist(groupEntry)
            try:
                ds.add_s(dn, modlist)
                break
            except ldap.LDAPError:
                LOG.warning("Failed to create group, attempt number %s: %s" %
                            (i, modlist))
def create_sudo_defaults(project_id):
    """Create the default sudo policy entries in ldap for a new project.

    Adds the project container, an ou=sudoers subtree beneath it, and two
    sudo rules: default-sudo (project members may run any command without a
    password) and default-sudo-as (members may also run commands as any
    other project member).

    :param project_id: keystone project id
    :raises exception.ValidationError: when the ldap connection fails;
        individual add failures are logged and skipped
    """
    ds = _open_ldap()
    if not ds:
        LOG.error("Failed to connect to ldap; Unable to create sudo rules.")
        raise exception.ValidationError()
    userbasedn = cfg.CONF.wmfhooks.ldap_user_base_dn
    basedn = cfg.CONF.wmfhooks.ldap_project_base_dn
    projectbase = "cn=%s,%s" % (project_id, basedn)

    # We may or may not already have one of these... if it fails just move on.
    projectEntry = {}
    projectEntry['objectClass'] = ['extensibleobject', 'groupofnames', 'top']
    projectEntry['member'] = ["uid=%s,%s" % (cfg.CONF.wmfhooks.admin_user, userbasedn)]
    modlist = ldap.modlist.addModlist(projectEntry)
    try:
        ds.add_s(projectbase, modlist)
    except ldap.LDAPError as e:
        LOG.warning("Failed to create project base %s in ldap: %s" % (projectbase, e))

    sudoerbase = "ou=sudoers,%s" % projectbase
    sudoEntry = {}
    sudoEntry['objectClass'] = ['organizationalunit', 'top']
    modlist = ldap.modlist.addModlist(sudoEntry)
    try:
        ds.add_s(sudoerbase, modlist)
    except ldap.LDAPError as e:
        LOG.warning("Failed to create base sudoer group: %s" % e)

    # Rule 1: members may run anything, passwordless.
    sudoEntry = {}
    defaultdn = "cn=default-sudo,%s" % sudoerbase
    sudoEntry['objectClass'] = ['sudoRole']
    sudoEntry['sudoUser'] = ['%%project-%s' % project_id.encode('utf8')]
    sudoEntry['sudoCommand'] = ['ALL']
    sudoEntry['sudoOption'] = ['!authenticate']
    sudoEntry['sudoHost'] = ['ALL']
    sudoEntry['cn'] = ['default-sudo']
    modlist = ldap.modlist.addModlist(sudoEntry)
    try:
        ds.add_s(defaultdn, modlist)
    except ldap.LDAPError as e:
        LOG.warning("Failed to create default sudoer entry: %s" % e)

    defaultasdn = "cn=default-sudo-as,%s" % sudoerbase
    # The runas entry is the same as the default entry, plus one field
    # (note: sudoEntry is intentionally reused and mutated here).
    sudoEntry['sudoRunAsUser'] = ["%%project-%s" % project_id.encode('utf8')]
    sudoEntry['cn'] = ['default-sudo-as']
    modlist = ldap.modlist.addModlist(sudoEntry)
    try:
        ds.add_s(defaultasdn, modlist)
    except ldap.LDAPError as e:
        LOG.warning("Failed to create default sudo-as entry: %s" % e)
| {
"content_hash": "adad2c66cc0f94b81ee207283a81c3f9",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 87,
"avg_line_length": 34.65625,
"alnum_prop": 0.6205075357464898,
"repo_name": "wikimedia/mediawiki-vagrant",
"id": "e77067d08f53b512879f4ee8f1c409d8ea7c5b9f",
"size": "8420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "puppet/modules/role/files/keystone/wmfkeystonehooks/ldapgroups.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "428"
},
{
"name": "Gherkin",
"bytes": "11020"
},
{
"name": "HTML",
"bytes": "305937"
},
{
"name": "JavaScript",
"bytes": "1514"
},
{
"name": "Lua",
"bytes": "17271"
},
{
"name": "PHP",
"bytes": "4731"
},
{
"name": "Pascal",
"bytes": "2494"
},
{
"name": "Puppet",
"bytes": "524369"
},
{
"name": "Python",
"bytes": "67879"
},
{
"name": "Ruby",
"bytes": "835340"
},
{
"name": "Shell",
"bytes": "33494"
},
{
"name": "Vim Script",
"bytes": "25"
}
],
"symlink_target": ""
} |
import os
import flask_sqlalchemy as fsa
import pytest
from flask import Flask
from flask.testing import FlaskClient
from flask_resty.testing import ApiClient
# -----------------------------------------------------------------------------
@pytest.fixture
def app():
    """Provide a fresh Flask application in testing mode."""
    application = Flask(__name__)
    application.testing = True
    return application
@pytest.fixture
def db(app):
    """SQLAlchemy instance bound to DATABASE_URL (in-memory sqlite default)."""
    database_uri = os.environ.get("DATABASE_URL", "sqlite://")
    app.config["SQLALCHEMY_DATABASE_URI"] = database_uri
    # TODO: Remove once this is the default.
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
    return fsa.SQLAlchemy(app)
@pytest.fixture
def client(app):
    """Test client using flask-resty's ApiClient conveniences."""
    app.test_client_class = ApiClient
    test_client = app.test_client()
    return test_client
@pytest.fixture
def base_client(app):
    """Plain Flask test client, without the ApiClient conveniences."""
    app.test_client_class = FlaskClient
    test_client = app.test_client()
    return test_client
| {
"content_hash": "64606c33cc2cb998cac42d9e2db9c01e",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 79,
"avg_line_length": 19.853658536585368,
"alnum_prop": 0.6277641277641277,
"repo_name": "taion/flask-jsonapiview",
"id": "d8d52e0db6c1e93e775b8b9b50555cd30b6faac6",
"size": "814",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111164"
}
],
"symlink_target": ""
} |
from dateutil.parser import parse
import os
import json
"""
We must prevent CashupUrlGenerator from being imported when we only want to
generated documentation of the classes in this file.
"""
if not os.environ.get('DOC'):
from kounta.cashup import CashupUrlGenerator
class BaseObject:
    """
    Used as the parent for all objects returned from the API. Its main purpose
    is to allow documentation to be built into the object instead of using
    plain `dict`s.
    """
    def __init__(self, obj, client, company):
        """
        :type company: Company|None
        :type client: kounta.client.BasicClient
        :type obj: dict
        """
        assert isinstance(company, Company) or company is None
        self.obj = obj
        self._client = client
        self._company = company

    def __getattr__(self, item):
        """
        Returns an attribute as it was originally set in the raw object.

        Bug fix: a missing key now raises AttributeError (as the attribute
        protocol requires) instead of leaking KeyError, so hasattr() and
        getattr() with a default behave correctly.

        :type item: str
        """
        try:
            return self.obj[item]
        except KeyError:
            raise AttributeError(item)

    def __str__(self):
        """
        When converting any API object to a string the original JSON fetched
        will be returned.

        It is important to recognise that this JSON may not represent the
        actual state of the object behind it because some calls may make
        further API requests.
        """
        return json.dumps(self.obj)

    def _make_address(self, field):
        """
        Test if a `field` is not None and return an Address object. Otherwise
        return None.

        :type field: str
        """
        address = self.obj[field]
        if address:
            return Address(address, self._client, self._company)
        return None

    def _get_addresses(self, url):
        """
        Fetch addresses for this object's company from a company-relative URL.

        :return: Address[]
        """
        url = '/v1/companies/%d/%s' % (self._company.id, url)
        addresses = self._client.get_url(url)
        return [Address(address, self._client, self._company) for address in
                addresses]

    def _get_cashups(self, url, **kwargs):
        """
        Fetch cashups below `url`, filtered per CashupUrlGenerator kwargs.

        :return: Cashup[]
        """
        generator = CashupUrlGenerator()
        url = '%s/%s' % (url, generator.get_url(**kwargs))
        cashups = self._client.get_url(url)
        return [Cashup(cashup, self._client, self._company) for cashup
                in cashups]

    def _get_categories(self, url):
        """
        Fetch categories from `url`.

        :return: Category[]
        """
        categories = self._client.get_url(url)
        return [Category(category, self._client, self._company) for category in
                categories]
class Address(BaseObject):
    """
    Addresses are physical or postal locations belonging to a staff member,
    customer, company or site.
    """

    @property
    def id(self):
        """The unique address ID.

        :return: int
        """
        return self.obj["id"]

    @property
    def city(self):
        """The city or suburb of the address.

        :return: str
        """
        return self.obj["city"]

    @property
    def lines(self):
        """The free-form street address lines.

        :return: str[]
        """
        return self.obj["lines"]

    @property
    def zone(self):
        """The zone or state.

        :return: str
        """
        return self.obj["zone"]

    @property
    def postal_code(self):
        """The postal (zip) code.

        :return: str
        """
        return self.obj["postal_code"]

    @property
    def country(self):
        """The country.

        :return: str
        """
        return self.obj["country"]
class Company(BaseObject):
    """
    Companies are businesses who use Kounta at their points of sale. A company
    may have one or more registers running Kounta on one or more sites.
    """

    @property
    def id(self):
        """
        Company ID.
        :return: int
        """
        return self.obj['id']

    @property
    def name(self):
        """
        Company name.
        :return: str
        """
        return self.obj['name']

    @property
    def shipping_address(self):
        """
        Shipping address, or None when not set.
        :return: Address
        """
        return self._make_address('shipping_address')

    @property
    def postal_address(self):
        """
        Postal address, or None when not set.
        :return: Address
        """
        return self._make_address('postal_address')

    @property
    def addresses(self):
        """
        All addresses attached to this company (fetched via the API).
        :return: Address[]
        """
        url = '/v1/companies/%d/addresses.json' % self.id
        addresses = self._client.get_url(url)
        return [Address(address, self._client, self) for address in addresses]

    @property
    def business_number(self):
        """
        ABN, ACN or whatever is applicable as the business number.
        :return: str
        """
        return self.obj['business_number']

    @property
    def contact_staff_member(self):
        """
        Contact staff member.
        :return: Staff
        """
        return Staff(self.obj['contact_staff_member'], self._client, self)

    @property
    def image(self):
        """
        Avatar image.
        :return: str
        """
        return self.obj['image']

    @property
    def website(self):
        """
        Website.
        :return: str
        """
        return self.obj['website']

    @property
    def currency(self):
        """
        Currency code.
        :return: str
        """
        return self.obj['currency']

    @property
    def timezone(self):
        """
        Timezone information.
        :return: Timezone
        """
        return Timezone(self.obj['timezone'], self._client, self)

    @property
    def sites(self):
        """
        Fetch all sites for this company.
        :return: Site[]
        """
        sites = self._client.get_url('/v1/companies/%d/sites.json' % self.id)
        return [Site(site, self._client, self) for site in sites]

    @property
    def registers(self):
        """
        Fetch all registers for this company.
        :return: Register[]
        """
        url = '/v1/companies/%d/registers.json' % self.id
        registers = self._client.get_url(url)
        # Consistency fix: pass this company as the owning company, as the
        # `sites` and `addresses` properties do. The previous code passed
        # self._company, which is typically None for a Company object.
        return [Register(register, self._client, self) for register in
                registers]

    @property
    def created_at(self):
        """
        When the company was created.
        :return: datetime
        """
        return parse(self.obj['created_at'])

    @property
    def updated_at(self):
        """
        When the company was last modified.
        :return: datetime
        """
        return parse(self.obj['updated_at'])

    def cashups(self, **kwargs):
        """
        Fetch cashups for a company. Refer to documentation for Cashups for
        more information.
        :rtype: Cashup[]
        """
        return self._get_cashups('/v1/companies/%d' % self.id, **kwargs)

    @property
    def categories(self):
        """
        All categories for this company.
        :rtype: Category[]
        """
        url = '/v1/companies/%d/categories.json' % self.id
        return self._get_categories(url)
class Permission(BaseObject):
    """A single permission entry granted to a staff member."""

    @property
    def code(self):
        """Machine-readable permission code.

        :rtype: str
        """
        return self.obj["code"]

    @property
    def name(self):
        """Human-readable permission name.

        :rtype: str
        """
        return self.obj["name"]

    @property
    def domain(self):
        """The domain this permission applies to.

        :rtype: str
        """
        return self.obj["domain"]
class Timezone(BaseObject):
    """
    A timezone represents a time offset at a geographical location.
    """

    @property
    def offset(self):
        """The UTC offset for this timezone.

        :rtype: str
        """
        return self.obj["offset"]

    @property
    def name(self):
        """The timezone's name.

        :rtype: str
        """
        return self.obj["name"]
class Staff(BaseObject):
    """
    Staff members are people who work for the authenticated company.
    """

    @property
    def id(self):
        """Staff member ID.

        :rtype: int
        """
        return self.obj['id']

    @property
    def first_name(self):
        """Given name.

        :rtype: str
        """
        return self.obj['first_name']

    @property
    def last_name(self):
        """Family name.

        :rtype: str
        """
        return self.obj['last_name']

    @property
    def is_admin(self):
        """Whether this staff member has administrator rights.

        :rtype: bool
        """
        return self.obj['is_admin']

    @property
    def primary_email_address(self):
        """The primary email address.

        :rtype: str
        """
        return self.obj['primary_email_address']

    @property
    def email_addresses(self):
        """All known email addresses.

        :rtype: str[]
        """
        return self.obj['email_addresses']

    @property
    def phone(self):
        """Landline phone number.

        :rtype: str
        """
        return self.obj['phone']

    @property
    def mobile(self):
        """Mobile phone number.

        :rtype: str
        """
        return self.obj['mobile']

    @property
    def fax(self):
        """Fax number.

        :rtype: str
        """
        return self.obj['fax']

    @property
    def shipping_address(self):
        """Shipping address, or None when not set.

        :return: Address
        """
        return self._make_address('shipping_address')

    @property
    def postal_address(self):
        """Postal address, or None when not set.

        :return: Address
        """
        return self._make_address('postal_address')

    @property
    def permissions(self):
        """Permissions granted to this staff member.

        :return: Permission[]
        """
        return [Permission(p, self._client, self._company) for p in
                self.obj['permissions']]

    @property
    def image(self):
        """Avatar image.

        :rtype: str
        """
        return self.obj['image']

    @property
    def created_at(self):
        """When the staff member was created.

        :rtype: datetime
        """
        return parse(self.obj['created_at'])

    @property
    def updated_at(self):
        """When the staff member was last modified.

        :rtype: datetime
        """
        return parse(self.obj['updated_at'])

    @property
    def addresses(self):
        """
        All addresses attached to this staff member (fetched via the API).

        :return: Address[]
        """
        return self._get_addresses('staff/%d/addresses.json' % self.id)
class Site(BaseObject):
"""
Sites are physical locations, such as outlets, offices etc, at which one or
more Kountas will be used.
"""
@property
def id(self):
"""
:return: int
"""
return self.obj['id']
@property
def name(self):
"""
:return: str
"""
return self.obj['name']
@property
def code(self):
"""
:return: str
"""
return self.obj['code']
@property
def contact_person(self):
"""
:return: Staff
"""
return Staff(self.obj['contact_person'], self._client, self._company)
@property
def business_number(self):
"""
:return: str
"""
return self.obj['business_number']
@property
def shipping_address(self):
"""
:return: Address
"""
return self._make_address('shipping_address')
@property
def postal_address(self):
"""
:return: Address
"""
return self._make_address('postal_address')
@property
def email(self):
"""
:return: str
"""
return self.obj['email']
@property
def mobile(self):
"""
:return: str
"""
return self.obj['mobile']
@property
def phone(self):
"""
:return: str
"""
return self.obj['phone']
@property
def fax(self):
"""
:return: str
"""
return self.obj['fax']
@property
def location(self):
"""
:return: Location
"""
return Location(self.obj['location'], self._client, self._company)
@property
def image(self):
"""
:return: str
"""
return self.obj['image']
@property
def website(self):
"""
:return: str
"""
return self.obj['website']
@property
def register_level_reconciliation(self):
"""
:return: boolean
"""
return self.obj['register_level_reconciliation']
@property
def price_list(self):
"""
:return: PriceList
"""
return PriceList(self.obj['price_list'], self._client, self._company)
@property
def created_at(self):
"""
:return: datetime
"""
return parse(self.obj['created_at'])
@property
def updated_at(self):
"""
:return: datetime
"""
return parse(self.obj['updated_at'])
@property
def addresses(self):
"""
All addresses attached to this site.
:return: Address[]
"""
return self._get_addresses('sites/%d/addresses.json' % self.id)
def cashups(self, **kwargs):
"""
Fetch cashups for a register. Refer to documentation for Cashups for
more information.
:rtype : Cashup[]
"""
url = '/v1/companies/%d/sites/%d' % (self._company.id, self.id)
return self._get_cashups(url, **kwargs)
@property
def categories(self):
"""
All categories for this site.
:rtype : Category[]
"""
url = '/v1/companies/%d/sites/%d/categories.json' % (self._company.id,
self.id)
return self._get_categories(url)
@property
def checkins(self):
"""
All checkins for this site.
:rtype : Checkin[]
"""
url = '/v1/companies/%d/sites/%d/checkins.json' % (self._company.id, self.id)
checkins = self._client.get_url(url)
return [Checkin(checkin, self._client, self._company) for checkin in
checkins]
class Category(BaseObject):
"""
Each product will belong to one or more categories.
"""
@property
def id(self):
"""
:return: int
"""
return self.obj['id']
@property
def name(self):
"""
:return: int
"""
return self.obj['name']
@property
def description(self):
"""
:return: str
"""
return self.obj['description']
@property
def image(self):
"""
:return: str
"""
return self.obj['image']
class Product(BaseObject):
"""
Products are saleable items in your inventory, including modifier products.
"""
@property
def id(self):
"""
:return: int
"""
return self.obj['id']
@property
def name(self):
"""
:return: int
"""
return self.obj['name']
@property
def description(self):
"""
:return: str
"""
return self.obj['description']
@property
def code(self):
"""
:return: str
"""
return self.obj['code']
@property
def barcode(self):
"""
:return: str
"""
return self.obj['barcode']
@property
def categories(self):
"""
All categories for this product.
:rtype : Category[]
"""
url = '/v1/companies/%d/products/%d/categories.json' % \
(self._company.id, self.id)
return self._get_categories(url)
class Checkin(BaseObject):
"""
Authenticated customers can use checkin service.
"""
@property
def customer_id(self):
"""
:return: int
"""
return self.obj['customer_id']
@property
def start_time(self):
"""
:return: datetime
"""
return parse(self.obj['start_time'])
@property
def duration(self):
"""
:return: int
"""
return self.obj['duration']
class Customer(BaseObject):
"""
Customers are people who buy from the authenticated company.
"""
@property
def id(self):
"""
:return: int
"""
return self.obj['id']
@property
def first_name(self):
"""
:return: str
"""
return self.obj['first_name']
@property
def last_name(self):
"""
:return: str
"""
return self.obj['last_name']
@property
def primary_email_address(self):
"""
:return: str
"""
return self.obj['primary_email_address']
@property
def image(self):
"""
:return: str
"""
return self.obj['image']
@property
def reference_id(self):
"""
:return: str
"""
return self.obj['reference_id']
@property
def addresses(self):
"""
All addresses attached to this customer.
:return: Address[]
"""
return self._get_addresses('customer/%d/addresses.json' % self.id)
class Inventory(BaseObject):
"""
Inventory indicates the quantity for a given product.
"""
@property
def id(self):
"""
:return: int
"""
return self.obj['id']
@property
def stock(self):
"""
:return: int
"""
return self.obj['stock']
class Line(BaseObject):
"""
Lines (also called order lines, sale lines or line items) describe the
products included in an order.
"""
@property
def number(self):
"""
The line number. This will start with `1`.
:return: int
"""
return self.obj['number']
@property
def product_id(self):
"""
:return: int
"""
return self.obj['product_id']
@property
def quantity(self):
"""
:return: int
"""
return self.obj['quantity']
@property
def notes(self):
"""
:return: str
"""
return self.obj['notes']
@property
def unit_price(self):
"""
:return: float
"""
return self.obj['unit_price']
@property
def price_variation(self):
"""
:return: float
"""
return self.obj['price_variation']
@property
def modifiers(self):
"""
:return: int[]
"""
return self.obj['modifiers']
class Order(BaseObject):
"""
Orders are also sometimes called sales or invoices.
"""
@property
def id(self):
"""
:return: int
"""
return self.obj['id']
@property
def status(self):
"""
:return: str
"""
return self.obj['status']
@property
def total(self):
"""
:return: float
"""
return self.obj['total']
@property
def total_tax(self):
"""
:return: float
"""
return self.obj['total_tax']
@property
def paid(self):
"""
:return: float
"""
return self.obj['paid']
@property
def created_at(self):
"""
:return: datetime
"""
return parse(self.obj['created_at'])
@property
def updated_at(self):
"""
:return: datetime
"""
return parse(self.obj['updated_at'])
class PaymentMethod(BaseObject):
"""
Payment methods are assigned to order payments.
"""
@property
def id(self):
"""
:return: int
"""
return self.obj['id']
@property
def name(self):
"""
:return: str
"""
return self.obj['name']
@property
def ledger_code(self):
"""
:return: str
"""
return self.obj['ledger_code']
class Payment(BaseObject):
"""
Payments (also called transactions) are financial transactions related to an
order.
"""
@property
def method_id(self):
"""
:return: int
"""
return self.obj['method_id']
@property
def amount(self):
"""
:return: float
"""
return self.obj['amount']
@property
def ref(self):
"""
:return: str
"""
return self.obj['ref']
class PriceList(BaseObject):
"""
Each site will be assigned a price list that determines ex tax unit prices
of each item on sale.
Price lists work by overriding prices in their parent lists (just like
subclassing in object-oriented programming). The base price list has a
parent_id of null.
"""
@property
def id(self):
"""
:return: int
"""
return self.obj['id']
@property
def name(self):
"""
:return: str
"""
return self.obj['name']
@property
def parent_id(self):
"""
:return: int
"""
return self.obj['parent_id']
class Register(BaseObject):
"""
Registers are iPads or other computers running Kounta.
"""
@property
def id(self):
"""
:return: int
"""
return self.obj['id']
@property
def code(self):
"""
:return: str
"""
return self.obj['code']
@property
def name(self):
"""
:return: str
"""
return self.obj['name']
@property
def site_id(self):
"""
:return: int
"""
return self.obj['site_id']
def cashups(self, **kwargs):
"""
Fetch cashups for a register. Refer to documentation for Cashups for
more information.
:rtype : Cashup[]
"""
url = '/v1/companies/%d/registers/%d' % (self._company.id, self.id)
return self._get_cashups(url, **kwargs)
class ShiftPeriod(BaseObject):
"""
Represents a block of time when dealing with `Shift`s.
"""
@property
def started_at(self):
"""
:return: datetime
"""
return parse(self.obj['started_at'])
@property
def finished_at(self):
"""
:return: datetime
"""
return parse(self.obj['finished_at'])
@property
def period(self):
"""
The timedelta between the start anf finish time.
:return: timedelta
"""
return self.started_at - self.finished_at
class Shift(ShiftPeriod):
"""
Shifts record staff check-ins, check-outs and breaks.
"""
@property
def staff_member(self):
"""
:return: Staff
"""
return Staff(self.obj['staff_member'], self._client, self._company)
@property
def site(self):
"""
:return: Site
"""
return Site(self.obj['site'], self._client, self._company)
@property
def breaks(self):
"""
:return: Shift[]
"""
return [Shift(shift, self._client, self._company) for shift in
self.obj['breaks']]
class Location(BaseObject):
"""
A geographical location with a latitude and longitude.
"""
@property
def latitude(self):
"""
:return: float
"""
return self.obj['latitude']
@property
def longitude(self):
"""
:return: float
"""
return self.obj['longitude']
class Tax(BaseObject):
"""
Each product can be assigned one or more tax, defined as a code, name, and
rate.
"""
@property
def id(self):
"""
:return: int
"""
return self.obj['id']
@property
def code(self):
"""
:return: str
"""
return self.obj['code']
@property
def name(self):
"""
:return: str
"""
return self.obj['name']
@property
def rate(self):
"""
:return: float
"""
return self.obj['rate']
class IncomeAccountAmount(BaseObject):
"""
An amount for a given tax type.
"""
@property
def tax_id(self):
"""
:return: int
"""
return self.obj['tax_id']
@property
def net(self):
"""
:return: float
"""
return self.obj['net']
@property
def tax(self):
"""
:return: float
"""
return self.obj['tax']
class Takings(BaseObject):
"""
Daily takings.
"""
@property
def recorded(self):
"""
:return: float
"""
return self.obj['recorded']
@property
def counted(self):
"""
:return: float
"""
return self.obj['counted']
class Adjustments(BaseObject):
"""
Adjustments to a reconciliation.
"""
@property
def cash_in(self):
"""
:return: float
"""
return self.obj['cash_in']
@property
def cash_out(self):
"""
:return: float
"""
return self.obj['cash_out']
class IncomeAccount(BaseObject):
"""
Income account.
"""
@property
def ledger_code(self):
"""
:return: string
"""
return self.obj['ledger_code']
@property
def amounts(self):
"""
:return: IncomeAccountAmount[]
"""
return [IncomeAccountAmount(amount, self._client, self._company)
for amount in self.obj['amounts']]
class Reconciliation(BaseObject):
"""
End-of-day reconciliation.
"""
@property
def payment_method(self):
"""
:return: PaymentMethod
"""
return PaymentMethod(self.obj['payment_method'], self._client,
self._company)
@property
def takings(self):
"""
:return: Takings
"""
return Takings(self.obj['takings'], self._client, self._company)
@property
def adjustments(self):
"""
:return: Adjustments
"""
return Adjustments(self.obj['adjustments'], self._client, self._company)
class Cashup(BaseObject):
"""
Cash-ups are end-of-day cash reconcilliations.
"""
@property
def id(self):
"""
:return: int
"""
return self.obj['id']
@property
def number(self):
"""
:return: int
"""
return self.obj['number']
@property
def processed(self):
"""
:return: boolean
"""
return self.obj['processed']
@property
def register_level_reconciliation(self):
"""
:return: boolean
"""
return self.obj['register_level_reconciliation']
@property
def register(self):
"""
:return: Register
"""
return Register(self.obj['register'], self._client, self._company)
@property
def site(self):
"""
:return: Site
"""
return Site(self.obj['site'], self._client, self._company)
@property
def staff_member(self):
"""
:return: Staff
"""
return Staff(self.obj['staff_member'], self._client, self._company)
@property
def income_accounts(self):
"""
:return: IncomeAccount[]
"""
return [IncomeAccount(account, self._client, self._company) for account
in self.obj['income_accounts']]
@property
def reconciliations(self):
"""
:return: Reconciliation[]
"""
return [Reconciliation(account, self._client, self._company) for account
in self.obj['reconciliations']]
@property
def created_at(self):
"""
:return: datetime
"""
return parse(self.obj['created_at'])
| {
"content_hash": "63a4f3a37fb7ae81ed1df4dc1528f5e2",
"timestamp": "",
"source": "github",
"line_count": 1361,
"max_line_length": 85,
"avg_line_length": 20.351212343864805,
"alnum_prop": 0.504837894432811,
"repo_name": "elliotchance/kounta-python",
"id": "3376d853a7593554d82729f89f0df0842097e813",
"size": "27698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kounta/objects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64429"
}
],
"symlink_target": ""
} |
import unittest
from . import test_ajax_jsf
from . import test_default_server
from . import test_google
from . import test_i18n
import sys
def suite():
return unittest.TestSuite((\
unittest.makeSuite(test_i18n.TestI18n),
))
if __name__ == "__main__":
result = unittest.TextTestRunner(verbosity=2).run(suite())
sys.exit(not result.wasSuccessful())
| {
"content_hash": "955963302b4c3c4f8767b968ec967451",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 62,
"avg_line_length": 25.133333333333333,
"alnum_prop": 0.6843501326259946,
"repo_name": "dbo/selenium",
"id": "e45b8a2b1d6e649acc9225f140dd0bd57ef5cfec",
"size": "1185",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "py/test/selenium/selenium_test_suite_headless.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "825"
},
{
"name": "AppleScript",
"bytes": "2614"
},
{
"name": "Batchfile",
"bytes": "307"
},
{
"name": "C",
"bytes": "58321"
},
{
"name": "C#",
"bytes": "2584001"
},
{
"name": "C++",
"bytes": "1685915"
},
{
"name": "CSS",
"bytes": "25162"
},
{
"name": "HTML",
"bytes": "1852415"
},
{
"name": "Java",
"bytes": "5204290"
},
{
"name": "JavaScript",
"bytes": "5088484"
},
{
"name": "Makefile",
"bytes": "4655"
},
{
"name": "Python",
"bytes": "643301"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3086"
},
{
"name": "Ruby",
"bytes": "959530"
},
{
"name": "Shell",
"bytes": "993"
},
{
"name": "XSLT",
"bytes": "1047"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
import paddle
from paddle.fluid.data_feeder import convert_dtype
import paddle.fluid.core as core
from paddle.static import program_guard, Program
class TestEmptyLikeAPICommon(unittest.TestCase):
def __check_out__(self, out):
data_type = convert_dtype(out.dtype)
self.assertEqual(
data_type,
self.dst_dtype,
'dtype should be %s, but get %s' % (self.dst_dtype, data_type),
)
shape = out.shape
self.assertTupleEqual(
shape,
self.dst_shape,
'shape should be %s, but get %s' % (self.dst_shape, shape),
)
if data_type in ['float32', 'float64', 'int32', 'int64']:
max_value = np.nanmax(out)
min_value = np.nanmin(out)
always_non_full_zero = max_value >= min_value
always_full_zero = max_value == 0.0 and min_value == 0.0
self.assertTrue(
always_full_zero or always_non_full_zero,
'always_full_zero or always_non_full_zero.',
)
elif data_type in ['bool']:
total_num = out.size
true_num = np.sum(out)
false_num = np.sum(~out)
self.assertTrue(
total_num == true_num + false_num,
'The value should always be True or False.',
)
else:
self.assertTrue(False, 'invalid data type')
class TestEmptyLikeAPI(TestEmptyLikeAPICommon):
def setUp(self):
self.init_config()
def test_dygraph_api_out(self):
paddle.disable_static()
out = paddle.empty_like(self.x, self.dtype)
self.__check_out__(out.numpy())
paddle.enable_static()
def init_config(self):
self.x = np.random.random((200, 3)).astype("float32")
self.dtype = self.x.dtype
self.dst_shape = self.x.shape
self.dst_dtype = self.dtype
class TestEmptyLikeAPI2(TestEmptyLikeAPI):
def init_config(self):
self.x = np.random.random((200, 3)).astype("float64")
self.dtype = self.x.dtype
self.dst_shape = self.x.shape
self.dst_dtype = self.dtype
class TestEmptyLikeAPI3(TestEmptyLikeAPI):
def init_config(self):
self.x = np.random.random((200, 3)).astype("int")
self.dtype = self.x.dtype
self.dst_shape = self.x.shape
self.dst_dtype = self.dtype
class TestEmptyLikeAPI4(TestEmptyLikeAPI):
def init_config(self):
self.x = np.random.random((200, 3)).astype("int64")
self.dtype = self.x.dtype
self.dst_shape = self.x.shape
self.dst_dtype = self.dtype
class TestEmptyLikeAPI5(TestEmptyLikeAPI):
def init_config(self):
self.x = np.random.random((200, 3)).astype("bool")
self.dtype = self.x.dtype
self.dst_shape = self.x.shape
self.dst_dtype = self.dtype
class TestEmptyLikeAPI6(TestEmptyLikeAPI):
def init_config(self):
self.x = np.random.random((200, 3)).astype("float64")
self.dtype = "float32"
self.dst_shape = self.x.shape
self.dst_dtype = self.dtype
class TestEmptyLikeAPI7(TestEmptyLikeAPI):
def init_config(self):
self.x = np.random.random((200, 3)).astype("int")
self.dtype = "float32"
self.dst_shape = self.x.shape
self.dst_dtype = self.dtype
class TestEmptyLikeAPI8(TestEmptyLikeAPI):
def init_config(self):
self.x = np.random.random((200, 3)).astype("int64")
self.dtype = "float32"
self.dst_shape = self.x.shape
self.dst_dtype = self.dtype
class TestEmptyLikeAPI9(TestEmptyLikeAPI):
def init_config(self):
self.x = np.random.random((200, 3)).astype("bool")
self.dtype = "float32"
self.dst_shape = self.x.shape
self.dst_dtype = self.dtype
class TestEmptyLikeAPI10(TestEmptyLikeAPI):
def init_config(self):
self.x = np.random.random((200, 3)).astype("float32")
self.dtype = "bool"
self.dst_shape = self.x.shape
self.dst_dtype = self.dtype
class TestEmptyLikeAPI_Static(TestEmptyLikeAPICommon):
def setUp(self):
self.init_config()
def test_static_graph(self):
paddle.enable_static()
dtype = 'float32'
train_program = Program()
startup_program = Program()
with program_guard(train_program, startup_program):
x = np.random.random(self.x_shape).astype(dtype)
data_x = paddle.static.data(
'x', shape=self.data_x_shape, dtype=dtype
)
out = paddle.empty_like(data_x)
place = (
paddle.CUDAPlace(0)
if core.is_compiled_with_cuda()
else paddle.CPUPlace()
)
exe = paddle.static.Executor(place)
res = exe.run(train_program, feed={'x': x}, fetch_list=[out])
self.dst_dtype = dtype
self.dst_shape = x.shape
self.__check_out__(res[0])
paddle.disable_static()
def init_config(self):
self.x_shape = (200, 3)
self.data_x_shape = [200, 3]
class TestEmptyLikeAPI_Static2(TestEmptyLikeAPI_Static):
def init_config(self):
self.x_shape = (3, 200, 3)
self.data_x_shape = [-1, 200, 3]
class TestEmptyError(unittest.TestCase):
def test_attr(self):
def test_dtype():
x = np.random.random((200, 3)).astype("float64")
dtype = 'uint8'
result = paddle.empty_like(x, dtype=dtype)
self.assertRaises(TypeError, test_dtype)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "5ba39c3a3e86414e5878f2500ed42115",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 75,
"avg_line_length": 29.445026178010473,
"alnum_prop": 0.5869487908961594,
"repo_name": "luotao1/Paddle",
"id": "82ad72e11e5f2b2bf7ee8a8aa0316823a5703c31",
"size": "6236",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/test_empty_like_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
} |
"""
A collection of small helper functions for generating small pieces of datatable output in custom
methods on a view.
Because of uncontrolled external use of ``preload_record_data()`` by the view, each of these utility
functions allows for generic ``*args`` to pass through to the function, even though they aren't used
in any way.
"""
from functools import partial, wraps
import operator
from django.db.models import Model
from django.forms.utils import flatatt
from .utils import resolve_orm_path, XEDITABLE_FIELD_TYPES
from django.utils.timezone import localtime
def keyed_helper(helper):
"""
Decorator for helper functions that operate on direct values instead of model instances.
A keyed helper is one that can be used normally in the view's own custom callbacks, but also
supports direct access in the column declaration, such as in the example:
datatable_options = {
'columns': [
('Field Name', 'fieldname', make_boolean_checkmark(key=attrgetter('fieldname'))),
],
}
With the help of a ``sort``-style ``key`` argument, the helper can receive all the information
it requires in advance, so that the view doesn't have to go through the trouble of declaring
a custom callback method that simply returns the value of the ``make_boolean_checkmark()``
helper.
If the attribute being fetched is identical to the one pointed to in the column declaration,
even the ``key`` argument can be omitted:
('Field Name', 'fieldname', make_boolean_checkmark)),
"""
@wraps(helper)
def wrapper(instance=None, key=None, attr=None, *args, **kwargs):
if set((instance, key, attr)) == {None}:
# helper was called in place with neither important arg
raise ValueError(
"If called directly, helper function '%s' requires either a model"
" instance, or a 'key' or 'attr' keyword argument." % helper.__name__
)
if instance is not None:
return helper(instance, *args, **kwargs)
if key is None and attr is None:
attr = "self"
if attr:
if attr == "self":
key = lambda obj: obj # noqa: E731
else:
key = operator.attrgetter(attr)
# Helper is used directly in the columns declaration. A new callable is
# returned to take the place of a callback.
@wraps(helper)
def helper_wrapper(instance, *args, **kwargs):
return helper(key(instance), *args, **kwargs)
return helper_wrapper
wrapper._is_wrapped = True
return wrapper
@keyed_helper
def link_to_model(instance, text=None, *args, **kwargs):
"""
Returns HTML in the form::
<a href="{{ instance.get_absolute_url }}">{{ text }}</a>
If ``text`` is provided and isn't empty, it will be used as the hyperlinked text.
If ``text`` isn't available, then ``kwargs['rich_value']`` will be consulted instead.
Failing those checks, the helper will fall back to simply using ``unicode(instance)`` as the
link text.
If the helper is called in place (rather than providing the helper reference directly), it can
receive a special ``key`` argument, which is a mapping function that will receiving the instance
(once it is available) and return some value from that instance. That new value will be sent to
the helper in the place of the instance.
Examples::
# Generate a simple href tag for instance.get_absolute_url()
name = columns.TextColumn("Name", sources=['name'],
processor=link_to_model)
# Generate an href tag for instance.relatedobject.get_absolute_url()
# Note that without the use of link_to_model(key=...), the object going into
# the link_to_model helper would be the row instance, not the thing looked up by the
# column's sources.
name = columns.TextColumn("Name", sources=['relatedobject__name'],
processor=link_to_model(key=getattr('relatedobject')))
"""
if not text:
text = kwargs.get("rich_value") or str(instance)
return """<a href="{0}">{1}</a>""".format(instance.get_absolute_url(), text)
@keyed_helper
def make_boolean_checkmark(value, true_value="✔", false_value="✘", *args, **kwargs):
"""
Returns a unicode ✔ or ✘, configurable by pre-calling the helper with ``true_value`` and/or
``false_value`` arguments, based on the incoming value.
The value at ``kwargs['default_value']`` is checked to see if it casts to a boolean ``True`` or
``False``, and returns the appropriate representation.
Examples::
# A DateTimeField can be sent to the helper to detect whether
# or not it is None, and have a checkmark reflect that.
is_published = columns.DateTimeColumn("Published", sources=['published_date'],
processor=make_boolean_checkmark)
# Make the 'false_value' blank so that only True-like items have an icon
is_published = columns.DateTimeColumn("Published", sources=['published_date'],
processor=make_boolean_checkmark(false_value=""))
"""
value = kwargs.get("default_value", value)
if value:
return true_value
return false_value
def itemgetter(k, ellipsis=False, key=None):
"""
Looks up ``k`` as an index of the column's value.
If ``k`` is a ``slice`` type object, then ``ellipsis`` can be given as a string to use to
indicate truncation. Alternatively, ``ellipsis`` can be set to ``True`` to use a default
``'...'``.
If a ``key`` is given, it may be a function which maps the target value to something else
before the item lookup takes place.
Examples::
# Choose an item from a list source.
winner = columns.TextColumn("Winner", sources=['get_rankings'],
processor=itemgetter(0))
# Take instance.description[:30] and append "..." to the end if truncation occurs.
description = columns.TextColumn("Description", sources=['description'],
processor=itemgetter(slice(None, 30), ellipsis=True))
"""
def helper(instance, *args, **kwargs):
default_value = kwargs.get("default_value")
if default_value is None:
default_value = instance
value = default_value[k]
if (
ellipsis
and isinstance(k, slice)
and isinstance(value, str)
and len(default_value) > len(value)
):
if ellipsis is True:
value += "..."
else:
value += ellipsis
return value
if key:
helper = keyed_helper(helper)(key=key)
return helper
def attrgetter(attr, key=None):
"""
Looks up ``attr`` on the target value. If the result is a callable, it will be called in place
without arguments.
If a ``key`` is given, it may be a function which maps the target value to something else
before the attribute lookup takes place.
Examples::
# Explicitly selecting the sources and then using a processor to allow the model
# method to organize the data itself, you can still provide all the necessary
# ORM hints to the column.
# This is definitely superior to having sources=['get_address'].
address = columns.TextColumn("Address", sources=['street', 'city', 'state', 'zip'],
processor=attrgetter('get_address'))
"""
def helper(instance, *args, **kwargs):
value = instance
for bit in attr.split("."):
value = getattr(value, bit)
if callable(value):
value = value()
return value
if key:
helper = keyed_helper(helper)(key=key)
return helper
def format_date(format_string, localize=False, key=None):
"""
A pre-called helper to supply a date format string ahead of time, so that it can apply to each
date or datetime that this column represents. With Django >= 1.5, the ``localize=True`` keyword
argument can be given, or else can be supplied in the column's own declaration for the same
effect. (The date and datetime columns explicitly forward their ``localize`` setting to all
helpers.)
If the ``key`` argument is given, it may be a function which maps the target value to something
else before the date formatting takes place.
"""
if localize is not False and localtime is None:
raise Exception("Cannot use format_date argument 'localize' with Django < 1.5")
def helper(value, *args, **kwargs):
inner_localize = kwargs.get("localize", localize)
if inner_localize is not False and localtime is None:
raise Exception("Cannot use format_date argument 'localize' with Django < 1.5")
if key:
value = key(value)
else:
value = kwargs.get("default_value", value)
if not value: # Empty or missing default_value
return ""
if localize:
value = localtime(value)
return value.strftime(format_string)
if key:
return keyed_helper(helper)(key=key)
return helper
def format(format_string, cast=lambda x: x):
"""
A pre-called helper to supply a modern string format (the kind with {} instead of %s), so that
it can apply to each value in the column as it is rendered. This can be useful for string
padding like leading zeroes, or rounding floating point numbers to a certain number of decimal
places, etc.
If given, the ``cast`` argument should be a mapping function that coerces the input to whatever
type is required for the string formatting to work. Trying to push string data into a float
format will raise an exception, for example, so the ``float`` type itself could be given as
the ``cast`` function.
Examples::
# Perform some 0 padding
item_number = columns.FloatColumn("Item No.", sources=['number'],
processor=format("{:03d}))
# Force a string column value to coerce to float and round to 2 decimal places
rating = columns.TextColumn("Rating", sources=['avg_rating'],
processor=format("{:.2f}", cast=float))
"""
def helper(instance, *args, **kwargs):
value = kwargs.get("default_value")
if value is None:
value = instance
value = cast(value)
return format_string.format(value, obj=instance)
return helper
def make_xeditable(instance=None, extra_attrs=None, *args, **kwargs):  # noqa: C901
    """
    Converts the contents of the column into an ``<a>`` tag with the required DOM attributes to
    power the X-Editable UI.

    The following keyword arguments are all optional, but may be provided when pre-calling the
    helper, to customize the output of the helper once it is run per object record:

    * ``type`` - Defaults to the basic type of the HTML input ("text", "number", "datetime")
    * ``title`` - Defaults to an empty string, controls the HTML "title" attribute.
    * ``placeholder`` - Defaults to whatever "title" is, controls the HTML
      "placeholder" attribute.
    * ``url`` - Defaults to the ``request.path`` of the view, which will automatically
      serve the X-Editable interface as long as it inherits from ``XEditableDatatableView``.
    * ``source`` - Defaults to the ``request.path`` of the view, which will automatically
      serve X-Editable requests for ``choices`` data about a field.

    Supplying a list of names via ``extra_attrs`` will enable arbitrary other keyword arguments to
    be rendered in the HTML as attribute as well.  ``extra_attrs`` serves as a whitelist of extra
    names so that unintended kwargs don't get rendered without your permission.
    """
    if instance is None:
        # Preloading kwargs into the helper for deferred execution
        helper = partial(make_xeditable, extra_attrs=extra_attrs, *args, **kwargs)
        return helper

    # Immediate finalization, return the xeditable structure
    data = kwargs.get("default_value", instance)
    rich_data = kwargs.get("rich_value", data)

    # Compile values to appear as "data-*" attributes on the anchor tag.
    # ``extra_attrs`` uses the None sentinel (not a mutable ``[]`` default).
    default_attr_names = ["pk", "type", "url", "source", "title", "placeholder"]
    valid_attr_names = set(default_attr_names + list(extra_attrs or ()))
    attrs = {}
    for k, v in kwargs.items():
        if k in valid_attr_names:
            if k.startswith("data_"):
                k = k[5:]
            attrs["data-{0}".format(k)] = v

    attrs["data-xeditable"] = "xeditable"

    # Assign default values where they are not provided
    field_name = kwargs["field_name"]  # sent as a default kwarg to helpers
    if isinstance(field_name, (tuple, list)):
        # Legacy syntax
        field_name = field_name[1]
        if isinstance(field_name, (tuple, list)):
            raise ValueError(
                "'make_xeditable' helper needs a single-field data column,"
                " not {0!r}".format(field_name)
            )
    attrs["data-name"] = field_name

    if isinstance(rich_data, Model):
        attrs["data-value"] = rich_data.pk
    else:
        attrs["data-value"] = rich_data

    if "data-pk" not in attrs:
        attrs["data-pk"] = instance.pk

    if "data-url" not in attrs:
        # Look for a backup data-url: the view first, then the instance, then the
        # view's request path as a last resort.
        provider_name = "get_update_url"
        url_provider = getattr(kwargs.get("view"), provider_name, None)
        if not url_provider:
            url_provider = getattr(instance, provider_name, None)
        if not url_provider:
            # BUGFIX: the original raised here even when the instance supplied a
            # provider; raise only when no provider can be found at all.
            if "view" in kwargs:
                url_provider = lambda field_name: kwargs["view"].request.path  # noqa: E731
            else:
                raise ValueError("'make_xeditable' cannot determine a value for 'url'.")
        attrs["data-url"] = url_provider(field_name=field_name)

    if "data-placeholder" not in attrs:
        attrs["data-placeholder"] = attrs.get("data-title", "")

    if "data-type" not in attrs:
        if hasattr(instance, "_meta"):
            # Try to fetch a reasonable type from the field's class
            if field_name == "pk":  # special field name not in Model._meta.fields
                field = instance._meta.pk
            else:
                field = resolve_orm_path(instance, field_name)

            if field.choices:
                field_type = "select"
            else:
                field_type = XEDITABLE_FIELD_TYPES.get(field.get_internal_type(), "text")
        else:
            field_type = "text"
        attrs["data-type"] = field_type

    # type=select elements need to fetch their valid choice options from an AJAX endpoint.
    # Register the view for this lookup.
    if attrs["data-type"] in ("select", "select2"):
        if "data-source" not in attrs:
            if "view" in kwargs:
                attrs["data-source"] = "{url}?{field_param}={fieldname}".format(
                    **{
                        "url": kwargs["view"].request.path,
                        "field_param": kwargs["view"].xeditable_fieldname_param,
                        "fieldname": field_name,
                    }
                )
                if attrs["data-type"] == "select2":
                    attrs["data-source"] += "&select2=true"
            else:
                raise ValueError("'make_xeditable' cannot determine a value for 'source'.")

        # Choice fields will want to display their readable label instead of db data
        data = getattr(instance, "get_{0}_display".format(field_name), lambda: data)()

    data = """<a href="#"{attrs}>{data}</a>""".format(attrs=flatatt(attrs), data=data)
    return data
def make_processor(func, arg=None):
    """
    A pre-called processor that wraps the execution of the target callable ``func``.

    Useful when ``func`` is a third-party mapping function that can transform a column's
    value but does not understand the extra kwargs sent to processor callbacks; this
    wrapper withholds those kwargs so the call succeeds.

    ``func`` is invoked once per object record, with the column data retrieved via the
    column's :py:attr:`~datatableview.columns.Column.sources` as its single positional
    argument.  When ``arg`` is supplied, it is forwarded as a second positional argument
    (originally intended to simplify using Django template filter functions).  For more
    arguments, wrap ``func`` in a ``functools.partial`` and use that as ``func`` instead.
    """

    def helper(instance, *args, **kwargs):
        # Prefer the resolved column value; fall back to the record itself.
        target = kwargs.get("default_value")
        if target is None:
            target = instance
        call_args = (target,) if arg is None else (target, arg)
        return func(*call_args)

    return helper


# Alias kept for readability when used with Django template filter functions.
through_filter = make_processor
| {
"content_hash": "507fee6c073d0d4f0872a93ef57907b2",
"timestamp": "",
"source": "github",
"line_count": 441,
"max_line_length": 100,
"avg_line_length": 39.54875283446712,
"alnum_prop": 0.6204346081073333,
"repo_name": "pivotal-energy-solutions/django-datatable-view",
"id": "23d4922649fc7d084e1005781651bc40f83712f2",
"size": "17469",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datatableview/helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "17075"
},
{
"name": "HTML",
"bytes": "57307"
},
{
"name": "JavaScript",
"bytes": "7790"
},
{
"name": "Python",
"bytes": "247104"
}
],
"symlink_target": ""
} |
from sqlalchemy import column
from superset.db_engine_specs.pinot import PinotEngineSpec
from tests.db_engine_specs.base_tests import TestDbEngineSpec
class TestPinotDbEngineSpec(TestDbEngineSpec):
    """Tests pertaining to our Pinot database support"""

    def test_pinot_time_expression_sec_one_1m_grain(self):
        # An epoch-seconds column truncated to a one-month (P1M) grain should
        # compile to Pinot's DATETIMECONVERT call.
        tstamp_col = column("tstamp")
        time_expr = PinotEngineSpec.get_timestamp_expr(tstamp_col, "epoch_s", "P1M")
        compiled = str(time_expr.compile())
        expected = "DATETIMECONVERT(tstamp, '1:SECONDS:EPOCH', '1:SECONDS:EPOCH', '1:MONTHS')"
        self.assertEqual(compiled, expected)
| {
"content_hash": "d6f0b09414376e86f9bef93fc6e68932",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 88,
"avg_line_length": 36.11764705882353,
"alnum_prop": 0.6775244299674267,
"repo_name": "airbnb/superset",
"id": "973282737b2def511bde20d332e6ff9a99bdd626",
"size": "1399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/db_engine_specs/pinot_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "62654"
},
{
"name": "HTML",
"bytes": "99610"
},
{
"name": "JavaScript",
"bytes": "585557"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "715013"
},
{
"name": "Shell",
"bytes": "1033"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.