Dataset schema (column name, dtype, and observed value range):

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string | length 24 |
| max_stars_repo_stars_event_max_datetime | string | length 24 |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string | length 24 |
| max_issues_repo_issues_event_max_datetime | string | length 24 |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string | length 24 |
| max_forks_repo_forks_event_max_datetime | string | length 24 |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
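The rows below follow this schema. As a minimal sketch of how such a dataset could be inspected (assuming it is published in the Hugging Face `datasets` format; the dataset name and `data_dir` below are placeholders, not taken from this dump):

```python
# Sketch only: the dataset name and data_dir are placeholders for a code dataset
# with the schema above (hexsha, content, license/star/fork metadata, ...).
from datasets import load_dataset

ds = load_dataset("bigcode/the-stack", data_dir="data/python",
                  split="train", streaming=True)
row = next(iter(ds))
print(row["max_stars_repo_name"], row["size"], row["alphanum_fraction"])
print(row["content"][:200])  # first 200 characters of the stored file
```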
| hexsha | size | ext | lang |
|---|---|---|---|
| 1a8585d262994990f5f747b5ea6d114f365aef34 | 116 | py | Python |

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | ahmed-package/functions.py | gokcelahmed/ahmed-package | a6c34fb2d85105ad33063b840c84f70ff7a0aa4d | ["MIT"] | null | null | null |
| max_issues | ahmed-package/functions.py | gokcelahmed/ahmed-package | a6c34fb2d85105ad33063b840c84f70ff7a0aa4d | ["MIT"] | null | null | null |
| max_forks | ahmed-package/functions.py | gokcelahmed/ahmed-package | a6c34fb2d85105ad33063b840c84f70ff7a0aa4d | ["MIT"] | null | null | null |

content:
if __name__ == "__main__":
    def printName():
        name = input("Please enter your name: ")
        print(name)
    printName()  # originally defined but never called, so the script did nothing when run
avg_line_length: 29 | max_line_length: 48 | alphanum_fraction: 0.577586
| hexsha | size | ext | lang |
|---|---|---|---|
| 18368c68efada810dc75011eacd391e1d6bdcf1d | 7,935 | py | Python |

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | snakeai/gameplay/entities.py | robertomatheuspp/snake-ai-reinforcement | a94197472335f1ac348aa9f6a0f224569d667755 | ["MIT"] | null | null | null |
| max_issues | snakeai/gameplay/entities.py | robertomatheuspp/snake-ai-reinforcement | a94197472335f1ac348aa9f6a0f224569d667755 | ["MIT"] | null | null | null |
| max_forks | snakeai/gameplay/entities.py | robertomatheuspp/snake-ai-reinforcement | a94197472335f1ac348aa9f6a0f224569d667755 | ["MIT"] | 1 | 2018-10-21T01:16:52.000Z | 2018-10-21T01:16:52.000Z |

content:
import itertools
import random
import numpy as np
from collections import deque, namedtuple
class Point(namedtuple('PointTuple', ['x', 'y'])):
""" Represents a 2D point with named axes. """
def __add__(self, other):
""" Add two points coordinate-wise. """
return Point(self.x + other.x, self.y + other.y)
def __sub__(self, other):
""" Subtract two points coordinate-wise. """
return Point(self.x - other.x, self.y - other.y)
def clone(self):
return self + Point(0, 0)
class CellType(object):
""" Defines all types of cells that can be found in the game. """
EMPTY = 0
FRUIT = 1
SNAKE_HEAD = 2
SNAKE_BODY = 3
WALL = 4
class SnakeDirection(object):
""" Defines all possible directions the snake can take, as well as the corresponding offsets. """
NORTH = Point(0, -1)
EAST = Point(1, 0)
SOUTH = Point(0, 1)
WEST = Point(-1, 0)
ALL_SNAKE_DIRECTIONS = [
SnakeDirection.NORTH,
SnakeDirection.EAST,
SnakeDirection.SOUTH,
SnakeDirection.WEST,
]
class SnakeAction(object):
""" Defines all possible actions the agent can take in the environment. """
MAINTAIN_DIRECTION = 0
TURN_LEFT = 1
TURN_RIGHT = 2
ALL_SNAKE_ACTIONS = [
SnakeAction.MAINTAIN_DIRECTION,
SnakeAction.TURN_LEFT,
SnakeAction.TURN_RIGHT,
]
class Snake(object):
""" Represents the snake that has a position, can move, and change directions. """
def __init__(self, start_coord, length=3):
"""
Create a new snake.
Args:
start_coord: A point representing the initial position of the snake.
length: An integer specifying the initial length of the snake.
"""
# Place the snake vertically, heading north.
if start_coord:
self.body = deque([
Point(start_coord.x, start_coord.y + i)
for i in range(length)
])
self.direction = SnakeDirection.NORTH
self.directions = ALL_SNAKE_DIRECTIONS
@property
def head(self):
""" Get the position of the snake's head. """
return self.body[0]
@property
def tail(self):
""" Get the position of the snake's tail. """
return self.body[-1]
@property
def length(self):
""" Get the current length of the snake. """
return len(self.body)
def peek_next_move(self):
""" Get the point the snake will move to at its next step. """
return self.head + self.direction
def turn_left(self):
""" At the next step, take a left turn relative to the current direction. """
direction_idx = self.directions.index(self.direction)
self.direction = self.directions[direction_idx - 1]
def turn_right(self):
""" At the next step, take a right turn relative to the current direction. """
direction_idx = self.directions.index(self.direction)
self.direction = self.directions[(direction_idx + 1) % len(self.directions)]
def grow(self):
""" Grow the snake by 1 block from the head. """
self.body.appendleft(self.peek_next_move())
def move(self):
""" Move the snake 1 step forward, taking the current direction into account. """
self.body.appendleft(self.peek_next_move())
self.body.pop()
def clone(self):
cp = Snake( None, 10)
cp.body = deque(list(self.body))
cp.direction = self.direction
cp.directions = self.directions
return cp
class Field(object):
""" Represents the playing field for the Snake game. """
def __init__(self, level_map=None):
"""
Create a new Snake field.
Args:
level_map: a list of strings representing the field objects (1 string per row).
"""
self.level_map = level_map
self._cells = None
self._empty_cells = set()
self._level_map_to_cell_type = {
'S': CellType.SNAKE_HEAD,
's': CellType.SNAKE_BODY,
'#': CellType.WALL,
'O': CellType.FRUIT,
'.': CellType.EMPTY,
}
self._cell_type_to_level_map = {
cell_type: symbol
for symbol, cell_type in self._level_map_to_cell_type.items()
}
def __getitem__(self, point):
""" Get the type of cell at the given point. """
x, y = point
return self._cells[y, x]
def __setitem__(self, point, cell_type):
""" Update the type of cell at the given point. """
x, y = point
self._cells[y, x] = cell_type
# Do some internal bookkeeping to not rely on random selection of blank cells.
if cell_type == CellType.EMPTY:
self._empty_cells.add(point)
else:
if point in self._empty_cells:
self._empty_cells.remove(point)
def __str__(self):
return '\n'.join(
''.join(self._cell_type_to_level_map[cell] for cell in row)
for row in self._cells
)
@property
def size(self):
""" Get the size of the field (size == width == height). """
return len(self.level_map)
def create_level(self):
""" Create a new field based on the level map. """
try:
self._cells = np.array([
[self._level_map_to_cell_type[symbol] for symbol in line]
for line in self.level_map
])
self._empty_cells = {
Point(x, y)
for y in range(self.size)
for x in range(self.size)
if self[(x, y)] == CellType.EMPTY
}
except KeyError as err:
raise ValueError(f'Unknown level map symbol: "{err.args[0]}"')
def find_snake_head(self):
""" Find the snake's head on the field. """
for y in range(self.size):
for x in range(self.size):
if self[(x, y)] == CellType.SNAKE_HEAD:
return Point(x, y)
raise ValueError('Initial snake position not specified on the level map')
def get_random_empty_cell(self):
""" Get the coordinates of a random empty cell. """
return random.choice(list(self._empty_cells))
def place_snake(self, snake):
""" Put the snake on the field and fill the cells with its body. """
self[snake.head] = CellType.SNAKE_HEAD
for snake_cell in itertools.islice(snake.body, 1, len(snake.body)):
self[snake_cell] = CellType.SNAKE_BODY
def update_snake_footprint(self, old_head, old_tail, new_head):
"""
Update field cells according to the new snake position.
Environment must be as fast as possible to speed up agent training.
Therefore, we'll sacrifice some duplication of information between
the snake body and the field just to execute timesteps faster.
Args:
old_head: position of the head before the move.
old_tail: position of the tail before the move.
new_head: position of the head after the move.
"""
self[old_head] = CellType.SNAKE_BODY
# If we've grown at this step, the tail cell shouldn't move.
if old_tail:
self[old_tail] = CellType.EMPTY
# Support the case when we're chasing own tail.
if self[new_head] not in (CellType.WALL, CellType.SNAKE_BODY) or new_head == old_tail:
self[new_head] = CellType.SNAKE_HEAD
def clone(self):
cp = Field(level_map=self.level_map)
# Note: the cell grid is shared with the original; use np.copy(self._cells) for an independent grid.
cp._cells = self._cells
cp._empty_cells = set(self._empty_cells)
cp._level_map_to_cell_type = self._level_map_to_cell_type
cp._cell_type_to_level_map = self._cell_type_to_level_map
return cp
avg_line_length: 31.74 | max_line_length: 101 | alphanum_fraction: 0.600378
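The docstrings in entities.py above describe how `Snake` and `Field` cooperate. A rough usage sketch (the 7x7 level map and the two-step walk are illustrative assumptions, not code from the snake-ai-reinforcement repository):

```python
# Illustrative only: a tiny level map and a deterministic two-step walk.
field = Field(level_map=[
    '#######',
    '#.....#',
    '#.....#',
    '#..S..#',
    '#..s..#',
    '#.....#',
    '#######',
])
field.create_level()
snake = Snake(field.find_snake_head(), length=2)
field.place_snake(snake)

for turn_right in (False, True):      # step north, then turn right and step east
    if turn_right:
        snake.turn_right()
    old_head, old_tail = snake.head, snake.tail
    snake.move()
    field.update_snake_footprint(old_head, old_tail, snake.head)
print(field)
```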
| hexsha | size | ext | lang |
|---|---|---|---|
| 9ca9a64f8e2d05a8ffc4301ad082af79edc0dd8b | 51,171 | py | Python |

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | samples/openapi3/client/petstore/python-experimental/petstore_api/api/pet_api.py | nitoqq/openapi-generator | d5ea62f9669b2f669065b02b701eedc816d9d3cd | ["Apache-2.0"] | 1 | 2020-08-07T08:38:39.000Z | 2020-08-07T08:38:39.000Z |
| max_issues | samples/openapi3/client/petstore/python-experimental/petstore_api/api/pet_api.py | nitoqq/openapi-generator | d5ea62f9669b2f669065b02b701eedc816d9d3cd | ["Apache-2.0"] | null | null | null |
| max_forks | samples/openapi3/client/petstore/python-experimental/petstore_api/api/pet_api.py | nitoqq/openapi-generator | d5ea62f9669b2f669065b02b701eedc816d9d3cd | ["Apache-2.0"] | 1 | 2019-10-06T12:57:47.000Z | 2019-10-06T12:57:47.000Z |

content:
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
# python 2 and python 3 compatibility library
import six
from petstore_api.api_client import ApiClient
from petstore_api.exceptions import (
ApiTypeError,
ApiValueError
)
from petstore_api.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
int,
none_type,
str,
validate_and_convert_types
)
from petstore_api.model import pet
from petstore_api.model import api_response
class PetApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __add_pet(
self,
pet_pet,
**kwargs
):
"""Add a new pet to the store # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.add_pet(pet_pet, async_req=True)
>>> result = thread.get()
Args:
pet_pet (pet.Pet): Pet object that needs to be added to the store
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index', 0)
kwargs['pet_pet'] = \
pet_pet
return self.call_with_http_info(**kwargs)
self.add_pet = Endpoint(
settings={
'response_type': None,
'auth': [
'http_signature_test',
'petstore_auth'
],
'endpoint_path': '/pet',
'operation_id': 'add_pet',
'http_method': 'POST',
'servers': [
'http://petstore.swagger.io/v2',
'http://path-server-test.petstore.local/v2'
]
},
params_map={
'all': [
'pet_pet',
],
'required': [
'pet_pet',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'pet_pet':
(pet.Pet,),
},
'attribute_map': {
},
'location_map': {
'pet_pet': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [
'application/json',
'application/xml'
]
},
api_client=api_client,
callable=__add_pet
)
def __delete_pet(
self,
pet_id,
**kwargs
):
"""Deletes a pet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_pet(pet_id, async_req=True)
>>> result = thread.get()
Args:
pet_id (int): Pet id to delete
Keyword Args:
api_key (str): [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index', 0)
kwargs['pet_id'] = \
pet_id
return self.call_with_http_info(**kwargs)
self.delete_pet = Endpoint(
settings={
'response_type': None,
'auth': [
'petstore_auth'
],
'endpoint_path': '/pet/{petId}',
'operation_id': 'delete_pet',
'http_method': 'DELETE',
'servers': [],
},
params_map={
'all': [
'pet_id',
'api_key',
],
'required': [
'pet_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'pet_id':
(int,),
'api_key':
(str,),
},
'attribute_map': {
'pet_id': 'petId',
'api_key': 'api_key',
},
'location_map': {
'pet_id': 'path',
'api_key': 'header',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [],
},
api_client=api_client,
callable=__delete_pet
)
def __find_pets_by_status(
self,
status,
**kwargs
):
"""Finds Pets by status # noqa: E501
Multiple status values can be provided with comma separated strings # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.find_pets_by_status(status, async_req=True)
>>> result = thread.get()
Args:
status ([str]): Status values that need to be considered for filter
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
[pet.Pet]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index', 0)
kwargs['status'] = \
status
return self.call_with_http_info(**kwargs)
self.find_pets_by_status = Endpoint(
settings={
'response_type': ([pet.Pet],),
'auth': [
'http_signature_test',
'petstore_auth'
],
'endpoint_path': '/pet/findByStatus',
'operation_id': 'find_pets_by_status',
'http_method': 'GET',
'servers': [],
},
params_map={
'all': [
'status',
],
'required': [
'status',
],
'nullable': [
],
'enum': [
'status',
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
('status',): {
"AVAILABLE": "available",
"PENDING": "pending",
"SOLD": "sold"
},
},
'openapi_types': {
'status':
([str],),
},
'attribute_map': {
'status': 'status',
},
'location_map': {
'status': 'query',
},
'collection_format_map': {
'status': 'csv',
}
},
headers_map={
'accept': [
'application/xml',
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__find_pets_by_status
)
def __find_pets_by_tags(
self,
tags,
**kwargs
):
"""Finds Pets by tags # noqa: E501
Multiple tags can be provided with comma separated strings. Use tag1, tag2, tag3 for testing. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.find_pets_by_tags(tags, async_req=True)
>>> result = thread.get()
Args:
tags ([str]): Tags to filter by
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
[pet.Pet]
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index', 0)
kwargs['tags'] = \
tags
return self.call_with_http_info(**kwargs)
self.find_pets_by_tags = Endpoint(
settings={
'response_type': ([pet.Pet],),
'auth': [
'http_signature_test',
'petstore_auth'
],
'endpoint_path': '/pet/findByTags',
'operation_id': 'find_pets_by_tags',
'http_method': 'GET',
'servers': [],
},
params_map={
'all': [
'tags',
],
'required': [
'tags',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'tags':
([str],),
},
'attribute_map': {
'tags': 'tags',
},
'location_map': {
'tags': 'query',
},
'collection_format_map': {
'tags': 'csv',
}
},
headers_map={
'accept': [
'application/xml',
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__find_pets_by_tags
)
def __get_pet_by_id(
self,
pet_id,
**kwargs
):
"""Find pet by ID # noqa: E501
Returns a single pet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_pet_by_id(pet_id, async_req=True)
>>> result = thread.get()
Args:
pet_id (int): ID of pet to return
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
pet.Pet
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index', 0)
kwargs['pet_id'] = \
pet_id
return self.call_with_http_info(**kwargs)
self.get_pet_by_id = Endpoint(
settings={
'response_type': (pet.Pet,),
'auth': [
'api_key'
],
'endpoint_path': '/pet/{petId}',
'operation_id': 'get_pet_by_id',
'http_method': 'GET',
'servers': [],
},
params_map={
'all': [
'pet_id',
],
'required': [
'pet_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'pet_id':
(int,),
},
'attribute_map': {
'pet_id': 'petId',
},
'location_map': {
'pet_id': 'path',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/xml',
'application/json'
],
'content_type': [],
},
api_client=api_client,
callable=__get_pet_by_id
)
def __update_pet(
self,
pet_pet,
**kwargs
):
"""Update an existing pet # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_pet(pet_pet, async_req=True)
>>> result = thread.get()
Args:
pet_pet (pet.Pet): Pet object that needs to be added to the store
Keyword Args:
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index', 0)
kwargs['pet_pet'] = \
pet_pet
return self.call_with_http_info(**kwargs)
self.update_pet = Endpoint(
settings={
'response_type': None,
'auth': [
'http_signature_test',
'petstore_auth'
],
'endpoint_path': '/pet',
'operation_id': 'update_pet',
'http_method': 'PUT',
'servers': [
'http://petstore.swagger.io/v2',
'http://path-server-test.petstore.local/v2'
]
},
params_map={
'all': [
'pet_pet',
],
'required': [
'pet_pet',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'pet_pet':
(pet.Pet,),
},
'attribute_map': {
},
'location_map': {
'pet_pet': 'body',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [
'application/json',
'application/xml'
]
},
api_client=api_client,
callable=__update_pet
)
def __update_pet_with_form(
self,
pet_id,
**kwargs
):
"""Updates a pet in the store with form data # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_pet_with_form(pet_id, async_req=True)
>>> result = thread.get()
Args:
pet_id (int): ID of pet that needs to be updated
Keyword Args:
name (str): Updated name of the pet. [optional]
status (str): Updated status of the pet. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index', 0)
kwargs['pet_id'] = \
pet_id
return self.call_with_http_info(**kwargs)
self.update_pet_with_form = Endpoint(
settings={
'response_type': None,
'auth': [
'petstore_auth'
],
'endpoint_path': '/pet/{petId}',
'operation_id': 'update_pet_with_form',
'http_method': 'POST',
'servers': [],
},
params_map={
'all': [
'pet_id',
'name',
'status',
],
'required': [
'pet_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'pet_id':
(int,),
'name':
(str,),
'status':
(str,),
},
'attribute_map': {
'pet_id': 'petId',
'name': 'name',
'status': 'status',
},
'location_map': {
'pet_id': 'path',
'name': 'form',
'status': 'form',
},
'collection_format_map': {
}
},
headers_map={
'accept': [],
'content_type': [
'application/x-www-form-urlencoded'
]
},
api_client=api_client,
callable=__update_pet_with_form
)
def __upload_file(
self,
pet_id,
**kwargs
):
"""uploads an image # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_file(pet_id, async_req=True)
>>> result = thread.get()
Args:
pet_id (int): ID of pet to update
Keyword Args:
additional_metadata (str): Additional data to pass to server. [optional]
file (file_type): file to upload. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
api_response.ApiResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index', 0)
kwargs['pet_id'] = \
pet_id
return self.call_with_http_info(**kwargs)
self.upload_file = Endpoint(
settings={
'response_type': (api_response.ApiResponse,),
'auth': [
'petstore_auth'
],
'endpoint_path': '/pet/{petId}/uploadImage',
'operation_id': 'upload_file',
'http_method': 'POST',
'servers': [],
},
params_map={
'all': [
'pet_id',
'additional_metadata',
'file',
],
'required': [
'pet_id',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'pet_id':
(int,),
'additional_metadata':
(str,),
'file':
(file_type,),
},
'attribute_map': {
'pet_id': 'petId',
'additional_metadata': 'additionalMetadata',
'file': 'file',
},
'location_map': {
'pet_id': 'path',
'additional_metadata': 'form',
'file': 'form',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'multipart/form-data'
]
},
api_client=api_client,
callable=__upload_file
)
def __upload_file_with_required_file(
self,
pet_id,
required_file,
**kwargs
):
"""uploads an image (required) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.upload_file_with_required_file(pet_id, required_file, async_req=True)
>>> result = thread.get()
Args:
pet_id (int): ID of pet to update
required_file (file_type): file to upload
Keyword Args:
additional_metadata (str): Additional data to pass to server. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
api_response.ApiResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index', 0)
kwargs['pet_id'] = \
pet_id
kwargs['required_file'] = \
required_file
return self.call_with_http_info(**kwargs)
self.upload_file_with_required_file = Endpoint(
settings={
'response_type': (api_response.ApiResponse,),
'auth': [
'petstore_auth'
],
'endpoint_path': '/fake/{petId}/uploadImageWithRequiredFile',
'operation_id': 'upload_file_with_required_file',
'http_method': 'POST',
'servers': [],
},
params_map={
'all': [
'pet_id',
'required_file',
'additional_metadata',
],
'required': [
'pet_id',
'required_file',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'pet_id':
(int,),
'required_file':
(file_type,),
'additional_metadata':
(str,),
},
'attribute_map': {
'pet_id': 'petId',
'required_file': 'requiredFile',
'additional_metadata': 'additionalMetadata',
},
'location_map': {
'pet_id': 'path',
'required_file': 'form',
'additional_metadata': 'form',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json'
],
'content_type': [
'multipart/form-data'
]
},
api_client=api_client,
callable=__upload_file_with_required_file
)
class Endpoint(object):
def __init__(self, settings=None, params_map=None, root_map=None,
headers_map=None, api_client=None, callable=None):
"""Creates an endpoint
Args:
settings (dict): see below key value pairs
'response_type' (tuple/None): response type
'auth' (list): a list of auth type keys
'endpoint_path' (str): the endpoint path
'operation_id' (str): endpoint string identifier
'http_method' (str): POST/PUT/PATCH/GET etc
'servers' (list): list of str servers that this endpoint is at
params_map (dict): see below key value pairs
'all' (list): list of str endpoint parameter names
'required' (list): list of required parameter names
'nullable' (list): list of nullable parameter names
'enum' (list): list of parameters with enum values
'validation' (list): list of parameters with validations
root_map
'validations' (dict): the dict mapping endpoint parameter tuple
paths to their validation dictionaries
'allowed_values' (dict): the dict mapping endpoint parameter
tuple paths to their allowed_values (enum) dictionaries
'openapi_types' (dict): param_name to openapi type
'attribute_map' (dict): param_name to camelCase name
'location_map' (dict): param_name to 'body', 'file', 'form',
'header', 'path', 'query'
collection_format_map (dict): param_name to `csv` etc.
headers_map (dict): see below key value pairs
'accept' (list): list of Accept header strings
'content_type' (list): list of Content-Type header strings
api_client (ApiClient) api client instance
callable (function): the function which is invoked when the
Endpoint is called
"""
self.settings = settings
self.params_map = params_map
self.params_map['all'].extend([
'async_req',
'_host_index',
'_preload_content',
'_request_timeout',
'_return_http_data_only',
'_check_input_type',
'_check_return_type'
])
self.params_map['nullable'].extend(['_request_timeout'])
self.validations = root_map['validations']
self.allowed_values = root_map['allowed_values']
self.openapi_types = root_map['openapi_types']
extra_types = {
'async_req': (bool,),
'_host_index': (int,),
'_preload_content': (bool,),
'_request_timeout': (none_type, int, (int,), [int]),
'_return_http_data_only': (bool,),
'_check_input_type': (bool,),
'_check_return_type': (bool,)
}
self.openapi_types.update(extra_types)
self.attribute_map = root_map['attribute_map']
self.location_map = root_map['location_map']
self.collection_format_map = root_map['collection_format_map']
self.headers_map = headers_map
self.api_client = api_client
self.callable = callable
def __validate_inputs(self, kwargs):
for param in self.params_map['enum']:
if param in kwargs:
check_allowed_values(
self.allowed_values,
(param,),
kwargs[param]
)
for param in self.params_map['validation']:
if param in kwargs:
check_validations(
self.validations,
(param,),
kwargs[param],
configuration=self.api_client.configuration
)
if kwargs['_check_input_type'] is False:
return
for key, value in six.iteritems(kwargs):
fixed_val = validate_and_convert_types(
value,
self.openapi_types[key],
[key],
False,
kwargs['_check_input_type'],
configuration=self.api_client.configuration
)
kwargs[key] = fixed_val
def __gather_params(self, kwargs):
params = {
'body': None,
'collection_format': {},
'file': {},
'form': [],
'header': {},
'path': {},
'query': []
}
for param_name, param_value in six.iteritems(kwargs):
param_location = self.location_map.get(param_name)
if param_location is None:
continue
if param_location:
if param_location == 'body':
params['body'] = param_value
continue
base_name = self.attribute_map[param_name]
if (param_location == 'form' and
self.openapi_types[param_name] == (file_type,)):
params['file'][param_name] = [param_value]
elif (param_location == 'form' and
self.openapi_types[param_name] == ([file_type],)):
# param_value is already a list
params['file'][param_name] = param_value
elif param_location in {'form', 'query'}:
param_value_full = (base_name, param_value)
params[param_location].append(param_value_full)
if param_location not in {'form', 'query'}:
params[param_location][base_name] = param_value
collection_format = self.collection_format_map.get(param_name)
if collection_format:
params['collection_format'][base_name] = collection_format
return params
def __call__(self, *args, **kwargs):
""" This method is invoked when endpoints are called
Example:
pet_api = PetApi()
pet_api.add_pet # this is an instance of the class Endpoint
pet_api.add_pet() # this invokes pet_api.add_pet.__call__()
which then invokes the callable functions stored in that endpoint at
pet_api.add_pet.callable or self.callable in this class
"""
return self.callable(self, *args, **kwargs)
def call_with_http_info(self, **kwargs):
try:
_host = self.settings['servers'][kwargs['_host_index']]
except IndexError:
if self.settings['servers']:
raise ApiValueError(
"Invalid host index. Must be 0 <= index < %s" %
len(self.settings['servers'])
)
_host = None
for key, value in six.iteritems(kwargs):
if key not in self.params_map['all']:
raise ApiTypeError(
"Got an unexpected parameter '%s'"
" to method `%s`" %
(key, self.settings['operation_id'])
)
# only throw this nullable ApiValueError if _check_input_type
# is False, if _check_input_type==True we catch this case
# in self.__validate_inputs
if (key not in self.params_map['nullable'] and value is None
and kwargs['_check_input_type'] is False):
raise ApiValueError(
"Value may not be None for non-nullable parameter `%s`"
" when calling `%s`" %
(key, self.settings['operation_id'])
)
for key in self.params_map['required']:
if key not in kwargs.keys():
raise ApiValueError(
"Missing the required parameter `%s` when calling "
"`%s`" % (key, self.settings['operation_id'])
)
self.__validate_inputs(kwargs)
params = self.__gather_params(kwargs)
accept_headers_list = self.headers_map['accept']
if accept_headers_list:
params['header']['Accept'] = self.api_client.select_header_accept(
accept_headers_list)
content_type_headers_list = self.headers_map['content_type']
if content_type_headers_list:
header_list = self.api_client.select_header_content_type(
content_type_headers_list)
params['header']['Content-Type'] = header_list
return self.api_client.call_api(
self.settings['endpoint_path'], self.settings['http_method'],
params['path'],
params['query'],
params['header'],
body=params['body'],
post_params=params['form'],
files=params['file'],
response_type=self.settings['response_type'],
auth_settings=self.settings['auth'],
async_req=kwargs['async_req'],
_check_type=kwargs['_check_return_type'],
_return_http_data_only=kwargs['_return_http_data_only'],
_preload_content=kwargs['_preload_content'],
_request_timeout=kwargs['_request_timeout'],
_host=_host,
collection_formats=params['collection_format'])
avg_line_length: 36.36887 | max_line_length: 174 | alphanum_fraction: 0.461824
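Based on the docstrings in pet_api.py above, a hedged sketch of how this generated client might be called (the `Configuration` host, the `ApiClient` construction, and the `Pet` constructor arguments are assumptions about the surrounding petstore_api package, which is not shown here):

```python
# Sketch only: assumes the rest of the generated petstore_api package
# (Configuration, ApiClient, models) with the usual OpenAPI Generator layout.
import petstore_api
from petstore_api.api.pet_api import PetApi
from petstore_api.model import pet

configuration = petstore_api.Configuration(host="http://petstore.swagger.io/v2")
api = PetApi(petstore_api.ApiClient(configuration))

new_pet = pet.Pet(name="doggie", photo_urls=["https://example.com/doggie.jpg"])
api.add_pet(new_pet)                                   # synchronous; returns None
thread = api.find_pets_by_status(["available"], async_req=True)
pets = thread.get()                                    # block until the async request completes
```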
| hexsha | size | ext | lang |
|---|---|---|---|
| ab6b647a401389f015c1e0901a213f9b87bc8081 | 23,111 | py | Python |

| | repo_path | repo_name | repo_head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | hyperopt/tests/test_spark.py | michaelmior/hyperopt | 63b5b9bf379fc55f6a158e17c400c1d8bb780fff | ["BSD-3-Clause"] | 1 | 2021-01-25T13:43:35.000Z | 2021-01-25T13:43:35.000Z |
| max_issues | hyperopt/tests/test_spark.py | bugrabuga/hyperopt | 63b5b9bf379fc55f6a158e17c400c1d8bb780fff | ["BSD-3-Clause"] | null | null | null |
| max_forks | hyperopt/tests/test_spark.py | bugrabuga/hyperopt | 63b5b9bf379fc55f6a158e17c400c1d8bb780fff | ["BSD-3-Clause"] | null | null | null |

content:
import contextlib
import logging
import os
import shutil
import tempfile
import time
import timeit
import unittest
import numpy as np
from pyspark.sql import SparkSession
from six import StringIO
from hyperopt import SparkTrials, anneal, base, fmin, hp
from .test_fmin import test_quadratic1_tpe
@contextlib.contextmanager
def patch_logger(name, level=logging.INFO):
"""patch logger and give an output"""
io_out = StringIO()
log = logging.getLogger(name)
log.setLevel(level)
log.handlers = []
handler = logging.StreamHandler(io_out)
log.addHandler(handler)
try:
yield io_out
finally:
log.removeHandler(handler)
class TestTempDir:
@classmethod
def make_tempdir(cls, dir="/tmp"):
"""
:param dir: Root directory in which to create the temp directory
"""
cls.tempdir = tempfile.mkdtemp(prefix="hyperopt_tests_", dir=dir)
@classmethod
def remove_tempdir(cls):
shutil.rmtree(cls.tempdir)
class BaseSparkContext:
"""
Mixin which sets up a SparkContext for tests
"""
NUM_SPARK_EXECUTORS = 4
@classmethod
def setup_spark(cls):
cls._spark = (
SparkSession.builder.master(
f"local[{BaseSparkContext.NUM_SPARK_EXECUTORS}]"
)
.appName(cls.__name__)
.getOrCreate()
)
cls._sc = cls._spark.sparkContext
cls.checkpointDir = tempfile.mkdtemp()
cls._sc.setCheckpointDir(cls.checkpointDir)
# Small tests run much faster with spark.sql.shuffle.partitions=4
cls._spark.conf.set("spark.sql.shuffle.partitions", "4")
@classmethod
def teardown_spark(cls):
cls._spark.stop()
cls._sc = None
shutil.rmtree(cls.checkpointDir)
@property
def spark(self):
return self._spark
@property
def sc(self):
return self._sc
class TestSparkContext(unittest.TestCase, BaseSparkContext):
@classmethod
def setUpClass(cls):
cls.setup_spark()
@classmethod
def tearDownClass(cls):
cls.teardown_spark()
def test_spark_context(self):
rdd1 = self.sc.parallelize(range(10), 10)
rdd2 = rdd1.map(lambda x: x + 1)
sum2 = rdd2.sum()
assert sum2 == 55
def fn_succeed_within_range(x):
"""
Test function used to check how `fmin` handles failures. When `fmin` is run with `max_evals=8`,
it has 7 successful trial runs and 1 failed run.
:param x:
:return: 1 when -3 < x < 3, and RuntimeError otherwise
"""
if -3 < x < 3:
return 1
else:
raise RuntimeError
class FMinTestCase(unittest.TestCase, BaseSparkContext):
@classmethod
def setUpClass(cls):
cls.setup_spark()
cls._sc.setLogLevel("OFF")
@classmethod
def tearDownClass(cls):
cls.teardown_spark()
def sparkSupportsJobCancelling(self):
return hasattr(self.sc.parallelize([1]), "collectWithJobGroup")
def check_run_status(
self, spark_trials, output, num_total, num_success, num_failure
):
self.assertEqual(
spark_trials.count_total_trials(),
num_total,
"Wrong number of total trial runs: Expected {e} but got {r}.".format(
e=num_total, r=spark_trials.count_total_trials()
),
)
self.assertEqual(
spark_trials.count_successful_trials(),
num_success,
"Wrong number of successful trial runs: Expected {e} but got {r}.".format(
e=num_success, r=spark_trials.count_successful_trials()
),
)
self.assertEqual(
spark_trials.count_failed_trials(),
num_failure,
"Wrong number of failed trial runs: Expected {e} but got {r}.".format(
e=num_failure, r=spark_trials.count_failed_trials()
),
)
log_output = output.getvalue().strip()
self.assertIn(
"Total Trials: " + str(num_total),
log_output,
"""Logging "Total Trials: {num}" missing from the log: {log}""".format(
num=str(num_total), log=log_output
),
)
self.assertIn(
str(num_success) + " succeeded",
log_output,
"""Logging "{num} succeeded " missing from the log: {log}""".format(
num=str(num_success), log=log_output
),
)
self.assertIn(
str(num_failure) + " failed",
log_output,
""" Logging "{num} failed " missing from the log: {log}""".format(
num=str(num_failure), log=log_output
),
)
def assert_task_succeeded(self, log_output, task):
self.assertIn(
f"trial {task} task thread exits normally",
log_output,
"""Debug info "trial {task} task thread exits normally" missing from log:
{log_output}""".format(
task=task, log_output=log_output
),
)
def assert_task_failed(self, log_output, task):
self.assertIn(
f"trial {task} task thread catches an exception",
log_output,
"""Debug info "trial {task} task thread catches an exception" missing from log:
{log_output}""".format(
task=task, log_output=log_output
),
)
def test_quadratic1_tpe(self):
# TODO: Speed this up or remove it since it is slow (1 minute on laptop)
spark_trials = SparkTrials(parallelism=4)
test_quadratic1_tpe(spark_trials)
def test_trial_run_info(self):
spark_trials = SparkTrials(parallelism=4)
with patch_logger("hyperopt-spark") as output:
fmin(
fn=fn_succeed_within_range,
space=hp.uniform("x", -5, 5),
algo=anneal.suggest,
max_evals=8,
return_argmin=False,
trials=spark_trials,
rstate=np.random.RandomState(99),
)
self.check_run_status(
spark_trials, output, num_total=8, num_success=7, num_failure=1
)
expected_result = {"loss": 1.0, "status": "ok"}
for trial in spark_trials._dynamic_trials:
if trial["state"] == base.JOB_STATE_DONE:
self.assertEqual(
trial["result"],
expected_result,
"Wrong result has been saved: Expected {e} but got {r}.".format(
e=expected_result, r=trial["result"]
),
)
elif trial["state"] == base.JOB_STATE_ERROR:
err_message = trial["misc"]["error"][1]
self.assertIn(
"RuntimeError",
err_message,
"Missing {e} in {r}.".format(e="RuntimeError", r=err_message),
)
self.assertIn(
"Traceback (most recent call last)",
err_message,
"Missing {e} in {r}.".format(e="Traceback", r=err_message),
)
num_success = spark_trials.count_by_state_unsynced(base.JOB_STATE_DONE)
self.assertEqual(
num_success,
7,
"Wrong number of successful trial runs: Expected {e} but got {r}.".format(
e=7, r=num_success
),
)
num_failure = spark_trials.count_by_state_unsynced(base.JOB_STATE_ERROR)
self.assertEqual(
num_failure,
1,
"Wrong number of failed trial runs: Expected {e} but got {r}.".format(
e=1, r=num_failure
),
)
def test_accepting_sparksession(self):
spark_trials = SparkTrials(
parallelism=2, spark_session=SparkSession.builder.getOrCreate()
)
fmin(
fn=lambda x: x + 1,
space=hp.uniform("x", 5, 8),
algo=anneal.suggest,
max_evals=2,
trials=spark_trials,
)
def test_parallelism_arg(self):
# Computing max_num_concurrent_tasks
max_num_concurrent_tasks = self.sc._jsc.sc().maxNumConcurrentTasks()
self.assertEqual(
max_num_concurrent_tasks,
BaseSparkContext.NUM_SPARK_EXECUTORS,
"max_num_concurrent_tasks ({c}) did not equal "
"BaseSparkContext.NUM_SPARK_EXECUTORS ({e})".format(
c=max_num_concurrent_tasks, e=BaseSparkContext.NUM_SPARK_EXECUTORS
),
)
for spark_default_parallelism, max_num_concurrent_tasks in [(2, 4), (2, 0)]:
default_parallelism = max(
spark_default_parallelism, max_num_concurrent_tasks
)
# Test requested_parallelism is None or negative values.
for requested_parallelism in [None, -1]:
with patch_logger("hyperopt-spark") as output:
parallelism = SparkTrials._decide_parallelism(
requested_parallelism=requested_parallelism,
spark_default_parallelism=spark_default_parallelism,
max_num_concurrent_tasks=max_num_concurrent_tasks,
)
self.assertEqual(
parallelism,
default_parallelism,
"Failed to set parallelism to be default parallelism ({p})"
" ({e})".format(p=parallelism, e=default_parallelism),
)
log_output = output.getvalue().strip()
self.assertIn(
"Because the requested parallelism was None or a non-positive value, "
"parallelism will be set to ({d})".format(
d=default_parallelism
),
log_output,
"""set to default parallelism missing from log: {log_output}""".format(
log_output=log_output
),
)
# Test requested_parallelism which will trigger spark executor dynamic allocation.
with patch_logger("hyperopt-spark") as output:
parallelism = SparkTrials._decide_parallelism(
requested_parallelism=max_num_concurrent_tasks + 1,
spark_default_parallelism=spark_default_parallelism,
max_num_concurrent_tasks=max_num_concurrent_tasks,
)
self.assertEqual(
parallelism,
max_num_concurrent_tasks + 1,
"Expect parallelism to be ({e}) but get ({p})".format(
p=parallelism, e=max_num_concurrent_tasks + 1
),
)
log_output = output.getvalue().strip()
self.assertIn(
"Parallelism ({p}) is greater".format(
p=max_num_concurrent_tasks + 1
),
log_output,
"""Parallelism ({p}) missing from log: {log_output}""".format(
p=max_num_concurrent_tasks + 1, log_output=log_output
),
)
# Test requested_parallelism exceeds hard cap
with patch_logger("hyperopt-spark") as output:
parallelism = SparkTrials._decide_parallelism(
requested_parallelism=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED + 1,
spark_default_parallelism=spark_default_parallelism,
max_num_concurrent_tasks=max_num_concurrent_tasks,
)
self.assertEqual(
parallelism,
SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED,
"Failed to limit parallelism ({p}) to MAX_CONCURRENT_JOBS_ALLOWED ({e})".format(
p=parallelism, e=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
),
)
log_output = output.getvalue().strip()
self.assertIn(
"SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED ({c})".format(
c=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
),
log_output,
"""MAX_CONCURRENT_JOBS_ALLOWED value missing from log: {log_output}""".format(
log_output=log_output
),
)
def test_all_successful_trials(self):
spark_trials = SparkTrials(parallelism=1)
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=fn_succeed_within_range,
space=hp.uniform("x", -1, 1),
algo=anneal.suggest,
max_evals=1,
trials=spark_trials,
)
log_output = output.getvalue().strip()
self.assertEqual(spark_trials.count_successful_trials(), 1)
self.assertIn(
"fmin thread exits normally",
log_output,
"""Debug info "fmin thread exits normally" missing from
log: {log_output}""".format(
log_output=log_output
),
)
self.assert_task_succeeded(log_output, 0)
def test_all_failed_trials(self):
spark_trials = SparkTrials(parallelism=1)
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=fn_succeed_within_range,
space=hp.uniform("x", 5, 10),
algo=anneal.suggest,
max_evals=1,
trials=spark_trials,
return_argmin=False,
)
log_output = output.getvalue().strip()
self.assertEqual(spark_trials.count_failed_trials(), 1)
self.assert_task_failed(log_output, 0)
spark_trials = SparkTrials(parallelism=4)
# Here return_argmin is True (by default) and an exception should be thrown
with self.assertRaisesRegexp(Exception, "There are no evaluation tasks"):
fmin(
fn=fn_succeed_within_range,
space=hp.uniform("x", 5, 8),
algo=anneal.suggest,
max_evals=2,
trials=spark_trials,
)
def test_timeout_without_job_cancellation(self):
timeout = 4
spark_trials = SparkTrials(parallelism=1, timeout=timeout)
spark_trials._spark_supports_job_cancelling = False
def fn(x):
time.sleep(0.5)
return x
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=fn,
space=hp.uniform("x", -1, 1),
algo=anneal.suggest,
max_evals=10,
trials=spark_trials,
max_queue_len=1,
show_progressbar=False,
return_argmin=False,
)
log_output = output.getvalue().strip()
self.assertTrue(spark_trials._fmin_cancelled)
self.assertEqual(spark_trials._fmin_cancelled_reason, "fmin run timeout")
self.assertGreater(spark_trials.count_successful_trials(), 0)
self.assertGreater(spark_trials.count_cancelled_trials(), 0)
self.assertIn(
"fmin is cancelled, so new trials will not be launched",
log_output,
""" "fmin is cancelled, so new trials will not be launched" missing from log:
{log_output}""".format(
log_output=log_output
),
)
self.assertIn(
"SparkTrials will block",
log_output,
""" "SparkTrials will block" missing from log: {log_output}""".format(
log_output=log_output
),
)
self.assert_task_succeeded(log_output, 0)
def test_timeout_without_job_cancellation_fmin_timeout(self):
timeout = 4
spark_trials = SparkTrials(parallelism=1)
spark_trials._spark_supports_job_cancelling = False
def fn(x):
time.sleep(0.5)
return x
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=fn,
space=hp.uniform("x", -1, 1),
algo=anneal.suggest,
max_evals=10,
timeout=timeout,
trials=spark_trials,
max_queue_len=1,
show_progressbar=False,
return_argmin=False,
rstate=np.random.RandomState(99),
)
log_output = output.getvalue().strip()
self.assertTrue(spark_trials._fmin_cancelled)
self.assertEqual(spark_trials._fmin_cancelled_reason, "fmin run timeout")
self.assertGreater(spark_trials.count_successful_trials(), 0)
self.assertGreater(spark_trials.count_cancelled_trials(), 0)
self.assertIn(
"fmin is cancelled, so new trials will not be launched",
log_output,
""" "fmin is cancelled, so new trials will not be launched" missing from log:
{log_output}""".format(
log_output=log_output
),
)
self.assertIn(
"SparkTrials will block",
log_output,
""" "SparkTrials will block" missing from log: {log_output}""".format(
log_output=log_output
),
)
self.assert_task_succeeded(log_output, 0)
def test_timeout_with_job_cancellation(self):
if not self.sparkSupportsJobCancelling():
print(
"Skipping timeout test since this Apache PySpark version does not "
"support cancelling jobs by job group ID."
)
return
timeout = 2
spark_trials = SparkTrials(parallelism=4, timeout=timeout)
def fn(x):
if x < 0:
time.sleep(timeout + 20)
raise Exception("Task should have been cancelled")
else:
time.sleep(1)
return x
# Test 1 cancelled trial. Examine logs.
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=fn,
space=hp.uniform("x", -2, 0),
algo=anneal.suggest,
max_evals=1,
trials=spark_trials,
max_queue_len=1,
show_progressbar=False,
return_argmin=False,
rstate=np.random.RandomState(4),
)
log_output = output.getvalue().strip()
self.assertTrue(spark_trials._fmin_cancelled)
self.assertEqual(spark_trials._fmin_cancelled_reason, "fmin run timeout")
self.assertEqual(spark_trials.count_cancelled_trials(), 1)
self.assertIn(
"Cancelling all running jobs",
log_output,
""" "Cancelling all running jobs" missing from log: {log_output}""".format(
log_output=log_output
),
)
self.assertIn(
"trial task 0 cancelled",
log_output,
""" "trial task 0 cancelled" missing from log: {log_output}""".format(
log_output=log_output
),
)
self.assertNotIn(
"Task should have been cancelled",
log_output,
""" "Task should have been cancelled" should not in log:
{log_output}""".format(
log_output=log_output
),
)
self.assert_task_failed(log_output, 0)
# Test mix of successful and cancelled trials.
spark_trials = SparkTrials(parallelism=4, timeout=4)
fmin(
fn=fn,
space=hp.uniform("x", -0.25, 5),
algo=anneal.suggest,
max_evals=6,
trials=spark_trials,
max_queue_len=1,
show_progressbar=False,
return_argmin=True,
rstate=np.random.RandomState(4),
)
time.sleep(2)
self.assertTrue(spark_trials._fmin_cancelled)
self.assertEqual(spark_trials._fmin_cancelled_reason, "fmin run timeout")
# There are 2 finished trials, 1 cancelled running trial and 1 cancelled
# new trial. We do not need to check the new trial since it is not started yet.
self.assertGreaterEqual(
spark_trials.count_successful_trials(),
1,
"Expected at least 1 successful trial but found none.",
)
self.assertGreaterEqual(
spark_trials.count_cancelled_trials(),
1,
"Expected at least 1 cancelled trial but found none.",
)
def test_invalid_timeout(self):
with self.assertRaisesRegexp(
Exception,
"timeout argument should be None or a positive value. Given value: -1",
):
SparkTrials(parallelism=4, timeout=-1)
with self.assertRaisesRegexp(
Exception,
"timeout argument should be None or a positive value. Given value: True",
):
SparkTrials(parallelism=4, timeout=True)
def test_exception_when_spark_not_available(self):
import hyperopt
orig_have_spark = hyperopt.spark._have_spark
hyperopt.spark._have_spark = False
try:
with self.assertRaisesRegexp(Exception, "cannot import pyspark"):
SparkTrials(parallelism=4)
finally:
hyperopt.spark._have_spark = orig_have_spark
def test_no_retry_for_long_tasks(self):
NUM_TRIALS = 2
output_dir = tempfile.mkdtemp()
def fn(_):
with open(os.path.join(output_dir, str(timeit.default_timer())), "w") as f:
f.write("1")
raise Exception("Failed!")
spark_trials = SparkTrials(parallelism=2)
try:
fmin(
fn=fn,
space=hp.uniform("x", 0, 1),
algo=anneal.suggest,
max_evals=NUM_TRIALS,
trials=spark_trials,
show_progressbar=False,
return_argmin=False,
)
except BaseException as e:
self.assertEqual(
"There are no evaluation tasks, cannot return argmin of task losses.",
str(e),
)
call_count = len(os.listdir(output_dir))
self.assertEqual(NUM_TRIALS, call_count)
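# Hedged usage sketch of the pattern the tests above exercise (illustrative,
# not part of the original test module): fmin with SparkTrials and a timeout
# cancels whatever is still running once the wall-clock budget is spent. It
# assumes the same names the tests import (fmin, hp, anneal, SparkTrials) and
# an available Spark session.
def _timeout_usage_sketch():
    trials = SparkTrials(parallelism=2)
    best = fmin(
        fn=lambda x: x ** 2,
        space=hp.uniform("x", -1, 1),
        algo=anneal.suggest,
        max_evals=4,
        timeout=30,  # wall-clock budget in seconds for the whole search
        trials=trials,
        show_progressbar=False,
    )
    # when the timeout fires, the cancellation reason is recorded on the trials object
    return best, trials._fmin_cancelled, trials._fmin_cancelled_reason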
| 35.998442
| 100
| 0.54831
|
d2890a22a8e305c48018fa4f1e44c3435a9116d1
| 2,437
|
py
|
Python
|
cleastsq.py
|
cbbing/stock
|
6bde95c447544b719094f045817690c54c6e730f
|
[
"Apache-2.0"
] | 31
|
2015-10-25T05:31:49.000Z
|
2022-01-11T19:07:16.000Z
|
cleastsq.py
|
cbbing/stock
|
6bde95c447544b719094f045817690c54c6e730f
|
[
"Apache-2.0"
] | null | null | null |
cleastsq.py
|
cbbing/stock
|
6bde95c447544b719094f045817690c54c6e730f
|
[
"Apache-2.0"
] | 21
|
2015-07-17T10:11:18.000Z
|
2020-06-08T07:23:03.000Z
|
# coding=utf-8
'''
Author: Jairus Chan
Program: polynomial curve-fitting (least squares) algorithm
'''
import matplotlib.pyplot as plt
import math
import numpy
import random
# polynomial order (degree) is 9
order=9
# carry out the curve fitting: build the normal-equation matrices
def getMatA(xa):
matA=[]
for i in range(0,order+1):
matA1=[]
for j in range(0,order+1):
tx=0.0
for k in range(0,len(xa)):
dx=1.0
for l in range(0,j+i):
dx=dx*xa[k]
tx+=dx
matA1.append(tx)
matA.append(matA1)
matA=numpy.array(matA)
return matA
def getMatB(xa,ya):
matB=[]
for i in range(0,order+1):
ty=0.0
for k in range(0,len(xa)):
dy=1.0
for l in range(0,i):
dy=dy*xa[k]
ty+=ya[k]*dy
matB.append(ty)
matB=numpy.array(matB)
return matB
def getMatAA(xa, ya):
matAA=numpy.linalg.solve(getMatA(xa),getMatB(xa, ya))
return matAA
# set the order (defaults to 9)
def setOrder(newOrder):
    global order  # without this the assignment would only bind a local variable
    order = newOrder
# evaluate the fitted polynomial at the new x values
def getFitYValues(xValues, yValues, xNewValues):
matAA_get = getMatAA(xValues, yValues)
yya=[]
for i in range(0,len(xNewValues)):
yy=0.0
for j in range(0,order+1):
dy=1.0
for k in range(0,j):
dy*=xNewValues[i]
dy*=matAA_get[j]
yy+=dy
yya.append(yy)
return yya
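# Hedged self-check of the solver above (illustrative, not part of the original
# module): a degree-9 least-squares fit should reproduce points sampled from a
# cubic almost exactly; the loose tolerance allows for the ill-conditioning of
# the normal equations. Call _fit_self_check() manually to run it.
def _fit_self_check():
    xs = list(numpy.arange(-1, 1, 0.05))
    ys = [0.5 * t ** 3 - t + 0.2 for t in xs]
    fitted = getFitYValues(xs, ys, xs)
    assert numpy.allclose(fitted, ys, atol=1e-2)
    return fitted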
# plot the fitted curve
#print(matAA)
if __name__ == "__main__":
fig = plt.figure()
ax = fig.add_subplot(111)
    # generate sample points on the curve
x = numpy.arange(-1,1,0.02)
y = [((a*a-1)*(a*a-1)*(a*a-1)+0.5)*numpy.sin(a*2) for a in x]
#ax.plot(x,y,color='r',linestyle='-',marker='')
#,label="(a*a-1)*(a*a-1)*(a*a-1)+0.5"
    # jitter the generated points and collect them into xa, ya
i=0
xa=[]
ya=[]
for xx in x:
yy=y[i]
d=float(random.randint(60,140))/100
#ax.plot([xx*d],[yy*d],color='m',linestyle='',marker='.')
i+=1
xa.append(xx*d)
ya.append(yy*d)
'''for i in range(0,5):
xx=float(random.randint(-100,100))/100
yy=float(random.randint(-60,60))/100
xa.append(xx)
ya.append(yy)'''
ax.plot(xa,ya,color='m',linestyle='',marker='.')
matAA_n = getMatAA(xa,ya)
    print(len(matAA_n))
xxa= numpy.arange(-1,1.2,0.01)
yya= getFitYValues(xa, ya, xxa)
ax.plot(xxa,yya,color='g',linestyle='-',marker='')
ax.legend()
plt.show()
| 21.191304
| 65
| 0.509643
|
0fb8c40448957368380a81d9c6c096b42f3608c5
| 3,444
|
py
|
Python
|
trainer.py
|
ouldevloper/sign-language-recognition
|
f056238b005c3b5dec99605e799aef28f6e50861
|
[
"MIT"
] | null | null | null |
trainer.py
|
ouldevloper/sign-language-recognition
|
f056238b005c3b5dec99605e799aef28f6e50861
|
[
"MIT"
] | null | null | null |
trainer.py
|
ouldevloper/sign-language-recognition
|
f056238b005c3b5dec99605e799aef28f6e50861
|
[
"MIT"
] | null | null | null |
# @Author: ะะฑะดะตะปะปะฐั ะฃะปะฐัะธัะฝะต
# @Date: 2021-04-17 23:54:50
# @Last Modified by: ะะฑะดะตะปะปะฐั ะฃะปะฐัะธัะฝะต
# @Last Modified time: 2021-04-18 06:47:44
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow import keras
import keras
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
import sys
import os
from config import *
# check whether the data have already been collected
if not os.path.isdir("data") or \
not os.path.isdir("data/training") or \
not os.path.isdir("data/testing") or \
not os.path.isdir("data/validation") :
print("Exiting : ExCollect Data first")
exit(1)
# 1 - Building the CNN
#------------------------------
# Initializing the CNN
classifier = Sequential()
# First convolution layer and pooling
classifier.add(Convolution2D(32, (3, 3), input_shape=(image_size[0], image_size[1], 1), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Second convolution layer and pooling
classifier.add(Convolution2D(32, (3, 3), activation='relu'))
# input_shape is going to be the pooled feature maps from the previous convolution layer
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Flattening the layers
classifier.add(Flatten())
# Adding a fully connected layer
classifier.add(Dense(units=128, activation='relu'))
# softmax for more than 2 classes
classifier.add(Dense(units=len(os.listdir('./data/training/')), activation='softmax'))
# Compiling the CNN
# categorical_crossentropy for more than 2 classes
classifier.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# 2 - Preparing the train/test data and training the model
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory('data/training',
target_size=image_size,
batch_size=batch_size,
color_mode='grayscale',
class_mode='categorical')
test_set = test_datagen.flow_from_directory('data/testing',
target_size=image_size,
batch_size=batch_size,
color_mode='grayscale',
class_mode='categorical')
# .fit_generator is deprecated in newer Keras releases; .fit accepts generators directly
classifier.fit(
training_set,
        # number of batches drawn from the training generator per epoch
steps_per_epoch=3,
epochs=epochs,
validation_data=test_set,
        # number of batches drawn from the validation generator per epoch
validation_steps=30)
# create the model folder if it does not already exist
if not os.path.isdir("modeles"):
os.mkdir("modeles")
# Saving the model as json
model_json = classifier.to_json()
with open("modeles/model.json", "w") as json_file:
json_file.write(model_json)
#saving model weights
classifier.save_weights('modeles/model.h5')
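# Hedged sketch: reload the artifacts saved above to confirm they round-trip
# (illustrative, not part of the original script; it assumes the same Keras
# version that wrote "modeles/model.json" and "modeles/model.h5").
from keras.models import model_from_json
with open("modeles/model.json") as json_file:
    reloaded = model_from_json(json_file.read())
reloaded.load_weights("modeles/model.h5")
reloaded.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])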
| 40.046512
| 107
| 0.63734
|
d5147774a7e8900bafcafc7464a6c7e71f642e9f
| 10,537
|
py
|
Python
|
lib/datasets/imdb.py
|
yjy941124/PPR-FCN
|
1eba5515b37e7b32413efdf14bb0c22a2e46fee9
|
[
"MIT"
] | 20
|
2017-10-16T18:12:51.000Z
|
2021-12-23T02:34:20.000Z
|
lib/datasets/imdb.py
|
yjy941124/PPR-FCN
|
1eba5515b37e7b32413efdf14bb0c22a2e46fee9
|
[
"MIT"
] | 1
|
2018-11-10T04:59:48.000Z
|
2021-01-21T04:51:31.000Z
|
lib/datasets/imdb.py
|
yjy941124/PPR-FCN
|
1eba5515b37e7b32413efdf14bb0c22a2e46fee9
|
[
"MIT"
] | 5
|
2017-10-17T00:54:42.000Z
|
2018-04-08T15:09:40.000Z
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import os
import os.path as osp
import PIL
from utils.cython_bbox import bbox_overlaps
import numpy as np
import scipy.sparse
from fast_rcnn.config import cfg
import glog
class imdb(object):
"""Image database."""
def __init__(self, name):
self._name = name
self._num_classes = 0
self._classes = []
self._image_index = []
self._obj_proposer = 'selective_search'
self._roidb = None
self._roidb_handler = self.default_roidb
# Use this dict for storing dataset specific config options
self.config = {}
@property
def name(self):
return self._name
@property
def num_classes(self):
return len(self._classes)
@property
def classes(self):
return self._classes
@property
def image_index(self):
return self._image_index
@property
def roidb_handler(self):
return self._roidb_handler
@roidb_handler.setter
def roidb_handler(self, val):
self._roidb_handler = val
def set_proposal_method(self, method):
method = eval('self.' + method + '_roidb')
self.roidb_handler = method
@property
def roidb(self):
# A roidb is a list of dictionaries, each with the following keys:
# boxes
# gt_overlaps
# gt_classes
# flipped
if self._roidb is not None:
return self._roidb
self._roidb = self.roidb_handler()
self._roidb[0]['db_name'] = self.name
self._roidb[0]['cache_path'] = self.cache_path
return self._roidb
@property
def cache_path(self):
cache_path = osp.abspath(osp.join(cfg.DATA_DIR, 'cache'))
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
@property
def num_images(self):
return len(self.image_index)
def image_path_at(self, i):
raise NotImplementedError
def default_roidb(self):
raise NotImplementedError
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
raise NotImplementedError
def _get_widths(self):
return [PIL.Image.open(self.image_path_at(i)).size[0]
for i in xrange(self.num_images)]
def append_flipped_images(self):
num_images = self.num_images
widths = self._get_widths()
for i in xrange(num_images):
boxes = self.roidb[i]['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = widths[i] - oldx2 #- 1
boxes[:, 2] = widths[i] - oldx1 #- 1
for j in xrange(boxes.shape[0]):
if boxes[j, 2] < boxes[j, 0]:
glog.info('%f %f %d %s %s' % (boxes[j, 2], boxes[j, 0], i, widths[i], self.image_path_at(i)))
# print boxes[:, 2], boxes[:, 0]
if not (boxes[:,2]>=boxes[:,0]).all():
pass
assert (boxes[:, 2] >= boxes[:, 0]).all()
entry = {'boxes' : boxes,
'gt_overlaps' : self.roidb[i]['gt_overlaps'],
'gt_classes' : self.roidb[i]['gt_classes'],
'flipped' : True}
self.roidb.append(entry)
self._image_index = self._image_index * 2
def evaluate_recall(self, candidate_boxes=None, thresholds=None,
area='all', limit=None):
"""Evaluate detection proposal recall metrics.
Returns:
results: dictionary of results with keys
'ar': average recall
'recalls': vector recalls at each IoU overlap threshold
'thresholds': vector of IoU overlap thresholds
'gt_overlaps': vector of all ground-truth overlaps
"""
# Record max overlap value for each gt box
# Return vector of overlap values
areas = { 'all': 0, 'small': 1, 'medium': 2, 'large': 3,
'96-128': 4, '128-256': 5, '256-512': 6, '512-inf': 7}
area_ranges = [ [0**2, 1e5**2], # all
[0**2, 32**2], # small
[32**2, 96**2], # medium
[96**2, 1e5**2], # large
[96**2, 128**2], # 96-128
[128**2, 256**2], # 128-256
[256**2, 512**2], # 256-512
[512**2, 1e5**2], # 512-inf
]
assert areas.has_key(area), 'unknown area range: {}'.format(area)
area_range = area_ranges[areas[area]]
gt_overlaps = np.zeros(0)
num_pos = 0
for i in xrange(self.num_images):
# Checking for max_overlaps == 1 avoids including crowd annotations
            # (...pretty hacky :/)
max_gt_overlaps = self.roidb[i]['gt_overlaps'].toarray().max(axis=1)
gt_inds = np.where((self.roidb[i]['gt_classes'] > 0) &
(max_gt_overlaps == 1))[0]
gt_boxes = self.roidb[i]['boxes'][gt_inds, :]
gt_areas = self.roidb[i]['seg_areas'][gt_inds]
valid_gt_inds = np.where((gt_areas >= area_range[0]) &
(gt_areas <= area_range[1]))[0]
gt_boxes = gt_boxes[valid_gt_inds, :]
num_pos += len(valid_gt_inds)
if candidate_boxes is None:
# If candidate_boxes is not supplied, the default is to use the
# non-ground-truth boxes from this roidb
non_gt_inds = np.where(self.roidb[i]['gt_classes'] == 0)[0]
boxes = self.roidb[i]['boxes'][non_gt_inds, :]
else:
boxes = candidate_boxes[i]
if boxes.shape[0] == 0:
continue
if limit is not None and boxes.shape[0] > limit:
boxes = boxes[:limit, :]
overlaps = bbox_overlaps(boxes.astype(np.float),
gt_boxes.astype(np.float))
_gt_overlaps = np.zeros((gt_boxes.shape[0]))
for j in xrange(gt_boxes.shape[0]):
# find which proposal box maximally covers each gt box
argmax_overlaps = overlaps.argmax(axis=0)
# and get the iou amount of coverage for each gt box
max_overlaps = overlaps.max(axis=0)
# find which gt box is 'best' covered (i.e. 'best' = most iou)
gt_ind = max_overlaps.argmax()
gt_ovr = max_overlaps.max()
assert(gt_ovr >= 0)
# find the proposal box that covers the best covered gt box
box_ind = argmax_overlaps[gt_ind]
# record the iou coverage of this gt box
_gt_overlaps[j] = overlaps[box_ind, gt_ind]
assert(_gt_overlaps[j] == gt_ovr)
# mark the proposal box and the gt box as used
overlaps[box_ind, :] = -1
overlaps[:, gt_ind] = -1
# append recorded iou coverage level
gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))
gt_overlaps = np.sort(gt_overlaps)
if thresholds is None:
step = 0.05
thresholds = np.arange(0.5, 0.95 + 1e-5, step)
recalls = np.zeros_like(thresholds)
# compute recall for each iou threshold
for i, t in enumerate(thresholds):
recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
# ar = 2 * np.trapz(recalls, thresholds)
ar = recalls.mean()
return {'ar': ar, 'recalls': recalls, 'thresholds': thresholds,
'gt_overlaps': gt_overlaps}
@staticmethod
def vstack(blocks):
"""As spipy.sparse.vstack, but allowing for the lack of gt overlaps"""
blocks = [[block] for block in blocks if block.shape[0]]
return scipy.sparse.bmat(blocks)
def create_roidb_from_box_list(self, box_list, gt_roidb):
assert len(box_list) == self.num_images, \
'Number of boxes must match number of ground-truth images'
roidb = []
for i in xrange(self.num_images):
boxes = box_list[i]
num_boxes = boxes.shape[0]
overlaps = np.zeros((num_boxes, self.num_classes), dtype=np.float32)
if gt_roidb is not None:
gt_boxes = gt_roidb[i]['boxes']
#Need at least one box for argmax
if gt_boxes.shape[0] > 0:
gt_classes = gt_roidb[i]['gt_classes']
gt_overlaps = bbox_overlaps(boxes.astype(np.float),
gt_boxes.astype(np.float))
argmaxes = gt_overlaps.argmax(axis=1)
maxes = gt_overlaps.max(axis=1)
I = np.where(maxes > 0)[0]
overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
overlaps = scipy.sparse.csr_matrix(overlaps)
roidb.append({
'boxes' : boxes,
'gt_classes' : np.zeros((num_boxes,), dtype=np.int32),
'gt_overlaps' : overlaps,
'flipped' : False,
'seg_areas' : np.zeros((num_boxes,), dtype=np.float32),
})
return roidb
@staticmethod
def merge_roidbs(a, b):
assert len(a) == len(b)
for i in xrange(len(a)):
a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))
a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],
b[i]['gt_classes']))
a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],
b[i]['gt_overlaps']])
a[i]['seg_areas'] = np.hstack((a[i]['seg_areas'],
b[i]['seg_areas']))
return a
def competition_mode(self, on):
"""Turn competition mode on or off."""
pass
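# Hedged worked example of the horizontal-flip arithmetic used by
# imdb.append_flipped_images above (illustrative, not part of the original
# module): for an image of width 10, a box spanning x1=2..x2=5 maps to
# x1 = 10 - 5 = 5 and x2 = 10 - 2 = 8, preserving its extent; the commented-out
# "- 1" variant applies when pixel coordinates are treated as inclusive.
def _flip_box_example():
    width, x1, x2 = 10, 2, 5
    flipped = (width - x2, width - x1)
    assert flipped == (5, 8)
    return flipped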
| 39.025926
| 113
| 0.528898
|
1e8710f83f1a9c0a30788461fc485c3054796397
| 23,303
|
py
|
Python
|
packaging/test_irods_resource_plugin_s3_for_cloudian.py
|
murlock/irods_resource_plugin_s3
|
5b7fa3f3cf373900828454147226a225791e97a5
|
[
"BSD-3-Clause"
] | null | null | null |
packaging/test_irods_resource_plugin_s3_for_cloudian.py
|
murlock/irods_resource_plugin_s3
|
5b7fa3f3cf373900828454147226a225791e97a5
|
[
"BSD-3-Clause"
] | null | null | null |
packaging/test_irods_resource_plugin_s3_for_cloudian.py
|
murlock/irods_resource_plugin_s3
|
5b7fa3f3cf373900828454147226a225791e97a5
|
[
"BSD-3-Clause"
] | null | null | null |
try:
from minio import Minio
except ImportError:
print('This test requires minio: perhaps try pip install minio')
exit()
import commands
import datetime
import os
import platform
import random
import re
import shutil
import string
import subprocess
import sys
if sys.version_info >= (2,7):
import unittest
else:
import unittest2 as unittest
from .. import lib
from . import session
from ..configuration import IrodsConfig
from .resource_suite import ResourceSuite
from .test_chunkydevtest import ChunkyDevTest
class Test_Compound_With_S3_Resource(ResourceSuite, ChunkyDevTest, unittest.TestCase):
def __init__(self, *args, **kwargs):
self.keypairfile='/etc/irods/cloudian_credentials.keypair'
self.archive_naming_policy='decoupled'
self.s3stsdate=''
self.s3region='demoreg1'
self.s3endPoint='s3.cloudianhyperstore.com'
self.s3signature_version=2
self.s3sse = 0 # server side encryption
super(Test_Compound_With_S3_Resource, self).__init__(*args, **kwargs)
def setUp(self):
# skip ssl tests on ub12
distro_str = ''.join(platform.linux_distribution()[:2]).replace(' ','')
if self._testMethodName.startswith('test_ssl') and distro_str.lower().startswith('ubuntu12'):
self.skipTest("skipping ssl tests on ubuntu 12")
# set up aws configuration
self.read_aws_keys()
# set up s3 bucket
#s3 = boto3.resource('s3', region_name=self.s3region)
s3_client = Minio('http://' + self.s3endPoint, access_key=self.aws_access_key_id, secret_key=self.aws_secret_access_key)
self.s3bucketname = 'irods-ci-' + distro_str + datetime.datetime.utcnow().strftime('-%Y-%m-%d.%H-%M-%S-%f-')
self.s3bucketname += ''.join(random.choice(string.letters) for i in xrange(10))
self.s3bucketname = self.s3bucketname[:63].lower() # bucket names can be no more than 63 characters long
s3_client.make_bucket(self.s3bucketname, location=self.s3region)
# set up resources
hostname = lib.get_hostname()
s3params = 'S3_RETRY_COUNT=15;S3_WAIT_TIME_SEC=1;S3_PROTO=HTTPS;S3_MPU_CHUNK=10;S3_MPU_THREADS=4;S3_ENABLE_MD5=1'
s3params += ';S3_STSDATE=' + self.s3stsdate
s3params += ';S3_DEFAULT_HOSTNAME=' + self.s3endPoint
s3params += ';S3_AUTH_FILE=' + self.keypairfile
s3params += ';S3_REGIONNAME=' + self.s3region
s3params += ';S3_SIGNATURE_VERSION=' + str(self.s3signature_version)
s3params += ';ARCHIVE_NAMING_POLICY=' + self.archive_naming_policy
try:
s3params += ';S3_SERVER_ENCRYPT=' + str(self.s3sse)
except AttributeError:
pass
s3params=os.environ.get('S3PARAMS', s3params);
with session.make_session_for_existing_admin() as admin_session:
irods_config = IrodsConfig()
admin_session.assert_icommand("iadmin modresc demoResc name origResc", 'STDOUT_SINGLELINE', 'rename', input='yes\n')
admin_session.assert_icommand("iadmin mkresc demoResc compound", 'STDOUT_SINGLELINE', 'compound')
admin_session.assert_icommand("iadmin mkresc cacheResc 'unixfilesystem' " + hostname + ":" + irods_config.irods_directory + "/cacheRescVault", 'STDOUT_SINGLELINE', 'cacheResc')
admin_session.assert_icommand('iadmin mkresc archiveResc s3 '+hostname+':/'+self.s3bucketname+'/irods/Vault "'+s3params+'"', 'STDOUT_SINGLELINE', 'archiveResc')
admin_session.assert_icommand("iadmin addchildtoresc demoResc cacheResc cache")
admin_session.assert_icommand("iadmin addchildtoresc demoResc archiveResc archive")
super(Test_Compound_With_S3_Resource, self).setUp()
def tearDown(self):
super(Test_Compound_With_S3_Resource, self).tearDown()
        print(self.s3bucketname)
# delete s3 bucket
s3_client = Minio('http://' + self.s3endPoint, access_key=self.aws_access_key_id, secret_key=self.aws_secret_access_key)
objects = s3_client.list_objects_v2(self.s3bucketname, recursive=True)
s3_client.remove_objects(self.s3bucketname, objects)
s3_client.remove_bucket(self.s3bucketname)
# tear down resources
with session.make_session_for_existing_admin() as admin_session:
admin_session.assert_icommand("iadmin rmchildfromresc demoResc archiveResc")
admin_session.assert_icommand("iadmin rmchildfromresc demoResc cacheResc")
admin_session.assert_icommand("iadmin rmresc archiveResc")
admin_session.assert_icommand("iadmin rmresc cacheResc")
admin_session.assert_icommand("iadmin rmresc demoResc")
admin_session.assert_icommand("iadmin modresc origResc name demoResc", 'STDOUT_SINGLELINE', 'rename', input='yes\n')
shutil.rmtree(IrodsConfig().irods_directory + "/cacheRescVault", ignore_errors=True)
def read_aws_keys(self):
# read access keys from keypair file
with open(self.keypairfile) as f:
self.aws_access_key_id = f.readline().rstrip()
self.aws_secret_access_key = f.readline().rstrip()
def test_irm_specific_replica(self):
self.admin.assert_icommand("ils -L "+self.testfile,'STDOUT_SINGLELINE',self.testfile) # should be listed
self.admin.assert_icommand("irepl -R "+self.testresc+" "+self.testfile) # creates replica
self.admin.assert_icommand("ils -L "+self.testfile,'STDOUT_SINGLELINE',self.testfile) # should be listed twice
self.admin.assert_icommand("irm -n 0 "+self.testfile, 'STDOUT_SINGLELINE','deprecated') # remove original from cacheResc only
self.admin.assert_icommand("ils -L "+self.testfile,'STDOUT_SINGLELINE',["2 "+self.testresc,self.testfile]) # replica 2 should still be there
self.admin.assert_icommand_fail("ils -L "+self.testfile,'STDOUT_SINGLELINE',["0 "+self.admin.default_resource,self.testfile]) # replica 0 should be gone
trashpath = self.admin.session_collection_trash
self.admin.assert_icommand_fail("ils -L "+trashpath+"/"+self.testfile,'STDOUT_SINGLELINE',["0 "+self.admin.default_resource,self.testfile]) # replica should not be in trash
@unittest.skip("--wlock has possible race condition due to Compound/Replication PDMO")
def test_local_iput_collision_with_wlock(self):
pass
@unittest.skip("NOTSURE / FIXME ... -K not supported, perhaps")
def test_local_iput_checksum(self):
pass
@unittest.skip("EMPTY_RESC_PATH - no vault path for coordinating resources")
def test_ireg_as_rodsuser_in_vault(self):
pass
@unittest.skip("No Vault for S3 archive resource")
def test_iput_overwrite_others_file__ticket_2086(self):
pass
def test_local_iput_with_force_and_destination_resource__ticket_1706(self):
# local setup
filename = "iputwithforceanddestination.txt"
filepath = lib.create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
self.admin.assert_icommand("ils -L "+filename,'STDERR_SINGLELINE',"does not exist") # should not be listed
self.admin.assert_icommand("iput "+filename) # put file
self.admin.assert_icommand("irepl -R "+self.testresc+" "+filename) # replicate to test resource
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) #
self.admin.assert_icommand("iput -f -R %s %s %s" % (self.testresc, doublefile, filename) ) # overwrite test repl with different data
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 "," "+filename]) # default resource cache should have dirty copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 "," "+filename]) # default resource archive should have dirty copy
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 "," "+doublesize+" "," "+filename]) # default resource cache should not have doublesize file
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 "," "+doublesize+" "," "+filename]) # default resource archive should not have doublesize file
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 2 "," "+doublesize+" ","& "+filename]) # targeted resource should have new double clean copy
# local cleanup
os.remove(filepath)
os.remove(doublefile)
###################
# irepl
###################
def test_irepl_update_replicas(self):
# local setup
filename = "updatereplicasfile.txt"
filepath = lib.create_local_testfile(filename)
hostname = lib.get_hostname()
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
self.admin.assert_icommand("iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, 'STDOUT_SINGLELINE', "Creating") # create third resource
self.admin.assert_icommand("iadmin mkresc fourthresc unixfilesystem %s:/tmp/fourthrescVault" % hostname, 'STDOUT_SINGLELINE', "Creating") # create fourth resource
self.admin.assert_icommand("ils -L "+filename,'STDERR_SINGLELINE',"does not exist") # should not be listed
self.admin.assert_icommand("iput "+filename) # put file
self.admin.assert_icommand("irepl -R "+self.testresc+" "+filename) # replicate to test resource
self.admin.assert_icommand("irepl -R thirdresc "+filename) # replicate to third resource
self.admin.assert_icommand("irepl -R fourthresc "+filename) # replicate to fourth resource
self.admin.assert_icommand("iput -f -R "+self.testresc+" "+doublefile+" "+filename) # repave overtop test resource
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) # for debugging
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 "," & "+filename]) # should have a dirty copy
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 "," & "+filename]) # should have a dirty copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 2 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 3 "," & "+filename]) # should have a dirty copy
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 4 "," & "+filename]) # should have a dirty copy
self.admin.assert_icommand("irepl -U "+filename) # update last replica
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 "," & "+filename]) # should have a dirty copy
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 "," & "+filename]) # should have a dirty copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 2 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 3 "," & "+filename]) # should have a dirty copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 4 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand("irepl -aU "+filename) # update all replicas
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 2 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 3 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 4 "," & "+filename]) # should have a clean copy
self.admin.assert_icommand("irm -f "+filename) # cleanup file
self.admin.assert_icommand("iadmin rmresc thirdresc") # remove third resource
self.admin.assert_icommand("iadmin rmresc fourthresc") # remove third resource
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_irepl_over_existing_second_replica__ticket_1705(self):
# local setup
filename = "secondreplicatest.txt"
filepath = lib.create_local_testfile(filename)
# assertions
self.admin.assert_icommand("ils -L "+filename,'STDERR_SINGLELINE',"does not exist") # should not be listed
self.admin.assert_icommand("iput -R "+self.testresc+" "+filename) # put file
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) # for debugging
self.admin.assert_icommand("irepl "+filename) # replicate to default resource
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) # for debugging
self.admin.assert_icommand("irepl "+filename) # replicate overtop default resource
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 3 "," & "+filename]) # should not have a replica 3
self.admin.assert_icommand("irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 3 "," & "+filename]) # should not have a replica 3
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_third_replica__ticket_1705(self):
# local setup
filename = "thirdreplicatest.txt"
filepath = lib.create_local_testfile(filename)
hostname = lib.get_hostname()
# assertions
self.admin.assert_icommand("iadmin mkresc thirdresc unixfilesystem %s:/tmp/thirdrescVault" % hostname, 'STDOUT_SINGLELINE', "Creating") # create third resource
self.admin.assert_icommand("ils -L "+filename,'STDERR_SINGLELINE',"does not exist") # should not be listed
self.admin.assert_icommand("iput "+filename) # put file
self.admin.assert_icommand("irepl -R "+self.testresc+" "+filename) # replicate to test resource
self.admin.assert_icommand("irepl -R thirdresc "+filename) # replicate to third resource
self.admin.assert_icommand("irepl "+filename) # replicate overtop default resource
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) # for debugging
self.admin.assert_icommand("irepl -R "+self.testresc+" "+filename) # replicate overtop test resource
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) # for debugging
self.admin.assert_icommand("irepl -R thirdresc "+filename) # replicate overtop third resource
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) # for debugging
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 4 "," & "+filename]) # should not have a replica 4
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 5 "," & "+filename]) # should not have a replica 5
self.admin.assert_icommand("irm -f "+filename) # cleanup file
self.admin.assert_icommand("iadmin rmresc thirdresc") # remove third resource
# local cleanup
os.remove(filepath)
def test_irepl_over_existing_bad_replica__ticket_1705(self):
# local setup
filename = "reploverwritebad.txt"
filepath = lib.create_local_testfile(filename)
doublefile = "doublefile.txt"
os.system("cat %s %s > %s" % (filename, filename, doublefile))
doublesize = str(os.stat(doublefile).st_size)
# assertions
self.admin.assert_icommand("ils -L "+filename,'STDERR_SINGLELINE',"does not exist") # should not be listed
self.admin.assert_icommand("iput "+filename) # put file
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) # for debugging
self.admin.assert_icommand("irepl -R "+self.testresc+" "+filename) # replicate to test resource
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',filename) # for debugging
self.admin.assert_icommand("iput -f %s %s" % (doublefile, filename) ) # overwrite default repl with different data
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 "," & "+filename]) # default resource cache should have clean copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 "," "+doublesize+" "," & "+filename]) # default resource cache should have new double clean copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 "," & "+filename]) # default resource archive should have clean copy
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 "," "+doublesize+" "," & "+filename]) # default resource archive should have new double clean copy
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 2 "+self.testresc," "+doublesize+" "," "+filename]) # test resource should not have doublesize file
self.admin.assert_icommand("irepl -R "+self.testresc+" "+filename) # replicate back onto test resource
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 2 "+self.testresc," "+doublesize+" "," & "+filename]) # test resource should have new clean doublesize file
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 3 "," & "+filename]) # should not have a replica 3
# local cleanup
os.remove(filepath)
os.remove(doublefile)
def test_iput_with_purgec(self):
# local setup
filename = "purgecfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',filename) # should not be listed
self.admin.assert_icommand("iput --purgec "+filename) # put file
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 ",filename]) # should not be listed (trimmed)
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 ",filename]) # should be listed once - replica 1
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 2 ",filename]) # should be listed only once
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_iget_with_purgec(self):
# local setup
filename = "purgecgetfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',filename) # should not be listed
self.admin.assert_icommand("iput "+filename) # put file
self.admin.assert_icommand("iget -f --purgec "+filename) # get file and purge 'cached' replica
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 ",filename]) # should not be listed (trimmed)
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 ",filename]) # should be listed once
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 2 ",filename]) # should not be listed
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_irepl_with_purgec(self):
# local setup
filename = "purgecreplfile.txt"
filepath = os.path.abspath(filename)
f = open(filepath,'wb')
f.write("TESTFILE -- ["+filepath+"]")
f.close()
# assertions
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',filename) # should not be listed
self.admin.assert_icommand("iput "+filename) # put file
self.admin.assert_icommand("irepl -R "+self.testresc+" --purgec "+filename) # replicate to test resource
self.admin.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',[" 0 ",filename]) # should not be listed (trimmed)
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 1 ",filename]) # should be listed twice - 2 of 3
self.admin.assert_icommand("ils -L "+filename,'STDOUT_SINGLELINE',[" 2 ",filename]) # should be listed twice - 1 of 3
# local cleanup
output = commands.getstatusoutput( 'rm '+filepath )
def test_decoupled_naming_policy(self):
if self.archive_naming_policy != 'decoupled':
self.skipTest("Archive naming policy is not set to 'decoupled'")
# local setup
filename = self.testfile
# run as regular user
session = self.user0
collection = session.session_collection
# iquest to get the object id of the replica on the S3 archive
id_query = ( "select DATA_ID where COLL_NAME =" + "'" + collection + "'" +
" and DATA_NAME =" + "'" + filename + "'" +
" and DATA_REPL_NUM ='1'" )
        # iquest to get the physical path of the replica on the S3 archive
path_query = ( "select DATA_PATH where COLL_NAME =" + "'" + collection + "'" +
" and DATA_NAME =" + "'" + filename + "'" +
" and DATA_REPL_NUM ='1'" )
# assertions
session.assert_icommand_fail("ils -L "+filename,'STDOUT_SINGLELINE',filename) # should not be listed
session.assert_icommand("iput "+filename) # put file
# get object id
object_id = session.run_icommand('iquest "%s" ' + '"' + id_query + '"')[0].strip()
# physical path we expect to see: /{bucket_name}/{reversed_id}/{obj_name}
target_path = '/' + self.s3bucketname + '/' + object_id[::-1] + '/' + filename
# get object path
physical_path = session.run_icommand('iquest "%s" ' + '"' + path_query + '"')[0].strip()
# verify object path
self.assertEqual(target_path, physical_path)
# cleanup
session.run_icommand('irm -f ' + filename)
| 61.648148
| 188
| 0.653564
|
f9ba05f91fc985d298e7f760945f111e5b1349e9
| 1,901
|
py
|
Python
|
gitseries/cmds/create.py
|
4383/git-series
|
8fff0da84ac3f3302459808aa154c1d2d2264c44
|
[
"Apache-2.0"
] | null | null | null |
gitseries/cmds/create.py
|
4383/git-series
|
8fff0da84ac3f3302459808aa154c1d2d2264c44
|
[
"Apache-2.0"
] | null | null | null |
gitseries/cmds/create.py
|
4383/git-series
|
8fff0da84ac3f3302459808aa154c1d2d2264c44
|
[
"Apache-2.0"
] | null | null | null |
import uuid
import os
import logging
import shlex
import stat
import sys
import tempfile
from gitseries import opts
from gitseries import executor
from gitseries import git
from gitseries.cmds import base as base_cmd
class Create(base_cmd.BaseCommand):
"Create a new serie."
log = logging.getLogger(__name__)
def take_action(self, parsed_args):
self.cfg = opts.cfg
self.cfg.CONF(sys.argv[2:])
exec_dir = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
self.app.stdout.write(f"Execution path: {exec_dir}\n")
        if not os.path.isfile(self.cfg.CONF.serie.commands):
            self.app.stdout.write(
                f"Commands file not found: {self.cfg.CONF.serie.commands}\n")
            return
current_dir = os.getcwd()
os.chmod(self.cfg.CONF.serie.commands, stat.S_IRWXU)
os.mkdir(exec_dir)
commit_msg = os.path.join(exec_dir, 'commit_msg')
        with open(commit_msg, 'w+') as fp:
            # write the configured commit message line by line
            for line in str(self.cfg.CONF.serie.commit_msg).split("\n"):
                fp.write(line + "\n")
os.chdir(exec_dir)
for project in self.cfg.CONF.serie.projects:
repo = git.clone(project)
self.app.stdout.write(f"Running on {repo}\n")
os.chdir(repo)
executor.execute(
[os.path.join(current_dir, self.cfg.CONF.serie.commands)])
if self.cfg.CONF.serie.commit:
git.add('.')
git.commit(commit_msg)
#if self.cfg.CONF.serie.review:
# git.review()
os.chdir(exec_dir)
class Error(base_cmd.BaseCommand):
"Always raises an error"
log = logging.getLogger(__name__)
def take_action(self, parsed_args):
self.log.info('causing error')
raise RuntimeError('this is the expected exception')
| 31.683333
| 86
| 0.617044
|
2892b12bede371f6d5a7ae8272072544c312cd65
| 2,569
|
py
|
Python
|
cdrc.py
|
PeterWurmsdobler/energy-balance-uk
|
3eccbf10d395d04bbd6cb855a8bb878ede130141
|
[
"MIT"
] | null | null | null |
cdrc.py
|
PeterWurmsdobler/energy-balance-uk
|
3eccbf10d395d04bbd6cb855a8bb878ede130141
|
[
"MIT"
] | null | null | null |
cdrc.py
|
PeterWurmsdobler/energy-balance-uk
|
3eccbf10d395d04bbd6cb855a8bb878ede130141
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import numpy as np
from csv import reader
from constants import T_s
def load_domestic_data(filepath: str) -> None:
"""Load CDRC data, aggregate over post code areas and meters, then save to file."""
# path = 'data/cdrc/DomesticEnergyProviderDataset/DEP2015_SF_CDRC.csv'
num_meters = 0
natural_gas_consumption = np.zeros([365,48])
electricity_consumption = np.zeros([365,48])
# open file in read mode
with open(filepath, 'r') as source:
# pass the file object to reader() to get the reader object
csv_reader = reader(source)
# Iterate over each row in the csv using reader object
header = next(csv_reader)
        # Proceed only if the file is not empty (i.e. a header row exists)
if header != None:
# Iterate over each row after the header in the csv
for row in csv_reader:
# row variable is a list that represents a row in csv
                # the dataset contains the invalid date 31 September; skip those rows
if row[1] == "20150931":
continue
date = datetime.strptime(row[1], "%Y%m%d")
data = row[5:]
if not "NA" in data:
pc = row[2]
t = row[3]
num_meters += int(row[4])
day = int(date.timetuple().tm_yday)
print(f"{date} = {day}: {t}, pc = {pc}")
x = np.array(data)
y = x.astype(np.float)
z = np.nan_to_num(y)
if (t == 'G'):
natural_gas_consumption[day-1,:] += z
if (t == 'E'):
electricity_consumption[day-1,:] += z
natural_gas_energy = natural_gas_consumption.ravel()
electricity_energy = electricity_consumption.ravel()
# energy is measured every T_s in kWh, convert to power in GW:
kWh_p_sample_to_P_GW = 3600 * 1000 / T_s / 1E9
natural_gas_power = natural_gas_energy * kWh_p_sample_to_P_GW
electricity_power = electricity_energy * kWh_p_sample_to_P_GW
print(f"Total number of meters: {num_meters/365}")
print(f"Total natural gas energy: {np.sum(natural_gas_energy)/1E9} TWh")
print(f"Total electricity energy: {np.sum(electricity_energy)/1E9} TWh")
basename = filepath[:-3]
with open(basename + 'natural_gas.npy', 'wb') as f:
np.save(f, natural_gas_power)
with open(basename + 'electricity.npy', 'wb') as f:
np.save(f, electricity_power)
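# Hedged worked example of the unit conversion above, assuming half-hourly
# samples (T_s = 1800 s, matching the 48 slots per day): 1 kWh consumed in one
# sample is an average power of 3.6e6 J / 1800 s = 2000 W = 2e-6 GW, which is
# exactly the factor 3600 * 1000 / T_s / 1e9 used in load_domestic_data.
def _conversion_example():
    sample_seconds = 1800  # assumed value of T_s for this illustration only
    factor = 3600 * 1000 / sample_seconds / 1e9
    assert abs(factor - 2e-6) < 1e-15
    return factor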
| 38.924242
| 87
| 0.569482
|
114a489a7f1947a532a584a57cbd6d6d8604fc08
| 1,543
|
py
|
Python
|
tests/conftest.py
|
brossboth/cryptocom-exchange
|
b4862187fc28bcb99e0df92cf8a651bd28b1f2c0
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
brossboth/cryptocom-exchange
|
b4862187fc28bcb99e0df92cf8a651bd28b1f2c0
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
brossboth/cryptocom-exchange
|
b4862187fc28bcb99e0df92cf8a651bd28b1f2c0
|
[
"MIT"
] | null | null | null |
import asyncio
import pytest
import cryptocom.exchange as cro
@pytest.fixture
async def exchange() -> cro.Exchange:
ex = cro.Exchange()
await ex.sync_pairs()
return ex
@pytest.fixture
async def account() -> cro.Account:
acc = cro.Account(from_env=True)
await acc.sync_pairs()
yield acc
await acc.cancel_open_orders(cro.pairs.CRO_USDT)
@pytest.fixture
def event_loop(request):
"""Create an instance of the default event loop for each test case."""
loop = asyncio.events.new_event_loop()
try:
asyncio.events.set_event_loop(loop)
yield loop
finally:
try:
_cancel_all_tasks(loop)
loop.run_until_complete(loop.shutdown_asyncgens())
if hasattr(loop, 'shutdown_default_executor'):
loop.run_until_complete(loop.shutdown_default_executor())
finally:
asyncio.events.set_event_loop(None)
loop.close()
def _cancel_all_tasks(loop):
to_cancel = asyncio.tasks.all_tasks(loop)
if not to_cancel:
return
for task in to_cancel:
task.cancel()
loop.run_until_complete(
asyncio.tasks.gather(*to_cancel, return_exceptions=True))
for task in to_cancel:
if task.cancelled():
continue
if task.exception() is not None:
loop.call_exception_handler({
'message': 'unhandled exception during asyncio.run() shutdown',
'exception': task.exception(),
'task': task,
})
| 25.716667
| 79
| 0.63383
|
1a4591d0cab685cbf9c22e2bbb7acbcec083d876
| 6,489
|
py
|
Python
|
python/_impl.py
|
gglin001/poptorch
|
61f38ed2d8c6b672e023862eb698865fa7f4724e
|
[
"MIT"
] | 128
|
2020-12-08T22:22:46.000Z
|
2022-03-23T10:54:26.000Z
|
python/_impl.py
|
gglin001/poptorch
|
61f38ed2d8c6b672e023862eb698865fa7f4724e
|
[
"MIT"
] | 4
|
2021-06-22T14:26:28.000Z
|
2022-02-15T11:25:05.000Z
|
python/_impl.py
|
gglin001/poptorch
|
61f38ed2d8c6b672e023862eb698865fa7f4724e
|
[
"MIT"
] | 7
|
2020-12-09T20:32:56.000Z
|
2022-01-18T16:12:24.000Z
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
from contextlib import contextmanager
import copy
import fcntl
import hashlib
import os
from typing import Dict, Any
import torch
# Do not import any poptorch.* here: it will break the poptorch module
from ._logging import logger
from . import poptorch_core
# A flag to tell the user if the current target is IPU. This is to allow
# divergent IPU/CPU codepaths within one model.
_is_ipu_context = False
def createPoptorchError(msg):
type = "poptorch_py_error"
error = poptorch_core.Error(f"'{type}': {msg}")
error.type = type
error.message = msg
error.location = ""
return error
def isRunningOnIpu() -> bool:
""" This function returns `True` when executing on IPU and `False` when
executing the model outside IPU scope. This allows for separate
codepaths to be marked in the model simply by using:
>>> if poptorch.isRunningOnIpu():
>>> # IPU path
>>> else:
>>> # CPU path
Note this will only apply to code during execution. During model
creation it will always return `False`.
:returns: True if running on IPU, otherwise False.
"""
global _is_ipu_context
return _is_ipu_context
def setIpuContext(val: bool):
global _is_ipu_context
_is_ipu_context = val
def internal_cast(tensor, dtype):
if dtype in [torch.float, torch.float32]:
return torch.ops.poptorch.internal_cast(tensor, "FLOAT")
if dtype in [torch.half, torch.float16]:
return torch.ops.poptorch.internal_cast(tensor, "FLOAT16")
raise ValueError(
'Invalid poptorch.cast target type. Expecting torch.float or torch.half'
)
def applyOptimizer(optimizer):
num_groups = len(optimizer.param_groups)
for index in range(0, num_groups):
torch.ops.poptorch.optimizer_group(
index, optimizer.param_groups[index]["params"])
# To understand which variable groups the user wants to apply the
# optimizer to we need to mark them via a wrapper. We do this because
# when we reference the variables in the context of the operation we
# get the corresponding IR value for "free" as part of the trace.
# Otherwise we would need a system to map the variable in the optimizer
# to the variable in the model to the variable in the IR.
class OptimizerWrapper(torch.nn.Module):
def __init__(self, model, optimizer):
super().__init__()
self.model = model
self.optimizer = optimizer
def forward(self, *args, **kwargs):
out = self.model(*args, **kwargs)
applyOptimizer(self.optimizer)
return out
@contextmanager
def distributedCacheLock(model, opts):
"""In a distributed environment we only want the model to be compiled once.
If there is only one process or if the cache is not enabled:
no need for a lock, early return.
Otherwise:
The first process to reach the lock takes it and compiles the model.
The model will be added to the PopART cache.
After the first process releases the lock the other ones will grab it
one at the time and compile the model too (Except that they will
now all hit the cache).
The last process to grab / release the lock will delete the file.
    (Each process appends a character to the file, so the position in
the file when acquiring the lock indicates how many processes have
already successfully compiled the model).
"""
filename = None
if opts.Distributed.numProcesses > 1:
cache = opts._popart.options.get("cachePath", "") # pylint: disable=protected-access
if not cache:
logger.warning(
"Use poptorch.Options.enableExecutableCaching() to avoid "
"compiling the model once per process")
else:
os.makedirs(cache, exist_ok=True)
assert os.access(cache, os.W_OK), (f"Cache folder {cache}"
" is not writable")
filename = os.path.join(
cache, "%s.lock" %
hashlib.md5(repr(model).encode("utf-8")).hexdigest())
# Not distributed mode or the cache is not enabled: do nothing.
if not filename:
yield False
return
delete_file = False
try:
with open(filename, "a+") as f:
try:
fcntl.flock(f, fcntl.LOCK_EX)
# Add a character to the file
f.write("0")
logger.debug(
"Executable cache file locked by process %s (pos %d/%d)",
opts.Distributed.processId, f.tell(),
opts.Distributed.numProcesses)
delete_file = f.tell() == opts.Distributed.numProcesses
# Only the first process should compile
yield f.tell() == 1
finally:
logger.debug("Process %s released the cache lock",
opts.Distributed.processId)
fcntl.flock(f, fcntl.LOCK_UN)
finally:
if delete_file:
os.remove(filename)
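# Hedged, standalone illustration of the locking pattern documented above
# (illustrative only, not used by poptorch itself): each process appends one
# byte under an exclusive flock, its position in the file tells it whether it
# was first, and the last one removes the file. The process count and lock
# path are supplied by the caller; nothing here is a public API.
@contextmanager
def _compile_once_sketch(num_processes, lock_path):
    delete_file = False
    try:
        with open(lock_path, "a+") as f:
            try:
                fcntl.flock(f, fcntl.LOCK_EX)
                f.write("0")
                delete_file = f.tell() == num_processes
                yield f.tell() == 1  # True only for the first process through
            finally:
                fcntl.flock(f, fcntl.LOCK_UN)
    finally:
        if delete_file:
            os.remove(lock_path)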
# The pickle handlers are called in two cases: when an object is copied
# (i.e copy.copy(obj)) or when an object is pickled / serialised.
# In both cases the object is first dumped using pickleUnwrapObject and then
# in the copy case _pickleRestoreWrapperIfPossible() is called immediately after
# to create the new object.
#
# The _wrapper_registry keeps track of the mapping between user model, parameter,
# buffer types and their corresponding wrapper.
# When an object is copied we want to preserve the Wrapper type: the PopTorch
# wrapper doesn't contain any attribute so it's just a question of updating
# the __class__attribute.
#
# When an object is loaded from file: the wrapper type doesn't exist anymore
# therefore we keep the object unwrapped. (It will be wrapped again when passed
# to poptorch.trainingModel anyway)
_wrapper_registry: Dict[int, Any] = {}
def _pickleRestoreWrapperIfPossible(obj):
wrapperType = _wrapper_registry.get(id(obj))
if wrapperType:
obj.__class__ = wrapperType
return obj
def pickleUnwrapObject(obj):
global _wrapper_registry
wrapperType = obj.__class__
obj.__class__ = obj.__class__.__bases__[0]
other = copy.copy(obj)
_wrapper_registry[id(other)] = wrapperType
obj.__class__ = wrapperType
return _pickleRestoreWrapperIfPossible, (other, )
| 35.266304
| 93
| 0.664355
|
3a10756f1821c368bbef4e2e7e7120b78cfc8c61
| 785
|
py
|
Python
|
core/authentication.py
|
blumug/texapi
|
3caf1dd3f0c641a06964a33f7d3046bdace24eeb
|
[
"MIT"
] | null | null | null |
core/authentication.py
|
blumug/texapi
|
3caf1dd3f0c641a06964a33f7d3046bdace24eeb
|
[
"MIT"
] | null | null | null |
core/authentication.py
|
blumug/texapi
|
3caf1dd3f0c641a06964a33f7d3046bdace24eeb
|
[
"MIT"
] | null | null | null |
import datetime
from django.utils.timezone import utc
from rest_framework.authentication import TokenAuthentication
from rest_framework import exceptions
class ExpiringTokenAuthentication(TokenAuthentication):
def authenticate_credentials(self, key):
try:
token = self.model.objects.get(key=key)
except self.model.DoesNotExist:
raise exceptions.AuthenticationFailed('Invalid token')
if not token.user.is_active:
raise exceptions.AuthenticationFailed('User inactive or deleted')
utc_now = datetime.datetime.utcnow().replace(tzinfo=utc)
if token.created < utc_now - datetime.timedelta(days=5):
raise exceptions.AuthenticationFailed('Token has expired')
return (token.user, token)
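# Hedged usage sketch: wiring the class above into Django REST Framework's
# settings module (illustrative; the dotted path assumes this file lives at
# core/authentication.py as in the repository listing).
REST_FRAMEWORK = {
    "DEFAULT_AUTHENTICATION_CLASSES": (
        "core.authentication.ExpiringTokenAuthentication",
    ),
}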
| 32.708333
| 77
| 0.721019
|
0232b76bc623fd926087d4c08b8b623394779db5
| 12,614
|
bzl
|
Python
|
asylo/bazel/asylo.bzl
|
Hasimir/asylo
|
ac598c5266e36312d18fcdaa55efbc780af88975
|
[
"Apache-2.0"
] | 1
|
2019-06-27T12:44:36.000Z
|
2019-06-27T12:44:36.000Z
|
asylo/bazel/asylo.bzl
|
Hasimir/asylo
|
ac598c5266e36312d18fcdaa55efbc780af88975
|
[
"Apache-2.0"
] | null | null | null |
asylo/bazel/asylo.bzl
|
Hasimir/asylo
|
ac598c5266e36312d18fcdaa55efbc780af88975
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018 Asylo authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Macro definitions for Asylo testing."""
load("@com_google_asylo_backend_provider//:enclave_info.bzl", "enclave_info")
load("@linux_sgx//:sgx_sdk.bzl", "sgx_enclave")
def _parse_label(label):
"""Parse a label into (package, name).
Args:
label: string in relative or absolute form.
Returns:
Pair of strings: package, relative_name
"""
if label.startswith("//"): # Absolute label.
label = label[2:] # drop the leading //
colon_split = label.split(":")
if len(colon_split) == 1: # no ":" in label
pkg = label
_, _, target = label.rpartition("/")
else:
pkg, target = colon_split # fails if len(colon_split) != 2
else:
colon_split = label.split(":")
if len(colon_split) == 1: # no ":" in label
pkg, target = native.package_name(), label
else:
pkg2, target = colon_split # fails if len(colon_split) != 2
pkg = native.package_name() + ("/" + pkg2 if pkg2 else "")
return pkg, target
def _ensure_static_manual(args):
"""Set linkopts and tags keys of args for static linking and manual testing.
Args:
args: A map representing the arguments to either cc_binary or cc_test.
Returns:
The given args modified for linking and tagging.
"""
# Fully static so the test can move and still operate
args["linkstatic"] = 1
args["copts"] = ["-g0"] + args.get("copts", [])
return args
def copy_from_host(target, output, name = ""):
"""Genrule that builds target with host CROSSTOOL."""
_, local_name = _parse_label(target)
name = name if name else local_name + "_as_host"
native.genrule(
name = name,
srcs = [],
outs = [output],
cmd = "cp $(location %s) $@" % target,
executable = 1,
output_to_bindir = 1,
tools = [target],
testonly = 1,
)
def _enclave_args(enclaves):
"""Collects enclave dependencies' paths with formatted argument string.
Arguments:
enclaves: depset of enclave dependencies.
Returns:
string: If 1 enclave, "--enclave_path=<path>", otherwise
for each enclave, "--<enclave_name>=<path>" ...
"""
for enclave in enclaves:
if enclave_info not in enclave:
fail("Expected all arguments to have the enclave_info provider: " +
enclave.label.name)
enclave_args = []
if len(enclaves) == 1:
enclave_args.append("--enclave_path=\"{path}\"".format(
path = enclaves[0].files.to_list()[0].short_path,
))
else:
for data in enclaves:
runpath = data.files.to_list()[0].path
enclave_args.append("--{name}={path}".format(
name = data.label.name,
path = runpath,
))
return " ".join(enclave_args)
def _enclave_binary_wrapper_impl(ctx):
"""Generates a runnable wrapper script around an enclave driver.
Given a binary and its data dependencies, call the binary with flags that
provide enclave dependencies' paths. A single enclave is given as the flag
--enclave_path=<path>. Multiple enclaves are disambiguated with their label
name as the flag. For example, given data dependencies on both //pkg0:enclave0
//pkg1:enclave1, the arguments passed are --enclave0=path/to/pkg0/enclave0.so
and --enclave1=path/to/pkg1/enclave1.so.
Arguments:
ctx: A blaze rule context
Returns:
The rule's providers. Indicates the data dependencies as runfiles.
"""
ctx.actions.write(
content = "#!/bin/bash\n" +
"\n" +
"exec \"./{bin}\" {args} \"$@\"\n".format(
bin = ctx.executable.binary.short_path,
args = _enclave_args(ctx.attr.enclaves),
),
is_executable = True,
output = ctx.outputs.executable,
)
return [DefaultInfo(runfiles = ctx.runfiles(files = [ctx.executable.binary] +
ctx.files.data +
ctx.files.enclaves))]
_enclave_binary_wrapper = rule(
implementation = _enclave_binary_wrapper_impl,
executable = True,
attrs = {
"binary": attr.label(
mandatory = True,
executable = True,
cfg = "host",
allow_single_file = True,
),
"data": attr.label_list(allow_files = True),
"enclaves": attr.label_list(allow_files = True, providers = [enclave_info]),
},
)
_enclave_script_test = rule(
implementation = _enclave_binary_wrapper_impl,
test = True,
attrs = {
"binary": attr.label(
cfg = "host",
executable = True,
mandatory = True,
allow_single_file = True,
),
"data": attr.label_list(allow_files = True),
"enclaves": attr.label_list(allow_files = True, providers = [enclave_info]),
},
)
def debug_enclave_driver(name, enclaves, **kwargs):
"""Wraps cc_binary with dependency on enclave availability at runtime.
Creates a cc_binary for a given enclave. The cc_binary will be passed
'--enclave_path=<path to instance of |enclave|>' for 1 enclave, or
'--<enclave_name>=<path to instance of |enclave_name.so|>' for many enclaves.
Args:
name: Name for build target.
enclaves: Enclave target dependencies.
**kwargs: cc_binary arguments.
This macro creates three build targets:
1) name: shell script that runs the debug_enclave_driver.
2) name_driver: cc_binary used as driver in name. This is a normal
native cc_binary. It cannot be directly run because there
is an undeclared dependency on the enclaves.
3) name_host_driver: genrule that builds name_driver with host crosstool.
"""
binary_name = name + "_driver"
host_binary_name = name + "_host_driver"
native.cc_binary(name = binary_name, **_ensure_static_manual(kwargs))
copy_from_host(target = binary_name, output = host_binary_name)
_enclave_binary_wrapper(
name = name,
binary = host_binary_name,
data = kwargs.get("data", []),
enclaves = enclaves,
)
def sim_enclave(name, **kwargs):
"""Build rule for creating simulated enclave object files signed for testing.
The enclave simulation backend currently makes use of the SGX simulator.
However, this is subject to change and users of this rule should not make
assumptions about it being related to SGX.
Args:
name: The name of the signed enclave object file.
**kwargs: cc_binary arguments.
"""
sgx_enclave(name, **kwargs)
def enclave_test(name, enclave = False, enclaves = [], tags = [], **kwargs):
"""Build target for testing one or more instances of 'sgx_enclave'.
Creates a cc_test for a given enclave. The cc_test will be passed
'--enclave_path=<path to instance of |enclave|>' for 1 enclave, or
'--<enclave_name>=<path to instance of |enclave_name.so|>' for many enclaves.
Args:
name: Name for build target.
enclave: [deprecated, use enclaves] The sgx_enclave target to test against.
enclaves: The sgx_enclave targets to test against.
tags: Label attached to this test to allow for querying.
**kwargs: cc_test arguments.
This macro creates three build targets:
1) name: sh_test that runs the enclave_test.
2) name_driver: cc_test used as test driver in name. This is a normal
native cc_test. It cannot be directly run because there is
an undeclared dependency on enclave.
3) name_host_driver: genrule that builds name_driver with host crosstool.
"""
test_name = name + "_driver"
host_test_name = name + "_host_driver"
native.cc_test(
name = test_name,
**_ensure_static_manual(kwargs)
)
copy_from_host(target = test_name, output = host_test_name)
_enclave_script_test(
name = name,
data = kwargs.get("data", []),
enclaves = enclaves + ([enclave] if enclave else []),
binary = host_test_name,
testonly = 1,
tags = ["enclave_test"] + tags,
)
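# Hypothetical usage sketch (names are illustrative, not part of this file):
#
#     enclave_test(
#         name = "demo_enclave_test",
#         srcs = ["demo_enclave_test.cc"],
#         enclaves = [":demo_enclave.so"],
#     )
#
# Per the docstring above, the generated driver receives "--enclave_path=<path>"
# when a single enclave is supplied, or one "--<enclave_name>=<path>" flag per
# enclave otherwise.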
def cc_test(name, enclave_test_name = "", srcs = [], deps = [], **kwargs):
"""Build macro that creates a cc_test target and a cc_enclave_test target.
This macro generates a cc_test target, which will run a gtest test suite
normally, and optionally a cc_enclave_test, which will run the test suite
inside of an enclave.
Args:
name: Same as native cc_test name.
enclave_test_name: Name for the generated cc_enclave_test. Optional.
srcs: Same as native cc_test srcs.
deps: Same as native cc_test deps.
**kwargs: cc_test arguments.
"""
native.cc_test(
name = name,
srcs = srcs,
deps = deps,
**kwargs
)
if enclave_test_name:
cc_enclave_test(
name = enclave_test_name,
srcs = srcs,
deps = deps,
**kwargs
)
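# Hypothetical usage sketch (names are illustrative, not part of this file):
#
#     cc_test(
#         name = "demo_test",
#         enclave_test_name = "demo_enclave_test",
#         srcs = ["demo_test.cc"],
#         deps = [":demo_lib"],
#     )
#
# This produces the normal native ":demo_test" plus ":demo_enclave_test", which
# runs the same sources inside an enclave via cc_enclave_test defined below.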
def cc_test_and_cc_enclave_test(name, enclave_test_name = "", srcs = [], deps = [], **kwargs):
"""An alias for cc_test with a default enclave_test_name.
This macro is identical to cc_test, except it passes in an enclave
test name automatically. It is provided for convenience of overriding the
default definition of cc_test without having to specify enclave test names.
If this behavior is not desired, use cc_test instead, which will not create
    an enclave test unless given an enclave test name.
This is most useful if imported as
load(
"//asylo/bazel:asylo.bzl",
cc_test = "cc_test_and_cc_enclave_test",
)
so any cc_test defined in the BUILD file will generate both native and
enclave tests.
Args:
name: See documentation for name in native cc_test rule.
enclave_test_name: See documentation for enclave_test_name in cc_test above.
If not provided and name ends with "_test", then defaults to name with
"_test" replaced with "_enclave_test". If not provided and name does
not end with "_test", then defaults to name appended with "_enclave".
srcs: See documentation for srcs in native cc_test rule.
deps: See documentation for deps in native cc_test rule.
**kwargs: See documentation for **kwargs in native cc_test rule.
"""
if not enclave_test_name:
if name.endswith("_test"):
enclave_test_name = "_enclave_test".join(name.rsplit("_test", 1))
else:
enclave_test_name = name + "_enclave"
cc_test(
name = name,
enclave_test_name = enclave_test_name,
srcs = srcs,
deps = deps,
**kwargs
)
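# Name-derivation sketch for the fallback above (illustrative): a target named
# "demo_test" gets the enclave test "demo_enclave_test", while a target named
# "demo" (no "_test" suffix) gets "demo_enclave".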
def cc_enclave_test(name, srcs, tags = [], deps = [], **kwargs):
"""Build target that runs a cc_test srcs inside of an enclave.
This macro creates two targets, one sgx_enclave target with the test source.
And another test runner application to launch the test enclave.
Args:
      name: Target name. The generated enclave is built as <name>.so.
srcs: Same as cc_test srcs.
tags: Same as cc_test tags.
deps: Same as cc_test deps.
**kwargs: cc_test arguments.
"""
# Create a copy of the gtest enclave runner
host_test_name = name + "_host_driver"
copy_from_host(
target = "//asylo/bazel:enclave_runner",
output = host_test_name,
name = name + "_as_host",
)
# Build the gtest enclave using the test file and gtest "main" enclave shim
enclave_name = name + ".so"
enclave_target = ":" + enclave_name
sgx_enclave(
name = enclave_name,
srcs = srcs,
deps = deps + ["//asylo/bazel:enclave_test_shim"],
testonly = 1,
)
# Execute the gtest enclave using the gtest enclave runner
_enclave_script_test(
name = name,
data = kwargs.get("data", []),
enclaves = [enclave_target],
binary = host_test_name,
testonly = 1,
tags = ["enclave_test"] + tags,
)
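# Hypothetical usage sketch (names are illustrative; the gtest shim dependency is
# appended automatically by the macro):
#
#     cc_enclave_test(
#         name = "demo_enclave_test",
#         srcs = ["demo_test.cc"],
#         deps = [":demo_lib"],
#     )
#
# This builds the enclave ":demo_enclave_test.so" from the test sources plus the
# shim, and a runner target ":demo_enclave_test" tagged "enclave_test".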
| 35.13649
| 94
| 0.641747
|
bd2339f31d520d24782e4b2522c6978f32ae852e
| 5,263
|
py
|
Python
|
tensorflow/python/kernel_tests/broadcast_to_ops_test.py
|
yanzhiwei1990/tensorflow
|
28ede9ed7caee0ce2731d95cc0eb9aff7f360105
|
[
"Apache-2.0"
] | 1
|
2020-02-04T06:39:30.000Z
|
2020-02-04T06:39:30.000Z
|
tensorflow/python/kernel_tests/broadcast_to_ops_test.py
|
yanzhiwei1990/tensorflow
|
28ede9ed7caee0ce2731d95cc0eb9aff7f360105
|
[
"Apache-2.0"
] | 1
|
2018-09-17T19:30:27.000Z
|
2018-09-17T19:30:27.000Z
|
tensorflow/python/kernel_tests/broadcast_to_ops_test.py
|
yanzhiwei1990/tensorflow
|
28ede9ed7caee0ce2731d95cc0eb9aff7f360105
|
[
"Apache-2.0"
] | 1
|
2019-10-21T10:37:57.000Z
|
2019-10-21T10:37:57.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for broadcast_to ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.platform import test as test_lib
class BroadcastToTest(test_util.TensorFlowTestCase):
def testBroadcastToBasic(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16, np.int32, np.int64]:
with self.test_session(use_gpu=True):
x = np.array([1, 2, 3], dtype=dtype)
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf.eval(), v_np)
def testBroadcastToString(self):
with self.test_session(use_gpu=True):
x = np.array([b"1", b"2", b"3"])
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf.eval(), v_np)
def testBroadcastToBool(self):
with self.test_session(use_gpu=True):
x = np.array([True, False, True], dtype=np.bool)
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf.eval(), v_np)
def testBroadcastToShape(self):
for input_dim in range(1, 6):
for output_dim in range(input_dim, 6):
with self.test_session(use_gpu=True):
input_shape = [2] * input_dim
output_shape = [2] * output_dim
x = np.array(np.random.randint(5, size=input_shape), dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), output_shape)
v_np = np.broadcast_to(x, output_shape)
self.assertAllEqual(v_tf.eval(), v_np)
def testBroadcastToScalar(self):
with self.test_session(use_gpu=True):
x = np.array(1, dtype=np.int32)
v_tf = array_ops.broadcast_to(constant_op.constant(x), [3, 3])
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf.eval(), v_np)
def testBroadcastToShapeTypeAndInference(self):
for dtype in [dtypes.int32, dtypes.int64]:
with self.test_session(use_gpu=True):
x = np.array([1, 2, 3])
v_tf = array_ops.broadcast_to(
constant_op.constant(x),
constant_op.constant([3, 3], dtype=dtype))
shape = v_tf.get_shape().as_list()
v_np = np.broadcast_to(x, [3, 3])
self.assertAllEqual(v_tf.eval(), v_np)
# check shape inference when shape input is constant
self.assertAllEqual(shape, v_np.shape)
def testGradientForScalar(self):
# TODO(alextp): There is a bug with broadcast_to on GPU from scalars,
# hence we make this test cpu-only.
with ops.device("cpu:0"):
x = constant_op.constant(1, dtype=dtypes.float32)
v = array_ops.broadcast_to(x, [2, 4, 3])
out = 2 * v
with self.test_session():
err = gradient_checker.compute_gradient_error(x, x.get_shape(),
out, out.get_shape())
self.assertLess(err, 1e-4)
def testGradientWithSameRank(self):
x = constant_op.constant(np.reshape(np.arange(6), (2, 1, 3)),
dtype=dtypes.float32)
v = array_ops.broadcast_to(x, [2, 5, 3])
out = 2 * v
with self.test_session():
err = gradient_checker.compute_gradient_error(x, x.get_shape(),
out, out.get_shape())
self.assertLess(err, 1e-4)
def testGradientWithIncreasingRank(self):
x = constant_op.constant([[1], [2]],
dtype=dtypes.float32)
v = array_ops.broadcast_to(x, [5, 2, 3])
out = 2 * v
with self.test_session():
err = gradient_checker.compute_gradient_error(x, x.get_shape(),
out, out.get_shape())
self.assertLess(err, 1e-4)
def testGradientWithBroadcastAllDimensions(self):
x = constant_op.constant([[1, 2, 3], [4, 5, 6]], dtype=dtypes.float32)
v = array_ops.broadcast_to(x, [5, 4, 6])
out = 2 * v
with self.test_session():
err = gradient_checker.compute_gradient_error(x, x.get_shape(),
out, out.get_shape())
self.assertLess(err, 1e-4)
if __name__ == "__main__":
test_lib.main()
| 40.484615
| 80
| 0.642219
|
e9f7c6f9e8ac4b058c6578568604e9c180b9100e
| 2,379
|
py
|
Python
|
.ranger/commands.py
|
xhlar/configs
|
fd12ab668a6f2cb18582623c820333eec393a09c
|
[
"CC0-1.0"
] | null | null | null |
.ranger/commands.py
|
xhlar/configs
|
fd12ab668a6f2cb18582623c820333eec393a09c
|
[
"CC0-1.0"
] | null | null | null |
.ranger/commands.py
|
xhlar/configs
|
fd12ab668a6f2cb18582623c820333eec393a09c
|
[
"CC0-1.0"
] | null | null | null |
from ranger.api.commands import Command
class fzf_select(Command):
"""
:fzf_select
Find a file using fzf.
With a prefix argument to select only directories.
See: https://github.com/junegunn/fzf
"""
def execute(self):
import subprocess
import os
from ranger.ext.get_executables import get_executables
if 'fzf' not in get_executables():
self.fm.notify('Could not find fzf in the PATH.', bad=True)
return
fd = None
if 'fdfind' in get_executables():
fd = 'fdfind'
elif 'fd' in get_executables():
fd = 'fd'
if fd is not None:
hidden = ('--hidden' if self.fm.settings.show_hidden else '')
exclude = "--no-ignore-vcs --exclude '.git' --exclude '*.py[co]' --exclude '__pycache__'"
only_directories = ('--type directory' if self.quantifier else '')
fzf_default_command = '{} --follow {} {} {} --color=always'.format(
fd, hidden, exclude, only_directories
)
else:
hidden = ('-false' if self.fm.settings.show_hidden else r"-path '*/\.*' -prune")
exclude = r"\( -name '\.git' -o -iname '\.*py[co]' -o -fstype 'dev' -o -fstype 'proc' \) -prune"
only_directories = ('-type d' if self.quantifier else '')
fzf_default_command = 'find -L . -mindepth 1 {} -o {} -o {} -print | cut -b3-'.format(
hidden, exclude, only_directories
)
env = os.environ.copy()
env['FZF_DEFAULT_COMMAND'] = fzf_default_command
env['FZF_DEFAULT_OPTS'] = '--height=40% --layout=reverse --ansi --preview="{}"'.format('''
(
batcat --color=always {} ||
bat --color=always {} ||
cat {} ||
tree -ahpCL 3 -I '.git' -I '*.py[co]' -I '__pycache__' {}
) 2>/dev/null | head -n 100
''')
fzf = self.fm.execute_command('fzf --no-multi', env=env,
universal_newlines=True, stdout=subprocess.PIPE)
stdout, _ = fzf.communicate()
if fzf.returncode == 0:
selected = os.path.abspath(stdout.strip())
if os.path.isdir(selected):
self.fm.cd(selected)
else:
self.fm.select_file(selected)
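# Usage sketch (assumption, not part of this file): with this commands.py in
# ranger's configuration directory, the command can be run from the console as
# ":fzf_select" (a numeric prefix restricts matches to directories, per the
# docstring), or bound to a key in rc.conf, for example:
#
#     map <C-f> fzf_select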
| 38.370968
| 108
| 0.527533
|
e3b31dd35fec5d30447f50b6e1b6ef3ac6a8a474
| 7,045
|
py
|
Python
|
tests/test_serde.py
|
NervanaSystems/ngraph-python
|
ac032c83c7152b615a9ad129d54d350f9d6a2986
|
[
"Apache-2.0"
] | 18
|
2018-03-19T04:16:49.000Z
|
2021-02-08T14:44:58.000Z
|
tests/test_serde.py
|
rsumner31/ngraph
|
5e5c9bb9f24d95aee190b914dd2d44122fc3be53
|
[
"Apache-2.0"
] | 2
|
2019-04-16T06:41:49.000Z
|
2019-05-06T14:08:13.000Z
|
tests/test_serde.py
|
rsumner31/ngraph
|
5e5c9bb9f24d95aee190b914dd2d44122fc3be53
|
[
"Apache-2.0"
] | 11
|
2018-06-16T15:59:08.000Z
|
2021-03-06T00:45:30.000Z
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import os
from copy import deepcopy
import numpy as np
import ngraph as ng
from ngraph.op_graph.op_graph import Op
import ngraph.op_graph.serde.serde as ser
from ngraph.op_graph.serde.serde_pass import SerializationPass
from ngraph.testing.hetr_utils import create_send_recv_graph
def get_simple_graph():
ax = ng.make_axes([ng.make_axis(name='C', length=1)])
base_op = ng.constant(5.0, ax)
simple_graph = ng.log(ng.exp(base_op))
return base_op, simple_graph
def strip_dict(d):
"""
For equality testing we need to remove attributes of dicts that are either unique to each
instance or need more complex equality handling
"""
keys = ('_NameableValue__name', '_axes', '_args', 'valfun', 'dtype',
'scale', '_tensor', '_send_node')
for key in keys:
if key in d:
del d[key]
def assert_object_equality(obj1, obj2):
if hasattr(obj1, '_args'):
for arg1, arg2 in zip(obj1._args, obj2._args):
assert_object_equality(arg1, arg2)
d1 = deepcopy(obj1.__dict__)
strip_dict(d1)
d2 = deepcopy(obj2.__dict__)
strip_dict(d2)
assert d1 == d2
def test_flattenedaxis_serialization():
    # We do a round-trip serialization run with an axis and make sure that they match
c = ng.make_axis(name='C', length=2)
h = ng.make_axis(name='H', length=3)
orig_axis = ng.make_axes([c, h]).flatten()
pb_axis = ser.axis_to_protobuf(orig_axis)
py_axis = ser.pb_to_axis(pb_axis)
assert py_axis.length == orig_axis.length
# NameableValue name counter is different
# assert orig_axis.name == py_axis.name
assert type(py_axis) == type(orig_axis)
assert orig_axis == py_axis
def test_axis_serialization():
    # We do a round-trip serialization run with an axis and make sure that they match
axis = ng.make_axis(name='C', length=2)
pb_axis = ser.axis_to_protobuf(axis)
py_axis = ser.pb_to_axis(pb_axis)
assert axis.length == py_axis.length
assert axis.name == py_axis.name
assert axis == py_axis
def test_tensor_to_protobuf():
orig_tensor = np.arange(12, dtype=np.float32).reshape(3, 4)
pb_tensor = ser.tensor_to_protobuf(orig_tensor)
py_tensor = ser.pb_to_tensor(pb_tensor)
np.testing.assert_allclose(orig_tensor, py_tensor)
def test_scalar_to_protobuf():
orig_tensor = np.float32(12)
pb_tensor = ser.tensor_to_protobuf(orig_tensor)
py_tensor = ser.pb_to_tensor(pb_tensor)
np.testing.assert_allclose(orig_tensor, py_tensor)
def test_op_to_protobuf():
axis = ng.make_axis(name='C', length=2)
axes = ng.make_axes([axis])
orig_op = ng.placeholder(axes)
# Test attributes
orig_op.test0 = 'stringval_attr'
orig_op.test1 = [-1.0, 4]
orig_op.test2 = dict(foo=2, you='bar')
orig_op.test3 = dict()
orig_op.test4 = slice(1, 3, 5)
orig_op.test5 = slice(1, 3)
orig_op.test6 = slice(1, None, 3)
orig_op.test7 = axis
orig_op.test8 = axes
# Test metadata
orig_op.metadata['test0'] = 'stringval'
orig_op.metadata['test1'] = [1, 4.0]
orig_op.metadata['test2'] = dict(hey=1, you=4.0)
orig_op.metadata['test4'] = dict()
orig_op.metadata['test5'] = slice(1, 3, 5)
orig_op.metadata['test6'] = slice(1, 3)
orig_op.metadata['test7'] = slice(1, None, 5)
orig_op.metadata['test8'] = axis
orig_op.metadata['test9'] = axes
pb_op = ser.op_to_protobuf(orig_op)
py_op = ser.protobuf_to_op(pb_op)
assert_object_equality(py_op, orig_op)
def test_op_references():
# test op references in arbitrary attributes
orig_op = ng.placeholder(())
other_op = ng.placeholder(()).named("foo")
orig_op.op_ref = other_op
orig_op.many_op_refs = [other_op]
ser_string = ser.serialize_graph([orig_op], only_return_handle_ops=True)
py_op = ser.deserialize_graph(ser_string)[0]
assert py_op.op_ref.name.startswith('foo')
assert py_op.many_op_refs[0].name.startswith('foo')
def test_full_graph_serialization_endtoend():
base_op, simple_graph = get_simple_graph()
ser_string = ser.serialize_graph([simple_graph])
py_graph = ser.deserialize_graph(ser_string)
orig_graph = Op.all_op_references([simple_graph])
# This is actually overkill since the checks of the leaf nodes will recursively
# check equality up the graph, but we also want to make sure the full set of nodes
# returned is equal
for o1, o2 in zip(sorted(py_graph, key=lambda x: x.uuid),
sorted(orig_graph, key=lambda x: x.uuid)):
assert_object_equality(o1, o2)
def test_op_handle_selection():
"""
When serializing graphs, we can optionally add metadata to
those nodes we pass in, and return only those nodes when deserializing.
This is useful for ngraph transparent testing since it is common in
ngraph to use the final op as the 'handle' to the entire graph.
"""
base_op, simple_graph = get_simple_graph()
ser_string = ser.serialize_graph([simple_graph], only_return_handle_ops=True)
py_graph = ser.deserialize_graph(ser_string)
assert len(py_graph) == 1
assert_object_equality(simple_graph, py_graph[0])
def test_ser_pass():
_, graph = get_simple_graph()
ser_pass = SerializationPass('mypass_token')
fname = ser_pass.tmpfile.name
ser_pass.do_pass(ops=[graph])
assert os.path.getsize(fname) > 0
os.unlink(fname)
def test_hetr_send_recv_graph_serialization():
"""
test serializing send/recv ops defined in comm_nodes for hetr communication
"""
z, recv_x, recv_x_plus_one, send_x, x_plus_one, from_node, send_x_plus_one = \
create_send_recv_graph()
ser_string = ser.serialize_graph([z])
py_graph = ser.deserialize_graph(ser_string)
orig_graph = Op.all_op_references([z])
for o1, o2 in zip(sorted(py_graph, key=lambda x: x.uuid),
sorted(orig_graph, key=lambda x: x.uuid)):
assert_object_equality(o1, o2)
def test_all_op_references():
base_op, simple_graph = get_simple_graph()
leaf_all_ops = Op.all_op_references([simple_graph])
assert base_op in leaf_all_ops
assert simple_graph in leaf_all_ops
base_all_ops = Op.all_op_references([base_op])
assert base_op in base_all_ops
assert simple_graph not in base_all_ops
| 34.704433
| 93
| 0.690277
|
c672aef23bbfc0d5ba55d84917d192681a65c318
| 589
|
py
|
Python
|
manti_by/apps/blog/migrations/0014_auto_20180619_1106.py
|
manti-by/m2
|
ee2d2bad412c265962675c94dbfd29cdec07910c
|
[
"BSD-3-Clause"
] | 2
|
2017-09-07T09:28:29.000Z
|
2018-04-10T03:03:32.000Z
|
manti_by/apps/blog/migrations/0014_auto_20180619_1106.py
|
manti-by/m2
|
ee2d2bad412c265962675c94dbfd29cdec07910c
|
[
"BSD-3-Clause"
] | 11
|
2021-03-23T13:59:39.000Z
|
2022-02-02T10:16:58.000Z
|
manti_by/apps/blog/migrations/0014_auto_20180619_1106.py
|
manti-by/Manti.by
|
ee2d2bad412c265962675c94dbfd29cdec07910c
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 1.11.13 on 2018-06-19 11:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("blog", "0013_auto_20180525_1219")]
operations = [
migrations.AddField(
model_name="post",
name="updated",
field=models.DateTimeField(auto_now=True, verbose_name="Updated, UTC"),
),
migrations.AlterField(
model_name="post",
name="created",
field=models.DateTimeField(auto_now_add=True, verbose_name="Created, UTC"),
),
]
| 26.772727
| 87
| 0.611205
|
d7d4c7ad012e3b43e9009c14cb452f44156e6b61
| 2,007
|
py
|
Python
|
LogOX/main.py
|
MrEluzium/LogOX
|
fb1242be0bab223efa7837e9d319232f7a0cd80e
|
[
"Apache-2.0"
] | 3
|
2022-01-22T18:41:39.000Z
|
2022-02-05T13:52:11.000Z
|
LogOX/main.py
|
MrEluzium/LogOX
|
fb1242be0bab223efa7837e9d319232f7a0cd80e
|
[
"Apache-2.0"
] | null | null | null |
LogOX/main.py
|
MrEluzium/LogOX
|
fb1242be0bab223efa7837e9d319232f7a0cd80e
|
[
"Apache-2.0"
] | 1
|
2022-02-23T09:41:06.000Z
|
2022-02-23T09:41:06.000Z
|
"""
Copyright 2022 Artem Voronov <mreluzium@mail.ru>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import logging
from logging import handlers
from datetime import datetime
from pathlib import Path
from LogOX.formatter import FormatterOX
_FORMATTER = FormatterOX()
def _get_console_handler():
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(_FORMATTER)
return console_handler
def _get_file_handler():
try:
Path("log").mkdir(exist_ok=True)
except OSError:
raise
else:
file_handler = handlers.TimedRotatingFileHandler(r'log\{:%Y-%m-%d}.log'.format(datetime.now()), when='D')
file_handler.setFormatter(_FORMATTER)
return file_handler
def get_logger(name=None, level=logging.INFO):
if not name:
        # Get the name of the module for which the logger is being created
# __import__('inspect').stack()[1].filename returns caller's full file path
# .split("\\")[-1] returns module's filename
# [:-3] removes .py at the end of filename
name = __import__('inspect').stack()[1].filename.split("\\")[-1][:-3]
logger = logging.getLogger(name)
logger.setLevel(level)
logger.addHandler(_get_console_handler())
try:
logger.addHandler(_get_file_handler())
except OSError:
logger.exception("Can't create log file. Console-only logging.")
    logger.propagate = False  # Disable propagation so records are not duplicated by ancestor handlers
return logger
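# Usage sketch (assumption, not part of this file):
#
#     from LogOX.main import get_logger
#     log = get_logger()             # name inferred from the calling module
#     log.info("service started")    # written to stdout and, when possible, the log file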
| 31.857143
| 113
| 0.709018
|
bd9b1bf39d0b5c82f80912c43597a171e265792b
| 509
|
py
|
Python
|
docs_src/async_methods/query_single/example.py
|
samnimoh/pydapper
|
28e02a82339c4373aae043483868c84946e4aca9
|
[
"MIT"
] | 19
|
2022-01-19T15:30:57.000Z
|
2022-03-10T15:15:56.000Z
|
docs_src/async_methods/query_single/example.py
|
samnimoh/pydapper
|
28e02a82339c4373aae043483868c84946e4aca9
|
[
"MIT"
] | 17
|
2022-01-19T06:23:35.000Z
|
2022-03-06T17:09:25.000Z
|
docs_src/async_methods/query_single/example.py
|
samnimoh/pydapper
|
28e02a82339c4373aae043483868c84946e4aca9
|
[
"MIT"
] | 2
|
2022-02-05T02:18:02.000Z
|
2022-02-17T08:39:54.000Z
|
import asyncio
import datetime
from dataclasses import dataclass
from pydapper import connect_async
@dataclass
class Task:
id: int
description: str
due_date: datetime.date
owner_id: int
async def main():
async with connect_async() as commands:
task = await commands.query_single_async("select * from task where id = 1", model=Task)
print(task)
# Task(id=1, description='Set up a test database', due_date=datetime.date(2021, 12, 31), owner_id=1)
asyncio.run(main())
| 20.36
| 104
| 0.711198
|
c9d9d70a62c52580b221ccfbcf8cb99754d78f51
| 6,296
|
py
|
Python
|
locobot/test/test_interpreter_mock.py
|
Dhiraj100892/droidlet
|
e4ea578672531524552b6ff021165fc9371b0ec8
|
[
"MIT"
] | null | null | null |
locobot/test/test_interpreter_mock.py
|
Dhiraj100892/droidlet
|
e4ea578672531524552b6ff021165fc9371b0ec8
|
[
"MIT"
] | null | null | null |
locobot/test/test_interpreter_mock.py
|
Dhiraj100892/droidlet
|
e4ea578672531524552b6ff021165fc9371b0ec8
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import unittest
import sys
import os
BASE_DIR = os.path.join(os.path.dirname(__file__), "../../")
sys.path.append(BASE_DIR)
import numpy as np
from base_fakeagent_test_case import BaseFakeAgentTestCase
import rotation
from all_test_commands import MOVE_COMMANDS, GROUND_TRUTH_PARSES
from test_utils import assert_turn_degree
CUBE1 = (9, 0, 4)
CUBE2 = (9, 0, 10)
TOY = (2, 0, 4)
CAMERA_HEIGHT = 1.0
def add_two_cubes(test):
test.agent.add_object(CUBE1, tags=["cube", "_physical_object"])
test.agent.add_object(CUBE2, tags=["cube", "_physical_object"])
def add_a_toy(test):
test.agent.add_object(TOY, tags=["toy", "_physical_object"])
# test.set_looking_at(test.cube_right[0][0])
class MoveAbsoluteTest(BaseFakeAgentTestCase):
"""Test for Move."""
def assert_move(self, reldir, steps, changes):
old_pos = changes[0]["agent"]["pos"]
new_pos = changes[1]["agent"]["pos"]
start_base_yaw = changes[0]["agent"]["base_yaw"]
reldir_vec = rotation.DIRECTIONS[reldir]
dir_vec = rotation.transform(reldir_vec, start_base_yaw, 0, inverted=True)
dir_vec = np.array([dir_vec[0], dir_vec[2]], dtype="float32")
tocheck_pos = np.around(old_pos + steps * dir_vec, 2)
self.assertEqual(new_pos[0], tocheck_pos[0])
self.assertEqual(new_pos[1], tocheck_pos[1])
def setUp(self):
super().setUp()
def test_move_forward(self):
d = MOVE_COMMANDS["move_forward"]
changes = self.handle_logical_form(d)
self.assert_move("FRONT", 1, changes)
def test_move_right(self):
d = GROUND_TRUTH_PARSES["go right 3 feet"]
changes = self.handle_logical_form(d)
self.assert_move("RIGHT", 3, changes)
def test_move_left(self):
d = GROUND_TRUTH_PARSES["go left 4 feet"]
changes = self.handle_logical_form(d)
self.assert_move("LEFT", 4, changes)
def test_move_coordinates(self):
d = MOVE_COMMANDS["move to -7 0 -8"]
target = np.array((-7, -8))
self.handle_logical_form(d)
# check that agent moved
self.assertLessEqual(np.linalg.norm(self.agent.pos - target), 1)
def test_action_sequence_order(self):
d = MOVE_COMMANDS["move to 3 0 2 then 7 0 7"]
target = np.array((7, 7))
print(d)
self.handle_logical_form(d)
print(self.agent.pos)
self.assertLessEqual(np.linalg.norm(self.agent.pos - target), 1)
def test_stop(self):
# start moving
target = np.array((-7, -8))
d = MOVE_COMMANDS["move to -7 0 -8"]
self.handle_logical_form(d, max_steps=5)
# stop
d = MOVE_COMMANDS["stop"]
self.handle_logical_form(d)
# assert that move did not complete
self.assertGreater(np.linalg.norm(self.agent.pos - target), 1)
class MoveRefObjectsTest(BaseFakeAgentTestCase):
def setUp(self):
super().setUp()
add_two_cubes(self)
# do this one after we have players
# def test_move_here(self):
# d = MOVE_COMMANDS["move here"]
# self.handle_logical_form(d)
#
# # check that agent moved
# self.assertLessEqual(euclid_dist(self.agent.pos, self.get_speaker_pos()), 1)
def test_go_to_the_cube(self):
d = MOVE_COMMANDS["go to the cube"]
self.handle_logical_form(d)
assert np.abs(self.agent.pos[1] - CUBE1[2]) < 1 or np.abs(self.agent.pos[1] - CUBE2[2]) < 1
def test_between_cubes(self):
d = MOVE_COMMANDS["go between the cubes"]
self.handle_logical_form(d)
print(self.agent.pos)
assert self.agent.pos[1] > CUBE1[2] and self.agent.pos[1] < CUBE2[2]
class GetBringTest(BaseFakeAgentTestCase):
def setUp(self):
super().setUp()
add_a_toy(self)
def test_get_toy(self):
d = MOVE_COMMANDS["get the toy"]
self.handle_logical_form(d)
d = MOVE_COMMANDS["move to -7 0 -8"]
self.handle_logical_form(d)
p = self.agent.world.objects[0]["pos"]
ap = self.agent.pos
assert (np.abs(ap[0] - p[0]) + np.abs(ap[1] - p[2])) < 1
class TurnTest(BaseFakeAgentTestCase):
"""Tests turn.
Left turn is positive yaw, right turn is negative yaw.
"""
def setUp(self):
super().setUp()
def test_turn_right(self):
d = GROUND_TRUTH_PARSES["turn right 90 degrees"]
changes = self.handle_logical_form(d)
old_yaw = changes[0]["agent"]["base_yaw"]
new_yaw = changes[1]["agent"]["base_yaw"]
assert_turn_degree(old_yaw, new_yaw, -90)
def test_turn_left(self):
d = GROUND_TRUTH_PARSES["turn left 90 degrees"]
changes = self.handle_logical_form(d)
old_yaw = changes[0]["agent"]["base_yaw"]
new_yaw = changes[1]["agent"]["base_yaw"]
assert_turn_degree(old_yaw, new_yaw, 90)
class DanceTest(BaseFakeAgentTestCase):
"""Tests for dance."""
def setUp(self):
super().setUp()
self.agent.add_object(CUBE1, tags=["cube"])
self.agent.world.players = []
def test_dance(self):
# just checks for exceptions
d = GROUND_TRUTH_PARSES["wave"]
self.handle_logical_form(d)
def test_look_at_cube(self):
d = MOVE_COMMANDS["look at the cube"]
self.handle_logical_form(d)
camera_pos = [self.agent.pos[0], CAMERA_HEIGHT, self.agent.pos[1]]
loc = self.agent.world.get_line_of_sight(
camera_pos, self.agent.base_yaw + self.agent.pan, self.agent.pitch
)
self.assertLessEqual(
np.linalg.norm(loc - np.array(self.agent.world.objects[0]["pos"])), 0.01
)
d = MOVE_COMMANDS["move to -7 0 -8"]
self.handle_logical_form(d)
d = MOVE_COMMANDS["look at the cube"]
self.handle_logical_form(d)
camera_pos = [self.agent.pos[0], CAMERA_HEIGHT, self.agent.pos[1]]
loc = self.agent.world.get_line_of_sight(
camera_pos, self.agent.base_yaw + self.agent.pan, self.agent.pitch
)
self.assertLessEqual(
np.linalg.norm(loc - np.array(self.agent.world.objects[0]["pos"])), 0.01
)
if __name__ == "__main__":
unittest.main()
| 30.563107
| 99
| 0.629447
|
47b2c4864606d4566c19f7b41f90b7937c7127b5
| 374
|
py
|
Python
|
apps/asset/serializer/idc.py
|
plsof/tabops_api
|
39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8
|
[
"MIT"
] | 1
|
2019-07-31T07:34:38.000Z
|
2019-07-31T07:34:38.000Z
|
apps/asset/serializer/idc.py
|
plsof/tabops_api
|
39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8
|
[
"MIT"
] | 9
|
2019-12-05T00:39:29.000Z
|
2022-02-10T14:13:29.000Z
|
apps/asset/serializer/idc.py
|
plsof/tabops_api
|
39f5d2fd5158ae0c22e43ab6ff7e2b07a68a62d8
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from ..models import Idc
class IdcSerializer(serializers.ModelSerializer):
isp_name = serializers.SerializerMethodField()
class Meta:
model = Idc
fields = ['id', 'name', 'address', 'isp', 'bandwidth', 'ip_range', 'comment', 'isp_name']
def get_isp_name(self, obj):
return obj.get_isp_display()
| 24.933333
| 97
| 0.681818
|
fc1cd0da6bddf183b034db63f73a3ae6d2bcb57e
| 261
|
py
|
Python
|
Leetcode/0078. Subsets/0078.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
Leetcode/0078. Subsets/0078.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
Leetcode/0078. Subsets/0078.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def subsets(self, nums: List[int]) -> List[List[int]]:
ans = []
def dfs(s: int, path: List[int]) -> None:
ans.append(path)
for i in range(s, len(nums)):
dfs(i + 1, path + [nums[i]])
dfs(0, [])
return ans
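# Illustrative trace (not part of the original solution): Solution().subsets([1, 2, 3])
# enumerates subsets in DFS order:
# [[], [1], [1, 2], [1, 2, 3], [1, 3], [2], [2, 3], [3]]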
| 20.076923
| 56
| 0.521073
|
63690e89fc7c44fce7014ed4d889f0d77ae5360c
| 7,457
|
py
|
Python
|
tests/unit_tests/test_simulator.py
|
larioandr/pydesim
|
4b04c07ddff71be0771270301c90cffb91886083
|
[
"MIT"
] | 1
|
2020-09-08T11:28:21.000Z
|
2020-09-08T11:28:21.000Z
|
tests/unit_tests/test_simulator.py
|
larioandr/pydesim
|
4b04c07ddff71be0771270301c90cffb91886083
|
[
"MIT"
] | null | null | null |
tests/unit_tests/test_simulator.py
|
larioandr/pydesim
|
4b04c07ddff71be0771270301c90cffb91886083
|
[
"MIT"
] | null | null | null |
from unittest.mock import patch, ANY, Mock
import pytest
from pydesim import simulate, Model
def test_simulate_signature():
ret = simulate({}, init=None, fin=None, handlers={}, stime_limit=1)
assert ret.stime == 0
assert ret.data == {}
def test_simulate_executes_init_and_fin():
"""In this test we validate that `simulate()` calls init and fin methods.
"""
data = []
def init(sim):
sim.data.append(1)
def fin(sim):
sim.data.append('A')
ret = simulate(data, init=init, fin=fin, handlers={}, stime_limit=1)
assert ret.stime == 0
assert ret.data == [1, 'A']
def test_simulate_accepts_classes_with_create_method():
class ModelData:
default_value = 'correct value'
def __init__(self, value='incorrect value'):
self.value = value
@classmethod
def create(cls):
return ModelData(cls.default_value)
ret_for_default_value = simulate(ModelData)
ModelData.default_value = 'some new value'
ret_for_updated_value = simulate(ModelData)
assert ret_for_default_value.data.value == 'correct value'
assert ret_for_updated_value.data.value == 'some new value'
def test_simulate_accept_classes_without_create():
class ModelData:
default_value = 'default value'
def __init__(self):
self.value = ModelData.default_value
ret_for_default_value = simulate(ModelData)
ModelData.default_value = 'new value'
ret_for_updated_value = simulate(ModelData)
assert ret_for_default_value.data.value == 'default value'
assert ret_for_updated_value.data.value == 'new value'
def test_scheduled_methods_are_called_in_chain():
def write_some_data(sim, value='first'):
sim.data.append(value)
if value == 'first':
sim.schedule(3, write_some_data, args=('second',))
elif value == 'second':
sim.schedule(10, write_some_data, kwargs={'value': 'third'})
def init(sim):
sim.schedule(1, write_some_data)
ret = simulate([], init=init)
assert ret.stime == 14
assert ret.data == ['first', 'second', 'third']
def test_handlers_can_be_passed_and_accessed_via_sim_handlers_field():
def f1(sim):
sim.data.append(1)
sim.schedule(0, sim.handlers.get('second'))
def f2(sim):
sim.data.append(2)
sim.schedule(0, sim.handlers.third)
def f3(sim):
sim.data.append(3)
def init(sim):
sim.schedule(0, sim.handlers['first'])
ret = simulate([], init=init, handlers=dict(first=f1, second=f2, third=f3))
assert ret.data == [1, 2, 3]
def test_schedule_multiple_events():
def handler(sim):
sim.data.append(sim.stime)
def init(sim):
sim.schedule(1, handler)
sim.schedule(2, handler)
ret = simulate([], init=init)
assert ret.data == [1, 2]
def test_schedule_orders_events_by_time():
def f(sim):
sim.data.append(f'{int(sim.stime)}F')
sim.schedule(1.0, g)
def g(sim):
sim.data.append(f'{int(sim.stime)}G')
def h(sim):
sim.data.append(f'{int(sim.stime)}H')
def init(sim):
sim.schedule(1.0, f)
sim.schedule(4.0, f)
sim.schedule(3.0, h)
ret = simulate([], init)
assert ret.data == ['1F', '2G', '3H', '4F', '5G']
def test_schedule_accept_none_handler_by_changing_only_time():
def init(sim):
sim.schedule(5)
ret = simulate([], init=init)
assert ret.stime == 5
def test_schedule_negative_delays_not_allowed():
def invalid_init(sim):
sim.schedule(-1)
def invalid_handler(sim):
sim.schedule(-0.1)
def valid_init(sim):
sim.schedule(10, invalid_handler)
with pytest.raises(ValueError) as excinfo1:
simulate([], init=invalid_init)
with pytest.raises(ValueError) as excinfo2:
simulate([], init=valid_init)
assert "negative delay" in str(excinfo1.value).lower()
assert "negative delay" in str(excinfo2.value).lower()
def test_stime_is_readonly():
def valid_handler(sim):
sim.data.append('OK')
def valid_init(sim):
sim.schedule(1, sim.handlers.handler)
with pytest.raises(AttributeError) as excinfo1:
def invalid_init(sim):
sim.stime = 10
simulate([], init=invalid_init)
with pytest.raises(AttributeError) as excinfo2:
def invalid_handler(sim):
sim.stime += 1
simulate([], init=valid_init, handlers={'handler': invalid_handler})
with pytest.raises(AttributeError) as excinfo3:
def invalid_fin(sim):
sim.stime -= 1
simulate([], init=valid_init, fin=invalid_fin, handlers={
'handler': valid_handler
})
assert 'set attribute' in str(excinfo1.value)
assert 'set attribute' in str(excinfo2.value)
assert 'set attribute' in str(excinfo3.value)
def test_sim_provide_cancel_operation():
def init(sim):
eid = sim.schedule(1)
sim.cancel(eid)
ret = simulate([], init)
assert ret.stime == 0
assert ret.num_events == 0
def test_simulate_with_stime_limit():
def f(sim):
sim.data.append('f')
sim.schedule(2, g)
def g(sim):
sim.data.append('g')
def init(sim):
sim.schedule(1, f)
sim.data.append('init')
ret1 = simulate([], init, stime_limit=0.5)
ret2 = simulate([], init, stime_limit=1)
ret3 = simulate([], init, stime_limit=2)
ret4 = simulate([], init, stime_limit=3)
assert ret1.data == ['init']
assert ret2.data == ['init', 'f']
assert ret3.data == ['init', 'f']
assert ret4.data == ['init', 'f', 'g']
assert ret1.num_events == 0
assert ret2.num_events == 1
assert ret3.num_events == 1
assert ret4.num_events == 2
assert ret1.stime == 1
assert ret2.stime == 3
assert ret3.stime == 3
assert ret4.stime == 3
def test_simulate_accepts_params():
params = {'x': 10, 'y': 'hello'}
with patch('pydesim.simulator.Simulator') as SimulatorMock:
simulate([], params=params)
SimulatorMock.assert_called_with(ANY, [], ANY, params, ANY)
def test_params_accessible_via_getattr_and_getitem():
params = {'x': 10, 'y': 'hello'}
def init(sim):
assert sim.params.x == 10
assert sim.params['x'] == 10
assert sim.params.y == 'hello'
simulate([], init=init, params=params)
def test_array_returned_when_params_are_given_in_array():
params = [{'x': 1}, {'x': 2}]
data_class = Mock()
result = simulate(data_class, params=params)
assert result[0].params.as_dict() == {'x': 1}
assert result[1].params.as_dict() == {'x': 2}
def test_simulate_calls_constructor_without_parameters_but_with_sim():
with patch('pydesim.simulator.Simulator') as SimulatorMock:
class SomeModel(Model):
def __init__(self, sim):
assert isinstance(sim, SimulatorMock)
super().__init__(self, sim)
assert sim.params.x == 10
assert sim.params.y == 'hello'
result = simulate(SomeModel, params={'x': 10, 'y': 'hello'})
| 26.727599
| 80
| 0.603326
|
f88d87f82450aa8525f098a1687cfb94ccc080d3
| 275
|
py
|
Python
|
bstnode.py
|
Vlada04/bst
|
bda30f5e5bf55426c996b49f018aed5e9919e169
|
[
"MIT"
] | null | null | null |
bstnode.py
|
Vlada04/bst
|
bda30f5e5bf55426c996b49f018aed5e9919e169
|
[
"MIT"
] | null | null | null |
bstnode.py
|
Vlada04/bst
|
bda30f5e5bf55426c996b49f018aed5e9919e169
|
[
"MIT"
] | null | null | null |
"""
File: bstnode.py
Author: Ken Lambert
"""
class BSTNode(object):
"""Represents a node for a linked binary search tree."""
def __init__(self, data, left = None, right = None):
self.data = data
self.left = left
self.right = right
| 22.916667
| 61
| 0.589091
|
f22e11f52ab9d95b91cc4fb8d4ea20a8c6e39356
| 272
|
py
|
Python
|
tests/prime_number/sieve_of_eratosthenes/std.py
|
kagemeka/python
|
486ce39d97360b61029527bacf00a87fdbcf552c
|
[
"MIT"
] | null | null | null |
tests/prime_number/sieve_of_eratosthenes/std.py
|
kagemeka/python
|
486ce39d97360b61029527bacf00a87fdbcf552c
|
[
"MIT"
] | null | null | null |
tests/prime_number/sieve_of_eratosthenes/std.py
|
kagemeka/python
|
486ce39d97360b61029527bacf00a87fdbcf552c
|
[
"MIT"
] | null | null | null |
from kgmk.dsa.number_theory.sieve_of_eratosthenes import (
SieveOfEratosthenes,
)
def test():
fn = SieveOfEratosthenes()
a = fn(1000000)
print(a[:10])
a = fn.gpf(10000)
print(a[:10])
a = fn.lpf(10000)
print(a[:10])
if __name__ == '__main__':
test()
| 14.315789
| 58
| 0.647059
|
3edae763099237eefaba2e1b52f56533c2013a1d
| 304
|
py
|
Python
|
modules/2.79/bpy/types/BevelModifier.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/BevelModifier.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/BevelModifier.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
class BevelModifier:
angle_limit = None
edge_weight_method = None
limit_method = None
loop_slide = None
material = None
offset_type = None
profile = None
segments = None
use_clamp_overlap = None
use_only_vertices = None
vertex_group = None
width = None
| 17.882353
| 29
| 0.664474
|
7d4376004027f161aa144bd29483c01cbea342bb
| 25,412
|
py
|
Python
|
test/test_post_flag_scheduled_changes_input.py
|
launchdarkly/api-client-python
|
b72bd94fb65ac57bd95df5767aebcdaff50e5cb6
|
[
"Apache-2.0"
] | 6
|
2020-02-06T20:17:25.000Z
|
2021-12-28T20:13:34.000Z
|
test/test_post_flag_scheduled_changes_input.py
|
launchdarkly/api-client-python
|
b72bd94fb65ac57bd95df5767aebcdaff50e5cb6
|
[
"Apache-2.0"
] | 7
|
2019-02-18T21:51:47.000Z
|
2021-09-03T17:49:33.000Z
|
test/test_post_flag_scheduled_changes_input.py
|
launchdarkly/api-client-python
|
b72bd94fb65ac57bd95df5767aebcdaff50e5cb6
|
[
"Apache-2.0"
] | 6
|
2019-08-02T16:10:31.000Z
|
2021-05-23T17:47:03.000Z
|
# -*- coding: utf-8 -*-
"""
LaunchDarkly REST API
# Overview ## Authentication All REST API resources are authenticated with either [personal or service access tokens](https://docs.launchdarkly.com/home/account-security/api-access-tokens), or session cookies. Other authentication mechanisms are not supported. You can manage personal access tokens on your [Account settings](https://app.launchdarkly.com/settings/tokens) page. LaunchDarkly also has SDK keys, mobile keys, and client-side IDs that are used by our server-side SDKs, mobile SDKs, and client-side SDKs, respectively. **These keys cannot be used to access our REST API**. These keys are environment-specific, and can only perform read-only operations (fetching feature flag settings). | Auth mechanism | Allowed resources | Use cases | | ----------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------- | -------------------------------------------------- | | [Personal access tokens](https://docs.launchdarkly.com/home/account-security/api-access-tokens) | Can be customized on a per-token basis | Building scripts, custom integrations, data export | | SDK keys | Can only access read-only SDK-specific resources and the firehose, restricted to a single environment | Server-side SDKs, Firehose API | | Mobile keys | Can only access read-only mobile SDK-specific resources, restricted to a single environment | Mobile SDKs | | Client-side ID | Single environment, only flags marked available to client-side | Client-side JavaScript | > #### Keep your access tokens and SDK keys private > > Access tokens should _never_ be exposed in untrusted contexts. Never put an access token in client-side JavaScript, or embed it in a mobile application. LaunchDarkly has special mobile keys that you can embed in mobile apps. If you accidentally expose an access token or SDK key, you can reset it from your [Account Settings](https://app.launchdarkly.com/settings#/tokens) page. > > The client-side ID is safe to embed in untrusted contexts. It's designed for use in client-side JavaScript. ### Via request header The preferred way to authenticate with the API is by adding an `Authorization` header containing your access token to your requests. The value of the `Authorization` header must be your access token. Manage personal access tokens from the [Account Settings](https://app.launchdarkly.com/settings/tokens) page. ### Via session cookie For testing purposes, you can make API calls directly from your web browser. If you're logged in to the application, the API will use your existing session to authenticate calls. If you have a [role](https://docs.launchdarkly.com/home/team/built-in-roles) other than Admin, or have a [custom role](https://docs.launchdarkly.com/home/team/custom-roles) defined, you may not have permission to perform some API calls. You will receive a `401` response code in that case. > ### Modifying the Origin header causes an error > > LaunchDarkly validates that the Origin header for any API request authenticated by a session cookie matches the expected Origin header. The expected Origin header is `https://app.launchdarkly.com`. > > If the Origin header does not match what's expected, LaunchDarkly returns an error. This error can prevent the LaunchDarkly app from working correctly. > > Any browser extension that intentionally changes the Origin header can cause this problem. 
For example, the `Allow-Control-Allow-Origin: *` Chrome extension changes the Origin header to `http://evil.com` and causes the app to fail. > > To prevent this error, do not modify your Origin header. > > LaunchDarkly does not require origin matching when authenticating with an access token, so this issue does not affect normal API usage. ## Representations All resources expect and return JSON response bodies. Error responses will also send a JSON body. Read [Errors](#section/Errors) for a more detailed description of the error format used by the API. In practice this means that you always get a response with a `Content-Type` header set to `application/json`. In addition, request bodies for `PUT`, `POST`, `REPORT` and `PATCH` requests must be encoded as JSON with a `Content-Type` header set to `application/json`. ### Summary and detailed representations When you fetch a list of resources, the response includes only the most important attributes of each resource. This is a _summary representation_ of the resource. When you fetch an individual resource (for example, a single feature flag), you receive a _detailed representation_ containing all of the attributes of the resource. The best way to find a detailed representation is to follow links. Every summary representation includes a link to its detailed representation. ### Links and addressability The best way to navigate the API is by following links. These are attributes in representations that link to other resources. The API always uses the same format for links: - Links to other resources within the API are encapsulated in a `_links` object. - If the resource has a corresponding link to HTML content on the site, it is stored in a special `_site` link. Each link has two attributes: an href (the URL) and a type (the content type). For example, a feature resource might return the following: ```json { \"_links\": { \"parent\": { \"href\": \"/api/features\", \"type\": \"application/json\" }, \"self\": { \"href\": \"/api/features/sort.order\", \"type\": \"application/json\" } }, \"_site\": { \"href\": \"/features/sort.order\", \"type\": \"text/html\" } } ``` From this, you can navigate to the parent collection of features by following the `parent` link, or navigate to the site page for the feature by following the `_site` link. Collections are always represented as a JSON object with an `items` attribute containing an array of representations. Like all other representations, collections have `_links` defined at the top level. Paginated collections include `first`, `last`, `next`, and `prev` links containing a URL with the respective set of elements in the collection. ## Updates Resources that accept partial updates use the `PATCH` verb, and support the [JSON Patch](https://datatracker.ietf.org/doc/html/rfc6902) format. Some resources also support the [JSON Merge Patch](https://datatracker.ietf.org/doc/html/rfc7386) format. In addition, some resources support optional comments that can be submitted with updates. Comments appear in outgoing webhooks, the audit log, and other integrations. ### Updates via JSON Patch [JSON Patch](https://datatracker.ietf.org/doc/html/rfc6902) is a way to specify the modifications to perform on a resource. For example, in this feature flag representation: ```json { \"name\": \"New recommendations engine\", \"key\": \"engine.enable\", \"description\": \"This is the description\", ... 
} ``` You can change the feature flag's description with the following patch document: ```json [{ \"op\": \"replace\", \"path\": \"/description\", \"value\": \"This is the new description\" }] ``` JSON Patch documents are always arrays. You can specify multiple modifications to perform in a single request. You can also test that certain preconditions are met before applying the patch: ```json [ { \"op\": \"test\", \"path\": \"/version\", \"value\": 10 }, { \"op\": \"replace\", \"path\": \"/description\", \"value\": \"The new description\" } ] ``` The above patch request tests whether the feature flag's `version` is `10`, and if so, changes the feature flag's description. Attributes that aren't editable, like a resource's `_links`, have names that start with an underscore. ### Updates via JSON Merge Patch The API also supports the [JSON Merge Patch](https://datatracker.ietf.org/doc/html/rfc7386) format, as well as the [Update feature flag](/tag/Feature-flags#operation/patchFeatureFlag) resource. JSON Merge Patch is less expressive than JSON Patch but in many cases, it is simpler to construct a merge patch document. For example, you can change a feature flag's description with the following merge patch document: ```json { \"description\": \"New flag description\" } ``` ### Updates with comments You can submit optional comments with `PATCH` changes. The [Update feature flag](/tag/Feature-flags#operation/patchFeatureFlag) resource supports comments. To submit a comment along with a JSON Patch document, use the following format: ```json { \"comment\": \"This is a comment string\", \"patch\": [{ \"op\": \"replace\", \"path\": \"/description\", \"value\": \"The new description\" }] } ``` To submit a comment along with a JSON Merge Patch document, use the following format: ```json { \"comment\": \"This is a comment string\", \"merge\": { \"description\": \"New flag description\" } } ``` ### Updates via semantic patches The API also supports the Semantic patch format. A semantic `PATCH` is a way to specify the modifications to perform on a resource as a set of executable instructions. JSON Patch uses paths and a limited set of operations to describe how to transform the current state of the resource into a new state. Semantic patch allows you to be explicit about intent using precise, custom instructions. In many cases, semantic patch instructions can also be defined independently of the current state of the resource. This can be useful when defining a change that may be applied at a future date. For example, in this feature flag configuration in environment Production: ```json { \"name\": \"Alternate sort order\", \"kind\": \"boolean\", \"key\": \"sort.order\", ... \"environments\": { \"production\": { \"on\": true, \"archived\": false, \"salt\": \"c29ydC5vcmRlcg==\", \"sel\": \"8de1085cb7354b0ab41c0e778376dfd3\", \"lastModified\": 1469131558260, \"version\": 81, \"targets\": [ { \"values\": [ \"Gerhard.Little@yahoo.com\" ], \"variation\": 0 }, { \"values\": [ \"1461797806429-33-861961230\", \"438580d8-02ee-418d-9eec-0085cab2bdf0\" ], \"variation\": 1 } ], \"rules\": [], \"fallthrough\": { \"variation\": 0 }, \"offVariation\": 1, \"prerequisites\": [], \"_site\": { \"href\": \"/default/production/features/sort.order\", \"type\": \"text/html\" } } } } ``` You can add a date you want a user to be removed from the feature flag's user targets. 
For example, โremove user 1461797806429-33-861961230 from the user target for variation 0 on the Alternate sort order flag in the production environment on Wed Jul 08 2020 at 15:27:41 pmโ. This is done using the following: ```json { \"comment\": \"update expiring user targets\", \"instructions\": [ { \"kind\": \"removeExpireUserTargetDate\", \"userKey\": \"userKey\", \"variationId\": \"978d53f9-7fe3-4a63-992d-97bcb4535dc8\" }, { \"kind\": \"updateExpireUserTargetDate\", \"userKey\": \"userKey2\", \"variationId\": \"978d53f9-7fe3-4a63-992d-97bcb4535dc8\", \"value\": 1587582000000 }, { \"kind\": \"addExpireUserTargetDate\", \"userKey\": \"userKey3\", \"variationId\": \"978d53f9-7fe3-4a63-992d-97bcb4535dc8\", \"value\": 1594247266386 } ] } ``` Here is another example. In this feature flag configuration: ```json { \"name\": \"New recommendations engine\", \"key\": \"engine.enable\", \"environments\": { \"test\": { \"on\": true } } } ``` You can change the feature flag's description with the following patch document as a set of executable instructions. For example, โadd user X to targets for variation Y and remove user A from targets for variation B for test flagโ: ```json { \"comment\": \"\", \"instructions\": [ { \"kind\": \"removeUserTargets\", \"values\": [\"438580d8-02ee-418d-9eec-0085cab2bdf0\"], \"variationId\": \"852cb784-54ff-46b9-8c35-5498d2e4f270\" }, { \"kind\": \"addUserTargets\", \"values\": [\"438580d8-02ee-418d-9eec-0085cab2bdf0\"], \"variationId\": \"1bb18465-33b6-49aa-a3bd-eeb6650b33ad\" } ] } ``` > ### Supported semantic patch API endpoints > > - [Update feature flag](/tag/Feature-flags#operation/patchFeatureFlag) > - [Update expiring user targets on feature flag](/tag/Feature-flags#operation/patchExpiringUserTargets) > - [Update expiring user target for flags](/tag/User-settings#operation/patchExpiringFlagsForUser) > - [Update expiring user targets on segment](/tag/Segments#operation/patchExpiringUserTargetsForSegment) ## Errors The API always returns errors in a common format. Here's an example: ```json { \"code\": \"invalid_request\", \"message\": \"A feature with that key already exists\", \"id\": \"30ce6058-87da-11e4-b116-123b93f75cba\" } ``` The general class of error is indicated by the `code`. The `message` is a human-readable explanation of what went wrong. The `id` is a unique identifier. Use it when you're working with LaunchDarkly support to debug a problem with a specific API call. ### HTTP Status - Error Response Codes | Code | Definition | Desc. | Possible Solution | | ---- | ----------------- | ------------------------------------------------------------------------------------------- | ---------------------------------------------------------------- | | 400 | Bad Request | A request that fails may return this HTTP response code. | Ensure JSON syntax in request body is correct. | | 401 | Unauthorized | User doesn't have permission to an API call. | Ensure your SDK key is good. | | 403 | Forbidden | User does not have permission for operation. | Ensure that the user or access token has proper permissions set. | | 409 | Conflict | The API request could not be completed because it conflicted with a concurrent API request. | Retry your request. | | 429 | Too many requests | See [Rate limiting](/#section/Rate-limiting). | Wait and try again later. | ## CORS The LaunchDarkly API supports Cross Origin Resource Sharing (CORS) for AJAX requests from any origin. If an `Origin` header is given in a request, it will be echoed as an explicitly allowed origin. 
Otherwise, a wildcard is returned: `Access-Control-Allow-Origin: *`. For more information on CORS, see the [CORS W3C Recommendation](http://www.w3.org/TR/cors). Example CORS headers might look like: ```http Access-Control-Allow-Headers: Accept, Content-Type, Content-Length, Accept-Encoding, Authorization Access-Control-Allow-Methods: OPTIONS, GET, DELETE, PATCH Access-Control-Allow-Origin: * Access-Control-Max-Age: 300 ``` You can make authenticated CORS calls just as you would make same-origin calls, using either [token or session-based authentication](#section/Authentication). If youโre using session auth, you should set the `withCredentials` property for your `xhr` request to `true`. You should never expose your access tokens to untrusted users. ## Rate limiting We use several rate limiting strategies to ensure the availability of our APIs. Rate-limited calls to our APIs will return a `429` status code. Calls to our APIs will include headers indicating the current rate limit status. The specific headers returned depend on the API route being called. The limits differ based on the route, authentication mechanism, and other factors. Routes that are not rate limited may not contain any of the headers described below. > ### Rate limiting and SDKs > > LaunchDarkly SDKs are never rate limited and do not use the API endpoints defined here. LaunchDarkly uses a different set of approaches, including streaming/server-sent events and a global CDN, to ensure availability to the routes used by LaunchDarkly SDKs. > > The client-side ID is safe to embed in untrusted contexts. It's designed for use in client-side JavaScript. ### Global rate limits Authenticated requests are subject to a global limit. This is the maximum number of calls that can be made to the API per ten seconds. All personal access tokens on the account share this limit, so exceeding the limit with one access token will impact other tokens. Calls that are subject to global rate limits will return the headers below: | Header name | Description | | ------------------------------ | -------------------------------------------------------------------------------- | | `X-Ratelimit-Global-Remaining` | The maximum number of requests the account is permitted to make per ten seconds. | | `X-Ratelimit-Reset` | The time at which the current rate limit window resets in epoch milliseconds. | We do not publicly document the specific number of calls that can be made globally. This limit may change, and we encourage clients to program against the specification, relying on the two headers defined above, rather than hardcoding to the current limit. ### Route-level rate limits Some authenticated routes have custom rate limits. These also reset every ten seconds. Any access tokens hitting the same route share this limit, so exceeding the limit with one access token may impact other tokens. Calls that are subject to route-level rate limits will return the headers below: | Header name | Description | | ----------------------------- | ----------------------------------------------------------------------------------------------------- | | `X-Ratelimit-Route-Remaining` | The maximum number of requests to the current route the account is permitted to make per ten seconds. | | `X-Ratelimit-Reset` | The time at which the current rate limit window resets in epoch milliseconds. | A _route_ represents a specific URL pattern and verb. 
For example, the [Delete environment](/tag/Environments#operation/deleteEnvironment) endpoint is considered a single route, and each call to delete an environment counts against your route-level rate limit for that route. We do not publicly document the specific number of calls that can be made to each endpoint per ten seconds. These limits may change, and we encourage clients to program against the specification, relying on the two headers defined above, rather than hardcoding to the current limits. ### IP-based rate limiting We also employ IP-based rate limiting on some API routes. If you hit an IP-based rate limit, your API response will include a `Retry-After` header indicating how long to wait before re-trying the call. Clients must wait at least `Retry-After` seconds before making additional calls to our API, and should employ jitter and backoff strategies to avoid triggering rate limits again. ## OpenAPI (Swagger) We have a [complete OpenAPI (Swagger) specification](https://app.launchdarkly.com/api/v2/openapi.json) for our API. You can use this specification to generate client libraries to interact with our REST API in your language of choice. This specification is supported by several API-based tools such as Postman and Insomnia. In many cases, you can directly import our specification to ease use in navigating the APIs in the tooling. ## Client libraries We auto-generate multiple client libraries based on our OpenAPI specification. To learn more, visit [GitHub](https://github.com/search?q=topic%3Alaunchdarkly-api+org%3Alaunchdarkly&type=Repositories). ## Method Overriding Some firewalls and HTTP clients restrict the use of verbs other than `GET` and `POST`. In those environments, our API endpoints that use `PUT`, `PATCH`, and `DELETE` verbs will be inaccessible. To avoid this issue, our API supports the `X-HTTP-Method-Override` header, allowing clients to \"tunnel\" `PUT`, `PATCH`, and `DELETE` requests via a `POST` request. For example, if you wish to call one of our `PATCH` resources via a `POST` request, you can include `X-HTTP-Method-Override:PATCH` as a header. ## Beta resources We sometimes release new API resources in **beta** status before we release them with general availability. Resources that are in beta are still undergoing testing and development. They may change without notice, including becoming backwards incompatible. We try to promote resources into general availability as quickly as possible. This happens after sufficient testing and when we're satisfied that we no longer need to make backwards-incompatible changes. We mark beta resources with a \"Beta\" callout in our documentation, pictured below: > ### This feature is in beta > > To use this feature, pass in a header including the `LD-API-Version` key with value set to `beta`. Use this header with each call. To learn more, read [Beta resources](/#section/Beta-resources). ### Using beta resources To use a beta resource, you must include a header in the request. If you call a beta resource without this header, you'll receive a `403` response. Use this header: ``` LD-API-Version: beta ``` ## Versioning We try hard to keep our REST API backwards compatible, but we occasionally have to make backwards-incompatible changes in the process of shipping new features. These breaking changes can cause unexpected behavior if you don't prepare for them accordingly. Updates to our REST API include support for the latest features in LaunchDarkly. 
We also release a new version of our REST API every time we make a breaking change. We provide simultaneous support for multiple API versions so you can migrate from your current API version to a new version at your own pace. ### Setting the API version per request You can set the API version on a specific request by sending an `LD-API-Version` header, as shown in the example below: ``` LD-API-Version: 20191212 ``` The header value is the version number of the API version you'd like to request. The number for each version corresponds to the date the version was released. In the example above the version `20191212` corresponds to December 12, 2019. ### Setting the API version per access token When creating an access token, you must specify a specific version of the API to use. This ensures that integrations using this token cannot be broken by version changes. Tokens created before versioning was released have their version set to `20160426` (the version of the API that existed before versioning) so that they continue working the same way they did before versioning. If you would like to upgrade your integration to use a new API version, you can explicitly set the header described above. > ### Best practice: Set the header for every client or integration > > We recommend that you set the API version header explicitly in any client or integration you build. > > Only rely on the access token API version during manual testing. # noqa: E501
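To make the guidance above concrete, here is a minimal, non-authoritative sketch of a client that pins an API version with the `LD-API-Version` header, honors `Retry-After` on `429` responses, and tunnels a `PATCH` through `POST` with `X-HTTP-Method-Override`. It assumes the third-party `requests` package; the token value, the `/example-resource` path, and the request body are placeholders for illustration only (fill in the `Authorization` header as described in the Authentication section).
```python
import time
import requests

BASE = "https://app.launchdarkly.com/api/v2"
HEADERS = {
    "Authorization": "api-xxxxxxxxxxxx",   # placeholder access token
    "LD-API-Version": "20191212",          # pin the API version per request
}

def request_with_backoff(method, path, max_attempts=5, **kwargs):
    # Retry on 429, waiting at least Retry-After seconds plus a little jitter.
    for attempt in range(max_attempts):
        resp = requests.request(method, BASE + path, headers=HEADERS, **kwargs)
        if resp.status_code != 429:
            return resp
        wait = float(resp.headers.get("Retry-After", "1")) + 0.1 * attempt
        time.sleep(wait)
    return resp

# Tunnel a PATCH through POST for clients restricted to GET/POST.
resp = requests.post(
    BASE + "/example-resource",            # placeholder path, not a documented route
    headers={**HEADERS, "X-HTTP-Method-Override": "PATCH"},
    json=[{"op": "replace", "path": "/description", "value": "updated"}],  # illustrative body
)
```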
The version of the OpenAPI document: 2.0
Contact: support@launchdarkly.com
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import launchdarkly_api
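# The generated models refer to each other by name; registering Instructions
# in globals() below lets those name-based lookups resolve when the test runs.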
from launchdarkly_api.model.instructions import Instructions
globals()['Instructions'] = Instructions
from launchdarkly_api.model.post_flag_scheduled_changes_input import PostFlagScheduledChangesInput
class TestPostFlagScheduledChangesInput(unittest.TestCase):
"""PostFlagScheduledChangesInput unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPostFlagScheduledChangesInput(self):
"""Test PostFlagScheduledChangesInput"""
# FIXME: construct object with mandatory attributes with example values
# model = PostFlagScheduledChangesInput() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 619.804878
| 24,467
| 0.670549
|
dbd970b555c1e21de460bb8d0a973a4aefe96a28
| 135,629
|
py
|
Python
|
python/cudf/cudf/core/column/string.py
|
mingwandroid/cudf
|
6a406ba8ca9918ef0757ac25c0a6acd383edc905
|
[
"Apache-2.0"
] | null | null | null |
python/cudf/cudf/core/column/string.py
|
mingwandroid/cudf
|
6a406ba8ca9918ef0757ac25c0a6acd383edc905
|
[
"Apache-2.0"
] | null | null | null |
python/cudf/cudf/core/column/string.py
|
mingwandroid/cudf
|
6a406ba8ca9918ef0757ac25c0a6acd383edc905
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
import pickle
import warnings
import cupy
import numpy as np
import pandas as pd
import pyarrow as pa
import cudf
from cudf import _lib as libcudf
from cudf._lib import string_casting as str_cast
from cudf._lib.column import Column
from cudf._lib.nvtext.generate_ngrams import (
generate_character_ngrams as cpp_generate_character_ngrams,
generate_ngrams as cpp_generate_ngrams,
)
from cudf._lib.nvtext.ngrams_tokenize import (
ngrams_tokenize as cpp_ngrams_tokenize,
)
from cudf._lib.nvtext.normalize import normalize_spaces as cpp_normalize_spaces
from cudf._lib.nvtext.replace import replace_tokens as cpp_replace_tokens
from cudf._lib.nvtext.subword_tokenize import (
subword_tokenize as cpp_subword_tokenize,
)
from cudf._lib.nvtext.tokenize import (
character_tokenize as cpp_character_tokenize,
count_tokens as cpp_count_tokens,
tokenize as cpp_tokenize,
)
from cudf._lib.nvtx import annotate
from cudf._lib.scalar import Scalar, as_scalar
from cudf._lib.strings.attributes import (
code_points as cpp_code_points,
count_bytes as cpp_count_bytes,
count_characters as cpp_count_characters,
)
from cudf._lib.strings.capitalize import (
capitalize as cpp_capitalize,
title as cpp_title,
)
from cudf._lib.strings.case import (
swapcase as cpp_swapcase,
to_lower as cpp_to_lower,
to_upper as cpp_to_upper,
)
from cudf._lib.strings.char_types import (
is_alnum as cpp_is_alnum,
is_alpha as cpp_is_alpha,
is_decimal as cpp_is_decimal,
is_digit as cpp_is_digit,
is_float as cpp_is_float,
is_integer as cpp_is_integer,
is_lower as cpp_is_lower,
is_numeric as cpp_is_numeric,
is_space as cpp_isspace,
is_upper as cpp_is_upper,
)
from cudf._lib.strings.combine import (
concatenate as cpp_concatenate,
join as cpp_join,
)
from cudf._lib.strings.contains import (
contains_re as cpp_contains_re,
count_re as cpp_count_re,
match_re as cpp_match_re,
)
from cudf._lib.strings.convert.convert_urls import (
url_decode as cpp_url_decode,
url_encode as cpp_url_encode,
)
from cudf._lib.strings.extract import extract as cpp_extract
from cudf._lib.strings.find import (
contains as cpp_contains,
endswith as cpp_endswith,
endswith_multiple as cpp_endswith_multiple,
find as cpp_find,
rfind as cpp_rfind,
startswith as cpp_startswith,
startswith_multiple as cpp_startswith_multiple,
)
from cudf._lib.strings.findall import findall as cpp_findall
from cudf._lib.strings.padding import (
PadSide,
center as cpp_center,
ljust as cpp_ljust,
pad as cpp_pad,
rjust as cpp_rjust,
zfill as cpp_zfill,
)
from cudf._lib.strings.replace import (
insert as cpp_string_insert,
replace as cpp_replace,
replace_multi as cpp_replace_multi,
slice_replace as cpp_slice_replace,
)
from cudf._lib.strings.replace_re import (
replace_multi_re as cpp_replace_multi_re,
replace_re as cpp_replace_re,
replace_with_backrefs as cpp_replace_with_backrefs,
)
from cudf._lib.strings.split.partition import (
partition as cpp_partition,
rpartition as cpp_rpartition,
)
from cudf._lib.strings.split.split import (
rsplit as cpp_rsplit,
split as cpp_split,
)
from cudf._lib.strings.strip import (
lstrip as cpp_lstrip,
rstrip as cpp_rstrip,
strip as cpp_strip,
)
from cudf._lib.strings.substring import (
get as cpp_string_get,
slice_from as cpp_slice_from,
slice_strings as cpp_slice_strings,
)
from cudf._lib.strings.translate import translate as cpp_translate
from cudf._lib.strings.wrap import wrap as cpp_wrap
from cudf.core.buffer import Buffer
from cudf.core.column import column, datetime
from cudf.utils import utils
from cudf.utils.docutils import copy_docstring
from cudf.utils.dtypes import can_convert_to_column, is_scalar, is_string_dtype
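# Dispatch tables used by the string column: the first maps a target numpy dtype
# to the string-to-numeric/datetime casting routine in cudf._lib.string_casting,
# and the second maps a source dtype to the reverse (value-to-string) routine.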
_str_to_numeric_typecast_functions = {
np.dtype("int8"): str_cast.stoi8,
np.dtype("int16"): str_cast.stoi16,
np.dtype("int32"): str_cast.stoi,
np.dtype("int64"): str_cast.stol,
np.dtype("uint8"): str_cast.stoui8,
np.dtype("uint16"): str_cast.stoui16,
np.dtype("uint32"): str_cast.stoui,
np.dtype("uint64"): str_cast.stoul,
np.dtype("float32"): str_cast.stof,
np.dtype("float64"): str_cast.stod,
np.dtype("bool"): str_cast.to_booleans,
# TODO: support Date32 UNIX days
# np.dtype("datetime64[D]"): str_cast.timestamp2int,
np.dtype("datetime64[s]"): str_cast.timestamp2int,
np.dtype("datetime64[ms]"): str_cast.timestamp2int,
np.dtype("datetime64[us]"): str_cast.timestamp2int,
np.dtype("datetime64[ns]"): str_cast.timestamp2int,
}
_numeric_to_str_typecast_functions = {
np.dtype("int8"): str_cast.i8tos,
np.dtype("int16"): str_cast.i16tos,
np.dtype("int32"): str_cast.itos,
np.dtype("int64"): str_cast.ltos,
np.dtype("uint8"): str_cast.ui8tos,
np.dtype("uint16"): str_cast.ui16tos,
np.dtype("uint32"): str_cast.uitos,
np.dtype("uint64"): str_cast.ultos,
np.dtype("float32"): str_cast.ftos,
np.dtype("float64"): str_cast.dtos,
np.dtype("bool"): str_cast.from_booleans,
# TODO: support Date32 UNIX days
# np.dtype("datetime64[D]"): str_cast.int2timestamp,
np.dtype("datetime64[s]"): str_cast.int2timestamp,
np.dtype("datetime64[ms]"): str_cast.int2timestamp,
np.dtype("datetime64[us]"): str_cast.int2timestamp,
np.dtype("datetime64[ns]"): str_cast.int2timestamp,
}
class StringMethods(object):
def __init__(self, column, parent=None):
"""
Vectorized string functions for Series and Index.
This mimics pandas ``df.str`` interface. Nulls stay null
unless handled otherwise by a particular method.
Patterned after Python's string methods, with some
inspiration from R's stringr package.
"""
self._column = column
self._parent = parent
def htoi(self):
"""
Returns integer value represented by each hex string.
String is interpreted to have hex (base-16) characters.
Returns
-------
Series/Index of str dtype
Examples
--------
>>> import cudf
>>> s = cudf.Series(["1234", "ABCDEF", "1A2", "cafe"])
>>> s.str.htoi()
0 4660
1 11259375
2 418
3 51966
dtype: int64
"""
out = str_cast.htoi(self._column)
return self._return_or_inplace(out, inplace=False)
def ip2int(self):
"""
This converts IP strings to integers.
Returns
-------
Series/Index of str dtype
Examples
--------
>>> import cudf
>>> s = cudf.Series(["12.168.1.1", "10.0.0.1"])
>>> s.str.ip2int()
0 212336897
1 167772161
dtype: int64
Returns 0 for any string that is not a valid IP address.
>>> s = cudf.Series(["12.168.1.1", "10.0.0.1", "abc"])
>>> s.str.ip2int()
0 212336897
1 167772161
2 0
dtype: int64
"""
out = str_cast.ip2int(self._column)
return self._return_or_inplace(out, inplace=False)
def _return_or_inplace(self, new_col, **kwargs):
"""
Returns an object of the type of the column owner or updates the column
of the owner (Series or Index) to mimic an inplace operation
"""
inplace = kwargs.get("inplace", False)
if inplace:
self._parent._mimic_inplace(new_col, inplace=True)
else:
expand = kwargs.get("expand", False)
if expand or isinstance(
self._parent, (cudf.DataFrame, cudf.MultiIndex)
):
# This branch indicates that the object passed as new_col
# is actually table-like data
table = new_col
from cudf._lib.table import Table
if isinstance(table, Table):
if isinstance(self._parent, cudf.Index):
idx = self._parent._constructor_expanddim._from_table(
table=table
)
idx.names = None
return idx
else:
return self._parent._constructor_expanddim(
data=table._data, index=self._parent.index
)
else:
return self._parent._constructor_expanddim(
{index: value for index, value in enumerate(table)},
index=self._parent.index,
)
elif isinstance(self._parent, cudf.Series):
retain_index = kwargs.get("retain_index", True)
if retain_index:
return cudf.Series(
new_col,
name=self._parent.name,
index=self._parent.index,
)
else:
return cudf.Series(new_col, name=self._parent.name)
elif isinstance(self._parent, cudf.Index):
return cudf.core.index.as_index(
new_col, name=self._parent.name
)
else:
if self._parent is None:
return new_col
else:
return self._parent._mimic_inplace(new_col, inplace=False)
def __getitem__(self, key):
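# Slices delegate to str.slice(); any other key is treated as a position for get().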
if isinstance(key, slice):
return self.slice(start=key.start, stop=key.stop, step=key.step)
else:
return self.get(key)
def len(self, **kwargs):
"""
Computes the length of each element in the Series/Index.
Returns : Series or Index of int
A Series or Index of integer values
indicating the length of each element in the Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["dog", "", "\\n", None])
>>> s.str.len()
0 3
1 0
2 1
3 null
dtype: int32
"""
return self._return_or_inplace(
cpp_count_characters(self._column), **kwargs,
)
def byte_count(self, **kwargs):
"""
Computes the number of bytes of each string in the Series/Index.
Returns : Series or Index of int
A Series or Index of integer values
indicating the number of bytes of each strings in the
Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["abc","d","ef"])
>>> s.str.byte_count()
0 3
1 1
2 2
dtype: int32
>>> s = cudf.Series(["Hello", "Bye", "Thanks ๐"])
>>> s.str.byte_count()
0 5
1 3
2 11
dtype: int32
"""
return self._return_or_inplace(
cpp_count_bytes(self._column), **kwargs,
)
def cat(self, others=None, sep=None, na_rep=None, **kwargs):
"""
Concatenate strings in the Series/Index with given separator.
If ``others`` is specified, this function concatenates the Series/Index
and elements of others element-wise. If others is not passed, then all
values in the Series/Index are concatenated into a single string with
a given sep.
Parameters
----------
others : Series or List of str
Strings to be appended.
The number of strings must match ``size()`` of this instance.
This must be either a Series of string dtype or a Python
list of strings.
sep : str
If specified, this separator will be appended to each string
before appending the others.
na_rep : str
This character will take the place of any null strings
(not empty strings) in either list.
- If ``na_rep`` is ``None``, and ``others`` is ``None``,
missing values in the Series/Index are
omitted from the result.
- If ``na_rep`` is ``None``, and ``others`` is
not ``None``, a row containing a missing value
in any of the columns (before concatenation)
will have a missing value in the result.
Returns
-------
concat : str or Series/Index of str dtype
If ``others`` is ``None``, ``str`` is returned,
otherwise a ``Series/Index`` (same type as caller)
of str dtype is returned.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['a', 'b', None, 'd'])
>>> s.str.cat(sep=' ')
'a b d'
By default, NA values in the Series are ignored. Using na_rep, they
can be given a representation:
>>> s.str.cat(sep=' ', na_rep='?')
'a b ? d'
If others is specified, corresponding values are concatenated with
the separator. Result will be a Series of strings.
>>> s.str.cat(['A', 'B', 'C', 'D'], sep=',')
0 a,A
1 b,B
2 None
3 d,D
dtype: object
Missing values will remain missing in the result, but can again be
represented using na_rep
>>> s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')
0 a,A
1 b,B
2 -,C
3 d,D
dtype: object
If sep is not specified, the values are concatenated without
separation.
>>> s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')
0 aA
1 bB
2 -C
3 dD
dtype: object
"""
if sep is None:
sep = ""
if others is None:
data = cpp_join(
self._column, as_scalar(sep), as_scalar(na_rep, "str")
)
else:
other_cols = _get_cols_list(others)
all_cols = [self._column] + other_cols
data = cpp_concatenate(
cudf.DataFrame(
{index: value for index, value in enumerate(all_cols)}
),
as_scalar(sep),
as_scalar(na_rep, "str"),
)
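# A result that collapsed to a single null (e.g. joining an all-null column)
# is represented as an empty string instead.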
if len(data) == 1 and data.null_count == 1:
data = [""]
out = self._return_or_inplace(data, **kwargs)
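# With no `others`, the join yields one combined string; unwrap it so a plain
# Python string is returned rather than a length-1 result.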
if len(out) == 1 and others is None:
if isinstance(out, StringColumn):
out = out[0]
else:
out = out.iloc[0]
return out
def join(self, sep):
"""
Join lists contained as elements in the Series/Index with passed
delimiter.
Raises : NotImplementedError
Columns of arrays / lists are not yet supported.
"""
raise NotImplementedError(
"Columns of arrays / lists are not yet " "supported"
)
def extract(self, pat, flags=0, expand=True, **kwargs):
"""
Extract capture groups in the regex `pat` as columns in a DataFrame.
For each subject string in the Series, extract groups from the first
match of regular expression `pat`.
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
expand : bool, default True
If True, return DataFrame with one column per capture group.
If False, return a Series/Index if there is one capture group or
DataFrame if there are multiple capture groups.
Returns
-------
DataFrame or Series/Index
A DataFrame with one row for each subject string, and one column
for each group. If `expand=False` and `pat` has only one capture
group, then return a Series/Index.
Notes
-----
The `flags` parameter is not yet supported and will raise a
NotImplementedError if anything other than the default value is passed.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['a1', 'b2', 'c3'])
>>> s.str.extract(r'([ab])(\d)') # noqa W605
0 1
0 a 1
1 b 2
2 None None
A pattern with one group will return a DataFrame with one
column if expand=True.
>>> s.str.extract(r'[ab](\d)', expand=True) # noqa W605
0
0 1
1 2
2 None
A pattern with one group will return a Series if expand=False.
>>> s.str.extract(r'[ab](\d)', expand=False) # noqa W605
0 1
1 2
2 None
dtype: object
"""
if flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
out = cpp_extract(self._column, pat)
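# A single capture group with expand=False collapses to a Series/Index;
# otherwise the extracted groups expand into a DataFrame.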
if out._num_columns == 1 and expand is False:
return self._return_or_inplace(out._columns[0], **kwargs)
else:
kwargs.setdefault("expand", expand)
return self._return_or_inplace(out, **kwargs)
def contains(
self, pat, case=True, flags=0, na=np.nan, regex=True, **kwargs
):
"""
Test if pattern or regex is contained within a string of a Series or
Index.
Return boolean Series or Index based on whether a given pattern or
regex is contained within a string of a Series or Index.
Parameters
----------
pat : str
Character sequence or regular expression.
regex : bool, default True
If True, assumes the pattern is a regular expression.
If False, treats the pattern as a literal string.
Returns
-------
Series/Index of bool dtype
A Series/Index of boolean dtype indicating whether the given
pattern is contained within the string of each element of the
Series/Index.
Notes
-----
The parameters `case`, `flags`, and `na` are not yet supported and
will raise a NotImplementedError if anything other than the default
value is set.
Examples
--------
>>> import cudf
>>> s1 = cudf.Series(['Mouse', 'dog', 'house and parrot', '23', None])
>>> s1
0 Mouse
1 dog
2 house and parrot
3 23
4 None
dtype: object
>>> s1.str.contains('og', regex=False)
0 False
1 True
2 False
3 False
4 null
dtype: bool
Returning an Index of booleans using only a literal pattern.
>>> data = ['Mouse', 'dog', 'house and parrot', '23.0', np.NaN]
>>> ind = cudf.core.index.StringIndex(data)
>>> ind.str.contains('23', regex=False)
Index(['False', 'False', 'False', 'True', 'null'], dtype='object')
Returning 'house' or 'dog' when either expression occurs in a string.
>>> s1.str.contains('house|dog', regex=True)
0 False
1 True
2 True
3 False
4 null
dtype: bool
Returning any digit using regular expression.
>>> s1.str.contains('\d', regex=True) # noqa W605
0 False
1 False
2 False
3 True
4 null
dtype: bool
Ensure ``pat`` is not a literal pattern when ``regex`` is set
to True. Note in the following example one might expect
only `s2[1]` and `s2[3]` to return True. However,
'.0' as a regex matches any character followed by a 0.
>>> s2 = cudf.Series(['40', '40.0', '41', '41.0', '35'])
>>> s2.str.contains('.0', regex=True)
0 True
1 True
2 False
3 True
4 False
dtype: bool
"""
if case is not True:
raise NotImplementedError("`case` parameter is not yet supported")
elif flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
elif na is not np.nan:
raise NotImplementedError("`na` parameter is not yet supported")
return self._return_or_inplace(
cpp_contains_re(self._column, pat)
if regex is True
else cpp_contains(self._column, as_scalar(pat, "str")),
**kwargs,
)
def replace(
self, pat, repl, n=-1, case=None, flags=0, regex=True, **kwargs
):
"""
Replace occurrences of pattern/regex in the Series/Index with some
other string. Equivalent to `str.replace()
<https://docs.python.org/3/library/stdtypes.html#str.replace>`_
or `re.sub()
<https://docs.python.org/3/library/re.html#re.sub>`_.
Parameters
----------
pat : str or list-like
String(s) to be replaced as a character sequence or regular
expression.
repl : str or list-like
String(s) to be used as replacement.
n : int, default -1 (all)
Number of replacements to make from the start.
regex : bool, default True
If True, assumes the pattern is a regular expression.
If False, treats the pattern as a literal string.
Returns
-------
Series/Index of str dtype
A copy of the object with all matching occurrences of pat replaced
by repl.
Notes
-----
The parameters `case` and `flags` are not yet supported and will raise
a `NotImplementedError` if anything other than the default value
is set.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['foo', 'fuz', None])
>>> s
0 foo
1 fuz
2 None
dtype: object
When pat is a string and regex is True (the default), the given pat
is compiled as a regex. When repl is a string, it replaces matching
regex patterns as with ``re.sub()``. NaN value(s) in the Series
are left as is:
>>> s.str.replace('f.', 'ba', regex=True)
0 bao
1 baz
2 None
dtype: object
When pat is a string and `regex` is False, every pat is replaced
with repl as with ``str.replace()``:
>>> s.str.replace('f.', 'ba', regex=False)
0 foo
1 fuz
2 None
dtype: object
"""
if case is not None:
raise NotImplementedError("`case` parameter is not yet supported")
if flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
if can_convert_to_column(pat) and can_convert_to_column(repl):
warnings.warn(
"`n` parameter is not supported when \
`pat` and `repl` are list-like inputs"
)
return self._return_or_inplace(
cpp_replace_multi_re(
self._column, pat, column.as_column(repl, dtype="str")
)
if regex
else cpp_replace_multi(
self._column,
column.as_column(pat, dtype="str"),
column.as_column(repl, dtype="str"),
),
**kwargs,
)
# Pandas treats 0 as all
if n == 0:
n = -1
# Pandas forces non-regex replace when pat is a single-character
return self._return_or_inplace(
cpp_replace_re(self._column, pat, as_scalar(repl, "str"), n)
if regex is True and len(pat) > 1
else cpp_replace(
self._column, as_scalar(pat, "str"), as_scalar(repl, "str"), n
),
**kwargs,
)
def replace_with_backrefs(self, pat, repl, **kwargs):
"""
Use the ``repl`` back-ref template to create a new string
with the extracted elements found using the ``pat`` expression.
Parameters
----------
pat : str
Regex with groupings to identify extract sections.
This should not be a compiled regex.
repl : str
String template containing back-reference indicators.
Returns
-------
Series/Index of str dtype
Examples
--------
>>> import cudf
>>> s = cudf.Series(["A543","Z756"])
>>> s.str.replace_with_backrefs('(\\d)(\\d)', 'V\\2\\1')
0 AV453
1 ZV576
dtype: object
"""
return self._return_or_inplace(
cpp_replace_with_backrefs(self._column, pat, repl), **kwargs
)
def slice(self, start=None, stop=None, step=None, **kwargs):
"""
Slice substrings from each element in the Series or Index.
Parameters
----------
start : int, optional
Start position for slice operation.
stop : int, optional
Stop position for slice operation.
step : int, optional
Step size for slice operation.
Returns
-------
Series/Index of str dtype
Series or Index from sliced substring from
original string object.
See also
--------
slice_replace
Replace a slice with a string.
get
Return element at position. Equivalent
to ``Series.str.slice(start=i, stop=i+1)``
with ``i`` being the position.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["koala", "fox", "chameleon"])
>>> s
0 koala
1 fox
2 chameleon
dtype: object
>>> s.str.slice(start=1)
0 oala
1 ox
2 hameleon
dtype: object
>>> s.str.slice(start=-1)
0 a
1 x
2 n
dtype: object
>>> s.str.slice(stop=2)
0 ko
1 fo
2 ch
dtype: object
>>> s.str.slice(step=2)
0 kaa
1 fx
2 caeen
dtype: object
>>> s.str.slice(start=0, stop=5, step=3)
0 kl
1 f
2 cm
dtype: object
"""
return self._return_or_inplace(
cpp_slice_strings(self._column, start, stop, step), **kwargs,
)
def isinteger(self, **kwargs):
"""
Check whether all characters in each string form an integer.
If a string has zero characters, False is returned for
that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["1", "0.1", "+100", "-15", "abc"])
>>> s.str.isinteger()
0 True
1 False
2 True
3 True
4 False
dtype: bool
>>> s = cudf.Series(["this is plan text", "", "10 10"])
>>> s.str.isinteger()
0 False
1 False
2 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_integer(self._column), **kwargs)
def ishex(self, **kwargs):
"""
Check whether all characters in each string form a hex integer.
If a string has zero characters, False is returned for
that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See also
--------
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["", "123DEF", "0x2D3", "-15", "abc"])
>>> s.str.ishex()
0 False
1 True
2 True
3 False
4 True
dtype: bool
"""
return self._return_or_inplace(str_cast.is_hex(self._column), **kwargs)
def isfloat(self, **kwargs):
"""
Check whether all characters in each string form a floating value.
If a string has zero characters, False is returned for
that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["1.1", "0.123213", "+0.123", "-100.0001", "234",
... "3-"])
>>> s.str.isfloat()
0 True
1 True
2 True
3 True
4 True
5 False
dtype: bool
>>> s = cudf.Series(["this is plain text", "\t\n", "9.9", "9.9.9"])
>>> s.str.isfloat()
0 False
1 False
2 True
3 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_float(self._column), **kwargs)
def isdecimal(self, **kwargs):
"""
Check whether all characters in each string are decimal.
This is equivalent to running the Python string method
`str.isdecimal()
<https://docs.python.org/3/library/stdtypes.html#str.isdecimal>`_
for each element of the Series/Index.
If a string has zero characters, False is returned for
that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s3 = cudf.Series(['23', '³', '⅕', ''])
The s3.str.isdecimal method checks for characters used to form
numbers in base 10.
>>> s3.str.isdecimal()
0 True
1 False
2 False
3 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_decimal(self._column), **kwargs)
def isalnum(self, **kwargs):
"""
Check whether all characters in each string are alphanumeric.
This is equivalent to running the Python string method
`str.isalnum()
<https://docs.python.org/3/library/stdtypes.html#str.isalnum>`_
for each element of the Series/Index. If a string has zero
characters, False is returned for that check.
Equivalent to: ``isalpha() or isdigit() or isnumeric() or isdecimal()``
Returns : Series or Index of bool
Series or Index of boolean values with the
same length as the original Series/Index.
See also
--------
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s1 = cudf.Series(['one', 'one1', '1', ''])
>>> s1.str.isalnum()
0 True
1 True
2 True
3 False
dtype: bool
Note that checks against characters mixed with
any additional punctuation or whitespace will
evaluate to false for an alphanumeric check.
>>> s2 = cudf.Series(['A B', '1.5', '3,000'])
>>> s2.str.isalnum()
0 False
1 False
2 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_alnum(self._column), **kwargs)
def isalpha(self, **kwargs):
"""
Check whether all characters in each string are alphabetic.
This is equivalent to running the Python string method
`str.isalpha()
<https://docs.python.org/3/library/stdtypes.html#str.isalpha>`_
for each element of the Series/Index.
If a string has zero characters, False is returned for that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same length
as the original Series/Index.
See also
--------
isalnum
Check whether all characters are alphanumeric.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s1 = cudf.Series(['one', 'one1', '1', ''])
>>> s1.str.isalpha()
0 True
1 False
2 False
3 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_alpha(self._column), **kwargs)
def isdigit(self, **kwargs):
"""
Check whether all characters in each string are digits.
This is equivalent to running the Python string method
`str.isdigit()
<https://docs.python.org/3/library/stdtypes.html#str.isdigit>`_
for each element of the Series/Index.
If a string has zero characters, False is returned
for that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['23', '³', '⅕', ''])
The ``s.str.isdigit`` method is the same as ``s.str.isdecimal`` but
also includes special digits, like superscripted and
subscripted digits in unicode.
>>> s.str.isdigit()
0 True
1 True
2 False
3 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_digit(self._column), **kwargs)
def isnumeric(self, **kwargs):
"""
Check whether all characters in each string are numeric.
This is equivalent to running the Python string method
`str.isnumeric()
<https://docs.python.org/3/library/stdtypes.html#str.isnumeric>`_
for each element of the Series/Index. If a
string has zero characters, False is returned for that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s1 = cudf.Series(['one', 'one1', '1', ''])
>>> s1.str.isnumeric()
0 False
1 False
2 True
3 False
dtype: bool
The ``isnumeric`` method is the same as ``isdigit`` but
also includes other characters that can represent
quantities such as unicode fractions.
>>> s2 = cudf.Series(['23', '³', '⅕', ''])
>>> s2.str.isnumeric()
0 True
1 True
2 True
3 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_numeric(self._column), **kwargs)
def isupper(self, **kwargs):
"""
Check whether all characters in each string are uppercase.
This is equivalent to running the Python string method
`str.isupper()
<https://docs.python.org/3/library/stdtypes.html#str.isupper>`_
for each element of the Series/Index.
If a string has zero characters, False is returned
for that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isspace
Check whether all characters are whitespace.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s.str.isupper()
0 False
1 False
2 True
3 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_upper(self._column), **kwargs)
def islower(self, **kwargs):
"""
Check whether all characters in each string are lowercase.
This is equivalent to running the Python string method
`str.islower()
<https://docs.python.org/3/library/stdtypes.html#str.islower>`_
for each element of the Series/Index.
If a string has zero characters, False is returned
for that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
See also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
isspace
Check whether all characters are whitespace.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s.str.islower()
0 True
1 False
2 False
3 False
dtype: bool
"""
return self._return_or_inplace(cpp_is_lower(self._column), **kwargs)
def isipv4(self, **kwargs):
"""
Check whether all characters in each string form an IPv4 address.
If a string has zero characters, False is returned for
that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same
length as the original Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["", "127.0.0.1", "255.255.255.255", "123.456"])
>>> s.str.isipv4()
0 False
1 True
2 True
3 False
dtype: bool
"""
return self._return_or_inplace(
str_cast.is_ipv4(self._column), **kwargs
)
def lower(self, **kwargs):
"""
Converts all characters to lowercase.
Equivalent to `str.lower()
<https://docs.python.org/3/library/stdtypes.html#str.lower>`_.
Returns : Series or Index of object
A copy of the object with all strings converted to lowercase.
See also
--------
upper
Converts all characters to uppercase.
title
Converts first character of each word to uppercase and remaining
to lowercase.
capitalize
Converts first character to uppercase and remaining to lowercase.
swapcase
Converts uppercase to lowercase and lowercase to uppercase.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s.str.lower()
0 lower
1 capitals
2 this is a sentence
3 swapcase
dtype: object
"""
return self._return_or_inplace(cpp_to_lower(self._column), **kwargs)
def upper(self, **kwargs):
"""
Convert each string to uppercase.
This only applies to ASCII characters at this time.
Equivalent to `str.upper()
<https://docs.python.org/3/library/stdtypes.html#str.upper>`_.
Returns : Series or Index of object
See also
--------
lower
Converts all characters to lowercase.
upper
Converts all characters to uppercase.
title
Converts first character of each word to uppercase and
remaining to lowercase.
capitalize
Converts first character to uppercase and remaining to
lowercase.
swapcase
Converts uppercase to lowercase and lowercase to uppercase.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.upper()
0 LOWER
1 CAPITALS
2 THIS IS A SENTENCE
3 SWAPCASE
dtype: object
"""
return self._return_or_inplace(cpp_to_upper(self._column), **kwargs)
def capitalize(self, **kwargs):
"""
Convert strings in the Series/Index to be capitalized.
This only applies to ASCII characters at this time.
Returns
-------
Series or Index of object
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s.str.capitalize()
0 Lower
1 Capitals
2 This is a sentence
3 Swapcase
dtype: object
>>> s = cudf.Series(["hello, friend","goodbye, friend"])
>>> s.str.capitalize()
0 Hello, friend
1 Goodbye, friend
dtype: object
"""
return self._return_or_inplace(cpp_capitalize(self._column), **kwargs)
def swapcase(self, **kwargs):
"""
Change each lowercase character to uppercase and vice versa.
This only applies to ASCII characters at this time.
Equivalent to `str.swapcase()
<https://docs.python.org/3/library/stdtypes.html#str.swapcase>`_.
Returns : Series or Index of object
See also
--------
lower
Converts all characters to lowercase.
upper
Converts all characters to uppercase.
title
Converts first character of each word to uppercase and remaining
to lowercase.
capitalize
Converts first character to uppercase and remaining to lowercase.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.swapcase()
0 LOWER
1 capitals
2 THIS IS A SENTENCE
3 sWaPcAsE
dtype: object
"""
return self._return_or_inplace(cpp_swapcase(self._column), **kwargs)
def title(self, **kwargs):
"""
Uppercase the first letter of each word and lowercase
the remaining characters.
This only applies to ASCII characters at this time.
Equivalent to `str.title()
<https://docs.python.org/3/library/stdtypes.html#str.title>`_.
Returns : Series or Index of object
See also
--------
lower
Converts all characters to lowercase.
upper
Converts all characters to uppercase.
capitalize
Converts first character to uppercase and remaining to lowercase.
swapcase
Converts uppercase to lowercase and lowercase to uppercase.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe']
>>> s = cudf.Series(data)
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.title()
0 Lower
1 Capitals
2 This Is A Sentence
3 Swapcase
dtype: object
"""
return self._return_or_inplace(cpp_title(self._column), **kwargs)
def slice_from(self, starts, stops, **kwargs):
"""
Return substring of each string using positions for each string.
The starts and stops parameters are of Column type.
Parameters
----------
starts : Series
Beginning position of each string to extract.
Default is the beginning of each string.
stops : Series
Ending position of each string to extract.
Default is the end of each string.
Use -1 to specify the end of that string.
Returns
-------
Series/Index of str dtype
A substring of each string using positions for each string.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["hello","there"])
>>> s
0 hello
1 there
dtype: object
>>> starts = cudf.Series([1, 3])
>>> stops = cudf.Series([5, 5])
>>> s.str.slice_from(starts, stops)
0 ello
1 re
dtype: object
"""
return self._return_or_inplace(
cpp_slice_from(
self._column, column.as_column(starts), column.as_column(stops)
),
**kwargs,
)
def slice_replace(self, start=None, stop=None, repl=None, **kwargs):
"""
Replace the specified section of each string with a new string.
Parameters
----------
start : int, optional
Beginning position of the string to replace.
Default is the beginning of each string.
stop : int, optional
Ending position of the string to replace.
Default is end of each string.
repl : str, optional
String to insert into the specified position values.
Returns
-------
Series/Index of str dtype
A new string with the specified section of the string
replaced with `repl` string.
See also
--------
slice
Just slicing without replacement.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])
>>> s
0 a
1 ab
2 abc
3 abdc
4 abcde
dtype: object
Specify just `start`, meaning replace `start` until the `end` of
the string with `repl`.
>>> s.str.slice_replace(1, repl='X')
0 aX
1 aX
2 aX
3 aX
4 aX
dtype: object
Specify just `stop`, meaning the `start` of the string to `stop`
is replaced with `repl`, and the rest of the string is included.
>>> s.str.slice_replace(stop=2, repl='X')
0 X
1 X
2 Xc
3 Xdc
4 Xcde
dtype: object
Specify `start` and `stop`, meaning the slice from `start`
to `stop` is replaced with `repl`. Everything before or
after `start` and `stop` is included as is.
>>> s.str.slice_replace(start=1, stop=3, repl='X')
0 aX
1 aX
2 aX
3 aXc
4 aXde
dtype: object
"""
if start is None:
start = 0
if stop is None:
stop = -1
if repl is None:
repl = ""
return self._return_or_inplace(
cpp_slice_replace(self._column, start, stop, as_scalar(repl)),
**kwargs,
)
def insert(self, start=0, repl=None, **kwargs):
"""
Insert the specified string into each string in the specified
position.
Parameters
----------
start : int
Position in each string at which ``repl`` will be inserted.
Default is the beginning of each string.
Specify -1 to insert at the end of each string.
repl : str
String to insert into the specified position value.
Returns
-------
Series/Index of str dtype
A new string series with the specified string
inserted at the specified position.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["abcdefghij", "0123456789"])
>>> s.str.insert(2, '_')
0 ab_cdefghij
1 01_23456789
dtype: object
When no `repl` is passed, nothing is inserted.
>>> s.str.insert(2)
0 abcdefghij
1 0123456789
dtype: object
Negative values are also supported for `start`.
>>> s.str.insert(-1,'_')
0 abcdefghij_
1 0123456789_
dtype: object
"""
if repl is None:
repl = ""
return self._return_or_inplace(
cpp_string_insert(self._column, start, as_scalar(repl)), **kwargs
)
def get(self, i=0, **kwargs):
"""
Extract element from each component at specified position.
Parameters
----------
i : int
Position of element to extract.
Returns
-------
Series/Index of str dtype
Examples
--------
>>> import cudf
>>> s = cudf.Series(["hello world", "rapids", "cudf"])
>>> s
0 hello world
1 rapids
2 cudf
dtype: object
>>> s.str.get(10)
0 d
1
2
dtype: object
>>> s.str.get(1)
0 e
1 a
2 u
dtype: object
``get`` also accepts negative index number.
>>> s.str.get(-1)
0 d
1 s
2 f
dtype: object
"""
return self._return_or_inplace(
cpp_string_get(self._column, i), **kwargs
)
def split(self, pat=None, n=-1, expand=None, **kwargs):
"""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the beginning, at the
specified delimiter string. Equivalent to `str.split()
<https://docs.python.org/3/library/stdtypes.html#str.split>`_.
Parameters
----------
pat : str, default ' ' (space)
String to split on, does not yet support regular expressions.
n : int, default -1 (all)
Limit number of splits in output. `None`, 0, and -1 will all be
interpreted as "all splits".
Returns
-------
DataFrame
Returns a DataFrame with each split as a column.
See also
--------
rsplit
Splits string around given separator/delimiter, starting from
the right.
str.split
Standard library version for split.
str.rsplit
Standard library version for rsplit.
Notes
-----
The parameter `expand` is not yet supported and will raise a
NotImplementedError if anything other than the default value
is set. The handling of the n keyword depends on the number
of found splits:
- If found splits > n, make first n splits only
- If found splits <= n, make all splits
- If for a certain row the number of found
splits < n, append None for padding up to n
Examples
--------
>>> import cudf
>>> data = ["this is a regular sentence", "https://docs.python.org/index.html", None] # noqa E501
>>> s = cudf.Series(data)
>>> s
0 this is a regular sentence
1 https://docs.python.org/index.html
2 None
dtype: object
The `n` parameter can be used to limit the number of
splits on the delimiter.
>>> s.str.split(n=2)
0 1 2
0 this is a regular sentence
1 https://docs.python.org/index.html None None
2 None None None
The `pat` parameter can be used to split by other characters.
>>> s.str.split(pat = "/")
0 1 2 3
0 this is a regular sentence None None None
1 https: docs.python.org index.html
2 None None None None
"""
if expand is None:
expand = True
warnings.warn("`expand` parameter defatults to True.")
elif expand is not True:
raise NotImplementedError(
"`expand=False` setting is not supported yet"
)
# Pandas treats 0 as all
if n == 0:
n = -1
kwargs.setdefault("expand", expand)
if pat is None:
pat = ""
result_table = cpp_split(self._column, as_scalar(pat, "str"), n)
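# Normalize degenerate results: an entirely null output column means there was
# nothing to split, while an entirely null input column is passed through as a copy.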
if len(result_table._data) == 1:
if result_table._data[0].null_count == len(self._column):
result_table = []
elif self._column.null_count == len(self._column):
result_table = [self._column.copy()]
return self._return_or_inplace(result_table, **kwargs,)
def rsplit(self, pat=None, n=-1, expand=None, **kwargs):
"""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the end, at the
specified delimiter string. Equivalent to `str.rsplit()
<https://docs.python.org/3/library/stdtypes.html#str.rsplit>`_.
Parameters
----------
pat : str, default ' ' (space)
String to split on, does not yet support regular expressions.
n : int, default -1 (all)
Limit number of splits in output. `None`, 0, and -1 will all be
interpreted as "all splits".
Returns
-------
DataFrame or MultiIndex
Returns a DataFrame/MultiIndex with each split as a column.
See also
--------
split
Split strings around given separator/delimiter.
str.split
Standard library version for split.
str.rsplit
Standard library version for rsplit.
Notes
-----
The parameter `expand` is not yet supported and will raise a
`NotImplementedError` if anything other than the default value is
set. The handling of the n keyword depends on the number of
found splits:
- If found splits > n, make first n splits only
- If found splits <= n, make all splits
- If for a certain row the number of found splits < n,
append None for padding up to n.
Examples
--------
>>> import cudf
>>> data = ["this is a regular sentence","https://docs.python.org/3/tutorial/index.html",None] # noqa E501
>>> s = cudf.Series(data)
>>> s.str.rsplit(n=2)
0 1 2
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html None None
2 None None None
For slightly more complex use cases like splitting the
html document name from a url, a combination of parameter
settings can be used.
>>> s.str.rsplit("/", n=1, expand=True)
0 1
0 this is a regular sentence None
1 https://docs.python.org/3/tutorial index.html
2 None None
"""
if expand is None:
expand = True
warnings.warn("`expand` parameter defatults to True.")
elif expand is not True:
raise NotImplementedError(
"`expand=False` setting is not supported yet"
)
# Pandas treats 0 as all
if n == 0:
n = -1
kwargs.setdefault("expand", expand)
if pat is None:
pat = ""
result_table = cpp_rsplit(self._column, as_scalar(pat), n)
if len(result_table._data) == 1:
if result_table._data[0].null_count == len(self._parent):
result_table = []
elif self._parent.null_count == len(self._parent):
result_table = [self._column.copy()]
return self._return_or_inplace(result_table, **kwargs)
def partition(self, sep=" ", expand=True, **kwargs):
"""
Split the string at the first occurrence of sep.
This method splits the string at the first occurrence
of sep, and returns 3 elements containing the part
before the separator, the separator itself, and the
part after the separator. If the separator is not found,
return 3 elements containing the string itself, followed
by two empty strings.
Parameters
----------
sep : str, default ' ' (whitespace)
String to split on.
Returns
-------
DataFrame or MultiIndex
Returns a DataFrame / MultiIndex
Notes
-----
The parameter `expand` is not yet supported and will raise a
`NotImplementedError` if anything other than the default value is set.
See also
--------
rpartition
Split the string at the last occurrence of sep.
split
Split strings around given separators.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['Linda van der Berg', 'George Pitt-Rivers'])
>>> s
0 Linda van der Berg
1 George Pitt-Rivers
dtype: object
>>> s.str.partition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by something different than a space:
>>> s.str.partition('-')
0 1 2
0 Linda van der Berg
1 George Pitt - Rivers
Also available on indices:
>>> idx = cudf.core.index.StringIndex(['X 123', 'Y 999'])
>>> idx
StringIndex(['X 123' 'Y 999'], dtype='object')
Which will create a MultiIndex:
>>> idx.str.partition()
MultiIndex(levels=[0 X
1 Y
dtype: object, 0
dtype: object, 0 123
1 999
dtype: object],
codes= 0 1 2
0 0 0 0
1 1 0 1)
"""
if expand is not True:
raise NotImplementedError(
"`expand=False` is currently not supported"
)
kwargs.setdefault("expand", expand)
if sep is None:
sep = " "
return self._return_or_inplace(
cpp_partition(self._column, as_scalar(sep)), **kwargs
)
def rpartition(self, sep=" ", expand=True, **kwargs):
"""
Split the string at the last occurrence of sep.
This method splits the string at the last occurrence
of sep, and returns 3 elements containing the part
before the separator, the separator itself, and the
part after the separator. If the separator is not
found, return 3 elements containing two empty strings,
followed by the string itself.
Parameters
----------
sep : str, default ' ' (whitespace)
String to split on.
Returns
-------
DataFrame or MultiIndex
Returns a DataFrame / MultiIndex
Notes
-----
The parameter `expand` is not yet supported and will raise a
`NotImplementedError` if anything other than the default value is set.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['Linda van der Berg', 'George Pitt-Rivers'])
>>> s
0 Linda van der Berg
1 George Pitt-Rivers
dtype: object
>>> s.str.rpartition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
Also available on indices:
>>> idx = cudf.core.index.StringIndex(['X 123', 'Y 999'])
>>> idx
StringIndex(['X 123' 'Y 999'], dtype='object')
Which will create a MultiIndex:
>>> idx.str.rpartition()
MultiIndex(levels=[0 X
1 Y
dtype: object, 0
dtype: object, 0 123
1 999
dtype: object],
codes= 0 1 2
0 0 0 0
1 1 0 1)
"""
if expand is not True:
raise NotImplementedError(
"`expand=False` is currently not supported"
)
kwargs.setdefault("expand", expand)
if sep is None:
sep = " "
return self._return_or_inplace(
cpp_rpartition(self._column, as_scalar(sep)), **kwargs
)
def pad(self, width, side="left", fillchar=" ", **kwargs):
"""
Pad strings in the Series/Index up to width.
Parameters
----------
width : int
Minimum width of resulting string;
additional characters will be filled with
character defined in fillchar.
side : {'left', 'right', 'both'}, default 'left'
Side from which to fill resulting string.
fillchar : str, default ' ' (whitespace)
Additional character for filling, default is whitespace.
Returns
-------
Series/Index of object
Returns Series or Index with minimum number
of char in object.
See also
--------
rjust
Fills the left side of strings with an arbitrary character.
Equivalent to ``Series.str.pad(side='left')``.
ljust
Fills the right side of strings with an arbitrary character.
Equivalent to ``Series.str.pad(side='right')``.
center
Fills both sides of strings with an arbitrary character.
Equivalent to ``Series.str.pad(side='both')``.
zfill
Pad strings in the Series/Index by prepending '0' character.
Equivalent to ``Series.str.pad(side='left', fillchar='0')``.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["caribou", "tiger"])
>>> s.str.pad(width=10)
0 caribou
1 tiger
dtype: object
>>> s.str.pad(width=10, side='right', fillchar='-')
0 caribou---
1 tiger-----
dtype: object
>>> s.str.pad(width=10, side='both', fillchar='-')
0 -caribou--
1 --tiger---
dtype: object
"""
if not isinstance(fillchar, str):
msg = (
f"fillchar must be a character, not {type(fillchar).__name__}"
)
raise TypeError(msg)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not pd.api.types.is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
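# Translate the user-facing side name into the libcudf PadSide enum; anything
# else falls through to the ValueError below.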
try:
side = PadSide[side.upper()]
except KeyError:
raise ValueError(
"side has to be either one of {โleftโ, โrightโ, โbothโ}"
)
return self._return_or_inplace(
cpp_pad(self._column, width, fillchar, side), **kwargs
)
def zfill(self, width, **kwargs):
"""
Pad strings in the Series/Index by prepending '0' characters.
Strings in the Series/Index are padded with '0' characters
on the left of the string to reach a total string length
width. Strings in the Series/Index with length greater
or equal to width are unchanged.
Parameters
----------
width : int
Minimum length of resulting string;
strings with length less than width will
be prepended with '0' characters.
Returns
-------
Series/Index of str dtype
Returns Series or Index with prepended '0' characters.
See also
--------
rjust
Fills the left side of strings with an arbitrary character.
ljust
Fills the right side of strings with an arbitrary character.
pad
Fills the specified sides of strings with an arbitrary character.
center
Fills both sides of strings with an arbitrary character.
Notes
-----
Differs from `str.zfill()
<https://docs.python.org/3/library/stdtypes.html#str.zfill>`_
which has special handling for '+'/'-' in the string.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['-1', '1', '1000', None])
>>> s
0 -1
1 1
2 1000
3 None
dtype: object
Note that ``None`` is not a string, therefore it remains
``None``. The minus sign in ``'-1'`` is treated as a
regular character and the zero is added to the left
of it (`str.zfill()
<https://docs.python.org/3/library/stdtypes.html#str.zfill>`_
would have moved it to the left). ``1000`` remains unchanged as
it is longer than width.
>>> s.str.zfill(3)
0 0-1
1 001
2 1000
3 None
dtype: object
"""
if not pd.api.types.is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
return self._return_or_inplace(
cpp_zfill(self._column, width), **kwargs
)
def center(self, width, fillchar=" ", **kwargs):
"""
Filling left and right side of strings in the Series/Index with an
additional character.
Parameters
----------
width : int
Minimum width of resulting string;
additional characters will be filled
with fillchar.
fillchar : str, default is ' ' (whitespace)
Additional character for filling.
Returns
-------
Series/Index of str dtype
Returns Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['a', 'b', None, 'd'])
>>> s.str.center(1)
0 a
1 b
2 None
3 d
dtype: object
>>> s.str.center(1, fillchar='-')
0 a
1 b
2 None
3 d
dtype: object
>>> s.str.center(2, fillchar='-')
0 a-
1 b-
2 None
3 d-
dtype: object
>>> s.str.center(5, fillchar='-')
0 --a--
1 --b--
2 None
3 --d--
dtype: object
>>> s.str.center(6, fillchar='-')
0 --a---
1 --b---
2 None
3 --d---
dtype: object
"""
if not isinstance(fillchar, str):
msg = (
f"fillchar must be a character, not {type(fillchar).__name__}"
)
raise TypeError(msg)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not pd.api.types.is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
return self._return_or_inplace(
cpp_center(self._column, width, fillchar), **kwargs
)
def ljust(self, width, fillchar=" ", **kwargs):
"""
Filling right side of strings in the Series/Index with an additional
character. Equivalent to `str.ljust()
<https://docs.python.org/3/library/stdtypes.html#str.ljust>`_.
Parameters
----------
width : int
Minimum width of resulting string;
additional characters will be filled
with ``fillchar``.
fillchar : str, default ' ' (whitespace)
Additional character for filling, default is whitespace.
Returns
-------
Series/Index of str dtype
Returns Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["hello world", "rapids ai"])
>>> s.str.ljust(10, fillchar="_")
0 hello world
1 rapids ai_
dtype: object
>>> s = cudf.Series(["a", "", "ab", "__"])
>>> s.str.ljust(1, fillchar="-")
0 a
1 -
2 ab
3 __
dtype: object
"""
if not isinstance(fillchar, str):
msg = (
f"fillchar must be a character, not {type(fillchar).__name__}"
)
raise TypeError(msg)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not pd.api.types.is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
return self._return_or_inplace(
cpp_ljust(self._column, width, fillchar), **kwargs
)
def rjust(self, width, fillchar=" ", **kwargs):
"""
Filling left side of strings in the Series/Index with an additional
character. Equivalent to `str.rjust()
<https://docs.python.org/3/library/stdtypes.html#str.rjust>`_.
Parameters
----------
width : int
Minimum width of resulting string;
additional characters will be filled
with fillchar.
fillchar : str, default ' ' (whitespace)
Additional character for filling, default is whitespace.
Returns
-------
Series/Index of str dtype
Returns Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["hello world", "rapids ai"])
>>> s.str.rjust(20, fillchar="_")
0 _________hello world
1 ___________rapids ai
dtype: object
>>> s = cudf.Series(["a", "", "ab", "__"])
>>> s.str.rjust(1, fillchar="-")
0 a
1 -
2 ab
3 __
dtype: object
"""
if not isinstance(fillchar, str):
msg = (
f"fillchar must be a character, not {type(fillchar).__name__}"
)
raise TypeError(msg)
if len(fillchar) != 1:
raise TypeError("fillchar must be a character, not str")
if not pd.api.types.is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
return self._return_or_inplace(
cpp_rjust(self._column, width, fillchar), **kwargs
)
def strip(self, to_strip=None, **kwargs):
"""
Remove leading and trailing characters.
Strip whitespaces (including newlines) or a set of
specified characters from each string in the Series/Index
from left and right sides. Equivalent to `str.strip()
<https://docs.python.org/3/library/stdtypes.html#str.strip>`_.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters
will be stripped. If None then whitespaces are removed.
Returns
-------
Series/Index of str dtype
Returns Series or Index.
See also
--------
lstrip
Remove leading characters in Series/Index.
rstrip
Remove trailing characters in Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['1. Ant. ', '2. Bee!\\n', '3. Cat?\\t', None])
>>> s
0 1. Ant.
1 2. Bee!\\n
2 3. Cat?\\t
3 None
dtype: object
>>> s.str.strip()
0 1. Ant.
1 2. Bee!
2 3. Cat?
3 None
dtype: object
>>> s.str.strip('123.!? \\n\\t')
0 Ant
1 Bee
2 Cat
3 None
dtype: object
"""
if to_strip is None:
to_strip = ""
return self._return_or_inplace(
cpp_strip(self._column, as_scalar(to_strip)), **kwargs
)
def lstrip(self, to_strip=None, **kwargs):
"""
Remove leading and trailing characters.
Strip whitespaces (including newlines)
or a set of specified characters from
each string in the Series/Index from left side.
Equivalent to `str.lstrip()
<https://docs.python.org/3/library/stdtypes.html#str.lstrip>`_.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will
be stripped. If None then whitespaces are removed.
Returns
-------
Series or Index of object
See also
--------
strip
Remove leading and trailing characters in Series/Index.
rstrip
Remove trailing characters in Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['1. Ant. ', '2. Bee!\\n', '3. Cat?\\t', None])
>>> s.str.lstrip('123.')
0 Ant.
1 Bee!\\n
2 Cat?\\t
3 None
dtype: object
"""
if to_strip is None:
to_strip = ""
return self._return_or_inplace(
cpp_lstrip(self._column, as_scalar(to_strip)), **kwargs
)
def rstrip(self, to_strip=None, **kwargs):
"""
Remove leading and trailing characters.
Strip whitespaces (including newlines)
or a set of specified characters from each
string in the Series/Index from right side.
Equivalent to `str.rstrip()
<https://docs.python.org/3/library/stdtypes.html#str.rstrip>`_.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to
be removed. All combinations of this
set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
Series/Index of str dtype
Returns Series or Index.
See also
--------
strip
Remove leading and trailing characters in Series/Index.
lstrip
Remove leading characters in Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['1. Ant. ', '2. Bee!\\n', '3. Cat?\\t', None])
>>> s
0 1. Ant.
1 2. Bee!\\n
2 3. Cat?\\t
3 None
dtype: object
>>> s.str.rstrip('.!? \\n\\t')
0 1. Ant
1 2. Bee
2 3. Cat
3 None
dtype: object
"""
if to_strip is None:
to_strip = ""
return self._return_or_inplace(
cpp_rstrip(self._column, as_scalar(to_strip)), **kwargs
)
def wrap(self, width, **kwargs):
"""
Wrap long strings in the Series/Index to be formatted in
paragraphs with length less than a given width.
Parameters
----------
width : int
Maximum line width.
Returns
-------
Series or Index
Notes
-----
The parameters `expand_tabs`, `replace_whitespace`,
`drop_whitespace`, `break_long_words` and `break_on_hyphens`
are not yet supported and will raise a
NotImplementedError if they are set to any value.
This method currently achieves behavior matching R's
stringr library ``str_wrap`` function; the equivalent
pandas implementation can be obtained using the
following parameter setting:
expand_tabs = False
replace_whitespace = True
drop_whitespace = True
break_long_words = False
break_on_hyphens = False
Examples
--------
>>> import cudf
>>> data = ['line to be wrapped', 'another line to be wrapped']
>>> s = cudf.Series(data)
>>> s.str.wrap(12)
0 line to be\\nwrapped
1 another line\\nto be\\nwrapped
dtype: object
"""
if not pd.api.types.is_integer(width):
msg = f"width must be of integer type, not {type(width).__name__}"
raise TypeError(msg)
expand_tabs = kwargs.get("expand_tabs", None)
if expand_tabs is True:
raise NotImplementedError("`expand_tabs=True` is not supported")
elif expand_tabs is None:
warnings.warn(
"wrap current implementation defaults to `expand_tabs`=False"
)
replace_whitespace = kwargs.get("replace_whitespace", True)
if not replace_whitespace:
raise NotImplementedError(
"`replace_whitespace=False` is not supported"
)
drop_whitespace = kwargs.get("drop_whitespace", True)
if not drop_whitespace:
raise NotImplementedError(
"`drop_whitespace=False` is not supported"
)
break_long_words = kwargs.get("break_long_words", None)
if break_long_words is True:
raise NotImplementedError(
"`break_long_words=True` is not supported"
)
elif break_long_words is None:
warnings.warn(
"wrap current implementation defaults to \
`break_long_words`=False"
)
break_on_hyphens = kwargs.get("break_on_hyphens", None)
if break_on_hyphens is True:
raise NotImplementedError(
"`break_on_hyphens=True` is not supported"
)
elif break_on_hyphens is None:
warnings.warn(
"wrap current implementation defaults to \
`break_on_hyphens`=False"
)
return self._return_or_inplace(cpp_wrap(self._column, width), **kwargs)
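# A rough pandas equivalent of the settings listed in the docstring above
# (an illustrative sketch, not the cudf implementation itself):
#
#     import pandas as pd
#     pd.Series(data).str.wrap(
#         12,
#         expand_tabs=False,
#         replace_whitespace=True,
#         drop_whitespace=True,
#         break_long_words=False,
#         break_on_hyphens=False,
#     )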
def count(self, pat, flags=0, **kwargs):
"""
Count occurrences of pattern in each string of the Series/Index.
This function is used to count the number of times a particular
regex pattern is repeated in each of the string elements of the Series.
Parameters
----------
pat : str
Valid regular expression.
Returns
-------
Series or Index
Notes
-----
- `flags` parameter is currently not supported.
- Some characters need to be escaped when passing
in ``pat``. For example, ``'$'`` has a special meaning in regex
and must be escaped when finding this literal character.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['A', 'B', 'Aaba', 'Baca', None, 'CABA', 'cat'])
>>> s.str.count('a')
0 0
1 0
2 2
3 2
4 null
5 0
6 1
dtype: int32
Escape ``'$'`` to find the literal dollar sign.
>>> s = cudf.Series(['$', 'B', 'Aab$', '$$ca', 'C$B$', 'cat'])
>>> s.str.count('\$') # noqa W605
0 1
1 0
2 1
3 2
4 2
5 0
dtype: int32
This is also available on Index.
>>> index = cudf.core.index.StringIndex(['A', 'A', 'Aaba', 'cat'])
>>> index.str.count('a')
Int64Index([0, 0, 2, 1], dtype='int64')
"""
if flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
return self._return_or_inplace(
cpp_count_re(self._column, pat), **kwargs
)
def findall(self, pat, flags=0, **kwargs):
"""
Find all occurrences of pattern or regular expression in the
Series/Index.
Parameters
----------
pat : str
Pattern or regular expression.
Returns
-------
DataFrame
All non-overlapping matches of pattern or
regular expression in each string of this Series/Index.
Notes
-----
`flags` parameter is currently not supported.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['Lion', 'Monkey', 'Rabbit'])
The search for the pattern 'Monkey' returns one match:
>>> s.str.findall('Monkey')
0
0 None
1 Monkey
2 None
When the pattern matches more than one string
in the Series, all matches are returned:
>>> s.str.findall('on')
0
0 on
1 on
2 None
Regular expressions are supported too. For instance,
the search for all the strings ending with
the word 'on' is shown next:
>>> s.str.findall('on$')
0
0 on
1 None
2 None
If the pattern is found more than once in the same
string, then multiple strings are returned as columns:
>>> s.str.findall('b')
0 1
0 None None
1 None None
2 b b
"""
if flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
kwargs.setdefault("expand", True)
return self._return_or_inplace(
cpp_findall(self._column, pat), **kwargs
)
def isempty(self, **kwargs):
"""
Check whether each string is an empty string.
Returns : Series or Index of bool
Series or Index of boolean values with the same length as
the original Series/Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["1", "abc", "", " ", None])
>>> s.str.isempty()
0 False
1 False
2 True
3 False
4 False
dtype: bool
"""
return self._return_or_inplace(
(self._parent == "").fillna(False), **kwargs
)
def isspace(self, **kwargs):
"""
Check whether all characters in each string are whitespace.
This is equivalent to running the Python string method
`str.isspace()
<https://docs.python.org/3/library/stdtypes.html#str.isspace>`_
for each element of the Series/Index.
If a string has zero characters, False is returned
for that check.
Returns : Series or Index of bool
Series or Index of boolean values with the same length as
the original Series/Index.
See also
--------
isalnum
Check whether all characters are alphanumeric.
isalpha
Check whether all characters are alphabetic.
isdecimal
Check whether all characters are decimal.
isdigit
Check whether all characters are digits.
isinteger
Check whether all characters are integer.
isnumeric
Check whether all characters are numeric.
isfloat
Check whether all characters are float.
islower
Check whether all characters are lowercase.
isupper
Check whether all characters are uppercase.
Examples
--------
>>> import cudf
>>> s = cudf.Series([' ', '\\t\\r\\n ', ''])
>>> s.str.isspace()
0 True
1 True
2 False
dtype: bool
"""
return self._return_or_inplace(cpp_isspace(self._column), **kwargs)
def endswith(self, pat, **kwargs):
"""
Test if the end of each string element matches a pattern.
Parameters
----------
pat : str or list-like
If `pat` is a `str`, evaluates whether each string of
the series ends with `pat`.
If `pat` is a list-like, evaluates whether `self[i]`
ends with `pat[i]`.
Regular expressions are not accepted.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given
pattern matches the end of each string element.
Notes
-----
`na` parameter is not yet supported, as cudf uses
native strings instead of Python objects.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['bat', 'bear', 'caT', None])
>>> s
0 bat
1 bear
2 caT
3 None
dtype: object
>>> s.str.endswith('t')
0 True
1 False
2 False
3 null
dtype: bool
"""
if "na" in kwargs:
warnings.warn(
"`na` parameter is not yet supported, \
as cudf uses native strings instead of Python objects"
)
if pat is None:
result_col = column.column_empty(
len(self._column), dtype="bool", masked=True
)
elif is_scalar(pat):
result_col = cpp_endswith(self._column, as_scalar(pat, "str"))
else:
result_col = cpp_endswith_multiple(
self._column, column.as_column(pat, dtype="str")
)
return self._return_or_inplace(result_col, **kwargs)
def startswith(self, pat, **kwargs):
"""
Test if the start of each string element matches a pattern.
Equivalent to `str.startswith()
<https://docs.python.org/3/library/stdtypes.html#str.startswith>`_.
Parameters
----------
pat : str or list-like
If `pat` is a `str`, evaluates whether each string of
the series starts with `pat`.
If `pat` is a list-like, evaluates whether `self[i]`
starts with `pat[i]`.
Regular expressions are not accepted.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given
pattern matches the start of each string element.
See also
--------
endswith
Same as startswith, but tests the end of string.
contains
Tests if string element contains a pattern.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['bat', 'Bear', 'cat', None])
>>> s
0 bat
1 Bear
2 cat
3 None
dtype: object
>>> s.str.startswith('b')
0 True
1 False
2 False
3 null
dtype: bool
"""
if "na" in kwargs:
warnings.warn(
"`na` parameter is not yet supported, \
as cudf uses native strings instead of Python objects"
)
if pat is None:
result_col = column.column_empty(
len(self._column), dtype="bool", masked=True
)
elif is_scalar(pat):
result_col = cpp_startswith(self._column, as_scalar(pat, "str"))
else:
result_col = cpp_startswith_multiple(
self._column, column.as_column(pat, dtype="str")
)
return self._return_or_inplace(result_col, **kwargs)
def find(self, sub, start=0, end=None, **kwargs):
"""
Return lowest indexes in each strings in the Series/Index
where the substring is fully contained between ``[start:end]``.
Return -1 on failure.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of int
Examples
--------
>>> import cudf
>>> s = cudf.Series(['abc', 'a','b' ,'ddb'])
>>> s.str.find('b')
0 1
1 -1
2 0
3 2
dtype: int32
Parameters such as `start` and `end` can also be used.
>>> s.str.find('b', start=1, end=5)
0 1
1 -1
2 -1
3 2
dtype: int32
"""
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
if end is None:
end = -1
result_col = cpp_find(self._column, as_scalar(sub, "str"), start, end)
return self._return_or_inplace(result_col, **kwargs)
def rfind(self, sub, start=0, end=None, **kwargs):
"""
Return highest indexes in each strings in the Series/Index
where the substring is fully contained between ``[start:end]``.
Return -1 on failure. Equivalent to standard `str.rfind()
<https://docs.python.org/3/library/stdtypes.html#str.rfind>`_.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of int
See also
--------
find
Return lowest indexes in each strings.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["abc", "hello world", "rapids ai"])
>>> s.str.rfind('a')
0 0
1 -1
2 7
dtype: int32
Using `start` and `end` parameters.
>>> s.str.rfind('a', start=2, end=5)
0 -1
1 -1
2 -1
dtype: int32
"""
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
if end is None:
end = -1
result_col = cpp_rfind(self._column, as_scalar(sub, "str"), start, end)
return self._return_or_inplace(result_col, **kwargs)
def index(self, sub, start=0, end=None, **kwargs):
"""
Return lowest indexes in each strings where the substring
is fully contained between ``[start:end]``. This is the same
as str.find except instead of returning -1, it raises a ValueError
when the substring is not found.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of object
Examples
--------
>>> import cudf
>>> s = cudf.Series(['abc', 'a','b' ,'ddb'])
>>> s.str.index('b')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: substring not found
Parameters such as `start` and `end` can also be used.
>>> s = cudf.Series(['abc', 'abb','ab' ,'ddb'])
>>> s.str.index('b', start=1, end=5)
0 1
1 1
2 1
3 2
dtype: int32
"""
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
if end is None:
end = -1
result_col = cpp_find(self._column, as_scalar(sub, "str"), start, end)
result = self._return_or_inplace(result_col, **kwargs)
if (result == -1).any():
raise ValueError("substring not found")
else:
return result
def rindex(self, sub, start=0, end=None, **kwargs):
"""
Return highest indexes in each strings where the substring
is fully contained between ``[start:end]``. This is the same
as ``str.rfind`` except instead of returning -1, it raises a
``ValueError`` when the substring is not found.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of object
Examples
--------
>>> import cudf
>>> s = cudf.Series(['abc', 'a','b' ,'ddb'])
>>> s.str.rindex('b')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: substring not found
Parameters such as `start` and `end` can also be used.
>>> s = cudf.Series(['abc', 'abb','ab' ,'ddb'])
>>> s.str.rindex('b', start=1, end=5)
0 1
1 2
2 1
3 2
dtype: int32
"""
if not isinstance(sub, str):
msg = "expected a string object, not {0}"
raise TypeError(msg.format(type(sub).__name__))
if end is None:
end = -1
result_col = cpp_rfind(self._column, as_scalar(sub, "str"), start, end)
result = self._return_or_inplace(result_col, **kwargs)
if (result == -1).any():
raise ValueError("substring not found")
else:
return result
def match(self, pat, case=True, flags=0, **kwargs):
"""
Determine if each string matches a regular expression.
Parameters
----------
pat : str
Character sequence or regular expression.
Returns
-------
Series or Index of boolean values.
Notes
-----
Parameters currently not supported are: `case`, `flags` and `na`.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["rapids", "ai", "cudf"])
Checking for strings starting with `a`.
>>> s.str.match('a')
0 False
1 True
2 False
dtype: bool
Checking for strings starting with any of `a` or `c`.
>>> s.str.match('[ac]')
0 False
1 True
2 True
dtype: bool
"""
if case is not True:
raise NotImplementedError("`case` parameter is not yet supported")
if flags != 0:
raise NotImplementedError("`flags` parameter is not yet supported")
if "na" in kwargs:
warnings.warn(
"`na` parameter is not yet supported, \
as cudf uses native strings instead of Python objects"
)
return self._return_or_inplace(
cpp_match_re(self._column, pat), **kwargs
)
def url_decode(self, **kwargs):
"""
Returns a URL-decoded format of each string.
No format checking is performed. All characters
are expected to be encoded as UTF-8 hex values.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['A%2FB-C%2FD', 'e%20f.g', '4-5%2C6'])
>>> s.str.url_decode()
0 A/B-C/D
1 e f.g
2 4-5,6
dtype: object
>>> data = ["https%3A%2F%2Frapids.ai%2Fstart.html", "https%3A%2F%2Fmedium.com%2Frapids-ai"] # noqa E501
>>> s = cudf.Series(data)
>>> s.str.url_decode()
0 https://rapids.ai/start.html
1 https://medium.com/rapids-ai
dtype: object
"""
return self._return_or_inplace(cpp_url_decode(self._column), **kwargs)
def url_encode(self, **kwargs):
"""
Returns a URL-encoded format of each string.
No format checking is performed.
All characters are encoded except for ASCII letters,
digits, and these characters: ``'.', '_', '-', '~'``.
Encoding converts to hex using UTF-8 encoded bytes.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(['A/B-C/D', 'e f.g', '4-5,6'])
>>> s.str.url_encode()
0 A%2FB-C%2FD
1 e%20f.g
2 4-5%2C6
dtype: object
>>> data = ["https://rapids.ai/start.html", "https://medium.com/rapids-ai"] # noqa E501
>>> s = cudf.Series(data)
>>> s.str.url_encode()
0 https%3A%2F%2Frapids.ai%2Fstart.html
1 https%3A%2F%2Fmedium.com%2Frapids-ai
dtype: object
"""
return self._return_or_inplace(cpp_url_encode(self._column), **kwargs)
def code_points(self, **kwargs):
"""
Returns an array by filling it with the UTF-8 code point
values for each character of each string.
This function uses the ``len()`` method to determine
the size of each sub-array of integers.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> s = cudf.Series(["a","xyz", "รฉee"])
>>> s.str.code_points()
0 97
1 120
2 121
3 122
4 50089
5 101
6 101
dtype: int32
>>> s = cudf.Series(["abc"])
>>> s.str.code_points()
0 97
1 98
2 99
dtype: int32
"""
new_col = cpp_code_points(self._column)
if self._parent is None:
return new_col
elif isinstance(self._parent, cudf.Series):
return cudf.Series(new_col, name=self._parent.name)
elif isinstance(self._parent, cudf.Index):
return cudf.core.index.as_index(new_col, name=self._parent.name)
def translate(self, table, **kwargs):
"""
Map all characters in the string through the given
mapping table.
Equivalent to standard `str.translate()
<https://docs.python.org/3/library/stdtypes.html#str.translate>`_.
Parameters
----------
table : dict
Table is a mapping of Unicode ordinals to Unicode
ordinals, strings, or None.
Unmapped characters are left untouched.
`str.maketrans()
<https://docs.python.org/3/library/stdtypes.html#str.maketrans>`_
is a helper function for making translation tables.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> data = ['lower', 'CAPITALS', 'this is a sentence','SwApCaSe']
>>> s = cudf.Series(data)
>>> s.str.translate({'a': "1"})
0 lower
1 CAPITALS
2 this is 1 sentence
3 SwApC1Se
dtype: object
>>> s.str.translate({'a': "1", "e":"#"})
0 low#r
1 CAPITALS
2 this is 1 s#nt#nc#
3 SwApC1S#
dtype: object
"""
table = str.maketrans(table)
return self._return_or_inplace(
cpp_translate(self._column, table), **kwargs
)
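# Illustrative alternative (a sketch): the mapping dicts shown in the examples
# above can also be built with ``str.maketrans``, e.g.
#
#     table = str.maketrans("ae", "1#")
#     s.str.translate(table)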
def normalize_spaces(self, **kwargs):
"""
Remove extra whitespace between tokens and trim whitespace
from the beginning and the end of each string.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> ser = cudf.Series(["hello \\t world"," test string "])
>>> ser.str.normalize_spaces()
0 hello world
1 test string
dtype: object
"""
return self._return_or_inplace(
cpp_normalize_spaces(self._column), **kwargs
)
def tokenize(self, delimiter=" ", **kwargs):
"""
Each string is split into tokens using the provided delimiter(s).
The sequence returned contains the tokens in the order
they were found.
Parameters
----------
delimiter : str or list of strs, Default is whitespace.
The string used to locate the split points of each string.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> data = ["hello world", "goodbye world", "hello goodbye"]
>>> ser = cudf.Series(data)
>>> ser.str.tokenize()
0 hello
1 world
2 goodbye
3 world
4 hello
5 goodbye
dtype: object
"""
delimiter = _massage_string_arg(delimiter, "delimiter", allow_col=True)
kwargs.setdefault("retain_index", False)
return self._return_or_inplace(
cpp_tokenize(self._column, delimiter), **kwargs
)
def character_tokenize(self, **kwargs):
"""
Each string is split into individual characters.
The sequence returned contains each character as an individual string.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> data = ["hello world", None, "goodbye, thank you."]
>>> ser = cudf.Series(data)
>>> ser.str.character_tokenize()
0 h
1 e
2 l
3 l
4 o
5
6 w
7 o
8 r
9 l
10 d
11 g
12 o
13 o
14 d
15 b
16 y
17 e
18 ,
19
20 t
21 h
22 a
23 n
24 k
25
26 y
27 o
28 u
29 .
dtype: object
"""
result_col = cpp_character_tokenize(self._column)
if self._parent is None:
return result_col
elif isinstance(self._parent, cudf.Series):
return cudf.Series(result_col, name=self._parent.name)
elif isinstance(self._parent, cudf.Index):
return cudf.core.index.as_index(result_col, name=self._parent.name)
def token_count(self, delimiter=" ", **kwargs):
"""
Each string is split into tokens using the provided delimiter.
The returned integer sequence is the number of tokens in each string.
Parameters
----------
delimiter : str or list of strs, Default is whitespace.
The characters or strings used to locate the
split points of each string.
Returns
-------
Series or Index.
Examples
--------
>>> import cudf
>>> ser = cudf.Series(["hello world","goodbye",""])
>>> ser.str.token_count()
0 2
1 1
2 0
dtype: int32
"""
delimiter = _massage_string_arg(delimiter, "delimiter", allow_col=True)
return self._return_or_inplace(
cpp_count_tokens(self._column, delimiter), **kwargs
)
def ngrams(self, n=2, separator="_", **kwargs):
"""
Generate the n-grams from a set of tokens; each record
in the series is treated as a token.
You can generate tokens from a Series instance using
the ``Series.str.tokenize()`` function.
Parameters
----------
n : int
The degree of the n-gram (number of consecutive tokens).
Default of 2 for bigrams.
separator : str
The separator to use between tokens within an n-gram.
Default is '_'.
Examples
--------
>>> import cudf
>>> str_series = cudf.Series(['this is my', 'favorite book'])
>>> str_series.str.ngrams(2, "_")
0 this is my_favorite book
dtype: object
>>> str_series = cudf.Series(['abc','def','xyz','hhh'])
>>> str_series.str.ngrams(2, "_")
0 abc_def
1 def_xyz
2 xyz_hhh
dtype: object
"""
separator = _massage_string_arg(separator, "separator")
kwargs.setdefault("retain_index", False)
return self._return_or_inplace(
cpp_generate_ngrams(self._column, n, separator), **kwargs
)
def character_ngrams(self, n=2, **kwargs):
"""
Generate the n-grams from characters in a column of strings.
Parameters
----------
n : int
The degree of the n-gram (number of consecutive characters).
Default of 2 for bigrams.
Examples
--------
>>> import cudf
>>> str_series = cudf.Series(['abcd','efgh','xyz'])
>>> str_series.str.character_ngrams(2)
0 ab
1 bc
2 cd
3 ef
4 fg
5 gh
6 xy
7 yz
dtype: object
>>> str_series.str.character_ngrams(3)
0 abc
1 bcd
2 efg
3 fgh
4 xyz
dtype: object
"""
kwargs.setdefault("retain_index", False)
return self._return_or_inplace(
cpp_generate_character_ngrams(self._column, n), **kwargs
)
def ngrams_tokenize(self, n=2, delimiter=" ", separator="_", **kwargs):
"""
Generate the n-grams using tokens from each string.
This will tokenize each string and then generate ngrams for each
string.
Parameters
----------
n : int, Default 2.
The degree of the n-gram (number of consecutive tokens).
delimiter : str, Default is white-space.
The character used to locate the split points of each string.
separator : str, Default is '_'.
The separator to use between tokens within an n-gram.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> ser = cudf.Series(['this is the', 'best book'])
>>> ser.str.ngrams_tokenize(n=2, separator='_')
0 this_is
1 is_the
2 best_book
dtype: object
"""
delimiter = _massage_string_arg(delimiter, "delimiter")
separator = _massage_string_arg(separator, "separator")
kwargs.setdefault("retain_index", False)
return self._return_or_inplace(
cpp_ngrams_tokenize(self._column, n, delimiter, separator),
**kwargs,
)
def replace_tokens(self, targets, replacements, delimiter=None, **kwargs):
"""
The targets tokens are searched for within each string in the series
and replaced with the corresponding replacements if found.
Tokens are identified by the delimiter character provided.
Parameters
----------
targets : array-like, Sequence or Series
The tokens to search for inside each string.
replacements : array-like, Sequence, Series or str
The strings to replace each found target token with.
Alternately, this can be a single str instance and would be
used as replacement for each string found.
delimiter : str
The character used to locate the tokens of each string.
Default is whitespace.
Returns
-------
Series or Index of object.
Examples
--------
>>> import cudf
>>> sr = cudf.Series(["this is me", "theme music", ""])
>>> targets = cudf.Series(["is", "me"])
>>> sr.str.replace_tokens(targets=targets, replacements="_")
0 this _ _
1 theme music
2
dtype: object
>>> sr = cudf.Series(["this;is;me", "theme;music", ""])
>>> sr.str.replace_tokens(targets=targets, replacements=":")
0 this;is;me
1 theme;music
2
dtype: object
"""
if can_convert_to_column(targets):
targets_column = column.as_column(targets)
else:
raise TypeError(
f"targets should be an array-like or a Series object, "
f"found {type(targets)}"
)
if is_scalar(replacements):
replacements_column = column.as_column([replacements])
elif can_convert_to_column(replacements):
replacements_column = column.as_column(replacements)
if len(targets_column) != len(replacements_column):
raise ValueError(
"targets and replacements should be same size"
" sequences unless replacements is a string."
)
else:
raise TypeError(
f"replacements should be an str, array-like or Series object, "
f"found {type(replacements)}"
)
if delimiter is None:
delimiter = ""
elif not is_scalar(delimiter):
raise TypeError(
f"Type of delimiter should be a string,"
f" found {type(delimiter)}"
)
return self._return_or_inplace(
cpp_replace_tokens(
self._column,
targets_column,
replacements_column,
as_scalar(delimiter, dtype="str"),
),
**kwargs,
)
def subword_tokenize(
self,
hash_file,
max_length=64,
stride=48,
do_lower=True,
do_truncate=False,
max_num_strings=100,
max_num_chars=100000,
max_rows_tensor=500,
**kwargs,
):
"""
Run CUDA BERT subword tokenizer on cuDF strings column.
Encodes words to token ids using vocabulary from a pretrained
tokenizer.
Parameters
----------
hash_file : str
Path to hash file containing vocabulary of words with token-ids.
max_length : int, Default is 64
Limits the length of the sequence returned.
If tokenized string is shorter than max_length,
output will be padded with 0s.
If the tokenized string is longer than max_length and
do_truncate == False, there will be multiple returned
sequences containing the overflowing token-ids.
stride : int, Default is 48
If do_truncate == False and the tokenized string is larger
than max_length, the sequences containing the overflowing
token-ids can contain duplicated token-ids from the main
sequence. If max_length is equal to stride there are no
duplicated-id tokens. If stride is 80% of max_length,
20% of the first sequence will be repeated on the second
sequence and so on until the entire sentence is encoded.
do_lower : bool, Default is True
If set to true, original text will be lowercased before encoding.
do_truncate : bool, Default is False
If set to true, strings will be truncated and padded to
max_length. Each input string will result in exactly one output
sequence. If set to false, there may be multiple output
sequences when the max_length is smaller than generated tokens.
max_num_strings : int, Default is 100
The maximum number of strings to be encoded.
max_num_chars : int, Default is 100000
The maximum number of characters in the input strings column.
max_rows_tensor : int, Default is 500
The maximum number of rows in the output
Returns
-------
token-ids : Column
The token-ids for each string padded with 0s to max_length.
attention-mask : Column
The mask for token-ids result where corresponding positions
identify valid token-id values.
metadata : Column
Each row contains the index id of the original string and the
first and last index of the token-ids that are non-padded and
non-overlapping.
Examples
--------
>>> import cudf
>>> ser = cudf.Series(['this is the', 'best book'])
>>> tokens, masks, metadata = ser.str.subword_tokenize(
...     "bert_hash_table.txt")
"""
tokens, masks, metadata = cpp_subword_tokenize(
self._column,
hash_file,
max_length,
stride,
do_lower,
do_truncate,
max_num_strings,
max_num_chars,
max_rows_tensor,
)
return (
cupy.asarray(tokens),
cupy.asarray(masks),
cupy.asarray(metadata),
)
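# Illustrative post-processing (a sketch; assumes the flat token buffer is
# laid out row-major with ``max_length`` entries per output sequence, as the
# padding behaviour described in the docstring above suggests):
#
#     tokens, masks, metadata = ser.str.subword_tokenize(
#         "bert_hash_table.txt", max_length=64
#     )
#     token_ids = tokens.reshape(-1, 64)   # one row per output sequence
#     attention = masks.reshape(-1, 64)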
def _massage_string_arg(value, name, allow_col=False):
if isinstance(value, str):
return as_scalar(value, dtype="str")
if isinstance(value, Scalar) and is_string_dtype(value.dtype):
return value
allowed_types = ["Scalar"]
if allow_col:
if isinstance(value, list):
return column.as_column(value, dtype="str")
if isinstance(value, Column) and is_string_dtype(value.dtype):
return value
allowed_types.append("Column")
raise ValueError(
"Expected {} for {} but got {}".format(
_expected_types_format(allowed_types), name, type(value)
)
)
def _expected_types_format(types):
if len(types) == 1:
return types[0]
return ", ".join(types[:-1]) + ", or " + types[-1]
class StringColumn(column.ColumnBase):
"""Implements operations for Columns of String type
"""
def __init__(
self, mask=None, size=None, offset=0, null_count=None, children=()
):
"""
Parameters
----------
mask : Buffer
The validity mask
offset : int
Data offset
children : Tuple[Column]
Two non-null columns containing the string data and offsets
respectively
"""
dtype = np.dtype("object")
if size is None:
for child in children:
assert child.offset == 0
if len(children) == 0:
size = 0
elif children[0].size == 0:
size = 0
else:
# one less because the last element of offsets is the number of
# bytes in the data buffer
size = children[0].size - 1
size = size - offset
super().__init__(
None,
size,
dtype,
mask=mask,
offset=offset,
null_count=null_count,
children=children,
)
@property
def base_size(self):
if len(self.base_children) == 0:
return 0
else:
return int(
(self.base_children[0].size - 1)
/ self.base_children[0].dtype.itemsize
)
def sum(self, dtype=None):
return self.str().cat()
def product(self, dtype=None):
raise TypeError("can't multiply sequence by non-int of type 'object'")
def mean(self, dtype=np.float64):
raise NotImplementedError(
"mean for Series of type 'object' is not yet implemented."
)
def var(self, ddof=1, dtype=np.float64):
raise TypeError("unsupported operation for object of type 'object'")
def std(self, ddof=1, dtype=np.float64):
raise TypeError("unsupported operation for object of type 'object'")
def set_base_data(self, value):
if value is not None:
raise RuntimeError(
"StringColumns do not use data attribute of Column, use "
"`set_base_children` instead"
)
else:
super().set_base_data(value)
def set_base_mask(self, value):
super().set_base_mask(value)
def set_base_children(self, value):
# TODO: Implement dtype validation of the children here somehow
super().set_base_children(value)
@property
def children(self):
if self._children is None:
if len(self.base_children) == 0:
self._children = ()
elif self.offset == 0 and self.base_children[0].size == (
self.size + 1
):
self._children = self.base_children
else:
# First get the base columns for chars and offsets
chars_column = self.base_children[1]
offsets_column = self.base_children[0]
# Shift offsets column by the parent offset.
offsets_column = column.build_column(
data=offsets_column.base_data,
dtype=offsets_column.dtype,
mask=offsets_column.base_mask,
size=self.size + 1,
offset=self.offset,
)
# Now run a subtraction binary op to shift all of the offsets
# by the respective number of characters relative to the
# parent offset
chars_offset = libcudf.copying.get_element(offsets_column, 0)
offsets_column = offsets_column.binary_operator(
"sub", chars_offset
)
# Shift the chars offset by the new first element of the
# offsets column
chars_size = libcudf.copying.get_element(
offsets_column, self.size
)
chars_column = column.build_column(
data=chars_column.base_data,
dtype=chars_column.dtype,
mask=chars_column.base_mask,
size=chars_size.value,
offset=chars_offset.value,
)
self._children = (offsets_column, chars_column)
return self._children
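# Worked example of the offset arithmetic above (illustrative only): for base
# chars b"abcdefghi" with base offsets [0, 2, 5, 9] (strings "ab", "cde",
# "fghi"), a view with offset=1 and size=2 slices the offsets to [2, 5, 9],
# subtracts chars_offset=2 to get [0, 3, 7], and builds a chars column of
# size 7 starting at byte 2, i.e. b"cdefghi".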
def __contains__(self, item):
return True in self.str().contains(f"^{item}$")
def str(self, parent=None):
return StringMethods(self, parent=parent)
def __sizeof__(self):
n = 0
if len(self.base_children) == 2:
n += (
self.base_children[0].__sizeof__()
+ self.base_children[1].__sizeof__()
)
if self.base_mask is not None:
n += self.base_mask.size
return n
def _memory_usage(self, **kwargs):
return self.__sizeof__()
def unary_operator(self, unaryop):
raise TypeError(
f"Series of dtype `str` cannot perform the operation: "
f"{unaryop}"
)
def __len__(self):
return self.size
def _set_mask(self, value):
super()._set_mask(value)
@property
def _nbytes(self):
if self.size == 0:
return 0
else:
return self.children[1].size
def as_numerical_column(self, dtype, **kwargs):
out_dtype = np.dtype(dtype)
kwargs.update(dtype=out_dtype)
if out_dtype.type is np.datetime64:
if "format" not in kwargs:
if len(self) > 0:
# infer on host from the first not na element
fmt = datetime.infer_format(self[self.notna()][0])
kwargs.update(format=fmt)
# Check for None strings
if len(self) > 0 and self.binary_operator("eq", "None").any():
raise ValueError("Could not convert `None` value to datetime")
boolean_match = self.binary_operator("eq", "NaT")
elif out_dtype.kind in {"i", "u"}:
if not cpp_is_integer(self).all():
raise ValueError(
"Could not convert strings to integer \
type due to presence of non-integer values."
)
elif out_dtype.kind == "f":
if not cpp_is_float(self).all():
raise ValueError(
"Could not convert strings to float \
type due to presence of non-floating values."
)
result_col = _str_to_numeric_typecast_functions[out_dtype](
self, **kwargs
)
if (out_dtype.type is np.datetime64) and boolean_match.any():
result_col[boolean_match] = None
return result_col
def as_datetime_column(self, dtype, **kwargs):
return self.as_numerical_column(dtype, **kwargs)
def as_string_column(self, dtype, **kwargs):
return self
def to_arrow(self):
if len(self) == 0:
sbuf = np.empty(0, dtype="int8")
obuf = np.empty(0, dtype="int32")
nbuf = None
else:
sbuf = self.children[1].data.to_host_array().view("int8")
obuf = self.children[0].data.to_host_array().view("int32")
nbuf = None
if self.null_count > 0:
nbuf = self.mask.to_host_array().view("int8")
nbuf = pa.py_buffer(nbuf)
sbuf = pa.py_buffer(sbuf)
obuf = pa.py_buffer(obuf)
if self.null_count == len(self):
return pa.NullArray.from_buffers(
pa.null(), len(self), [pa.py_buffer((b""))], self.null_count
)
else:
return pa.StringArray.from_buffers(
len(self), obuf, sbuf, nbuf, self.null_count
)
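# Buffer roles in the call above (for reference): ``obuf`` holds the int32
# value offsets, ``sbuf`` the UTF-8 character data and ``nbuf`` the optional
# validity bitmap, following pyarrow's
# ``StringArray.from_buffers(length, value_offsets, data, null_bitmap,
# null_count)`` ordering.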
def to_pandas(self, index=None):
pd_series = self.to_arrow().to_pandas()
if index is not None:
pd_series.index = index
return pd_series
def to_array(self, fillna=None):
"""Get a dense numpy array for the data.
Notes
-----
if ``fillna`` is ``None``, null values are skipped. Therefore, the
output size could be smaller.
Raises
------
``NotImplementedError`` if there are nulls
"""
if fillna is not None:
warnings.warn("fillna parameter not supported for string arrays")
return self.to_arrow().to_pandas().values
def __array__(self, dtype=None):
raise TypeError(
"Implicit conversion to a host NumPy array via __array__ is not "
"allowed, Conversion to GPU array in strings is not yet "
"supported.\nTo explicitly construct a host array, "
"consider using .to_array()"
)
def __arrow_array__(self, type=None):
raise TypeError(
"Implicit conversion to a host PyArrow Array via __arrow_array__ "
"is not allowed, To explicitly construct a PyArrow Array, "
"consider using .to_arrow()"
)
def serialize(self):
header = {"null_count": self.null_count}
header["type-serialized"] = pickle.dumps(type(self))
frames = []
sub_headers = []
for item in self.children:
sheader, sframes = item.serialize()
sub_headers.append(sheader)
frames.extend(sframes)
if self.null_count > 0:
frames.append(self.mask)
header["subheaders"] = sub_headers
header["frame_count"] = len(frames)
return header, frames
@classmethod
def deserialize(cls, header, frames):
# Deserialize the mask, value, and offset frames
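# Expected frame layout, mirroring ``serialize`` above:
#   frames[0] -> offsets child column data
#   frames[1] -> chars child column data
#   frames[2] -> validity mask buffer (present only when null_count > 0)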
buffers = [Buffer(each_frame) for each_frame in frames]
if header["null_count"] > 0:
nbuf = buffers[2]
else:
nbuf = None
children = []
for h, b in zip(header["subheaders"], buffers[:2]):
column_type = pickle.loads(h["type-serialized"])
children.append(column_type.deserialize(h, [b]))
col = column.build_column(
data=None, dtype="str", mask=nbuf, children=tuple(children)
)
return col
def can_cast_safely(self, to_dtype):
to_dtype = np.dtype(to_dtype)
if self.dtype == to_dtype:
return True
elif to_dtype.kind in {"i", "u"} and not cpp_is_integer(self).all():
return False
elif to_dtype.kind == "f" and not cpp_is_float(self).all():
return False
else:
return True
def find_and_replace(self, to_replace, replacement, all_nan):
"""
Return col with *to_replace* replaced with *replacement*.
"""
to_replace = column.as_column(to_replace, dtype=self.dtype)
replacement = column.as_column(replacement, dtype=self.dtype)
return libcudf.replace.replace(self, to_replace, replacement)
def fillna(self, fill_value):
if not is_scalar(fill_value):
fill_value = column.as_column(fill_value, dtype=self.dtype)
return libcudf.replace.replace_nulls(self, fill_value, dtype="object")
def _find_first_and_last(self, value):
found_indices = self.str().contains(f"^{value}$")
found_indices = libcudf.unary.cast(found_indices, dtype=np.int32)
first = column.as_column(found_indices).find_first_value(1)
last = column.as_column(found_indices).find_last_value(1)
return first, last
def find_first_value(self, value, closest=False):
return self._find_first_and_last(value)[0]
def find_last_value(self, value, closest=False):
return self._find_first_and_last(value)[1]
def normalize_binop_value(self, other):
if isinstance(other, column.Column):
return other.astype(self.dtype)
elif isinstance(other, str) or other is None:
col = utils.scalar_broadcast_to(
other, size=len(self), dtype="object"
)
return col
else:
raise TypeError("cannot broadcast {}".format(type(other)))
def default_na_value(self):
return None
def binary_operator(self, op, rhs, reflect=False):
lhs = self
if reflect:
lhs, rhs = rhs, lhs
if isinstance(rhs, StringColumn) and op == "add":
return lhs.str().cat(others=rhs)
elif op in ("eq", "ne", "gt", "lt", "ge", "le"):
return _string_column_binop(self, rhs, op=op, out_dtype="bool")
else:
msg = "{!r} operator not supported between {} and {}"
raise TypeError(msg.format(op, type(self), type(rhs)))
@property
def is_unique(self):
return len(self.unique()) == len(self)
@property
def __cuda_array_interface__(self):
raise NotImplementedError(
"Strings are not yet supported via `__cuda_array_interface__`"
)
def _mimic_inplace(self, other_col, inplace=False):
out = super()._mimic_inplace(other_col, inplace=inplace)
return out
@copy_docstring(column.ColumnBase.view)
def view(self, dtype):
if self.null_count > 0:
raise ValueError(
"Can not produce a view of a string column with nulls"
)
dtype = np.dtype(dtype)
str_byte_offset = self.base_children[0][self.offset]
str_end_byte_offset = self.base_children[0][self.offset + self.size]
char_dtype_size = self.base_children[1].dtype.itemsize
n_bytes_to_view = (
str_end_byte_offset - str_byte_offset
) * char_dtype_size
to_view = column.build_column(
self.base_children[1].data,
dtype=self.base_children[1].dtype,
offset=str_byte_offset,
size=n_bytes_to_view,
)
return to_view.view(dtype)
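# Illustrative example (a sketch): a null-free column holding the single
# string "abcd" has 4 bytes of char data, so ``view("int8")`` produces the
# values [97, 98, 99, 100].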
@annotate("BINARY_OP", color="orange", domain="cudf_python")
def _string_column_binop(lhs, rhs, op, out_dtype):
out = libcudf.binaryop.binaryop(lhs=lhs, rhs=rhs, op=op, dtype=out_dtype)
return out
def _get_cols_list(others):
if (
can_convert_to_column(others)
and len(others) > 0
and (
can_convert_to_column(
others.iloc[0]
if isinstance(others, cudf.Series)
else others[0]
)
)
):
"""
If others is a list-like object (in our case lists & tuples)
or just another Series/Index, go ahead with concatenation.
"""
cols_list = [column.as_column(frame, dtype="str") for frame in others]
return cols_list
elif others is not None:
return [column.as_column(others, dtype="str")]
else:
raise TypeError(
"others must be Series, Index, DataFrame, np.ndarrary "
"or list-like (either containing only strings or "
"containing only objects of type Series/Index/"
"np.ndarray[1-dim])"
)
| 30.086291
| 119
| 0.534075
|
6e8cc35961241be2a921d45cead66fa77614f1ea
| 3,987
|
py
|
Python
|
layers/layer1_python3/0200_mflog/mflog/__init__.py
|
thefab/mfcom-1
|
f6a209edb22a782dfb0cf63cc0f62433b1b6e961
|
[
"BSD-3-Clause"
] | null | null | null |
layers/layer1_python3/0200_mflog/mflog/__init__.py
|
thefab/mfcom-1
|
f6a209edb22a782dfb0cf63cc0f62433b1b6e961
|
[
"BSD-3-Clause"
] | null | null | null |
layers/layer1_python3/0200_mflog/mflog/__init__.py
|
thefab/mfcom-1
|
f6a209edb22a782dfb0cf63cc0f62433b1b6e961
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import logging
import logging.config
import json
from jinja2 import Template
MFLOG_DEFAULT_CONFIG_PATH = \
os.path.join(os.environ.get('MFCOM_HOME', ''), "config",
"python_default_logging.json")
MFLOG_CONFIG_PATH = \
os.path.join(os.environ.get('MODULE_HOME', ''), "config",
"python_logging.json")
MODULE = os.environ.get('MODULE')
MODULE_LOG_DEFAULT_LEVEL_VAR = "%s_LOG_DEFAULT_LEVEL" % MODULE
MODULE_LOG_STDOUT_VAR = "%s_LOG_STDOUT" % MODULE
MODULE_LOG_STDERR_VAR = "%s_LOG_STDERR" % MODULE
MODULE_LOG_EXTERNAL_MONITORING_FILE_VAR = \
"%s_LOG_EXTERNAL_MONITORING_FILE" % MODULE
MODULE_LOG_EXTERNAL_MONITORING_LEVEL_VAR = \
"%s_LOG_EXTERNAL_MONITORING_LEVEL" % MODULE
MODULE_LOG_EXTERNAL_MONITORING_FORMATTER_VAR = \
"%s_LOG_EXTERNAL_MONITORING_FORMATTER" % MODULE
def __get_jinja2_env():
module_log_default_level = \
os.environ.get(MODULE_LOG_DEFAULT_LEVEL_VAR,
'NOTSET')
module_log_stdout = os.environ.get(MODULE_LOG_STDOUT_VAR,
'ext://sys.stdout')
module_log_stderr = os.environ.get(MODULE_LOG_STDERR_VAR,
'ext://sys.stderr')
module_log_em_file = \
os.environ.get(MODULE_LOG_EXTERNAL_MONITORING_FILE_VAR, "null")
module_log_em_level = \
os.environ.get(MODULE_LOG_EXTERNAL_MONITORING_LEVEL_VAR, "CRITICAL")
module_log_em_formatter = \
os.environ.get(MODULE_LOG_EXTERNAL_MONITORING_FORMATTER_VAR, "metwork")
jinja2_env = {
'MODULE_LOG_DEFAULT_LEVEL': module_log_default_level,
'MODULE_LOG_STDOUT': module_log_stdout,
'MODULE_LOG_STDERR': module_log_stderr,
'MODULE_LOG_EXTERNAL_MONITORING_FILE': module_log_em_file,
'MODULE_LOG_EXTERNAL_MONITORING_LEVEL': module_log_em_level,
'MODULE_LOG_EXTERNAL_MONITORING_FORMATTER': module_log_em_formatter
}
jinja2_env.update(os.environ)
return jinja2_env
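# Illustrative (hypothetical) use of these variables inside the JSON config
# templates rendered below, e.g. a handler entry such as:
#     "level": "{{MODULE_LOG_DEFAULT_LEVEL}}",
#     "stream": "{{MODULE_LOG_STDOUT}}"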
def set_logging_config():
"""Set the metwork logging config.
If the env var MFLOG_DEBUG_CONFIGURATION is set to 1,
the full configuration applied with logging.config.dictConfig
is printed to stdout for debugging.
"""
with open(MFLOG_DEFAULT_CONFIG_PATH, 'r') as f:
default_config_content = f.read()
template = Template(default_config_content)
jinja2_env = __get_jinja2_env()
default_config_content = template.render(jinja2_env)
overriden_config_content = '{}'
try:
with open(MFLOG_CONFIG_PATH, 'r') as f:
overriden_config_content = f.read()
template = Template(overriden_config_content)
overriden_config_content = template.render(jinja2_env)
except IOError:
pass
try:
config_dict = json.loads(default_config_content)
except ValueError:
print("BAD DEFAULT LOGGING CONFIG")
os._exit(3)
try:
overriden_config = json.loads(overriden_config_content)
except ValueError:
print("BAD LOGGING CONFIG")
os._exit(3)
for key in ('formatters', 'handlers', 'loggers', 'filters'):
try:
config_dict[key].update(overriden_config[key])
except Exception:
pass
if int(os.environ.get('MFLOG_DEBUG_CONFIGURATION', '0')) == 1:
print("WE ARE GOING TO SET PYTHON LOGGING CONFIGURATION:")
print("=================================================")
print(json.dumps(config_dict, indent=4))
print("=================================================")
logging.config.dictConfig(config_dict)
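# Illustrative override file (hypothetical content of MFLOG_CONFIG_PATH); only
# the 'formatters', 'handlers', 'loggers' and 'filters' sections are merged
# over the defaults, as implemented above:
#
#     {
#         "loggers": {
#             "myapp": {"level": "DEBUG"}
#         }
#     }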
# IMPORTANT LINE : set logging config at import
set_logging_config()
def getLogger(*args, **kwargs):
"""Return a python logging logger.
This function is just a wrapper, but by importing and using it
(instead of calling logging.getLogger directly),
you are sure that the logging config has been set.
"""
return logging.getLogger(*args, **kwargs)
| 36.577982
| 79
| 0.672686
|
f9a4f680b0d07eeb51492fe707dbb2cdf25367fc
| 8,281
|
py
|
Python
|
plot_helper.py
|
nadanai263/MCMC_emcee_intro
|
a0664ad39dff46bdb660b355abe7ec19b92134d7
|
[
"MIT"
] | 1
|
2019-03-01T08:05:43.000Z
|
2019-03-01T08:05:43.000Z
|
plot_helper.py
|
nadanai263/MCMC_emcee_intro
|
a0664ad39dff46bdb660b355abe7ec19b92134d7
|
[
"MIT"
] | null | null | null |
plot_helper.py
|
nadanai263/MCMC_emcee_intro
|
a0664ad39dff46bdb660b355abe7ec19b92134d7
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib.ticker import MaxNLocator
import cycler as cycler
def plot1(x,y,xlabel='x',ylabel='y',legend=False,title=False):
plt.close("all")
my_dpi=150
figure_options={'figsize':(8.27,5.83)} #figure size in inches. A4=11.7x8.3, A5=8.27x5.83
font_options={'size':'20','family':'sans-serif','sans-serif':'Arial'}
plt.rc('figure', **figure_options)
plt.rc('font', **font_options)
# get colormap
cmap=plt.cm.Set1
# build cycler with 9 equally spaced colors from that colormap, then supply the cycler to the rcParam
plt.rcParams["axes.prop_cycle"] = cycler.cycler('color', cmap(np.linspace(0,1,9)) )
f, axarr=plt.subplots()
axarr.plot(x,y,'-')
formatplot(axarr,xlabel,ylabel,legend,False,False,False,False,False,title)
plt.title(title)
plt.show()
def plot2(x,y,xobs,yobs,yerr,xlabel='x',ylabel='y',legend=False,title=False):
plt.close("all")
my_dpi=150
figure_options={'figsize':(8.27,5.83)} #figure size in inches. A4=11.7x8.3, A5=8.27x5.83
font_options={'size':'20','family':'sans-serif','sans-serif':'Arial'}
plt.rc('figure', **figure_options)
plt.rc('font', **font_options)
# get colormap
cmap=plt.cm.Set1
# build cycler with 9 equally spaced colors from that colormap, then supply the cycler to the rcParam
plt.rcParams["axes.prop_cycle"] = cycler.cycler('color', cmap(np.linspace(0,1,9)) )
f, axarr=plt.subplots()
axarr.plot(x,y,'-')
axarr.errorbar(xobs,yobs,yerr=yerr,fmt='o')
formatplot(axarr,xlabel,ylabel,legend,False,False,False,False,False,title)
plt.show()
def plot3(x1,y1,x2,y2,xlabel='x',ylabel='y',legend=False,title=False):
plt.close("all")
my_dpi=150
figure_options={'figsize':(18,5.83)} #figure size in inches. A4=11.7x8.3, A5=8.27x5.83
font_options={'size':'20','family':'sans-serif','sans-serif':'Arial'}
plt.rc('figure', **figure_options)
plt.rc('font', **font_options)
# get colormap
cmap=plt.cm.Set1
# build cycler with 9 equally spaced colors from that colormap, then supply the cycler to the rcParam
plt.rcParams["axes.prop_cycle"] = cycler.cycler('color', cmap(np.linspace(0,1,9)) )
f, axarr=plt.subplots(1,2)
axarr[0].plot(x1,y1,'-')
axarr[1].plot(x2,y2,'-')
formatplot(axarr[0],'$y_{max}$',ylabel,legend,False,False,False,False,False,title)
formatplot(axarr[1],'$K$',ylabel,legend,False,False,False,False,False,title)
plt.show()
def plot4(x1,y1,x2,y2,samples,xlabel='x',ylabel='y',legend=False,title=False):
plt.close("all")
my_dpi=150
figure_options={'figsize':(18,5.83)} #figure size in inches. A4=11.7x8.3, A5=8.27x5.83
font_options={'size':'20','family':'sans-serif','sans-serif':'Arial'}
plt.rc('figure', **figure_options)
plt.rc('font', **font_options)
# get colormap
cmap=plt.cm.Set1
# build cycler with 9 equally spaced colors from that colormap, then supply the cycler to the rcParam
plt.rcParams["axes.prop_cycle"] = cycler.cycler('color', cmap(np.linspace(0,1,9)) )
f, axarr=plt.subplots(1,2)
axarrt1=axarr[0].twinx()
axarrt2=axarr[1].twinx()
axarr[0].plot(x1,y1,'-')
axarrt1.hist(samples[:,0],bins=200,color='#0d76e8') #f55f4e red #0d76e8 blue
axarr[1].plot(x2,y2,'-')
axarrt2.hist(samples[:,1],bins=200,color='#0d76e8')
formatplot(axarr[0],'$y_{max}$',ylabel,legend,False,False,False,False,False,title)
formatplot(axarr[1],'$K$',False,legend,False,False,False,False,False,title)
plt.show()
def plot5(x,y,xobs,yobs,yerr,samples,xlabel='x',ylabel='y',legend=False,title=False):
plt.close("all")
my_dpi=150
figure_options={'figsize':(8.27,5.83)} #figure size in inches. A4=11.7x8.3, A5=8.27x5.83
font_options={'size':'20','family':'sans-serif','sans-serif':'Arial'}
plt.rc('figure', **figure_options)
plt.rc('font', **font_options)
# get colormap
cmap=plt.cm.Set1
# build cycler with 9 equally spaced colors from that colormap, then supply the cycler to the rcParam
plt.rcParams["axes.prop_cycle"] = cycler.cycler('color', cmap(np.linspace(0,1,9)) )
f, axarr=plt.subplots()
axarr.plot(x,y,'-')
axarr.errorbar(xobs,yobs,yerr=yerr,fmt='o')
xmod=np.linspace(0,10,50)
for ymax,K in samples[np.random.randint(len(samples), size=20)]:
ymod=ymax*xmod/(xmod+K)
axarr.plot(xmod,ymod,'k-',label='_nolegend_',alpha=0.1)
formatplot(axarr,xlabel,ylabel,legend,False,False,False,False,False,title)
plt.show()
def formatplot(ax,xlabel,ylabel,legend,xlim,ylim,logx,logy,logxy,title):
my_dpi=150
######### SET AXES LIMITS #########
if xlim!=False:
ax.set_xlim([0,35])
if ylim!=False:
ax.set_ylim([0.05,25])
######### SET TICK VALUES #########
ax.tick_params(axis='both',pad=10)
# ax.set_xticks([0,2e-5,4e-5,6e-5])
# ax.set_yticks([0,2,4,6,8])
######### SET TITLES AND LABELS #########
if title!=False:
ax.set_title(title)
if xlabel!=False:
ax.set_xlabel(xlabel, labelpad=12)
if ylabel!=False:
ax.set_ylabel(ylabel, labelpad=12)
######### SET LINE THICKNESSES #########
# ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter("%1.e"))
# ax.axhline(linewidth=2, color='k')
# ax.axvline(linewidth=2, color='k')
ax.spines['bottom'].set_linewidth(2)
ax.spines['top'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
ax.spines['right'].set_linewidth(2)
######### SET TICKS #########
if logx==True:
ax.set_xscale("log")
elif logy==True:
ax.set_yscale("log")
elif logxy==True:
ax.set_xscale("log")
ax.set_yscale("log")
else:
minorLocatorx=AutoMinorLocator(2) # Number of minor intervals per major interval
minorLocatory=AutoMinorLocator(2)
ax.xaxis.set_minor_locator(minorLocatorx)
ax.yaxis.set_minor_locator(minorLocatory)
ax.tick_params(which='major', width=2, length=8, pad=9, direction='in')
ax.tick_params(which='minor', width=2, length=4, pad=9, direction='in')
######### CALL LEGEND #########
if legend==True:
ax.legend(loc='best', fontsize=22,numpoints=1)
def plottraces(data,parameternames,parametertruths,nwalkers,niterations,save=1):
numberofplots=data.shape[1]
plt.close("all")
my_dpi=150
figure_options={'figsize':(11.7,8.3)} #figure size in inches. A4=11.7x8.3.
font_options={'size':'6','family':'sans-serif','sans-serif':'Arial'}
plt.rc('figure', **figure_options)
plt.rc('font', **font_options)
# fig=plt.figure(); ax=fig.add_subplot(1,1,1)
######### CALL PLOTS #########
if numberofplots>1:
f, axarr=plt.subplots(numberofplots)
for i in range(0,numberofplots):
for j in range(1,nwalkers+1):
axarr[i].plot(np.arange(niterations),data[niterations*j-niterations:niterations*j,i],'k-',lw=0.5)
if parametertruths!=[]:
axarr[i].axhline(parametertruths[i], color="#888888", lw=2)
formatplottrace(axarr[i],parameternames[i])
else:
f, axarr=plt.subplots()
for i in range(1,nwalkers+1):
axarr.plot(np.arange(niterations),data[niterations*i-niterations:niterations*i],'k-',lw=0.5)
if parametertruths!=[]:
axarr.axhline(parametertruths[0], color="#888888", lw=2)
formatplottrace(axarr,parameternames[0])
######### SAVE PLOT #########
if save==True:
print()
print('Saving file...')
print()
plt.savefig('plots/trace.png',dpi=my_dpi,bbox_inches='tight')
else:
plt.show()
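# Note on the expected ``data`` layout (inferred from the slicing above): rows
# are stacked walker by walker, so walker j (1-based) occupies rows
# [niterations*(j-1), niterations*j), and for multi-parameter chains column i
# holds parameter i.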
def formatplottrace(ax,parametername):
######### SET TICK VALUES #########
ax.tick_params(axis='both',pad=10)
# ax.set_xticks([0,2e-5,4e-5,6e-5])
# ax.set_yticks([0,2,4,6,8])
######### SET TITLES AND LABELS #########
#ax.set_title('Plot title')
# ax.set_xlabel('x', labelpad=12)
ax.set_ylabel(parametername, labelpad=12)
######### SET LINE THICKNESSES #########
# ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter("%1.e"))
# ax.axhline(linewidth=2, color='k')
# ax.axvline(linewidth=2, color='k')
ax.spines['bottom'].set_linewidth(1)
ax.spines['top'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.spines['right'].set_linewidth(1)
######### SET TICKS #########
minorLocatorx=AutoMinorLocator(2) # Number of minor intervals per major interval
minorLocatory=AutoMinorLocator(2)
ax.xaxis.set_minor_locator(minorLocatorx)
ax.yaxis.set_minor_locator(minorLocatory)
ax.tick_params(which='major', width=1, length=8, pad=9)
ax.tick_params(which='minor', width=1, length=4, pad=9)
######### CALL LEGEND #########
# ax.legend(loc='best', fontsize=22,numpoints=1)
| 28.653979
| 101
| 0.689168
|
a8954abba24f193f27a2cfc877c344130ea80b21
| 5,035
|
py
|
Python
|
plasmapy/plasma/sources/plasmablob.py
|
Quettle/PlasmaPy
|
9689c83b991832c32158cca8b3f94525b59bde18
|
[
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | 2
|
2020-09-16T08:53:45.000Z
|
2022-01-29T18:00:10.000Z
|
plasmapy/plasma/sources/plasmablob.py
|
RAJAGOPALAN-GANGADHARAN/PlasmaPy
|
6df9583cc47375687a07300c0aa11ba31634d770
|
[
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | null | null | null |
plasmapy/plasma/sources/plasmablob.py
|
RAJAGOPALAN-GANGADHARAN/PlasmaPy
|
6df9583cc47375687a07300c0aa11ba31634d770
|
[
"BSD-2-Clause",
"MIT",
"BSD-2-Clause-Patent",
"BSD-1-Clause",
"BSD-3-Clause"
] | null | null | null |
"""
Defines the core Plasma class used by PlasmaPy to represent plasma properties.
"""
__all__ = ["PlasmaBlob"]
import astropy.units as u
import warnings
from plasmapy.formulary.collisions import coupling_parameter
from plasmapy.formulary.dimensionless import quantum_theta
from plasmapy.formulary.parameters import _grab_charge
from plasmapy.particles import particle_mass
from plasmapy.plasma.plasma_base import GenericPlasma
from plasmapy.utils import code_repr
from plasmapy.utils.decorators import validate_quantities
from plasmapy.utils.exceptions import CouplingWarning
class PlasmaBlob(GenericPlasma):
"""
Class for describing and calculating plasma parameters without
spatial/temporal description.
"""
@validate_quantities(T_e=u.K, n_e=u.m ** -3)
def __init__(self, T_e, n_e, Z=None, particle="p"):
"""
Initialize plasma parameters.
The most basic description is composition (ion), temperature,
density, and ionization.
"""
self.T_e = T_e
self.n_e = n_e
self.particle = particle
self.Z = _grab_charge(particle, Z)
# extract mass from particle
self.ionMass = particle_mass(self.particle)
def __str__(self):
"""
Fetch regimes for easy printing.
Examples
--------
>>> print(PlasmaBlob(1e4*u.K, 1e20/u.m**3, particle='p'))
PlasmaBlob(T_e=10000.0*u.K, n_e=1e+20*u.m**-3, particle='p', Z=1)
Intermediate coupling regime: Gamma = 0.01250283...
Thermal kinetic energy dominant: Theta = 109690.5...
"""
return self.__repr__() + "\n" + "\n".join(self.regimes())
def __repr__(self):
"""
Return a string representation of this instance.
Returns
-------
str
Examples
--------
>>> from astropy import units as u
>>> PlasmaBlob(1e4*u.K, 1e20/u.m**3, particle='p')
PlasmaBlob(T_e=10000.0*u.K, n_e=1e+20*u.m**-3, particle='p', Z=1)
"""
argument_dict = {
"T_e": self.T_e,
"n_e": self.n_e,
"particle": self.particle,
"Z": self.Z,
}
return code_repr.call_string(PlasmaBlob, (), argument_dict)
@property
def electron_temperature(self):
return self.T_e
@property
def electron_density(self):
return self.n_e
@property
def ionization(self):
return self.Z
@property
def composition(self):
return self.particle
def regimes(self):
"""
Generate a comprehensive description of the plasma regimes
based on plasma properties and consequent plasma parameters.
"""
# getting dimensionless parameters
coupling = self.coupling()
quantum_theta = self.quantum_theta()
# determining regimes based off dimensionless parameters
# coupling
if coupling <= 0.01:
# weakly coupled
coupling_str = f"Weakly coupled regime: Gamma = {coupling}."
elif coupling >= 100:
# strongly coupled
coupling_str = f"Strongly coupled regime: Gamma = {coupling}."
else:
# intermediate regime
coupling_str = f"Intermediate coupling regime: Gamma = {coupling}."
# quantum_theta
if quantum_theta <= 0.01:
# Fermi energy dominant
quantum_theta_str = (
f"Fermi quantum energy dominant: Theta = {quantum_theta}"
)
elif quantum_theta >= 100:
# thermal kinetic energy dominant
quantum_theta_str = (
f"Thermal kinetic energy dominant: Theta = {quantum_theta}"
)
else:
# intermediate regime
quantum_theta_str = (
f"Both Fermi and thermal energy important: Theta = {quantum_theta}"
)
# summarizing and printing/returning regimes
aggregateStrs = [coupling_str, quantum_theta_str]
return aggregateStrs
def coupling(self):
"""
Ion-ion coupling parameter to determine if quantum/coupling effects
are important. This compares Coulomb potential energy to thermal
kinetic energy.
"""
couple = coupling_parameter(
self.T_e, self.n_e, (self.particle, self.particle), self.Z
)
if couple < 0.01:
warnings.warn(
f"Coupling parameter is {couple}, you might have strong coupling effects",
CouplingWarning,
)
return couple
def quantum_theta(self):
"""
Quantum theta parameter, which compares Fermi kinetic energy to
thermal kinetic energy to check if quantum effects are important.
"""
theta = quantum_theta(self.T_e, self.n_e)
return theta
@classmethod
def is_datasource_for(cls, **kwargs):
match = "T_e" in kwargs.keys() and "n_e" in kwargs.keys()
return match
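# Illustrative usage sketch (editorial note, not part of the original module);
# it mirrors the doctest above and assumes astropy units are imported as `u`:
#
#   blob = PlasmaBlob(1e4 * u.K, 1e20 * u.m ** -3, particle="p")
#   print(blob)           # repr plus the coupling / quantum-theta regime lines
#   blob.coupling()       # ion-ion coupling parameter Gamma
#   blob.quantum_theta()  # ratio of Fermi to thermal kinetic energy, Theta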
| 31.080247
| 90
| 0.606356
|
320fccff94a8fef884c43348112506dff6e93e45
| 2,614
|
py
|
Python
|
omniapp/screens/mainScreen/mainScreen.py
|
omniaura/omnisynth-gui
|
a14c6fdeff6e10d65cbbd46f3c9d6929fc6231a2
|
[
"Apache-2.0"
] | null | null | null |
omniapp/screens/mainScreen/mainScreen.py
|
omniaura/omnisynth-gui
|
a14c6fdeff6e10d65cbbd46f3c9d6929fc6231a2
|
[
"Apache-2.0"
] | 1
|
2022-01-04T20:32:26.000Z
|
2022-01-04T20:32:26.000Z
|
omniapp/screens/mainScreen/mainScreen.py
|
omniaura/omnisynth-gui
|
a14c6fdeff6e10d65cbbd46f3c9d6929fc6231a2
|
[
"Apache-2.0"
] | null | null | null |
# Defining all the screens for ScreenManager
from omniapp.constants import OMNISYNTH_PATH
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.screenmanager import Screen
from kivy.properties import BooleanProperty
from kivy.lang import Builder
from kivy.uix.widget import WidgetException
import os
import time
# The main, landing screen of the app
class MainScreen(Screen):
def on_pre_enter(self):
omni = self.manager.omni_instance
omni.numPatch = len(self.manager.patch_list)
patch_select_list_layout = self.ids['patch_select_list_layout']
# make sure patch index is initialized, default to 0
omni.patchIndex = omni.patchIndex or 0
# call omni#synth_sel on our current patch
self.__omni_select_patch()
# set slot text
self.__set_slot_text()
# add slots
# handle exception if slots have already been added
# and bail out with a noop
try:
patch_select_list_layout.add_widget(self.manager.slots[0])
patch_select_list_layout.add_widget(self.manager.slots[1])
patch_select_list_layout.add_widget(self.manager.slots[2])
except WidgetException:
pass
def handle_up_button_release(self):
if self.manager.omni_instance.patchIndex == 0:
return
self.manager.omni_instance.patchIndex -= 1
self.__omni_select_patch()
self.__set_slot_text()
def handle_down_button_release(self):
if self.manager.omni_instance.patchIndex == (self.manager.omni_instance.numPatch-1):
return
self.manager.omni_instance.patchIndex += 1
self.__omni_select_patch()
self.__set_slot_text()
def __get_slot_text(self, patch_index):
if patch_index >= 0 and patch_index < self.manager.omni_instance.numPatch:
return str(self.manager.patch_list[patch_index])
else:
return ''
def __set_slot_text(self):
self.manager.slots[0].text = self.__get_slot_text(
self.manager.omni_instance.patchIndex - 1)
self.manager.slots[1].text = self.__get_slot_text(
self.manager.omni_instance.patchIndex)
self.manager.slots[2].text = self.__get_slot_text(
self.manager.omni_instance.patchIndex + 1)
def __omni_select_patch(self):
self.manager.omni_instance.synth_sel(
self.manager.patch_list[self.manager.omni_instance.patchIndex], OMNISYNTH_PATH)
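# Editorial note (not part of the original file): the three slots behave as a
# sliding window around omni_instance.patchIndex: slot 0 shows the previous
# patch, slot 1 the current one, and slot 2 the next one. __get_slot_text
# returns '' for out-of-range indices, so the ends of the patch list simply
# render as empty slots.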
| 35.324324
| 93
| 0.672915
|
15fecb3563eadd04bbf44a444340fd01ba16267a
| 675
|
py
|
Python
|
pyopendds/Subscriber.py
|
iguessthislldo/pyopendds
|
edc8f698f24728d1faca20c8039024739d7c2972
|
[
"MIT"
] | 5
|
2018-11-04T21:16:29.000Z
|
2019-12-24T02:24:43.000Z
|
pyopendds/Subscriber.py
|
iguessthislldo/pyopendds
|
edc8f698f24728d1faca20c8039024739d7c2972
|
[
"MIT"
] | 9
|
2019-07-19T05:39:30.000Z
|
2020-02-06T02:39:58.000Z
|
pyopendds/Subscriber.py
|
iguessthislldo/pyopendds
|
edc8f698f24728d1faca20c8039024739d7c2972
|
[
"MIT"
] | 2
|
2019-07-18T07:22:37.000Z
|
2019-07-26T03:13:41.000Z
|
from __future__ import annotations
from typing import TYPE_CHECKING, List
from .DataReader import DataReader
from .Topic import Topic
if TYPE_CHECKING:
from .DomainParticipant import DomainParticipant
class Subscriber:
def __init__(self, participant: DomainParticipant, qos=None, listener=None):
participant.subscribers.append(self)
self.qos = qos
self.listener = listener
self.readers: List[DataReader] = []
from _pyopendds import create_subscriber
create_subscriber(self, participant)
def create_datareader(self, topic: Topic, qos=None, listener=None):
return DataReader(self, topic, qos, listener)
| 29.347826
| 80
| 0.733333
|
529709ad6dc4bc319b8cdf0356178a48a56a805d
| 735
|
py
|
Python
|
care/base/context_processors.py
|
tobast/care
|
03dd4a5095b5f7c53e53fc771a31b149876e4b50
|
[
"MIT"
] | 1
|
2021-11-06T12:59:07.000Z
|
2021-11-06T12:59:07.000Z
|
care/base/context_processors.py
|
tobast/care
|
03dd4a5095b5f7c53e53fc771a31b149876e4b50
|
[
"MIT"
] | null | null | null |
care/base/context_processors.py
|
tobast/care
|
03dd4a5095b5f7c53e53fc771a31b149876e4b50
|
[
"MIT"
] | null | null | null |
from care.userprofile.models import UserProfile
from care.groupaccountinvite.models import GroupAccountInvite
def base_context(request):
""" Add some basic context to every page """
context = {}
if request.user.is_authenticated:
user_profile = UserProfile.objects.get(user=request.user)
invites = GroupAccountInvite.objects.filter(
invitee=user_profile, isAccepted=False, isDeclined=False
)
context["user"] = request.user
context["userprofile"] = user_profile
context["hasInvites"] = invites.exists()
context["nInvites"] = invites.count()
context["displayname"] = user_profile.displayname
context["isLoggedin"] = True
return context
| 36.75
| 68
| 0.687075
|
f74f31c008764067d236e426a04117c1f847fea3
| 22,768
|
py
|
Python
|
tests/test_macsec.py
|
superchild/sonic-swss
|
0f069108215ec5cb456e87554309ee99febb302d
|
[
"Apache-2.0"
] | null | null | null |
tests/test_macsec.py
|
superchild/sonic-swss
|
0f069108215ec5cb456e87554309ee99febb302d
|
[
"Apache-2.0"
] | 1
|
2022-01-21T20:05:43.000Z
|
2022-01-25T19:17:40.000Z
|
tests/test_macsec.py
|
superchild/sonic-swss
|
0f069108215ec5cb456e87554309ee99febb302d
|
[
"Apache-2.0"
] | null | null | null |
from swsscommon import swsscommon
import conftest
import sys
import functools
import typing
import re
import time
def to_string(value):
if isinstance(value, bool):
return "true" if value else "false"
return str(value)
class Table(object):
def __init__(self, database: conftest.DVSDatabase, table_name: str):
self.db = database
self.table_name = table_name
def convert_key(self, key: str):
return key
def __setitem__(self, key: str, pairs: dict):
pairs_str = {}
for k, v in pairs.items():
pairs_str[to_string(k)] = to_string(v)
key = self.convert_key(key)
if self.__getitem__(key) is None:
self.db.create_entry(self.table_name, key, pairs_str)
else:
self.db.update_entry(self.table_name, key, pairs_str)
def __getitem__(self, key: str):
key = self.convert_key(key)
return self.db.get_entry(self.table_name, key)
def __delitem__(self, key: str):
key = self.convert_key(key)
self.db.delete_entry(self.table_name, key)
def wait(self, key: str):
key = self.convert_key(key)
# return True
return self.db.wait_for_entry(self.table_name, key)
def wait_delete(self, key: str):
key = self.convert_key(key)
# return True
return self.db.wait_for_deleted_entry(self.table_name, key)
class ProduceStateTable(object):
def __init__(self, database: conftest.DVSDatabase, table_name: str):
self.table = swsscommon.ProducerStateTable(
database.db_connection,
table_name)
def __setitem__(self, key: str, pairs: typing.Union[dict, list, tuple]):
pairs_str = []
if isinstance(pairs, dict):
pairs = pairs.items()
for k, v in pairs:
pairs_str.append((to_string(k), to_string(v)))
self.table.set(key, pairs_str)
def __delitem__(self, key: str):
self.table.delete(key)
class AppDBTable(ProduceStateTable):
SEPARATOR = ":"
def __init__(self, dvs: conftest.DockerVirtualSwitch, table_name: str):
super(AppDBTable, self).__init__(dvs.get_app_db(), table_name)
class StateDBTable(Table):
SEPARATOR = "|"
def __init__(self, dvs: conftest.DockerVirtualSwitch, table_name: str):
super(StateDBTable, self).__init__(dvs.get_state_db(), table_name)
def convert_key(self, key: str):
return key.translate(
str.maketrans(
AppDBTable.SEPARATOR,
StateDBTable.SEPARATOR))
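# Note added for clarity (not part of the original test): StateDBTable keys only
# differ from APPL_DB keys by their separator, so convert_key simply swaps every
# ":" for "|", e.g. a hypothetical key "Ethernet0:123456:0" becomes
# "Ethernet0|123456|0".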
def gen_sci(macsec_system_identifier: str, macsec_port_identifier: int) -> str:
macsec_system_identifier = macsec_system_identifier.translate(
str.maketrans("", "", ":.-"))
sci = "{}{}".format(
macsec_system_identifier,
str(macsec_port_identifier).zfill(4))
sci = int(sci, 16)
if sys.byteorder == "little":
sci = int.from_bytes(sci.to_bytes(8, 'big'), 'little', signed=False)
return str(sci)
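# Note added for clarity (not part of the original test): gen_sci builds the
# 64-bit Secure Channel Identifier by concatenating the MAC address (with
# separators stripped) and the zero-padded port id, e.g. "00-15-5D-78-FF-C1"
# with port 1 corresponds to hex 0x00155D78FFC10001; the value is byte-swapped
# on little-endian hosts and returned as a decimal string.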
def gen_sc_key(
separator: str,
port_name: str,
macsec_system_identifier: str,
macsec_port_identifier: int) -> str:
sci = gen_sci(macsec_system_identifier, macsec_port_identifier)
key = "{}{}{}".format(
port_name,
separator,
sci)
return key
def gen_sa_key(
separator: str,
port_name: str,
macsec_system_identifier: str,
macsec_port_identifier: int,
an: int):
sc_key = gen_sc_key(
separator,
port_name,
macsec_system_identifier,
macsec_port_identifier)
key = "{}{}{}".format(sc_key, separator, an)
return key
def macsec_sc(separator: str = AppDBTable.SEPARATOR):
def inner(func: typing.Callable) -> typing.Callable:
@functools.wraps(func)
def wrap_func(
self,
port_name: str,
macsec_system_identifier: str,
macsec_port_identifier: int,
*args,
**kwargs) -> typing.Any:
key = gen_sc_key(
separator,
port_name,
macsec_system_identifier,
macsec_port_identifier)
return func(self, key, *args, **kwargs)
return wrap_func
return inner
def macsec_sa(separator: str = AppDBTable.SEPARATOR):
def inner(func: typing.Callable) -> typing.Callable:
@functools.wraps(func)
def wrap_func(
self,
port_name: str,
macsec_system_identifier: str,
macsec_port_identifier: int,
an: int,
*args,
**kwargs) -> typing.Any:
key = gen_sa_key(
separator,
port_name,
macsec_system_identifier,
macsec_port_identifier,
an)
return func(self, key, *args, **kwargs)
return wrap_func
return inner
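# Editorial note (not part of the original test): the macsec_sc / macsec_sa
# decorators flatten the (port, MAC, port-id[, an]) arguments of the mock's
# methods into a single table key, so a call like (hypothetical values)
# wpa.create_receive_sc("Ethernet0", "00-15-5D-78-FF-C2", 1) writes to the key
# "Ethernet0:<sci>" of the ingress SC table.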
class WPASupplicantMock(object):
def __init__(self, dvs: conftest.DockerVirtualSwitch):
self.dvs = dvs
self.app_port_table = AppDBTable(
self.dvs, swsscommon.APP_MACSEC_PORT_TABLE_NAME)
self.app_receive_sc_table = AppDBTable(
self.dvs, swsscommon.APP_MACSEC_INGRESS_SC_TABLE_NAME)
self.app_transmit_sc_table = AppDBTable(
self.dvs, swsscommon.APP_MACSEC_EGRESS_SC_TABLE_NAME)
self.app_receive_sa_table = AppDBTable(
self.dvs, swsscommon.APP_MACSEC_INGRESS_SA_TABLE_NAME)
self.app_transmit_sa_table = AppDBTable(
self.dvs, swsscommon.APP_MACSEC_EGRESS_SA_TABLE_NAME)
self.state_port_table = StateDBTable(
self.dvs, swsscommon.STATE_MACSEC_PORT_TABLE_NAME)
self.state_receive_sc_table = StateDBTable(
self.dvs, swsscommon.STATE_MACSEC_INGRESS_SC_TABLE_NAME)
self.state_transmit_sc_table = StateDBTable(
self.dvs, swsscommon.STATE_MACSEC_EGRESS_SC_TABLE_NAME)
self.state_receive_sa_table = StateDBTable(
self.dvs, swsscommon.STATE_MACSEC_INGRESS_SA_TABLE_NAME)
self.state_transmit_sa_table = StateDBTable(
self.dvs, swsscommon.STATE_MACSEC_EGRESS_SA_TABLE_NAME)
def init_macsec_port(self, port_name: str):
self.app_port_table[port_name] = {
"enable": False,
"cipher_suite": "GCM-AES-128",
}
self.state_port_table.wait(port_name)
def deinit_macsec_port(self, port_name: str):
del self.app_port_table[port_name]
self.state_port_table.wait_delete(port_name)
def config_macsec_port(
self,
port_name: str,
config: typing.Dict[str, typing.Any]):
self.app_port_table[port_name] = config
def set_macsec_control(self, port_name: str, enable: bool):
        self.app_port_table[port_name] = {"enable": enable}
@macsec_sc()
def create_receive_sc(self, sci: str):
self.app_receive_sc_table[sci] = {"NULL": "NULL"}
self.state_receive_sc_table.wait(sci)
@macsec_sc()
def delete_receive_sc(self, sci: str):
del self.app_receive_sc_table[sci]
self.state_receive_sc_table.wait_delete(sci)
@macsec_sc()
def create_transmit_sc(self, sci: str):
self.app_transmit_sc_table[sci] = {"encoding_an": 0}
self.state_transmit_sc_table.wait(sci)
@macsec_sc()
def delete_transmit_sc(self, sci: str):
del self.app_transmit_sc_table[sci]
self.state_transmit_sc_table.wait_delete(sci)
def check_valid_sa_parameter(
self,
sak: str,
auth_key: str,
lowest_acceptable_pn: int,
ssci: int,
salt: str) -> bool:
# Check SAK is hex string
int(sak, 16)
        assert len(sak) == 32 or len(sak) == 64, \
            "Wrong length {} sak {}".format(len(sak), sak)
# Check auth_key is valid
int(auth_key, 16)
        assert len(auth_key) == 32, \
            "Wrong length {} auth_key {}".format(len(auth_key), auth_key)
# Check lowest acceptable packet number is valid
        assert lowest_acceptable_pn > 0, \
            "Wrong packet number {}".format(lowest_acceptable_pn)
return True
@macsec_sa()
def create_receive_sa(
self,
sai: str,
sak: str,
auth_key: str,
lowest_acceptable_pn: int,
ssci: int,
salt: str):
        assert self.check_valid_sa_parameter(
            sak,
            auth_key,
            lowest_acceptable_pn,
            ssci,
            salt), "Wrong parameter to MACsec receive SA"
self.app_receive_sa_table[sai] = {
"active": False, "sak": sak, "auth_key": auth_key,
"lowest_acceptable_pn": lowest_acceptable_pn,
"ssci": ssci, "salt": salt}
@macsec_sa()
def delete_receive_sa(self, sai: str):
del self.app_receive_sa_table[sai]
self.state_receive_sa_table.wait_delete(sai)
@macsec_sa()
def set_enable_receive_sa(self, sai: str, enable: bool):
self.app_receive_sa_table[sai] = {"active": enable}
if enable:
self.state_receive_sa_table.wait(sai)
@macsec_sa()
def create_transmit_sa(
self,
sai: str,
sak: str,
auth_key: str,
init_pn: int,
ssci: int,
salt: str):
        assert self.check_valid_sa_parameter(
            sak,
            auth_key,
            init_pn,
            ssci,
            salt), "Wrong parameter to MACsec transmit SA"
self.app_transmit_sa_table[sai] = {
"sak": sak, "auth_key": auth_key,
"next_pn": init_pn, "ssci": ssci, "salt": salt}
@macsec_sa()
def delete_transmit_sa(self, sai: str):
del self.app_transmit_sa_table[sai]
self.state_transmit_sa_table.wait_delete(sai)
@macsec_sc()
def set_enable_transmit_sa(self, sci: str, an: int, enable: bool):
if enable:
self.app_transmit_sc_table[sci] = {"encoding_an": an}
assert(
self.state_transmit_sa_table.wait(
"{}{}{}".format(
sci,
StateDBTable.SEPARATOR,
an)))
class MACsecInspector(object):
def __init__(self, dvs: conftest.DockerVirtualSwitch):
self.dvs = dvs
def __load_macsec_info(self, port_name: str) -> (bool, str):
return self.dvs.runcmd("ip macsec show {}".format(port_name))
def get_macsec_port(self, port_name: str) -> str:
exitcode, info = self.__load_macsec_info(port_name)
if exitcode != 0 or not info:
return ""
print(info)
return info
def get_macsec_sc(
self,
port_name: str,
macsec_system_identifier: str,
macsec_port_identifier: int) -> str:
info = self.get_macsec_port(port_name)
if not info:
return ""
macsec_system_identifier = macsec_system_identifier.translate(
str.maketrans("", "", ":.-"))
sci = "{}{}".format(
macsec_system_identifier,
str(macsec_port_identifier).zfill(4))
sc_pattern = r"(TXSC|RXSC):\s*{}[ \w,]+\n?(?:\s*\d:[,\w ]+\n?)*".format(
sci)
info = re.search(sc_pattern, info, re.IGNORECASE)
if not info:
return ""
print(info.group(0))
return info.group(0)
def get_macsec_sa(
self,
port_name: str,
macsec_system_identifier: str,
macsec_port_identifier: str,
an: int) -> str:
info = self.get_macsec_sc(
port_name,
macsec_system_identifier,
macsec_port_identifier)
if not info:
return ""
sa_pattern = r"\s*{}:\s*PN\s*\d+[,\w ]+\n?".format(an)
info = re.search(sa_pattern, info, re.IGNORECASE)
if not info:
return ""
print(info.group(0))
return info.group(0)
class TestMACsec(object):
def init_macsec(
self,
wpa: WPASupplicantMock,
port_name: str,
local_mac_address: str,
macsec_port_identifier: int):
wpa.init_macsec_port(port_name)
wpa.config_macsec_port(port_name, {"enable_protect": True})
wpa.config_macsec_port(port_name, {"enable_encrypt": True})
wpa.config_macsec_port(
port_name,
{
"enable_replay_protect": True,
"replay_window": 0
})
wpa.set_macsec_control(port_name, False)
wpa.create_transmit_sc(
port_name,
local_mac_address,
macsec_port_identifier)
def establish_macsec(
self,
wpa: WPASupplicantMock,
port_name: str,
local_mac_address: str,
peer_mac_address: str,
macsec_port_identifier: int,
an: int,
sak: str,
packet_number: int,
auth_key: str,
ssci: int,
salt: str):
wpa.create_receive_sc(
port_name,
peer_mac_address,
macsec_port_identifier)
wpa.create_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
an,
sak,
auth_key,
packet_number,
ssci,
salt)
wpa.create_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
an,
sak,
auth_key,
packet_number,
ssci,
salt)
wpa.set_enable_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
an,
True)
wpa.set_macsec_control(port_name, True)
wpa.set_enable_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
an,
True)
def rekey_macsec(
self,
wpa: WPASupplicantMock,
port_name: str,
local_mac_address: str,
peer_mac_address: str,
macsec_port_identifier: int,
an: int,
last_an: int,
sak: str,
packet_number: int,
auth_key: str,
ssci: int,
salt: str):
wpa.create_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
an,
sak,
auth_key,
packet_number,
ssci,
salt)
wpa.create_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
an,
sak,
auth_key,
packet_number,
ssci,
salt)
wpa.set_enable_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
an,
True)
wpa.set_macsec_control(port_name, True)
wpa.set_enable_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
an,
True)
wpa.set_enable_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
last_an,
False)
wpa.delete_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
last_an)
wpa.set_enable_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
last_an,
False)
wpa.delete_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
last_an)
def deinit_macsec(
self,
wpa: WPASupplicantMock,
inspector: MACsecInspector,
port_name: str,
macsec_port: str,
local_mac_address: str,
peer_mac_address: str,
macsec_port_identifier: int,
last_an: int):
wpa.set_enable_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
last_an,
False)
wpa.delete_receive_sa(
port_name,
peer_mac_address,
macsec_port_identifier,
last_an)
assert(
not inspector.get_macsec_sa(
macsec_port,
peer_mac_address,
macsec_port_identifier,
last_an))
wpa.delete_receive_sc(
port_name,
peer_mac_address,
macsec_port_identifier)
assert(
not inspector.get_macsec_sc(
macsec_port,
peer_mac_address,
macsec_port_identifier))
wpa.set_enable_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
last_an,
False)
wpa.delete_transmit_sa(
port_name,
local_mac_address,
macsec_port_identifier,
last_an)
assert(
not inspector.get_macsec_sa(
macsec_port,
local_mac_address,
macsec_port_identifier,
last_an))
wpa.delete_transmit_sc(
port_name,
local_mac_address,
macsec_port_identifier)
assert(
not inspector.get_macsec_sc(
macsec_port,
local_mac_address,
macsec_port_identifier))
wpa.deinit_macsec_port(port_name)
def test_macsec_term_orch(self, dvs: conftest.DockerVirtualSwitch, testlog):
port_name = "Ethernet0"
local_mac_address = "00-15-5D-78-FF-C1"
peer_mac_address = "00-15-5D-78-FF-C2"
macsec_port_identifier = 1
macsec_port = "macsec_eth1"
sak = "0" * 32
auth_key = "0" * 32
packet_number = 1
ssci = 1
salt = "0" * 24
wpa = WPASupplicantMock(dvs)
inspector = MACsecInspector(dvs)
self.init_macsec(
wpa,
port_name,
local_mac_address,
macsec_port_identifier)
self.establish_macsec(
wpa,
port_name,
local_mac_address,
peer_mac_address,
macsec_port_identifier,
0,
sak,
packet_number,
auth_key,
ssci,
salt)
assert(inspector.get_macsec_port(macsec_port))
assert(
inspector.get_macsec_sc(
macsec_port,
local_mac_address,
macsec_port_identifier))
assert(
inspector.get_macsec_sc(
macsec_port,
peer_mac_address,
macsec_port_identifier))
assert(
inspector.get_macsec_sa(
macsec_port,
local_mac_address,
macsec_port_identifier,
0))
assert(
inspector.get_macsec_sa(
macsec_port,
peer_mac_address,
macsec_port_identifier,
0))
self.rekey_macsec(
wpa,
port_name,
local_mac_address,
peer_mac_address,
macsec_port_identifier,
1,
0,
sak,
packet_number,
auth_key,
ssci,
salt)
assert(
inspector.get_macsec_sa(
macsec_port,
local_mac_address,
macsec_port_identifier,
1))
assert(
inspector.get_macsec_sa(
macsec_port,
peer_mac_address,
macsec_port_identifier,
1))
assert(
not inspector.get_macsec_sa(
macsec_port,
local_mac_address,
macsec_port_identifier,
0))
assert(
not inspector.get_macsec_sa(
macsec_port,
peer_mac_address,
macsec_port_identifier,
0))
# Exit MACsec port
self.deinit_macsec(
wpa,
inspector,
port_name,
macsec_port,
local_mac_address,
peer_mac_address,
macsec_port_identifier,
1)
assert(not inspector.get_macsec_port(macsec_port))
def test_macsec_attribute_change(self, dvs: conftest.DockerVirtualSwitch, testlog):
port_name = "Ethernet0"
local_mac_address = "00-15-5D-78-FF-C1"
peer_mac_address = "00-15-5D-78-FF-C2"
macsec_port_identifier = 1
macsec_port = "macsec_eth1"
sak = "0" * 32
auth_key = "0" * 32
packet_number = 1
ssci = 1
salt = "0" * 24
wpa = WPASupplicantMock(dvs)
inspector = MACsecInspector(dvs)
self.init_macsec(
wpa,
port_name,
local_mac_address,
macsec_port_identifier)
wpa.set_macsec_control(port_name, True)
wpa.config_macsec_port(port_name, {"enable_encrypt": False})
wpa.config_macsec_port(port_name, {"cipher_suite": "GCM-AES-256"})
self.establish_macsec(
wpa,
port_name,
local_mac_address,
peer_mac_address,
macsec_port_identifier,
0,
sak,
packet_number,
auth_key,
ssci,
salt)
macsec_info = inspector.get_macsec_port(macsec_port)
assert("encrypt off" in macsec_info)
assert("GCM-AES-256" in macsec_info)
self.deinit_macsec(
wpa,
inspector,
port_name,
macsec_port,
local_mac_address,
peer_mac_address,
macsec_port_identifier,
0)
# Add a dummy always-pass test at the end as a workaround
# for an issue where, if Flaky fails on the final test, it invokes module
# tear-down before retrying
def test_nonflaky_dummy():
pass
| 30.116402
| 87
| 0.549895
|
b3eea5a37c4caa9d2daf7f971c1ca72c6887b18d
| 3,583
|
py
|
Python
|
geomesa-spark/geomesa_pyspark/src/test/python/test_types.py
|
khobbs-ccri/geomesa
|
09b5c503c55be11b343f3a6e559b9a3cfb7f76d7
|
[
"Apache-2.0"
] | 1,197
|
2015-01-08T18:50:52.000Z
|
2022-03-31T04:10:26.000Z
|
geomesa-spark/geomesa_pyspark/src/test/python/test_types.py
|
khobbs-ccri/geomesa
|
09b5c503c55be11b343f3a6e559b9a3cfb7f76d7
|
[
"Apache-2.0"
] | 1,307
|
2015-01-05T21:25:30.000Z
|
2022-03-30T17:00:49.000Z
|
geomesa-spark/geomesa_pyspark/src/test/python/test_types.py
|
khobbs-ccri/geomesa
|
09b5c503c55be11b343f3a6e559b9a3cfb7f76d7
|
[
"Apache-2.0"
] | 436
|
2015-01-26T15:44:18.000Z
|
2022-03-14T18:50:04.000Z
|
from geomesa_pyspark.types import *
from shapely.wkt import loads
from unittest import TestCase, main
class PointUDTTest(TestCase):
udt = Point.__UDT__
def test_udt(self):
self.assertIsInstance(self.udt, PointUDT)
def test_udt_roundtrip(self):
wkt = "POINT (30 10)"
g = loads(wkt)
self.assertEqual(self.udt.deserialize(self.udt.serialize(g)), g)
class LineStringUDTTest(TestCase):
udt = LineString.__UDT__
def test_udt(self):
self.assertIsInstance(self.udt, LineStringUDT)
def test_udt_roundtrip(self):
wkt = "LINESTRING (30 10, 10 30, 40 40)"
g = loads(wkt)
self.assertEqual(self.udt.deserialize(self.udt.serialize(g)), g)
class PolygonUDTTest(TestCase):
udt = Polygon.__UDT__
def test_udt(self):
self.assertIsInstance(self.udt, PolygonUDT)
def test_roundtrip(self):
wkt = "POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))"
g = loads(wkt)
self.assertEqual(self.udt.deserialize(self.udt.serialize(g)), g)
def test_roundtrip2(self):
wkt = "POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10), (20 30, 35 35, 30 20, 20 30))"
g = loads(wkt)
self.assertEqual(self.udt.deserialize(self.udt.serialize(g)), g)
class MultiPointUDTTest(TestCase):
udt = MultiPoint.__UDT__
def test_udt(self):
self.assertIsInstance(self.udt, MultiPointUDT)
def test_udt_roundtrip(self):
wkt = "MULTIPOINT ((10 40), (40 30), (20 20), (30 10))"
g = loads(wkt)
self.assertEqual(self.udt.deserialize(self.udt.serialize(g)), g)
def test_udt_roundtrip2(self):
wkt = "MULTIPOINT (10 40, 40 30, 20 20, 30 10)"
g = loads(wkt)
self.assertEqual(self.udt.deserialize(self.udt.serialize(g)), g)
class MultiLineStringUDTTest(TestCase):
udt = MultiLineString.__UDT__
def test_udt(self):
self.assertIsInstance(self.udt, MultiLineStringUDT)
def test_udt_roundtrip(self):
wkt = "MULTILINESTRING ((10 10, 20 20, 10 40), (40 40, 30 30, 40 20, 30 10))"
g = loads(wkt)
self.assertEqual(self.udt.deserialize(self.udt.serialize(g)), g)
class MultiPolygonUDTTest(TestCase):
udt = MultiPolygon.__UDT__
def test_udt(self):
self.assertIsInstance(self.udt, MultiPolygonUDT)
def test_udt_roundtrip(self):
wkt = "MULTIPOLYGON (((30 20, 45 40, 10 40, 30 20)), ((15 5, 40 10, 10 20, 5 10, 15 5)))"
g = loads(wkt)
self.assertEqual(self.udt.deserialize(self.udt.serialize(g)), g)
def test_udt_roundtrip2(self):
wkt = """MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)),
((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),
(30 20, 20 15, 20 25, 30 20)))"""
g = loads(wkt)
self.assertEqual(self.udt.deserialize(self.udt.serialize(g)), g)
class MultiGeometryCollectionUDTTest(TestCase):
udt = GeometryCollection.__UDT__
def test_udt(self):
self.assertIsInstance(self.udt, GeometryCollectionUDT)
def test_udt_roundtrip(self):
wkt = "GEOMETRYCOLLECTION(POINT(4 6),LINESTRING(4 6,7 10))"
g = loads(wkt)
self.assertEqual(self.udt.deserialize(self.udt.serialize(g)), g)
class GeometryUDTTest(TestCase):
udt = BaseGeometry.__UDT__
def test_udt(self):
self.assertIsInstance(self.udt, GeometryUDT)
    def test_udt_roundtrip(self):
wkt = "POINT (0 0)"
g = loads(wkt)
self.assertEqual(self.udt.deserialize(self.udt.serialize(g)), g)
if __name__ == '__main__':
main()
| 27.992188
| 97
| 0.639129
|
398b3adc68edb142da84c3453af098d6c61526a8
| 7,195
|
py
|
Python
|
service_matcher_app/service_matcher/utils.py
|
City-of-Turku/PaohServiceMatchEngine
|
39f580003f9c0d10708acd93644f796f764ec2f0
|
[
"MIT"
] | null | null | null |
service_matcher_app/service_matcher/utils.py
|
City-of-Turku/PaohServiceMatchEngine
|
39f580003f9c0d10708acd93644f796f764ec2f0
|
[
"MIT"
] | null | null | null |
service_matcher_app/service_matcher/utils.py
|
City-of-Turku/PaohServiceMatchEngine
|
39f580003f9c0d10708acd93644f796f764ec2f0
|
[
"MIT"
] | null | null | null |
import logging
import re
import numpy as np
from typing import Optional
from .models import *
PROVINCE_CODES = ["02"]
class ServiceMatcherUtils():
"""
    A class for various auxiliary functions related to service matching
"""
def __init__(self) -> None:
pass
def _filter_service_data_by_municipality(self, service: Service, municipality_ids: list) -> list:
languages = ['en', 'fi', 'sv']
service_filtered = service.copy(deep=True)
for language in languages:
service_areas = service_filtered.areas[language]
if len(service_areas) > 0:
area_municipalities = [area for area in service_areas if area.get(
'type') == 'Municipality' and area.get('code') in municipality_ids]
area_provinces = [area for area in service_areas if (area.get(
'type') == 'Province' or area.get('type') == 'Region') and area.get('code') in PROVINCE_CODES]
other_area_elements = [area for area in service_areas if area.get(
'type') != 'Province' and area.get('type') != 'Municipality' and area.get('type') != 'Region']
filtered_areas = area_municipalities + area_provinces + other_area_elements
service_filtered.areas[language] = filtered_areas
return(service_filtered)
def _filter_service_channel_data_by_municipality(self, channel: ServiceChannel, municipality_ids: list) -> list:
languages = ['en', 'fi', 'sv']
channel_filtered = channel.copy(deep=True)
for language in languages:
channel_areas = channel_filtered.areas[language]
channel_addresses = channel_filtered.addresses[language]
if len(channel_areas) > 0:
area_municipalities = [area for area in channel_areas if area.get(
'type') == 'Municipality' and area.get('code') in municipality_ids]
area_provinces = [area for area in channel_areas if (area.get(
'type') == 'Province' or area.get('type') == 'Region') and area.get('code') in PROVINCE_CODES]
other_area_elements = [area for area in channel_areas if area.get(
'type') != 'Province' and area.get('type') != 'Municipality' and area.get('type') != 'Region']
filtered_areas = area_municipalities + area_provinces + other_area_elements
channel_filtered.areas[language] = filtered_areas
if len(channel_addresses) > 0:
filtered_addresses = [add for add in channel_addresses if add.get(
'municipalityCode') is None or add.get('municipalityCode') in municipality_ids]
channel_filtered.addresses[language] = filtered_addresses
return(channel_filtered)
def _nest_form_events(self, events: list) -> list:
nested_events = []
form_on = False
start_events = 0
for event in events:
event = event.copy()
is_form_starting_event = ('parse_data' in event.keys() and re.search('service_search$', event['parse_data']['intent']['name']) is not None) or (event.get('name') is not None and re.search('_form$', event.get('name')) is not None)
is_form_ending_event = (event.get('event') == 'active_loop' and event.get('name') is None) or event.get('event') == 'action_execution_rejected'
if is_form_starting_event and not form_on:
form_on = True
start_events = start_events + 1
event['form_events'] = []
nested_events.append(event)
elif form_on:
nested_events[-1]['form_events'].append(event)
if is_form_ending_event:
form_on = False
elif not form_on:
nested_events.append(event)
else:
raise Exception("Something went wrong, event has not type")
return(nested_events)
def _get_municipality_ids_by_names(self, municipality_names: list, all_municipalities: list) -> list:
matching_municipality_ids = []
if municipality_names is not None and len(municipality_names) > 0:
input_names_lower = [mun_name.lower()
for mun_name in municipality_names]
for municipality in all_municipalities:
municipality_real_names_lower = [
mun_name_l.lower() for mun_name_l in list(municipality.get('name').values())]
is_match = any(
[True for input_name_lower in input_names_lower if input_name_lower in municipality_real_names_lower])
if is_match:
matching_municipality_ids.append(municipality.get('id'))
# If no info, don't limit by municipalities
if len(matching_municipality_ids) == 0:
for municipality in all_municipalities:
matching_municipality_ids.append(municipality.get('id'))
return(matching_municipality_ids)
def _check_life_events(self, life_events: list, all_life_events: list) -> list:
filtered_life_events = [
le for le in life_events if le in all_life_events]
# If no info, don't limit by life events
if len(filtered_life_events) == 0:
filtered_life_events = all_life_events
return(filtered_life_events)
def _check_service_classes(self, service_classes: list, all_service_classes: list) -> list:
all_service_classes = [sc.code for sc in all_service_classes]
filtered_service_classes = [
sc for sc in service_classes if sc in all_service_classes]
# If no info, don't limit by service classes
if len(filtered_service_classes) == 0:
filtered_service_classes = all_service_classes
return(filtered_service_classes)
def _swap(self, list_1: list, i: int, j: int) -> list:
list_1[i], list_1[j] = list_1[j], list_1[i]
return(list_1)
def _get_service_classes_from_intent_name(self, intent: Optional[str]) -> list:
if intent is not None:
service_class_regex = re.compile(
'p\d{1,2}(?:[.]\d{1,2}){0,1}', re.IGNORECASE)
found_service_classes = re.findall(service_class_regex, intent)
if len(found_service_classes) > 0:
found_service_classes = list(set([found_service_class.upper(
) for found_service_class in found_service_classes]))
return (found_service_classes)
else:
return([])
def _get_life_events_from_intent_name(self, intent: Optional[str]) -> list:
if intent is not None:
life_event_regex = re.compile(
'ke\d{1,2}(?:[.]\d{1,2}){0,1}', re.IGNORECASE)
found_life_events = re.findall(life_event_regex, intent)
if len(found_life_events) > 0:
found_life_events = list(set([found_life_event.upper()
for found_life_event in found_life_events]))
return (found_life_events)
else:
return([])
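# Editorial note (not part of the original file): the two regexes above extract
# service-class ("P...") and life-event ("KE...") codes from intent names; for
# a hypothetical intent such as "p10.2_ke2_service_search" they would return
# ["P10.2"] and ["KE2"] respectively.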
| 49.965278
| 241
| 0.620014
|
b2cf74795375349ffe6dbe785252fe83e6b3d14e
| 10,087
|
py
|
Python
|
ilm/distill_classic.py
|
asappresearch/neural-ilm
|
fd7e09960525391f4084a5753429deabd7ff00aa
|
[
"MIT"
] | null | null | null |
ilm/distill_classic.py
|
asappresearch/neural-ilm
|
fd7e09960525391f4084a5753429deabd7ff00aa
|
[
"MIT"
] | null | null | null |
ilm/distill_classic.py
|
asappresearch/neural-ilm
|
fd7e09960525391f4084a5753429deabd7ff00aa
|
[
"MIT"
] | 2
|
2021-02-25T04:42:14.000Z
|
2021-02-25T04:43:06.000Z
|
"""
run a classic distillation, from deep model to shallow model
forked from ilm_distill.py, 5 apr 2019
"""
import torch
from torch import nn, optim, autograd
import torch.nn.functional as F
import numpy as np
import argparse
import math
from os.path import expanduser as expand
from torchvision import datasets, transforms
from ulfs import tensor_utils
# Dataset = datasets.MNIST
Dataset = datasets.CIFAR10
input_units = 3072
import torch
import torch.nn as nn
def kl(p, q, eps=1e-6):
kl = - (p * (((q + eps) / (p + eps)).log())).sum()
return kl
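# Note added for clarity (not part of the original script): kl() above is the
# discrete KL divergence sum_i p_i * log(p_i / q_i), smoothed by eps. It is used
# in distill() below to pull the student's softmax towards the temperature-scaled
# teacher distribution, alongside the usual hard-label cross-entropy:
#
#   loss = soft_alpha * KL(softmax(teacher_logits / tau), softmax(student_logits))
#          + (1 - soft_alpha) * CE(student_logits, targets)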
class DeepModel2(nn.Module):
def __init__(self, size=28):
super().__init__()
last_channels = 3
# size = 28
layers = []
# 28
layers.append(nn.Conv2d(3, 16, kernel_size=3))
layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
# 14
layers.append(nn.Conv2d(16, 32, kernel_size=3))
layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
# 7
self.conv = nn.Sequential(*layers)
self.h1 = nn.Linear(6 * 6 * 32, 50)
self.h2 = nn.Linear(50, 10)
self.drop = nn.Dropout(0.5)
def forward(self, x):
N = x.size(0)
x = self.conv(x)
x = x.view(N, 6 * 6 * 32)
x = self.h1(x)
x = self.drop(x)
x = self.h2(x)
return x
class DeepModel(nn.Module):
def __init__(self, input_units):
super().__init__()
self.h1 = nn.Linear(input_units, 1200)
self.h2 = nn.Linear(1200, 1200)
self.h3 = nn.Linear(1200, 10)
self.drop = nn.Dropout(0.5)
def forward(self, x):
N = x.size(0)
x = x.view(N, -1)
x = self.h1(x)
x = F.relu(x)
x = self.h2(x)
x = F.relu(x)
x = self.drop(x)
x = self.h3(x)
return x
class Model(nn.Module):
def __init__(self, input_units=input_units):
super().__init__()
print('input_units', input_units)
self.h1 = nn.Linear(input_units, 800)
self.h2 = nn.Linear(800, 800)
self.h3 = nn.Linear(800, 10)
self.drop = nn.Dropout(0.5)
def forward(self, x):
N = x.size(0)
x = x.view(N, -1)
x = self.h1(x)
x = F.relu(x)
x = self.h2(x)
x = F.relu(x)
x = self.drop(x)
x = self.h3(x)
return x
def run(enable_cuda, lr, num_epochs, clip_grad_norm, teacher_model, batch_size, load_cached, soft_alpha, soft_alpha_decay, tau):
def distill(teacher_logits_all, train_inputs, train_targets, student, soft_alpha):
N = teacher_logits_all.size(0)
print('N', N)
perm_idx = torch.from_numpy(np.random.choice(N, N, replace=False))
print('=========== distill =================')
opt = optim.Adam(lr=lr, params=student.parameters())
epoch = 0
crit = nn.CrossEntropyLoss()
for epoch in range(num_epochs):
# while True:
epoch_loss = 0
epoch_cnt = 0
student.train()
# for b, (in_batch, tgt_batch) in enumerate(batches):
num_batches = (N + batch_size - 1) // batch_size
for b in range(num_batches):
batch_idx = perm_idx[b * batch_size:(b + 1) * batch_size]
in_batch = train_inputs[batch_idx]
tgt_batch = train_targets[batch_idx]
if enable_cuda:
in_batch = in_batch.cuda()
tgt_batch = tgt_batch.cuda()
logits_student = student(in_batch)
if teacher_logits_all is not None:
teacher_logits_batch = teacher_logits_all[batch_idx]
if enable_cuda:
teacher_logits_batch = teacher_logits_batch.cuda()
loss_soft = kl(p=tensor_utils.softmax(teacher_logits_batch, tau=tau).detach(), q=F.softmax(logits_student, dim=-1))
loss_hard = crit(logits_student, tgt_batch)
if soft_alpha == 0:
loss = loss_hard
else:
assert teacher_logits_all is not None
loss = soft_alpha * loss_soft + (1 - soft_alpha) * loss_hard
opt.zero_grad()
loss.backward()
if clip_grad_norm is not None:
nn.utils.clip_grad_norm_(student.parameters(), clip_grad_norm)
opt.step()
epoch_loss += loss.item()
epoch_cnt += in_batch.size(0)
soft_alpha = max(0, soft_alpha - soft_alpha_decay)
eval_acc_sum = 0
eval_cnt = 0
student.eval()
for in_batch, tgt_batch in test:
if enable_cuda:
in_batch = in_batch.cuda()
tgt_batch = tgt_batch.cuda()
with autograd.no_grad():
logits = student(in_batch)
_, argmax = logits.max(dim=-1)
correct = (argmax == tgt_batch)
acc = correct.float().mean().item()
eval_cnt += in_batch.size(0)
eval_acc_sum += acc * in_batch.size(0)
eval_acc = eval_acc_sum / eval_cnt
eval_err = eval_cnt - int(eval_acc_sum)
print('e=%i' % epoch, 'l=%.3f' % epoch_loss, 'eval acc %.3f' % eval_acc, 'eval err %i' % eval_err, 'soft_alpha %.3f' % soft_alpha)
# epoch += 1
# if epoch >= num_epochs:
print('finished epochs')
return eval_acc
# break
kwargs = {'num_workers': 1, 'pin_memory': True}
train = torch.utils.data.DataLoader(
Dataset(expand('~/data'), train=True, download=True,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=64, shuffle=False, **kwargs)
test = torch.utils.data.DataLoader(
Dataset(expand('~/data'), train=False,
transform=transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))])),
batch_size=64, shuffle=True, **kwargs)
train_inputs = []
train_targets = []
for inputs, targets in train:
train_inputs.append(inputs)
train_targets.append(targets)
train_inputs = torch.cat(train_inputs)
train_targets = torch.cat(train_targets)
train = None
Teacher = globals()[teacher_model]
# if load_model:
# with open(load_model, 'rb') as f:
# state = torch.load(f)
# import pretrain_model
# Teacher = getattr(pretrain_model, state['meta']['teacher_model'])
# student = Teacher()
# enable_cuda = state['meta']['enable_cuda']
# if enable_cuda:
# student = student.cuda()
# student.load_state_dict(state['model_state'])
# print('loaded model')
teacher_logits_all = None
student = None
if load_cached:
state = torch.load(load_cached)
teacher_logits_all = state['cached']
print('loaded teacher_logits_all', teacher_logits_all.size())
else:
        raise ValueError('--load-cached must point to cached teacher logits; no cache was provided')
# else:
# student = Teacher()
# if enable_cuda:
# student = student.cuda()
# print('created new model, of class', teacher_model)
# distill(teacher=None, student=student, soft_alpha=0)
# print('trained teacher')
distill_epoch = 0
final_distill_eval_by_distill_epoch = []
# final_eval_by_distill_epoch = []
while True:
print('distill_epoch %i' % distill_epoch)
teacher = student
student = Model(input_units=input_units)
if enable_cuda:
student = student.cuda()
# batches = list(train)
if teacher is not None and teacher_logits_all is None:
print('running teacher...')
teacher_logits_all = []
N = train_inputs.size(0)
            num_batches = (N + batch_size - 1) // batch_size
            for b in range(num_batches):
                # iterate in order so that teacher_logits_all[i] stays aligned with train_inputs[i]
                in_batch = train_inputs[b * batch_size:(b + 1) * batch_size]
if enable_cuda:
in_batch = in_batch.cuda()
with autograd.no_grad():
logits_teacher = teacher(in_batch)
                teacher_logits_all.append(logits_teacher.detach().cpu())
            teacher_logits_all = torch.cat(teacher_logits_all)
            print('finished running teacher')
distill_eval = distill(teacher_logits_all=teacher_logits_all, train_inputs=train_inputs, train_targets=train_targets, student=student, soft_alpha=soft_alpha)
final_distill_eval_by_distill_epoch.append(distill_eval)
# final_eval = train_model(student)
# final_eval_by_distill_epoch.append(final_eval)
for i, eval in enumerate(final_distill_eval_by_distill_epoch):
print(' ', i, eval)
teacher_logits_all = None
distill_epoch += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--enable-cuda', action='store_true')
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--tau', type=float, default=2)
parser.add_argument('--num-epochs', type=int, default=10)
parser.add_argument('--clip-grad-norm', type=float)
parser.add_argument('--batch-size', type=int, default=64)
# parser.add_argument('--load-model', type=str, help='overrides --teacher-model')
parser.add_argument('--load-cached', type=str, help='overrides --teacher-model')
parser.add_argument('--teacher-model', type=str, default='DeepModel')
parser.add_argument('--soft-alpha', type=float, default=0.5, help='how much weight to give the soft targets')
parser.add_argument('--soft-alpha-decay', type=float, default=0, help='how much to decrease soft-alpha each epoch')
args = parser.parse_args()
run(**args.__dict__)
| 36.284173
| 165
| 0.575592
|
19582785128ccb31df4c48c417ca54a649adc73e
| 5,864
|
py
|
Python
|
pysync.py
|
plinecom/pysync
|
c0b5c5ccb38b214be2b5cf5d4686eabdbadd8553
|
[
"MIT"
] | 1
|
2020-05-04T02:22:15.000Z
|
2020-05-04T02:22:15.000Z
|
pysync.py
|
plinecom/pysync
|
c0b5c5ccb38b214be2b5cf5d4686eabdbadd8553
|
[
"MIT"
] | null | null | null |
pysync.py
|
plinecom/pysync
|
c0b5c5ccb38b214be2b5cf5d4686eabdbadd8553
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import os
import os.path
import time
import shutil
def listup(srcRoot, destRoot, oldRoot, relativePath):
srcPath = os.path.join(srcRoot, relativePath)
destPath = os.path.join(destRoot, relativePath)
oldPath = os.path.join(oldRoot, relativePath)
# System::String ^ srcPath = System::IO::Path::Combine(srcRoot, relativePath);
# array < System::String ^ > ^ srcDirItems = System::IO::Directory::GetDirectories(srcPath);
items = os.listdir(srcPath)
dir_list=[]
symlink_list=[]
file_list=[]
for item in items:
item_path = os.path.join(srcPath, item)
if os.path.isdir(item_path):
dir_list.append(item)
elif os.path.islink(item_path):
symlink_list.append(item)
elif os.path.isfile(item_path):
file_list.append(item)
# print dir_list
# print file_list
for directory in dir_list:
src_dir = os.path.join(srcPath, directory)
dest_dir = os.path.join(destPath, directory)
print src_dir
print u"->"+dest_dir
# exception?
os.makedirs(dest_dir)
listup(srcRoot, destRoot, oldRoot, os.path.join(relativePath, directory))
for file_item in file_list:
hardlinked = False
src_file = os.path.join(srcPath, file_item)
dest_file = os.path.join(destPath, file_item)
old_file = os.path.join(oldPath, file_item)
print src_file
print u"->" + dest_file
if os.path.exists(old_file):
old_file_size = os.path.getsize(old_file)
src_file_size = os.path.getsize(src_file)
            # the old Dest copy has (almost) the same date and the same size
if old_file_size == src_file_size:
old_file_last_write_time = time.gmtime(os.path.getmtime(old_file))
src_file_last_write_time = time.gmtime(os.path.getmtime(src_file))
# print old_file_last_write_time
# print src_file_last_write_time
if old_file_last_write_time.tm_year == src_file_last_write_time.tm_year \
and old_file_last_write_time.tm_mon == src_file_last_write_time.tm_mon \
and old_file_last_write_time.tm_mday == src_file_last_write_time.tm_mday \
and old_file_last_write_time.tm_hour == src_file_last_write_time.tm_hour \
and old_file_last_write_time.tm_min == src_file_last_write_time.tm_min \
and old_file_last_write_time.tm_sec == src_file_last_write_time.tm_sec:
success = True
try:
os.link(old_file, dest_file)
except IOError:
success = False
if success:
print "HLinked"
hardlinked = True
if not hardlinked:
shutil.copy2(src_file, dest_file)
"""
array < System::String ^ > ^ srcFileItems = System::IO::Directory::GetFiles(srcPath);
for (int i = 0; i < srcFileItems->Length; i + +){
System::String ^ srcItem = srcFileItems[i];
System::String ^ filename = System::IO::Path::GetFileName(srcItem);
System::String ^ destItem = System::IO::Path::Combine(destRoot, relativePath, filename);
System::String ^ oldItem = System::IO::Path::Combine(oldRoot, relativePath, filename);
        // for each file, check whether it can be hard-linked; if not, fall back to copying
bool hardlinked = false;
if (System: :IO::File::Exists(oldItem)){
if (System: : IO::File::Exists(oldItem)){
            // the old Dest copy has (almost) the same date and the same size
System::IO::FileInfo ^ oldfi = gcnew
System::IO::FileInfo(oldItem);
System::IO::FileInfo ^ srcfi = gcnew
System::IO::FileInfo(srcItem);
if (oldfi->Length == srcfi->Length){
if (oldfi->LastWriteTime.Year == srcfi->LastWriteTime.Year
& & oldfi->LastWriteTime.Month == srcfi->LastWriteTime.Month
& & oldfi->LastWriteTime.Day == srcfi->LastWriteTime.Day
& & oldfi->LastWriteTime.Hour == srcfi->LastWriteTime.Hour
& & oldfi->LastWriteTime.Minute == srcfi->LastWriteTime.Minute
& & oldfi->LastWriteTime.Second == srcfi->LastWriteTime.Secon)
{
ATL::CString
atlOldItem(oldItem);
ATL::CString
atlDestItem(destItem);
BOOL success =::CreateHardLink(atlDestItem, atlOldItem, NULL);
if (success){
hardlinked = true;
// System::Console::WriteLine(L"HLinked");
}
else{
System::Console::WriteLine(L"Error:HLink");
System::Console::WriteLine(destItem);
}
}
}
}
}
if (!hardlinked){
// System::Console::WriteLine(L"Copy");
try{
System::IO::File::Copy(srcItem, destItem, true);
}catch(System::Exception ^ e){
System::Console::WriteLine(L"An error occurred: '{0}'", e);
}
}
}
}
"""
if __name__ == "__main__":
    srcRoot = sys.argv[1]  # backup source
    oldRoot = sys.argv[2]  # previous backup (used as hard-link reference)
    destRoot = sys.argv[3]  # backup destination
listup(srcRoot, destRoot, oldRoot, u"")
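# Editorial usage note (not part of the original script; paths are illustrative):
#
#   python2 pysync.py /data/current /backups/yesterday /backups/today
#
# Files that are unchanged relative to the previous backup are hard-linked into
# the new destination instead of being copied, similar in spirit to
# rsync --link-dest.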
| 29.32
| 100
| 0.527115
|
b8a3551a0c50ea5a4a86670c980f7278187316b2
| 554
|
py
|
Python
|
orb_simulator/lexer/regex_ast/__init__.py
|
dmguezjaviersnet/IA-Sim-Comp-Project
|
8165b9546efc45f98091a3774e2dae4f45942048
|
[
"MIT"
] | 1
|
2022-01-19T22:49:09.000Z
|
2022-01-19T22:49:09.000Z
|
orb_simulator/lexer/regex_ast/__init__.py
|
dmguezjaviersnet/IA-Sim-Comp-Project
|
8165b9546efc45f98091a3774e2dae4f45942048
|
[
"MIT"
] | 15
|
2021-11-10T14:25:02.000Z
|
2022-02-12T19:17:11.000Z
|
orb_simulator/lexer/regex_ast/__init__.py
|
dmguezjaviersnet/IA-Sim-Comp-Project
|
8165b9546efc45f98091a3774e2dae4f45942048
|
[
"MIT"
] | null | null | null |
from lexer.regex_ast.regex_node import Node
from lexer.regex_ast.regex_atomic_node import AtomicNode
from lexer.regex_ast.regex_binary_node import BinaryNode
from lexer.regex_ast.regex_concat_node import ConcatNode
from lexer.regex_ast.regex_closure_node import ClosureNode
from lexer.regex_ast.regex_epsilon_node import EpsilonNode
from lexer.regex_ast.regex_range_node import RangeNode
from lexer.regex_ast.regex_symbol_node import SymbolNode
from lexer.regex_ast.regex_unary_node import UnaryNode
from lexer.regex_ast.regex_union_node import UnionNode
| 55.4
| 58
| 0.893502
|
e336bc89d334aa97eb29ace99e033e12f6422129
| 1,742
|
py
|
Python
|
src/figures/multi_instrument_analysis.py
|
LauraOlivera/gammapy-v1.0-paper
|
212b87975575347e0249746c9e5a490e1bc549a5
|
[
"MIT"
] | 1
|
2022-02-14T22:42:34.000Z
|
2022-02-14T22:42:34.000Z
|
src/figures/multi_instrument_analysis.py
|
LauraOlivera/gammapy-v1.0-paper
|
212b87975575347e0249746c9e5a490e1bc549a5
|
[
"MIT"
] | 24
|
2022-02-07T15:04:27.000Z
|
2022-03-31T20:12:56.000Z
|
src/figures/multi_instrument_analysis.py
|
LauraOlivera/gammapy-v1.0-paper
|
212b87975575347e0249746c9e5a490e1bc549a5
|
[
"MIT"
] | 10
|
2022-01-27T20:22:15.000Z
|
2022-03-08T17:17:18.000Z
|
import config
import astropy.units as u
from gammapy.estimators import FluxPoints
from gammapy.modeling.models import Models
import matplotlib.pyplot as plt
sed_x_label = r"$E\,/\,{\rm TeV}$"
sed_y_label = (
r"$E^2\,{\rm d}\phi/{\rm d}\phi\,/\,({\rm erg}\,{\rm cm}^{-2}\,{\rm s}^{-1})$"
)
figsize = config.FigureSizeAA(aspect_ratio=1.618, width_aa="intermediate")
fig = plt.figure(figsize=figsize.inch)
ax = fig.add_axes([0.1, 0.1, 0.9, 0.9])
# load the flux points and plot them
fermi_flux_points = FluxPoints.read(
"../data/multi-instrument/datasets/flux_points/crab_fermi_flux_points.fits"
)
magic_flux_points = FluxPoints.read(
"../data/multi-instrument/datasets/flux_points/crab_magic_flux_points.fits"
)
hawc_flux_points = FluxPoints.read(
"../data/multi-instrument/input/hawc/HAWC19_flux_points.fits"
)
# load the best-fit model
models = Models.read("../data/multi-instrument/results/crab_multi_instrument_fit.yaml")
crab_lp = models["Crab Nebula"].spectral_model
plot_kwargs = {
"energy_bounds": [0.01, 300] * u.TeV,
"sed_type": "e2dnde",
"yunits": u.Unit("erg cm-2 s-1"),
"xunits": u.TeV,
}
crab_lp.plot(ax=ax, ls="-", lw=1.5, color="crimson", label="joint fit", **plot_kwargs)
crab_lp.plot_error(ax=ax, facecolor="crimson", alpha=0.4, **plot_kwargs)
fermi_flux_points.plot(ax=ax, sed_type="e2dnde", color="k", label="Fermi-LAT")
magic_flux_points.plot(ax=ax, sed_type="e2dnde", color="dodgerblue", label="MAGIC")
hawc_flux_points.plot(ax=ax, sed_type="e2dnde", color="goldenrod", label="HAWC")
ax.set_xlim(plot_kwargs["energy_bounds"])
ax.set_xlabel(sed_x_label)
ax.set_ylabel(sed_y_label)
ax.legend()
fig.savefig("multi_instrument_analysis.pdf")
fig.savefig("multi_instrument_analysis.png")
| 34.156863
| 87
| 0.726177
|
0a8b6b139ab38b2c90c026e58ebfa8a15b6c6fb7
| 1,844
|
py
|
Python
|
src/179.largest-number/179.largest-number.py
|
AnestLarry/LeetCodeAnswer
|
ca06a9cabe72879812f4b41d68ec882139974d84
|
[
"MIT"
] | null | null | null |
src/179.largest-number/179.largest-number.py
|
AnestLarry/LeetCodeAnswer
|
ca06a9cabe72879812f4b41d68ec882139974d84
|
[
"MIT"
] | null | null | null |
src/179.largest-number/179.largest-number.py
|
AnestLarry/LeetCodeAnswer
|
ca06a9cabe72879812f4b41d68ec882139974d84
|
[
"MIT"
] | null | null | null |
#
# @lc app=leetcode id=179 lang=python3
#
# [179] Largest Number
#
# Given a list of non negative integers, arrange them such that they form the largest number.
# Example 1:
# Input: [10,2]
# Output: "210"
# Example 2:
# Input: [3,30,34,5,9]
# Output: "9534330"
# Note: The result may be very large, so you need to return a string instead of an integer.
import functools
class Solution:
def largestNumber1(self, nums: List[int]) -> str:
# Accepted
# 222/222 cases passed (40 ms)
# Your runtime beats 93.96 % of python3 submissions
# Your memory usage beats 20 % of python3 submissions (13.7 MB)
if not nums or nums.count(0) == len(nums):
return "0"
lists = []
numsl = len(nums)
for i in range(numsl):
nums_i_str = str(nums[i])
if lists:
inserted = False
for j in range(len(lists)):
if lists[j]+nums_i_str < nums_i_str+lists[j]:
lists.insert(j, nums_i_str)
inserted = True
break
if not inserted:
lists.append(nums_i_str)
else:
lists.append(nums_i_str)
return "".join(lists)
def largestNumber(self, nums: List[int]) -> str:
# Accepted
# 222/222 cases passed (40 ms)
# Your runtime beats 93.96 % of python3 submissions
# Your memory usage beats 20 % of python3 submissions (13.7 MB)
if not any(nums):
return "0"
return "".join(
sorted(
map(str, nums),
key=functools.cmp_to_key(
lambda n1, n2: -1 if n1 + n2 > n2 +
n1 else (1 if n1+n2 < n2+n1 else 0)
)
)
)
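    # Worked example (editorial note, not part of the original solution): for
    # nums = [3, 30] the comparator compares "3" + "30" = "330" with
    # "30" + "3" = "303"; since "330" > "303", "3" is placed first and the
    # result is "330" rather than "303".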
| 30.229508
| 93
| 0.517896
|
2ab3c0e970125048471e9cfab721c1292b4aa723
| 185,657
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_webapp_commands.py
|
GaneshaThirumurthi/azure-cli
|
72850c4a1ca0f2d36ad6dce319bd22e59ad387e1
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_webapp_commands.py
|
GaneshaThirumurthi/azure-cli
|
72850c4a1ca0f2d36ad6dce319bd22e59ad387e1
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/appservice/tests/latest/test_webapp_commands.py
|
GaneshaThirumurthi/azure-cli
|
72850c4a1ca0f2d36ad6dce319bd22e59ad387e1
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import json
import unittest
import mock
import os
import time
import tempfile
import requests
import datetime
from azure_devtools.scenario_tests import AllowLargeResponse, record_only
from azure.cli.testsdk import (ScenarioTest, LocalContextScenarioTest, LiveScenarioTest, ResourceGroupPreparer,
StorageAccountPreparer, JMESPathCheck, live_only)
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
# pylint: disable=line-too-long
# In the future, for any reasons the repository get removed, the source code is under "sample-repo-for-deployment-test"
# you can use to rebuild the repository
TEST_REPO_URL = 'https://github.com/yugangw-msft/azure-site-test.git'
WINDOWS_ASP_LOCATION_WEBAPP = 'japanwest'
WINDOWS_ASP_LOCATION_FUNCTIONAPP = 'francecentral'
LINUX_ASP_LOCATION_WEBAPP = 'eastus2'
LINUX_ASP_LOCATION_FUNCTIONAPP = 'ukwest'
class WebappBasicE2ETest(ScenarioTest):
@AllowLargeResponse()
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_e2e(self, resource_group):
webapp_name = self.create_random_name(prefix='webapp-e2e', length=24)
plan = self.create_random_name(prefix='webapp-e2e-plan', length=24)
self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
self.cmd('appservice plan list -g {}'.format(resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', plan),
JMESPathCheck('[0].perSiteScaling', False)
])
# test idempotency
self.cmd(
'appservice plan create -g {} -n {} --per-site-scaling'.format(resource_group, plan))
self.cmd('appservice plan list -g {}'.format(resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', plan),
JMESPathCheck('[0].sku.tier', 'Basic'),
JMESPathCheck('[0].sku.name', 'B1'),
JMESPathCheck('[0].perSiteScaling', True)
])
self.cmd('appservice plan list -g {}'.format(resource_group), checks=[
JMESPathCheck("length([?name=='{}' && resourceGroup=='{}'])".format(
plan, resource_group), 1)
])
self.cmd('appservice plan show -g {} -n {}'.format(resource_group, plan), checks=[
JMESPathCheck('name', plan)
])
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan), checks=[
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', webapp_name),
JMESPathCheck('hostNames[0]', webapp_name + '.azurewebsites.net')
])
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group,
webapp_name, plan)) # test idempotency
self.cmd('webapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', webapp_name),
JMESPathCheck('[0].hostNames[0]', webapp_name +
'.azurewebsites.net')
])
self.cmd('webapp show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('name', webapp_name),
JMESPathCheck('hostNames[0]', webapp_name + '.azurewebsites.net')
])
result = self.cmd('webapp deployment source config-local-git -g {} -n {}'.format(
resource_group, webapp_name)).get_output_in_json()
self.assertTrue(result['url'].endswith(webapp_name + '.git'))
self.cmd('webapp deployment source show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck(
'repoUrl', 'https://{}.scm.azurewebsites.net'.format(webapp_name))
])
# turn on diagnostics
test_cmd = ('webapp log config -g {} -n {} --level verbose'.format(resource_group, webapp_name) + ' '
'--application-logging filesystem --detailed-error-messages true --failed-request-tracing true --web-server-logging filesystem')
self.cmd(test_cmd)
self.cmd('webapp log show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('detailedErrorMessages.enabled', True),
JMESPathCheck('failedRequestsTracing.enabled', True)
])
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('detailedErrorLoggingEnabled', True),
JMESPathCheck('httpLoggingEnabled', True),
JMESPathCheck('scmType', 'LocalGit'),
JMESPathCheck('requestTracingEnabled', True)
# TODO: contact webapp team for where to retrieve 'level'
])
# show publish profile info
result = self.cmd('webapp deployment list-publishing-profiles -g {} -n {}'.format(
resource_group, webapp_name)).get_output_in_json()
self.assertTrue(result[1]['publishUrl'].startswith('ftp://'))
self.cmd('webapp stop -g {} -n {}'.format(resource_group, webapp_name))
self.cmd('webapp show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('state', 'Stopped'),
JMESPathCheck('name', webapp_name)
])
self.cmd('webapp start -g {} -n {}'.format(resource_group, webapp_name))
self.cmd('webapp show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', webapp_name)
])
# show publishing credentials
result = self.cmd('webapp deployment list-publishing-credentials -g {} -n {}'.format(
resource_group, webapp_name)).get_output_in_json()
self.assertTrue('scm' in result['scmUri'])
# verify httpsOnly is false
self.cmd('webapp show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('httpsOnly', False),
])
        # verify creating a non-node app using --runtime
self.cmd(
'webapp create -g {} -n {} --plan {} -r "php|7.3"'.format(resource_group, webapp_name, plan))
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('phpVersion', '7.3')
])
def test_webapp_runtimes(self):
self.cmd('webapp list-runtimes')
class WebappQuickCreateTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_win_webapp_quick_create(self, resource_group):
webapp_name = self.create_random_name(prefix='webapp-quick', length=24)
plan = self.create_random_name(prefix='plan-quick', length=24)
self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
r = self.cmd('webapp create -g {} -n {} --plan {} --deployment-local-git'.format(
resource_group, webapp_name, plan)).get_output_in_json()
self.assertTrue(r['ftpPublishingUrl'].startswith('ftp://'))
self.cmd('webapp config appsettings list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('[0].name', 'WEBSITE_NODE_DEFAULT_VERSION'),
JMESPathCheck('[0].value', '10.14'),
])
@ResourceGroupPreparer(name_prefix="clitest", random_name_length=24, location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_win_webapp_quick_create_runtime(self, resource_group):
webapp_name = self.create_random_name(prefix='webapp-quick', length=24)
webapp_name_2 = self.create_random_name(prefix='webapp-quick', length=24)
plan = self.create_random_name(prefix='plan-quick', length=24)
self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
r = self.cmd('webapp create -g {} -n {} --plan {} --deployment-local-git -r "node|10.14"'.format(
resource_group, webapp_name, plan)).get_output_in_json()
self.assertTrue(r['ftpPublishingUrl'].startswith('ftp://'))
self.cmd('webapp config appsettings list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('[0].name', 'WEBSITE_NODE_DEFAULT_VERSION'),
JMESPathCheck('[0].value', '10.14'),
])
r = self.cmd('webapp create -g {} -n {} --plan {} --deployment-local-git -r "DOTNETCORE|3.1"'.format(
resource_group, webapp_name_2, plan)).get_output_in_json()
self.assertTrue(r['ftpPublishingUrl'].startswith('ftp://'))
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_win_webapp_quick_create_cd(self, resource_group):
webapp_name = self.create_random_name(prefix='webapp-quick-cd', length=24)
plan = self.create_random_name(prefix='plan-quick', length=24)
self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} --deployment-source-url {} -r "node|10.14"'.format(
resource_group, webapp_name, plan, TEST_REPO_URL))
        # 30 seconds should be enough for the deployment to finish (skipped under playback mode)
time.sleep(30)
r = requests.get('http://{}.azurewebsites.net'.format(webapp_name))
# verify the web page
self.assertTrue('Hello world' in str(r.content))
@ResourceGroupPreparer(location='canadacentral')
def test_linux_webapp_quick_create(self, resource_group):
webapp_name = self.create_random_name(
prefix='webapp-quick-linux', length=24)
plan = self.create_random_name(prefix='plan-quick-linux', length=24)
self.cmd(
'appservice plan create -g {} -n {} --is-linux'.format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} -i patle/ruby-hello'.format(
resource_group, webapp_name, plan))
r = requests.get(
'http://{}.azurewebsites.net'.format(webapp_name), timeout=240)
# verify the web page
self.assertTrue('Ruby on Rails in Web Apps on Linux' in str(r.content))
# verify app settings
self.cmd('webapp config appsettings list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('[0].name', 'WEBSITES_ENABLE_APP_SERVICE_STORAGE'),
JMESPathCheck('[0].value', 'false'),
])
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_linux_webapp_multicontainer_create(self, resource_group):
webapp_name = self.create_random_name(
prefix='webapp-linux-multi', length=24)
plan = self.create_random_name(prefix='plan-linux-multi', length=24)
config_file = os.path.join(TEST_DIR, 'sample-compose.yml')
self.cmd(
'appservice plan create -g {} -n {} --is-linux'.format(resource_group, plan))
self.cmd("webapp create -g {} -n {} --plan {} --multicontainer-config-file \"{}\" "
"--multicontainer-config-type COMPOSE".format(resource_group, webapp_name, plan, config_file))
self.cmd("webapp show -g {} -n {}".format(resource_group, webapp_name))\
.assert_with_checks([JMESPathCheck('kind', "app,linux,container")])
r = requests.get('http://{}.azurewebsites.net'.format(webapp_name), timeout=400)
self.assertTrue('Hello World! I have been' in str(r.content))
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_linux_webapp_quick_create_cd(self, resource_group):
webapp_name = self.create_random_name(
prefix='webapp-linux-cd', length=24)
plan = 'plan-quick-linux-cd'
self.cmd(
'appservice plan create -g {} -n {} --is-linux'.format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} -u {} -r "node|10.14"'.format(resource_group, webapp_name,
plan, TEST_REPO_URL))
        # 45 seconds should be enough for the deployment to finish (skipped under playback mode)
time.sleep(45)
r = requests.get(
'http://{}.azurewebsites.net'.format(webapp_name), timeout=500)
# verify the web page
if 'Hello world' not in str(r.content):
            # dump out more info for diagnosis
self.fail(
"'Hello world' is not found in the web page. We get instead:" + str(r.content))
@ResourceGroupPreparer(parameter_name='resource_group', parameter_name_for_location='resource_group_location', location=WINDOWS_ASP_LOCATION_WEBAPP)
@ResourceGroupPreparer(parameter_name='resource_group2', parameter_name_for_location='resource_group_location2', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_create_in_different_group(self, resource_group, resource_group_location, resource_group2, resource_group_location2):
plan = 'planInOneRG'
self.cmd('group create -n {} -l {}'.format(resource_group2,
resource_group_location))
plan_id = self.cmd('appservice plan create -g {} -n {}'.format(
resource_group, plan)).get_output_in_json()['id']
self.cmd('webapp create -g {} -n webInOtherRG --plan {}'.format(resource_group2, plan_id), checks=[
JMESPathCheck('name', 'webInOtherRG')
])
@AllowLargeResponse()
@ResourceGroupPreparer(parameter_name="resource_group_one", name_prefix="clitest", random_name_length=24, location=WINDOWS_ASP_LOCATION_WEBAPP)
@ResourceGroupPreparer(parameter_name="resource_group_two", name_prefix="clitest", random_name_length=24, location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_create_names_are_substrings(self, resource_group_one, resource_group_two):
webapp_name_one = "test-webapp-name-on"
webapp_name_two = "test-webapp-name-one"
webapp_name_three = "test-webapp-nam"
plan_name_one = "webapp-plan-one"
plan_name_two = "webapp-plan-two"
plan_id_one = self.cmd('appservice plan create -g {} -n {}'.format(
resource_group_one, plan_name_one)).get_output_in_json()['id']
plan_id_two = self.cmd('appservice plan create -g {} -n {}'.format(
resource_group_two, plan_name_two)).get_output_in_json()['id']
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group_one, webapp_name_one, plan_id_one), checks=[
JMESPathCheck('name', webapp_name_one)
])
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group_two, webapp_name_two, plan_id_two), checks=[
JMESPathCheck('name', webapp_name_two)
])
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group_one, webapp_name_three, plan_id_one), checks=[
JMESPathCheck('name', webapp_name_three)
])
        # Re-running webapp create to make sure there are no mix-ups with existing apps whose names are substrings of each other.
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group_one, webapp_name_one, plan_id_one), checks=[
JMESPathCheck('name', webapp_name_one)
])
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group_two, webapp_name_two, plan_id_two), checks=[
JMESPathCheck('name', webapp_name_two)
])
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group_one, webapp_name_three, plan_id_one), checks=[
JMESPathCheck('name', webapp_name_three)
])
class BackupWithName(ScenarioTest):
@AllowLargeResponse()
@ResourceGroupPreparer(parameter_name='resource_group', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_backup_with_name(self, resource_group):
plan = self.create_random_name(prefix='plan-backup', length=24)
self.cmd('appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan))
webapp = self.create_random_name(prefix='backup-webapp', length=24)
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group, webapp, plan))
storage_Account = self.create_random_name(prefix='backup', length=24)
self.cmd('storage account create -n {} -g {} --location {}'.format(storage_Account, resource_group, WINDOWS_ASP_LOCATION_WEBAPP))
container = self.create_random_name(prefix='backupcontainer', length=24)
self.cmd('storage container create --account-name {} --name {}'.format(storage_Account, container))
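        # build a writable (rwdl) container SAS URL, valid for roughly 27 hours, that the backup service can upload to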
expirydate = (datetime.datetime.now() + datetime.timedelta(days=1, hours=3)).strftime("\"%Y-%m-%dT%H:%MZ\"")
sastoken = self.cmd('storage container generate-sas --account-name {} --name {} --expiry {} --permissions rwdl'.format(storage_Account, container, expirydate))
sasurl = '\"https://{}.blob.core.windows.net/{}?{}\"'.format(storage_Account, container, sastoken)
backup_name = self.create_random_name(prefix='backup-name', length=24)
self.cmd('webapp config backup create -g {} --webapp-name {} --backup-name {} --container-url {}'.format(resource_group, webapp, backup_name, sasurl), checks=[
JMESPathCheck('backupItemName', backup_name)
])
# The test framework is not able to handle binary file formats, hence only run live
class AppServiceLogTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_download_win_web_log(self, resource_group):
import zipfile
webapp_name = self.create_random_name(
prefix='webapp-win-log', length=24)
plan = self.create_random_name(prefix='win-log', length=24)
self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} --deployment-source-url {} -r "node|10.14"'.format(
resource_group, webapp_name, plan, TEST_REPO_URL))
        # 30 seconds should be enough for the deployment to finish (skipped under playback mode)
time.sleep(30)
# sanity check the traces
_, log_file = tempfile.mkstemp()
log_dir = log_file + '-dir'
self.cmd('webapp log download -g {} -n {} --log-file "{}"'.format(
resource_group, webapp_name, log_file))
zip_ref = zipfile.ZipFile(log_file, 'r')
zip_ref.extractall(log_dir)
self.assertTrue(os.path.isdir(os.path.join(
log_dir, 'LogFiles', 'kudu', 'trace')))
class AppServicePlanScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_retain_plan(self, resource_group):
webapp_name = self.create_random_name('web', 24)
plan = self.create_random_name('web-plan', 24)
self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
self.cmd('webapp delete -g {} -n {} --keep-dns-registration --keep-empty-plan --keep-metrics'.format(resource_group, webapp_name))
self.cmd('appservice plan list -g {}'.format(resource_group), checks=[
JMESPathCheck('[0].name', plan)
])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_auto_delete_plan(self, resource_group):
webapp_name = self.create_random_name('web-del-test', 24)
plan = self.create_random_name('web-del-plan', 24)
self.cmd(
'appservice plan create -g {} -n {} -l {}'.format(resource_group, plan, WINDOWS_ASP_LOCATION_WEBAPP))
self.cmd('appservice plan update -g {} -n {} --sku S1'.format(resource_group, plan),
checks=[JMESPathCheck('name', plan),
JMESPathCheck('sku.tier', 'Standard'),
JMESPathCheck('sku.name', 'S1')])
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
self.cmd('webapp delete -g {} -n {}'.format(resource_group, webapp_name))
        # test that an empty service plan is automatically deleted.
self.cmd('appservice plan list -g {}'.format(resource_group),
checks=[JMESPathCheck('length(@)', 0)])
class WebappConfigureTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_webapp_config', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_config(self, resource_group):
webapp_name = self.create_random_name('webapp-config-test', 40)
plan_name = self.create_random_name('webapp-config-plan', 40)
self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
# verify the baseline
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck('alwaysOn', True),
JMESPathCheck('autoHealEnabled', False),
JMESPathCheck('phpVersion', '5.6'),
JMESPathCheck('netFrameworkVersion', 'v4.0'),
JMESPathCheck('pythonVersion', ''),
JMESPathCheck('use32BitWorkerProcess', True),
JMESPathCheck('webSocketsEnabled', False),
JMESPathCheck('minTlsVersion', '1.2'),
JMESPathCheck('ftpsState', 'AllAllowed')])
# update and verify
checks = [
JMESPathCheck('alwaysOn', True),
JMESPathCheck('autoHealEnabled', True),
JMESPathCheck('phpVersion', '7.2'),
JMESPathCheck('netFrameworkVersion', 'v3.0'),
JMESPathCheck('pythonVersion', '3.4'),
JMESPathCheck('use32BitWorkerProcess', False),
JMESPathCheck('webSocketsEnabled', True),
JMESPathCheck('minTlsVersion', '1.0'),
JMESPathCheck('http20Enabled', True),
JMESPathCheck('ftpsState', 'Disabled')]
self.cmd('webapp config set -g {} -n {} --always-on true --auto-heal-enabled true --php-version 7.2 '
'--net-framework-version v3.5 --python-version 3.4 --use-32bit-worker-process=false '
'--web-sockets-enabled=true --http20-enabled true --min-tls-version 1.0 --ftps-state Disabled'.format(resource_group, webapp_name)).assert_with_checks(checks)
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name)) \
.assert_with_checks(checks)
# site appsettings testing
# update through key value pairs
self.cmd('webapp config appsettings set -g {} -n {} --settings s1=foo s2=bar s3=bar2'.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck("length([?name=='s1'])", 1),
JMESPathCheck("length([?name=='s2'])", 1),
JMESPathCheck("length([?name=='s3'])", 1),
JMESPathCheck("length([?value=='foo'])", 1),
JMESPathCheck("length([?value=='bar'])", 1),
JMESPathCheck("length([?value=='bar2'])", 1)
])
# show
result = self.cmd('webapp config appsettings list -g {} -n {}'.format(
resource_group, webapp_name)).get_output_in_json()
s2 = next((x for x in result if x['name'] == 's2'))
self.assertEqual(s2['name'], 's2')
self.assertEqual(s2['slotSetting'], False)
self.assertEqual(s2['value'], 'bar')
self.assertEqual(set([x['name'] for x in result]), set(
['s1', 's2', 's3', 'WEBSITE_NODE_DEFAULT_VERSION']))
# delete
self.cmd('webapp config appsettings delete -g {} -n {} --setting-names s1 s2'
.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck("length([?name=='s3'])", 1),
JMESPathCheck("length([?name=='s1'])", 0),
JMESPathCheck("length([?name=='s2'])", 0)])
# hostnames
self.cmd('webapp config hostname list -g {} --webapp-name {}'
.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', '{0}.azurewebsites.net'.format(webapp_name))])
        # site Azure storage account configuration tests
runtime = 'node|10.14'
linux_plan = self.create_random_name(
prefix='webapp-linux-plan', length=24)
linux_webapp = self.create_random_name(
prefix='webapp-linux', length=24)
self.cmd('appservice plan create -g {} -n {} -l eastus2 --sku S1 --is-linux'.format(resource_group, linux_plan),
checks=[
                     # this field being true means it is a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('webapp create -g {} -n {} --plan {} --runtime {}'.format(resource_group, linux_webapp, linux_plan, runtime),
checks=[
JMESPathCheck('name', linux_webapp),
])
# add
self.cmd(('webapp config storage-account add -g {} -n {} --custom-id Id --storage-type AzureFiles --account-name name '
'--share-name sharename --access-key key --mount-path /path/to/mount').format(resource_group, linux_webapp))
self.cmd('webapp config storage-account list -g {} -n {}'.format(resource_group, linux_webapp)).assert_with_checks([
JMESPathCheck('length(@)', 1),
JMESPathCheck("[?name=='Id']|[0].value.type", "AzureFiles"),
JMESPathCheck("[?name=='Id']|[0].value.accountName", "name"),
JMESPathCheck("[?name=='Id']|[0].value.shareName", "sharename"),
JMESPathCheck("[?name=='Id']|[0].value.accessKey", "key"),
JMESPathCheck("[?name=='Id']|[0].value.mountPath", "/path/to/mount")])
# update
self.cmd('webapp config storage-account update -g {} -n {} --custom-id Id --mount-path /different/path'
.format(resource_group, linux_webapp))
self.cmd('webapp config storage-account list -g {} -n {}'.format(resource_group, linux_webapp)).assert_with_checks([
JMESPathCheck("length(@)", 1),
JMESPathCheck("[?name=='Id']|[0].value.type", "AzureFiles"),
JMESPathCheck("[?name=='Id']|[0].value.accountName", "name"),
JMESPathCheck("[?name=='Id']|[0].value.shareName", "sharename"),
JMESPathCheck("[?name=='Id']|[0].value.accessKey", "key"),
JMESPathCheck("[?name=='Id']|[0].value.mountPath", "/different/path")])
# list
self.cmd('webapp config storage-account list -g {} -n {}'.format(resource_group, linux_webapp)).assert_with_checks([
JMESPathCheck("length(@)", 1),
JMESPathCheck('[0].name', 'Id')])
# delete
self.cmd('webapp config storage-account delete -g {} -n {} --custom-id Id'.format(resource_group, linux_webapp)).assert_with_checks([
JMESPathCheck("length(@)", 0)])
# site connection string tests
self.cmd('webapp config connection-string set -t mysql -g {} -n {} --settings c1="conn1" c2=conn2 '
'--slot-settings c3=conn3'.format(resource_group, linux_webapp))
self.cmd('webapp config connection-string list -g {} -n {}'
.format(resource_group, linux_webapp)).assert_with_checks([
JMESPathCheck('length([])', 3),
JMESPathCheck("[?name=='c1']|[0].slotSetting", False),
JMESPathCheck("[?name=='c1']|[0].type", 'MySql'),
JMESPathCheck("[?name=='c1']|[0].value", 'conn1'),
JMESPathCheck("[?name=='c2']|[0].slotSetting", False),
JMESPathCheck("[?name=='c3']|[0].slotSetting", True)])
self.cmd('webapp config connection-string delete -g {} -n {} --setting-names c1 c3'
.format(resource_group, linux_webapp))
self.cmd('webapp config connection-string list -g {} -n {}'
.format(resource_group, linux_webapp)).assert_with_checks([
JMESPathCheck('length([])', 1),
JMESPathCheck('[0].slotSetting', False),
JMESPathCheck('[0].name', 'c2')])
# see deployment user; just make sure the command does return something
self.assertTrue(
self.cmd('webapp deployment user show').get_output_in_json()['type'])
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='cli_test_webapp_config_appsettings', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_config_appsettings(self, resource_group):
webapp_name = self.create_random_name('webapp-config-appsettings-test', 40)
plan_name = self.create_random_name('webapp-config-appsettings-plan', 40)
self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
# site appsettings testing
# update through key value pairs
self.cmd('webapp config appsettings set -g {} -n {} --settings s1=foo s2=bar s3=bar2'.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck("length([?name=='s1'])", 1),
JMESPathCheck("length([?name=='s2'])", 1),
JMESPathCheck("length([?name=='s3'])", 1),
JMESPathCheck("length([?value=='foo'])", 1),
JMESPathCheck("length([?value=='bar'])", 1),
JMESPathCheck("length([?value=='bar2'])", 1)
])
# show
result = self.cmd('webapp config appsettings list -g {} -n {}'.format(
resource_group, webapp_name)).get_output_in_json()
s2 = next((x for x in result if x['name'] == 's2'))
self.assertEqual(s2['name'], 's2')
self.assertEqual(s2['slotSetting'], False)
self.assertEqual(s2['value'], 'bar')
self.assertEqual(set([x['name'] for x in result]), set(
['s1', 's2', 's3', 'WEBSITE_NODE_DEFAULT_VERSION']))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
# show
result = self.cmd('webapp config appsettings list -g {} -n {}'.format(
resource_group, webapp_name)).get_output_in_json()
s2 = next((x for x in result if x['name'] == 's2'))
self.assertEqual(s2['name'], 's2')
self.assertEqual(s2['slotSetting'], False)
self.assertEqual(s2['value'], 'bar')
self.assertEqual(set([x['name'] for x in result]), set(
['s1', 's2', 's3', 'WEBSITE_NODE_DEFAULT_VERSION']))
@ResourceGroupPreparer(name_prefix='cli_test_webapp_json', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_update_webapp_settings_thru_json(self, resource_group):
webapp_name = self.create_random_name('webapp-config-test', 40)
plan_name = self.create_random_name('webapp-config-plan', 40)
        # update through a JSON file with key-value pairs
_, settings_file = tempfile.mkstemp()
with open(settings_file, 'w+') as file:
file.write(json.dumps({'s2': 'value2'}))
self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
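        # the '@' prefix tells the CLI to load additional settings from the JSON file written above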
output = self.cmd('webapp config appsettings set -g {} -n {} --settings s=value "@{}"'.format(
resource_group, webapp_name, settings_file)).get_output_in_json()
output = [s for s in output if s['name'] in ['s', 's2']]
output.sort(key=lambda s: s['name'])
self.assertEqual(output[0], {
'name': 's',
'value': 'value',
'slotSetting': False
})
self.assertEqual(output[1], {
'name': 's2',
'value': 'value2',
'slotSetting': False
})
        # update again using the output of the set/list command
output.append({
'name': 's3',
'value': 'value3',
'slotSetting': True
})
with open(settings_file, 'w') as file:
file.write(json.dumps(output))
output = self.cmd('webapp config appsettings set -g {} -n {} --settings "@{}"'.format(
resource_group, webapp_name, settings_file)).get_output_in_json()
output = [s for s in output if s['name'] in ['s', 's2', 's3']]
output.sort(key=lambda s: s['name'])
self.assertEqual(output[0], {
'name': 's',
'value': 'value',
'slotSetting': False
})
self.assertEqual(output[1], {
'name': 's2',
'value': 'value2',
'slotSetting': False
})
self.assertEqual(output[2], {
'name': 's3',
'value': 'value3',
'slotSetting': True
})
# update site config
site_configs = {
"requestTracingEnabled": True,
"alwaysOn": True
}
with open(settings_file, 'w') as file:
file.write(json.dumps(site_configs))
self.cmd('webapp config set -g {} -n {} --generic-configurations "@{}"'.format(resource_group, webapp_name, settings_file)).assert_with_checks([
JMESPathCheck("requestTracingEnabled", True),
JMESPathCheck("alwaysOn", True),
])
class WebappScaleTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_scale(self, resource_group):
plan = self.create_random_name(prefix='scale-plan', length=24)
# start with shared sku
self.cmd('appservice plan create -g {} -n {} --sku SHARED'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'D1'),
JMESPathCheck('sku.tier', 'Shared'),
JMESPathCheck('sku.size', 'D1'),
JMESPathCheck('sku.family', 'D'),
# 0 means the default value: 1 instance
JMESPathCheck('sku.capacity', 0)
])
# scale up
self.cmd(
'appservice plan update -g {} -n {} --sku S2'.format(resource_group, plan))
self.cmd('appservice plan show -g {} -n {}'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'S2'),
JMESPathCheck('sku.tier', 'Standard'),
JMESPathCheck('sku.size', 'S2'),
JMESPathCheck('sku.family', 'S')
])
# scale down
self.cmd(
'appservice plan update -g {} -n {} --sku B1'.format(resource_group, plan))
self.cmd('appservice plan show -g {} -n {}'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'B1'),
JMESPathCheck('sku.tier', 'Basic'),
JMESPathCheck('sku.size', 'B1'),
JMESPathCheck('sku.family', 'B')
])
# scale out
self.cmd(
'appservice plan update -g {} -n {} --number-of-workers 2'.format(resource_group, plan))
self.cmd('appservice plan show -g {} -n {}'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'B1'),
JMESPathCheck('sku.tier', 'Basic'),
JMESPathCheck('sku.size', 'B1'),
JMESPathCheck('sku.family', 'B'),
JMESPathCheck('sku.capacity', 2)
])
class AppServiceBadErrorPolishTest(ScenarioTest):
@ResourceGroupPreparer(parameter_name='resource_group', location=WINDOWS_ASP_LOCATION_WEBAPP)
@ResourceGroupPreparer(parameter_name='resource_group2', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_appservice_error_polish(self, resource_group, resource_group2):
plan = self.create_random_name(prefix='web-error-plan', length=24)
webapp_name = self.create_random_name(prefix='web-error', length=24)
self.cmd('group create -n {} -l {}'.format(resource_group2, WINDOWS_ASP_LOCATION_WEBAPP))
self.cmd(
'appservice plan create -g {} -n {} --sku b1'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
self.cmd(
'appservice plan create -g {} -n {} --sku b1'.format(resource_group2, plan))
        # we will try to produce an error by creating 2 webapps with the same name in different resource groups
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group2,
webapp_name, plan), expect_failure=True)
# TODO: ensure test fx can capture error details for us to verify
# allowed_exceptions='Website with given name {} already exists'.format(webapp_name)
# this test doesn't contain the ultimate verification, for which you need to manually load the front page in a browser; a sketch of how that check could be automated follows
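# A minimal sketch (an assumption, not part of the original tests) of how that manual
# front-page check could be automated; the helper name and marker text are placeholders.
def _verify_frontpage(webapp_name, expected_text='Hello world', timeout=240):
    # fetch the site root and report whether the marker text is present
    r = requests.get('http://{}.azurewebsites.net'.format(webapp_name), timeout=timeout)
    return expected_text in str(r.content)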
class LinuxWebappScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_linux_webapp(self, resource_group):
runtime = 'node|10.14'
plan = self.create_random_name(prefix='webapp-linux-plan', length=24)
webapp = self.create_random_name(prefix='webapp-linux', length=24)
self.cmd('appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan), checks=[
            # this field being true means it is a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('webapp create -g {} -n {} --plan {} --runtime {}'.format(resource_group, webapp, plan, runtime), checks=[
JMESPathCheck('name', webapp),
])
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp), checks=[
JMESPathCheck('windowsFxVersion', None)
])
        # workaround the fact that a new linux web's "kind" won't be settled instantaneously
time.sleep(30)
self.cmd('webapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('length([])', 1),
JMESPathCheck('[0].name', webapp),
JMESPathCheck('[0].kind', 'app,linux')
])
self.cmd('webapp show -g {} -n {}'.format(resource_group, webapp), checks=[
JMESPathCheck('name', webapp),
JMESPathCheck('kind', 'app,linux')
])
self.cmd('webapp config set -g {} -n {} --startup-file {}'.format(resource_group, webapp, 'process.json'), checks=[
JMESPathCheck('appCommandLine', 'process.json')
])
result = self.cmd('webapp deployment container config -g {} -n {} --enable-cd true'.format(
resource_group, webapp)).get_output_in_json()
self.assertTrue(result['CI_CD_URL'].startswith('https://'))
self.assertTrue(result['CI_CD_URL'].endswith(
'.scm.azurewebsites.net/docker/hook'))
result = self.cmd('webapp config container set -g {} -n {} --docker-custom-image-name {} --docker-registry-server-password {} --docker-registry-server-user {} --docker-registry-server-url {} --enable-app-service-storage {}'.format(
resource_group, webapp, 'foo-image', 'foo-password', 'foo-user', 'foo-url', 'false')).get_output_in_json()
self.assertEqual(set(x['value'] for x in result if x['name'] ==
'DOCKER_REGISTRY_SERVER_PASSWORD'), set([None])) # we mask the password
result = self.cmd('webapp config container show -g {} -n {} '.format(
resource_group, webapp)).get_output_in_json()
self.assertEqual(set(x['name'] for x in result), set(['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_CUSTOM_IMAGE_NAME', 'DOCKER_REGISTRY_SERVER_PASSWORD', 'WEBSITES_ENABLE_APP_SERVICE_STORAGE']))
self.assertEqual(set(x['value'] for x in result if x['name'] ==
'DOCKER_REGISTRY_SERVER_PASSWORD'), set([None])) # we mask the password
sample = next(
(x for x in result if x['name'] == 'DOCKER_REGISTRY_SERVER_URL'))
self.assertEqual(sample, {
'name': 'DOCKER_REGISTRY_SERVER_URL', 'slotSetting': False, 'value': 'foo-url'})
sample = next(
(x for x in result if x['name'] == 'WEBSITES_ENABLE_APP_SERVICE_STORAGE'))
self.assertEqual(sample, {
'name': 'WEBSITES_ENABLE_APP_SERVICE_STORAGE', 'slotSetting': False, 'value': 'false'})
self.cmd(
'webapp config container delete -g {} -n {}'.format(resource_group, webapp))
result2 = self.cmd('webapp config container show -g {} -n {} '.format(
resource_group, webapp)).get_output_in_json()
self.assertEqual(result2, [])
class LinuxWebappSSHScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_linux_webapp_ssh(self, resource_group):
# On Windows, test 'webapp ssh' throws error
import platform
if platform.system() == "Windows":
from azure.cli.core.util import CLIError
with self.assertRaises(CLIError):
self.cmd('webapp ssh -g {} -n {} --timeout 5'.format("foo", "bar"))
return
runtime = 'node|12-lts'
plan = self.create_random_name(prefix='webapp-ssh-plan', length=24)
webapp = self.create_random_name(prefix='webapp-ssh', length=24)
self.cmd(
'appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} --runtime {}'.format(
resource_group, webapp, plan, runtime))
time.sleep(30)
requests.get('http://{}.azurewebsites.net'.format(webapp), timeout=240)
time.sleep(30)
self.cmd('webapp ssh -g {} -n {} --timeout 5'.format(resource_group, webapp))
time.sleep(30)
class LinuxWebappRemoteSSHScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_linux_webapp_remote_ssh(self, resource_group):
runtime = 'node|12-lts'
plan = self.create_random_name(
prefix='webapp-remote-ssh-plan', length=40)
webapp = self.create_random_name(prefix='webapp-remote-ssh', length=40)
self.cmd(
'appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} --runtime {}'.format(
resource_group, webapp, plan, runtime))
time.sleep(30)
requests.get('http://{}.azurewebsites.net'.format(webapp), timeout=240)
time.sleep(30)
self.cmd(
'webapp create-remote-connection -g {} -n {} --timeout 5'.format(resource_group, webapp))
time.sleep(30)
@unittest.skip("Remote connection feature is a preview feature that doesn't work on Linux, need to get update from Linux team")
class LinuxWebappRemoteDebugScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_linux_webapp_remote_debug(self, resource_group):
runtime = 'node|12-lts'
plan = self.create_random_name(
prefix='webapp-remote-debug-plan', length=40)
webapp = self.create_random_name(
prefix='webapp-remote-debug', length=40)
self.cmd(
'appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} --runtime {}'.format(
resource_group, webapp, plan, runtime))
time.sleep(30)
requests.get('http://{}.azurewebsites.net'.format(webapp), timeout=240)
self.cmd(
'webapp config set --remote-debugging-enabled true -g {} -n {}'.format(resource_group, webapp))\
.assert_with_checks(JMESPathCheck('remoteDebuggingEnabled', True))
self.cmd('webapp create-remote-connection -g {} -n {} --timeout 5 &'.format(resource_group, webapp))
class LinuxWebappMulticontainerSlotScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_linux_webapp_multicontainer_slot(self, resource_group):
webapp_name = self.create_random_name(
prefix='webapp-linux-multi', length=24)
plan = self.create_random_name(prefix='plan-linux-multi', length=24)
config_file = os.path.join(TEST_DIR, 'sample-compose.yml')
slot = "stage"
slot_webapp_name = "{}-{}".format(webapp_name, slot)
slot_config_file = os.path.join(TEST_DIR, 'sample-compose-slot.yml')
self.cmd(
'appservice plan create -g {} -n {} --is-linux --sku S1'.format(resource_group, plan))
self.cmd("webapp create -g {} -n {} --plan {} --multicontainer-config-file \"{}\" "
"--multicontainer-config-type COMPOSE".format(resource_group, webapp_name, plan, config_file))
last_number_seen = 99999999
for x in range(0, 10):
r = requests.get(
'http://{}.azurewebsites.net'.format(webapp_name), timeout=240)
# verify the web page
self.assertTrue('Hello World! I have been seen' in str(r.content))
current_number = [int(s)
for s in r.content.split() if s.isdigit()][0]
self.assertNotEqual(current_number, last_number_seen)
last_number_seen = current_number
self.cmd('webapp deployment slot create -g {} -n {} --slot {}'.format(
resource_group, webapp_name, slot))
self.cmd("webapp config container set -g {} -n {} --slot {} --multicontainer-config-file \"{}\" "
"--multicontainer-config-type COMPOSE".format(resource_group, webapp_name, slot, slot_config_file))
last_number_seen = 99999999
for x in range(0, 10):
r = requests.get(
'http://{}.azurewebsites.net'.format(slot_webapp_name), timeout=240)
# verify the web page
self.assertTrue(
'Hello from a slot! I have been seen' in str(r.content))
current_number = [int(s)
for s in r.content.split() if s.isdigit()][0]
self.assertNotEqual(current_number, last_number_seen)
last_number_seen = current_number
class WebappACRScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_WEBAPP)
def test_acr_integration(self, resource_group):
plan = self.create_random_name(prefix='acrtestplan', length=24)
webapp = self.create_random_name(prefix='webappacrtest', length=24)
runtime = 'node|10.14'
acr_registry_name = webapp
self.cmd('acr create --admin-enabled -g {} -n {} --sku Basic'.format(
resource_group, acr_registry_name))
self.cmd(
'appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan))
self.cmd('webapp create -g {} -n {} --plan {} --runtime {}'.format(
resource_group, webapp, plan, runtime))
creds = self.cmd('acr credential show -n {} -g {}'.format(
acr_registry_name, resource_group)).get_output_in_json()
self.cmd('webapp config container set -g {0} -n {1} --docker-custom-image-name {2}.azurecr.io/image-name:latest --docker-registry-server-url https://{2}.azurecr.io'.format(
resource_group, webapp, acr_registry_name), checks=[
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_USERNAME']|[0].value", creds['username'])
])
class FunctionappACRScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='northeurope')
@StorageAccountPreparer()
@AllowLargeResponse()
def test_acr_integration_function_app(self, resource_group, storage_account):
plan = self.create_random_name(prefix='acrtestplanfunction', length=24)
functionapp = self.create_random_name(
prefix='functionappacrtest', length=24)
runtime = 'node'
acr_registry_name = functionapp
self.cmd('acr create --admin-enabled -g {} -n {} --sku Basic'.format(
resource_group, acr_registry_name))
self.cmd(
'appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan))
self.cmd('functionapp create -g {} -n {} -s {} --plan {} --runtime {}'.format(
resource_group, functionapp, storage_account, plan, runtime))
creds = self.cmd('acr credential show -n {} -g {}'.format(
acr_registry_name, resource_group)).get_output_in_json()
self.cmd('functionapp config container set -g {0} -n {1} --docker-custom-image-name {2}.azurecr.io/image-name:latest --docker-registry-server-url https://{2}.azurecr.io'.format(
resource_group, functionapp, acr_registry_name), checks=[
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_USERNAME']|[0].value", creds['username'])
])
self.cmd('functionapp config container show -g {} -n {} '.format(resource_group, functionapp), checks=[
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_USERNAME']|[0].value", creds['username']),
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_URL']|[0].name", 'DOCKER_REGISTRY_SERVER_URL')
])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck(
"[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'node'),
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_USERNAME'].value|[0]", creds['username'])
])
self.cmd(
'functionapp config container delete -g {} -n {} '.format(resource_group, functionapp))
json_result = self.cmd('functionapp config appsettings list -g {} -n {}'.format(
resource_group, functionapp)).get_output_in_json()
all_settings = [setting['name'] for setting in json_result]
# Make sure the related settings are deleted
self.assertNotIn('DOCKER_REGISTRY_SERVER_USERNAME', all_settings)
self.assertNotIn('DOCKER_REGISTRY_SERVER_URL', all_settings)
self.assertNotIn('DOCKER_REGISTRY_SERVER_PASSWORD', all_settings)
self.assertIn('FUNCTIONS_WORKER_RUNTIME', all_settings)
self.cmd('functionapp delete -g {} -n {}'.format(resource_group, functionapp))
class FunctionAppCreateUsingACR(ScenarioTest):
@ResourceGroupPreparer(location='brazilsouth')
@StorageAccountPreparer(name_prefix='clitestacr')
@AllowLargeResponse()
def test_acr_create_function_app(self, resource_group, storage_account):
plan = self.create_random_name(prefix='acrtestplanfunction', length=24)
functionapp = self.create_random_name(
prefix='functionappacrtest', length=24)
runtime = 'node'
acr_registry_name = functionapp
self.cmd('acr create --admin-enabled -g {} -n {} --sku Basic'.format(
resource_group, acr_registry_name))
acr_creds = self.cmd('acr credential show -n {} -g {}'.format(
acr_registry_name, resource_group)).get_output_in_json()
username = acr_creds['username']
password = acr_creds['passwords'][0]['value']
self.cmd(
'functionapp plan create -g {} -n {} --sku S1 --is-linux'.format(resource_group, plan))
self.cmd('functionapp create -g {} -n {} -s {} --plan {} --runtime {}'
' --deployment-container-image-name {}.azurecr.io/image-name:latest --docker-registry-server-user {}'
' --docker-registry-server-password {}'.format(resource_group, functionapp, storage_account, plan, runtime,
acr_registry_name, username, password))
self.cmd('functionapp config container show -g {} -n {} '.format(resource_group, functionapp), checks=[
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_USERNAME']|[0].value", username),
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_URL']|[0].name", 'DOCKER_REGISTRY_SERVER_URL')
])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck(
"[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", None),
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_USERNAME'].value|[0]", username)
])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'DOCKER|{}.azurecr.io/image-name:latest'.format(acr_registry_name))])
self.cmd(
'functionapp config container delete -g {} -n {} '.format(resource_group, functionapp))
json_result = self.cmd(
'functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp)).get_output_in_json()
all_settings = [setting['name'] for setting in json_result]
# Make sure the related settings are deleted
self.assertNotIn('DOCKER_REGISTRY_SERVER_USERNAME', all_settings)
self.assertNotIn('DOCKER_REGISTRY_SERVER_URL', all_settings)
self.assertNotIn('DOCKER_REGISTRY_SERVER_PASSWORD', all_settings)
self.assertNotIn('FUNCTIONS_WORKER_RUNTIME', all_settings)
class FunctionappACRDeploymentScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location='brazilsouth')
@StorageAccountPreparer(name_prefix='clitestacrdeploy')
def test_acr_deployment_function_app(self, resource_group, storage_account):
plan = self.create_random_name(prefix='acrtestplanfunction', length=24)
functionapp = self.create_random_name(
prefix='functionappacrtest', length=24)
runtime = 'node'
acr_registry_name = functionapp
self.cmd('acr create --admin-enabled -g {} -n {} --sku Basic'.format(
resource_group, acr_registry_name))
self.cmd(
'appservice plan create -g {} -n {} --sku S1 --is-linux' .format(resource_group, plan))
self.cmd('functionapp create -g {} -n {} -s {} --plan {} --runtime {}'.format(
resource_group, functionapp, storage_account, plan, runtime))
creds = self.cmd('acr credential show -g {} -n {}'.format(
resource_group, acr_registry_name)).get_output_in_json()
self.cmd('functionapp config container set -g {0} -n {1} --docker-custom-image-name {2}.azurecr.io/image-name:latest --docker-registry-server-url https://{2}.azurecr.io'.format(
resource_group, functionapp, acr_registry_name), checks=[
JMESPathCheck(
"[?name=='DOCKER_REGISTRY_SERVER_USERNAME']|[0].value", creds['username'])
])
result = self.cmd('functionapp deployment container config -g {} -n {} --enable-cd true'.format(resource_group,
functionapp)).get_output_in_json()
self.assertTrue(result['CI_CD_URL'].startswith('https://'))
self.assertTrue(result['CI_CD_URL'].endswith(
'.scm.azurewebsites.net/docker/hook'))
# verify that show-cd-url works the same way
show_result = self.cmd('functionapp deployment container show-cd-url -g {} -n {}'.format(resource_group,
functionapp)).get_output_in_json()
self.assertTrue(show_result['CI_CD_URL'].startswith('https://'))
self.assertTrue(show_result['CI_CD_URL'].endswith(
'.scm.azurewebsites.net/docker/hook'))
self.cmd('functionapp delete -g {} -n {}'.format(resource_group, functionapp))
class FunctionAppReservedInstanceTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_reserved_instance(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwithreservedinstance', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config set -g {} -n {} --prewarmed-instance-count 4'
.format(resource_group, functionapp_name)).assert_with_checks([
JMESPathCheck('preWarmedInstanceCount', 4)])
self.cmd(
'functionapp delete -g {} -n {}'.format(resource_group, functionapp_name))
class WebappGitScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_git(self, resource_group):
plan = self.create_random_name(prefix='webapp-git-plan5', length=24)
webapp = self.create_random_name(prefix='web-git-test2', length=24)
        # You can create and use any repos with the 3 files under "./sample_web"
test_git_repo = 'https://github.com/yugangw-msft/azure-site-test'
self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp, plan))
self.cmd('webapp deployment source config -g {} -n {} --repo-url {} --branch {} --manual-integration'.format(resource_group, webapp, test_git_repo, 'master'), checks=[
JMESPathCheck('repoUrl', test_git_repo),
JMESPathCheck('isMercurial', False),
JMESPathCheck('branch', 'master')
])
self.cmd('webapp deployment source show -g {} -n {}'.format(resource_group, webapp), checks=[
JMESPathCheck('repoUrl', test_git_repo),
JMESPathCheck('isMercurial', False),
JMESPathCheck('branch', 'master')
])
self.cmd(
'webapp deployment source delete -g {} -n {}'.format(resource_group, webapp))
self.cmd('webapp deployment source show -g {} -n {}'.format(resource_group, webapp),
checks=JMESPathCheck('repoUrl', None))
class WebappSlotScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_slot(self, resource_group):
plan = self.create_random_name(prefix='slot-test-plan', length=24)
webapp = self.create_random_name(prefix='slot-test-web', length=24)
plan_result = self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan)).get_output_in_json()
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group,
webapp, plan_result['name']))
        # You can create and use any repos with the 3 files under "./sample_web" and with a 'staging' branch
slot = 'staging'
slot2 = 'dev'
test_git_repo = 'https://github.com/yugangw-msft/azure-site-test'
test_php_version = '5.6'
# create a few app-settings to test they can be cloned
self.cmd('webapp config appsettings set -g {} -n {} --settings s1=v1 --slot-settings s2=v2'.format(resource_group, webapp))
# create an empty slot
self.cmd('webapp deployment slot create -g {} -n {} --slot {}'.format(resource_group, webapp, slot), checks=[
JMESPathCheck('name', slot)
])
self.cmd('webapp deployment source config -g {} -n {} --repo-url {} --branch {} -s {} --manual-integration'.format(resource_group, webapp, test_git_repo, slot, slot), checks=[
JMESPathCheck('repoUrl', test_git_repo),
JMESPathCheck('branch', slot)
])
# swap with prod and verify the git branch also switched
self.cmd(
'webapp deployment slot swap -g {} -n {} -s {}'.format(resource_group, webapp, slot))
result = self.cmd('webapp config appsettings list -g {} -n {} -s {}'.format(
resource_group, webapp, slot)).get_output_in_json()
self.assertEqual(set([x['name'] for x in result]), set(
['s1', 'WEBSITE_NODE_DEFAULT_VERSION']))
# create a new slot by cloning from prod slot
self.cmd('webapp config set -g {} -n {} --php-version {}'.format(
resource_group, webapp, test_php_version))
self.cmd('webapp deployment slot create -g {} -n {} --slot {} --configuration-source {}'.format(
resource_group, webapp, slot2, webapp))
self.cmd('webapp config show -g {} -n {} --slot {}'.format(resource_group, webapp, slot2), checks=[
JMESPathCheck("phpVersion", test_php_version),
])
self.cmd('webapp config appsettings set -g {} -n {} --slot {} --settings s3=v3 --slot-settings s4=v4'.format(resource_group, webapp, slot2), checks=[
JMESPathCheck("[?name=='s4']|[0].slotSetting", True),
JMESPathCheck("[?name=='s3']|[0].slotSetting", False),
])
self.cmd('webapp config connection-string set -g {} -n {} -t mysql --slot {} --settings c1=connection1 --slot-settings c2=connection2'.format(resource_group, webapp, slot2))
        # verify we can swap with a non-production slot
self.cmd('webapp deployment slot swap -g {} -n {} --slot {} --target-slot {}'.format(
resource_group, webapp, slot, slot2))
result = self.cmd('webapp config appsettings list -g {} -n {} --slot {}'.format(
resource_group, webapp, slot2)).get_output_in_json()
self.assertEqual(set([x['name'] for x in result]), set(
['s1', 's4', 'WEBSITE_NODE_DEFAULT_VERSION']))
result = self.cmd('webapp config connection-string list -g {} -n {} --slot {}'.format(
resource_group, webapp, slot2)).get_output_in_json()
self.assertEqual(set([x['name'] for x in result]), set(['c2']))
result = self.cmd('webapp config appsettings list -g {} -n {} --slot {}'.format(
resource_group, webapp, slot)).get_output_in_json()
self.assertTrue(set(['s3']).issubset(set([x['name'] for x in result])))
result = self.cmd('webapp config connection-string list -g {} -n {} --slot {}'.format(
resource_group, webapp, slot)).get_output_in_json()
self.assertEqual(set([x['name'] for x in result]), set(['c1']))
self.cmd('webapp deployment slot list -g {} -n {}'.format(resource_group, webapp), checks=[
JMESPathCheck("length([])", 2),
JMESPathCheck("length([?name=='{}'])".format(slot2), 1),
JMESPathCheck("length([?name=='{}'])".format(slot), 1),
])
self.cmd(
'webapp deployment slot delete -g {} -n {} --slot {}'.format(resource_group, webapp, slot))
# try another way to delete a slot and exercise all options
self.cmd('webapp delete -g {} -n {} --slot {} --keep-dns-registration --keep-empty-plan --keep-metrics'.format(resource_group, webapp, slot2))
class WebappSlotTrafficRouting(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_traffic_routing(self, resource_group):
plan = self.create_random_name(prefix='slot-traffic-plan', length=24)
webapp = self.create_random_name(prefix='slot-traffic-web', length=24)
plan_result = self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan)).get_output_in_json()
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group,
webapp, plan_result['name']))
        # You can create and use any repos with the 3 files under "./sample_web" and with a 'staging' branch
slot = 'staging'
# create an empty slot
self.cmd(
'webapp deployment slot create -g {} -n {} --slot {}'.format(resource_group, webapp, slot))
self.cmd('webapp traffic-routing set -g {} -n {} -d {}=15'.format(resource_group, webapp, slot), checks=[
JMESPathCheck("[0].actionHostName", webapp +
'-' + slot + '.azurewebsites.net'),
JMESPathCheck("[0].reroutePercentage", 15.0)
])
self.cmd('webapp traffic-routing show -g {} -n {}'.format(resource_group, webapp), checks=[
JMESPathCheck("[0].actionHostName", webapp +
'-' + slot + '.azurewebsites.net'),
JMESPathCheck("[0].reroutePercentage", 15.0)
])
self.cmd(
'webapp traffic-routing clear -g {} -n {}'.format(resource_group, webapp))
class AppServiceCors(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_cors(self, resource_group):
self.kwargs.update({
'plan': self.create_random_name(prefix='slot-traffic-plan', length=24),
'web': self.create_random_name(prefix='slot-traffic-web', length=24),
'slot': 'slot1'
})
self.cmd('appservice plan create -g {rg} -n {plan} --sku S1')
self.cmd('webapp create -g {rg} -n {web} --plan {plan}')
self.cmd(
'webapp cors add -g {rg} -n {web} --allowed-origins https://msdn.com https://msn.com')
self.cmd('webapp cors show -g {rg} -n {web}',
checks=self.check('allowedOrigins', ['https://msdn.com', 'https://msn.com']))
self.cmd(
'webapp cors remove -g {rg} -n {web} --allowed-origins https://msn.com')
self.cmd('webapp cors show -g {rg} -n {web}',
checks=self.check('allowedOrigins', ['https://msdn.com']))
self.cmd(
'webapp deployment slot create -g {rg} -n {web} --slot {slot}')
self.cmd(
'webapp cors add -g {rg} -n {web} --slot {slot} --allowed-origins https://foo.com')
self.cmd('webapp cors show -g {rg} -n {web} --slot {slot}',
checks=self.check('allowedOrigins', ['https://foo.com']))
self.cmd(
'webapp cors remove -g {rg} -n {web} --slot {slot} --allowed-origins https://foo.com')
self.cmd('webapp cors show -g {rg} -n {web} --slot {slot}',
checks=self.check('allowedOrigins', []))
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
@StorageAccountPreparer()
def test_functionapp_cors(self, resource_group, storage_account):
self.kwargs.update({
'plan': self.create_random_name(prefix='slot-traffic-plan', length=24),
'function': self.create_random_name(prefix='slot-traffic-web', length=24),
'storage': self.create_random_name(prefix='storage', length=24)
})
self.cmd('appservice plan create -g {rg} -n {plan} --sku S1')
self.cmd(
'storage account create --name {storage} -g {rg} --sku Standard_LRS')
self.cmd(
'functionapp create -g {rg} -n {function} --plan {plan} -s {storage}')
self.cmd(
'functionapp cors add -g {rg} -n {function} --allowed-origins https://msdn.com https://msn.com')
result = self.cmd(
'functionapp cors show -g {rg} -n {function}').get_output_in_json()['allowedOrigins']
        # functionapp has pre-defined CORS origins. We verify the ones we added are in the list
self.assertTrue(
set(['https://msdn.com', 'https://msn.com']).issubset(set(result)))
class WebappSlotSwapScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_slot_swap(self, resource_group):
plan = self.create_random_name(prefix='slot-swap-plan', length=24)
webapp = self.create_random_name(prefix='slot-swap-web', length=24)
plan_result = self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan)).get_output_in_json()
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group,
webapp, plan_result['name']))
        # You can create and use any repos with the 3 files under "./sample_web" and with a 'staging' branch
slot = 'staging'
self.cmd(
'webapp config appsettings set -g {} -n {} --slot-settings s1=prod'.format(resource_group, webapp))
# create an empty slot
self.cmd(
'webapp deployment slot create -g {} -n {} --slot {}'.format(resource_group, webapp, slot))
self.cmd('webapp config appsettings set -g {} -n {} --slot-settings s1=slot --slot {}'.format(
resource_group, webapp, slot))
# swap with preview
self.cmd('webapp deployment slot swap -g {} -n {} -s {} --action preview'.format(
resource_group, webapp, slot))
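        # during a swap with preview, the target (production) slot's settings are applied to the source slot, so s1 should read 'prod' there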
self.cmd('webapp config appsettings list -g {} -n {} --slot {}'.format(resource_group, webapp, slot), checks=[
JMESPathCheck("[?name=='s1']|[0].value", 'prod')
])
# complete the swap
self.cmd(
'webapp deployment slot swap -g {} -n {} -s {}'.format(resource_group, webapp, slot))
self.cmd('webapp config appsettings list -g {} -n {} --slot {}'.format(resource_group, webapp, slot), checks=[
JMESPathCheck("[?name=='s1']|[0].value", 'slot')
])
        # reset the swap
self.cmd('webapp deployment slot swap -g {} -n {} -s {} --action reset'.format(
resource_group, webapp, slot))
self.cmd('webapp config appsettings list -g {} -n {} --slot {}'.format(resource_group, webapp, slot), checks=[
JMESPathCheck("[?name=='s1']|[0].value", 'slot')
])
class WebappSSLCertTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_ssl(self, resource_group, resource_group_location):
plan = self.create_random_name(prefix='ssl-test-plan', length=24)
webapp_name = self.create_random_name(prefix='web-ssl-test', length=20)
slot_name = self.create_random_name(prefix='slot-ssl-test', length=20)
# Cert Generated using
# https://docs.microsoft.com/azure/app-service-web/web-sites-configure-ssl-certificate#bkmk_ssopenssl
pfx_file = os.path.join(TEST_DIR, 'server.pfx')
cert_password = 'test'
cert_thumbprint = '9E9735C45C792B03B3FFCCA614852B32EE71AD6B'
        # configure tags in the hope of capturing a repro for https://github.com/Azure/azure-cli/issues/6929
self.cmd(
'appservice plan create -g {} -n {} --sku S1 --tags plan=plan1'.format(resource_group, plan))
self.cmd('appservice plan show -g {} -n {}'.format(resource_group,
plan), self.check('tags.plan', 'plan1'))
self.cmd('webapp create -g {} -n {} --plan {} --tags web=web1'.format(
resource_group, webapp_name, plan))
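        # upload the PFX certificate and verify the expected thumbprint is returned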
self.cmd('webapp config ssl upload -g {} -n {} --certificate-file "{}" --certificate-password {}'.format(resource_group, webapp_name, pfx_file, cert_password), checks=[
JMESPathCheck('thumbprint', cert_thumbprint)
])
self.cmd('webapp show -g {} -n {}'.format(resource_group,
webapp_name), self.check('tags.web', 'web1'))
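        # bind the certificate with SNI and verify the hostname's sslState and thumbprint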
self.cmd('webapp config ssl bind -g {} -n {} --certificate-thumbprint {} --ssl-type {}'.format(resource_group, webapp_name, cert_thumbprint, 'SNI'), checks=[
JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].sslState".format(
webapp_name), 'SniEnabled'),
JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].thumbprint".format(
webapp_name), cert_thumbprint)
])
self.cmd('webapp show -g {} -n {}'.format(resource_group,
webapp_name), self.check('tags.web', 'web1'))
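        # unbind the certificate and confirm SSL is disabled for the hostname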
self.cmd('webapp config ssl unbind -g {} -n {} --certificate-thumbprint {}'.format(resource_group, webapp_name, cert_thumbprint), checks=[
JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].sslState".format(
webapp_name), 'Disabled'),
])
self.cmd('webapp show -g {} -n {}'.format(resource_group,
webapp_name), self.check('tags.web', 'web1'))
self.cmd('webapp config ssl delete -g {} --certificate-thumbprint {}'.format(
resource_group, cert_thumbprint))
self.cmd('webapp show -g {} -n {}'.format(resource_group,
webapp_name), self.check('tags.web', 'web1'))
        # repeat the SSL upload/bind/unbind/delete flow against a deployment slot
self.cmd('webapp deployment slot create -g {} -n {} --slot {}'.format(
resource_group, webapp_name, slot_name))
self.cmd('webapp config ssl upload -g {} -n {} --certificate-file "{}" --certificate-password {} -s {}'.format(resource_group, webapp_name, pfx_file, cert_password, slot_name), checks=[
JMESPathCheck('thumbprint', cert_thumbprint)
])
self.cmd(
'webapp show -g {} -n {} -s {}'.format(resource_group, webapp_name, slot_name))
self.cmd('webapp config ssl bind -g {} -n {} --certificate-thumbprint {} --ssl-type {} -s {}'.format(resource_group, webapp_name, cert_thumbprint, 'SNI', slot_name), checks=[
JMESPathCheck("hostNameSslStates|[?name=='{}-{}.azurewebsites.net']|[0].sslState".format(
webapp_name, slot_name), 'SniEnabled'),
JMESPathCheck("hostNameSslStates|[?name=='{}-{}.azurewebsites.net']|[0].thumbprint".format(
webapp_name, slot_name), cert_thumbprint)
])
self.cmd(
'webapp show -g {} -n {} -s {}'.format(resource_group, webapp_name, slot_name))
self.cmd('webapp config ssl unbind -g {} -n {} --certificate-thumbprint {} -s {}'.format(resource_group, webapp_name, cert_thumbprint, slot_name), checks=[
JMESPathCheck("hostNameSslStates|[?name=='{}-{}.azurewebsites.net']|[0].sslState".format(
webapp_name, slot_name), 'Disabled'),
])
self.cmd(
'webapp show -g {} -n {} -s {}'.format(resource_group, webapp_name, slot_name))
self.cmd('webapp config ssl delete -g {} --certificate-thumbprint {}'.format(
resource_group, cert_thumbprint))
self.cmd(
'webapp show -g {} -n {} -s {}'.format(resource_group, webapp_name, slot_name))
self.cmd('webapp delete -g {} -n {}'.format(resource_group, webapp_name))
class WebappSSLImportCertTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_ssl_import(self, resource_group):
plan_name = self.create_random_name(prefix='ssl-test-plan', length=24)
webapp_name = self.create_random_name(prefix='web-ssl-test', length=20)
kv_name = self.create_random_name(prefix='kv-ssl-test', length=20)
# Cert Generated using
# https://docs.microsoft.com/azure/app-service-web/web-sites-configure-ssl-certificate#bkmk_ssopenssl
pfx_file = os.path.join(TEST_DIR, 'server.pfx')
cert_password = 'test'
cert_thumbprint = '9E9735C45C792B03B3FFCCA614852B32EE71AD6B'
cert_name = 'test-cert'
        # configure tags in the hope of capturing a repro for https://github.com/Azure/azure-cli/issues/6929
self.cmd(
'appservice plan create -g {} -n {} --sku B1'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
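        # create a Key Vault, grant the App Service resource provider access to its secrets, and import the test certificate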
self.cmd('keyvault create -g {} -n {}'.format(resource_group, kv_name))
self.cmd('keyvault set-policy -g {} --name {} --spn {} --secret-permissions get'.format(
resource_group, kv_name, 'Microsoft.Azure.WebSites'))
self.cmd('keyvault certificate import --name {} --vault-name {} --file "{}" --password {}'.format(
cert_name, kv_name, pfx_file, cert_password))
self.cmd('webapp config ssl import --resource-group {} --name {} --key-vault {} --key-vault-certificate-name {}'.format(resource_group, webapp_name, kv_name, cert_name), checks=[
JMESPathCheck('thumbprint', cert_thumbprint)
])
self.cmd('webapp config ssl bind -g {} -n {} --certificate-thumbprint {} --ssl-type {}'.format(resource_group, webapp_name, cert_thumbprint, 'SNI'), checks=[
JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].sslState".format(
webapp_name), 'SniEnabled'),
JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].thumbprint".format(
webapp_name), cert_thumbprint)
])
@ResourceGroupPreparer(parameter_name='kv_resource_group', location=WINDOWS_ASP_LOCATION_WEBAPP)
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_ssl_import_crossrg(self, resource_group, kv_resource_group):
plan_name = self.create_random_name(prefix='ssl-test-plan', length=24)
webapp_name = self.create_random_name(prefix='web-ssl-test', length=20)
kv_name = self.create_random_name(prefix='kv-ssl-test', length=20)
# Cert Generated using
# https://docs.microsoft.com/azure/app-service-web/web-sites-configure-ssl-certificate#bkmk_ssopenssl
pfx_file = os.path.join(TEST_DIR, 'server.pfx')
cert_password = 'test'
cert_thumbprint = '9E9735C45C792B03B3FFCCA614852B32EE71AD6B'
cert_name = 'test-cert'
        # configure tags in the hope of capturing a repro for https://github.com/Azure/azure-cli/issues/6929
self.cmd(
'appservice plan create -g {} -n {} --sku B1'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
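        # the Key Vault is in a different resource group, so the import below references it by its full resource id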
kv_id = self.cmd('keyvault create -g {} -n {}'.format(kv_resource_group, kv_name)).get_output_in_json()['id']
self.cmd('keyvault set-policy -g {} --name {} --spn {} --secret-permissions get'.format(
kv_resource_group, kv_name, 'Microsoft.Azure.WebSites'))
self.cmd('keyvault certificate import --name {} --vault-name {} --file "{}" --password {}'.format(
cert_name, kv_name, pfx_file, cert_password))
self.cmd('webapp config ssl import --resource-group {} --name {} --key-vault {} --key-vault-certificate-name {}'.format(resource_group, webapp_name, kv_id, cert_name), checks=[
JMESPathCheck('thumbprint', cert_thumbprint)
])
self.cmd('webapp config ssl bind -g {} -n {} --certificate-thumbprint {} --ssl-type {}'.format(resource_group, webapp_name, cert_thumbprint, 'SNI'), checks=[
JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].sslState".format(
webapp_name), 'SniEnabled'),
JMESPathCheck("hostNameSslStates|[?name=='{}.azurewebsites.net']|[0].thumbprint".format(
webapp_name), cert_thumbprint)
])
class WebappUndeleteTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_deleted_list(self, resource_group):
plan = self.create_random_name(prefix='delete-me-plan', length=24)
webapp_name = self.create_random_name(
prefix='delete-me-web', length=24)
self.cmd(
'appservice plan create -g {} -n {} --sku B1 --tags plan=plan1'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
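        # delete the webapp, then verify it shows up in the deleted-apps list for the resource group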
self.cmd('webapp delete -g {} -n {}'.format(resource_group, webapp_name))
self.cmd('webapp deleted list -g {}'.format(resource_group), checks=[
JMESPathCheck('[0].deletedSiteName', webapp_name)
])
class FunctionAppWithPlanE2ETest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@ResourceGroupPreparer(parameter_name='resource_group2', location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
def test_functionapp_e2e(self, resource_group, resource_group2):
functionapp_name, functionapp_name2 = self.create_random_name(
'func-e2e', 24), self.create_random_name('func-e2e', 24)
plan = self.create_random_name('func-e2e-plan', 24)
storage, storage2 = 'functionappplanstorage', 'functionappplanstorage2'
plan_id = self.cmd('appservice plan create -g {} -n {}'.format(
resource_group, plan)).get_output_in_json()['id']
self.cmd('appservice plan list -g {}'.format(resource_group))
self.cmd(
'storage account create --name {} -g {} -l {} --sku Standard_LRS'.format(storage, resource_group, WINDOWS_ASP_LOCATION_FUNCTIONAPP))
storage_account_id2 = self.cmd('storage account create --name {} -g {} -l {} --sku Standard_LRS'.format(
storage2, resource_group2, WINDOWS_ASP_LOCATION_FUNCTIONAPP)).get_output_in_json()['id']
self.cmd('functionapp create -g {} -n {} -p {} -s {}'.format(resource_group, functionapp_name, plan, storage), checks=[
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('hostNames[0]',
functionapp_name + '.azurewebsites.net')
])
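        # the second app lives in a different resource group, so pass the plan and storage account by resource id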
self.cmd('functionapp create -g {} -n {} -p {} -s {}'.format(resource_group2,
functionapp_name2, plan_id, storage_account_id2))
self.cmd(
'functionapp delete -g {} -n {}'.format(resource_group, functionapp_name))
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_app_service_java(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcapplinplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1 --is-linux'.format(resource_group, plan), checks=[
            # the 'reserved' flag is how App Service marks a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime java --functions-version 3'
.format(resource_group, functionapp, plan, storage_account),
checks=[
JMESPathCheck('name', functionapp)
])
result = self.cmd('functionapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('length([])', 1),
JMESPathCheck('[0].name', functionapp)
]).get_output_in_json()
self.assertTrue('functionapp,linux' in result[0]['kind'])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'Java|8')])
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_app_service_java_with_runtime_version(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcapplinplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1 --is-linux'.format(resource_group, plan), checks=[
            # the 'reserved' flag is how App Service marks a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime java --runtime-version 11 --functions-version 3'
.format(resource_group, functionapp, plan, storage_account),
checks=[
JMESPathCheck('name', functionapp)
])
result = self.cmd('functionapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('length([])', 1),
JMESPathCheck('[0].name', functionapp)
]).get_output_in_json()
self.assertTrue('functionapp,linux' in result[0]['kind'])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'Java|11')])
class FunctionUpdatePlan(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_move_plan_to_elastic(self, resource_group, storage_account):
functionapp_name = self.create_random_name('functionappelastic', 40)
ep_plan_name = self.create_random_name('somerandomplan', 40)
second_plan_name = self.create_random_name('secondplan', 40)
s1_plan_name = self.create_random_name('ab1planname', 40)
plan_result = self.cmd('functionapp plan create -g {} -n {} --sku EP1'.format(resource_group, ep_plan_name), checks=[
JMESPathCheck('sku.name', 'EP1')
]).get_output_in_json()
self.cmd('functionapp plan create -g {} -n {} --sku EP1'.format(resource_group, second_plan_name), checks=[
JMESPathCheck('sku.name', 'EP1')
]).get_output_in_json()
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, s1_plan_name), checks=[
JMESPathCheck('sku.name', 'S1')
])
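        # create the app on the second EP1 plan, then move it to the first one with 'functionapp update --plan'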
self.cmd('functionapp create -g {} -n {} --plan {} -s {}'
.format(resource_group, functionapp_name, second_plan_name, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp update -g {} -n {} --plan {}'
.format(resource_group, functionapp_name, ep_plan_name)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('serverFarmId', plan_result['id']),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
        # Moving to or from a regular App Service plan (i.e. not Elastic Premium) is currently not allowed
self.cmd('functionapp update -g {} -n {} --plan {}'
.format(resource_group, functionapp_name, s1_plan_name), expect_failure=True)
class FunctionAppWithConsumptionPlanE2ETest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='azurecli-functionapp-c-e2e', location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_consumption_e2e(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappconsumption', 40)
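        # '-c <location>' creates the app on a consumption plan instead of a dedicated plan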
self.cmd('functionapp create -g {} -n {} -c {} -s {}'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('[0].kind', 'functionapp'),
JMESPathCheck('[0].name', functionapp_name)
])
self.cmd('functionapp show -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('name', functionapp_name)
])
self.cmd('functionapp update -g {} -n {} --set clientAffinityEnabled=true'.format(resource_group, functionapp_name),
checks=[self.check('clientAffinityEnabled', True)]
)
self.cmd(
'functionapp delete -g {} -n {}'.format(resource_group, functionapp_name))
@ResourceGroupPreparer(name_prefix='azurecli-functionapp-c-e2e-ragrs', location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer(sku='Standard_RAGRS')
def test_functionapp_consumption_ragrs_storage_e2e(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappconsumption', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {}'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp show -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('name', functionapp_name)
])
class FunctionAppWithLinuxConsumptionPlanTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='azurecli-functionapp-linux', location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_consumption_linux(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionapplinuxconsumption', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Linux --runtime node'
.format(resource_group, functionapp_name, LINUX_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('reserved', True),
JMESPathCheck('kind', 'functionapp,linux'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck("[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'node')])
@ResourceGroupPreparer(name_prefix='azurecli-functionapp-linux', location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_consumption_linux_java(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionapplinuxconsumption', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Linux --runtime java --functions-version 3'
.format(resource_group, functionapp_name, LINUX_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('reserved', True),
JMESPathCheck('kind', 'functionapp,linux'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck("[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'java')])
class FunctionAppOnWindowsWithRuntime(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_windows_runtime(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwindowsruntime', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows --runtime node'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck("[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'node')])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_windows_runtime_java(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwindowsruntime', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows --runtime java'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck("[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'java')])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('javaVersion', '1.8')])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_windows_runtime_powershell(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwindowsruntime', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows --runtime powershell'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck("[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'powershell')])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('powerShellVersion', '~6')])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_windows_runtime_version(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwindowsruntime', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows --runtime node --runtime-version 8'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck(
"[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'node'),
JMESPathCheck("[?name=='WEBSITE_NODE_DEFAULT_VERSION'].value|[0]", '~8')])
self.cmd(
'functionapp delete -g {} -n {}'.format(resource_group, functionapp_name))
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_windows_runtime_version_invalid(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwindowsruntime', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} '
'--os-type Windows --runtime node --runtime-version 8.2'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account), expect_failure=True)
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_windows_runtime_functions_version(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwindowsruntime', 40)
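        # on Windows with --functions-version 3 and the node runtime, WEBSITE_NODE_DEFAULT_VERSION should default to ~12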
self.cmd('functionapp create -g {} -n {} -c {} -s {} --functions-version 3 --os-type Windows --runtime node'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck(
"[?name=='FUNCTIONS_EXTENSION_VERSION'].value|[0]", '~3'),
JMESPathCheck("[?name=='WEBSITE_NODE_DEFAULT_VERSION'].value|[0]", '~12')])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_windows_runtime_custom_handler(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwindowsruntime', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --functions-version 3 --os-type Windows --runtime custom'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck("[?name=='FUNCTIONS_EXTENSION_VERSION'].value|[0]", '~3'),
JMESPathCheck("[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'custom')])
class FunctionAppOnWindowsWithoutRuntime(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_windows_without_runtime(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwindowswithoutruntime', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd(
'functionapp delete -g {} -n {}'.format(resource_group, functionapp_name))
class FunctionAppWithAppInsightsKey(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_with_app_insights_key(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwithappinsights', 40)
app_insights_key = '00000000-0000-0000-0000-123456789123'
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows'
' --app-insights-key {}'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account, app_insights_key)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp_name)).assert_with_checks([
JMESPathCheck(
"[?name=='APPINSIGHTS_INSTRUMENTATIONKEY'].value|[0]", app_insights_key)
])
self.cmd(
'functionapp delete -g {} -n {}'.format(resource_group, functionapp_name))
class FunctionAppWithAppInsightsDefault(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_with_default_app_insights(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwithappinsights', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
app_set = self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group,
functionapp_name)).get_output_in_json()
self.assertTrue('APPINSIGHTS_INSTRUMENTATIONKEY' in [
kp['name'] for kp in app_set])
self.assertTrue('AzureWebJobsDashboard' not in [
kp['name'] for kp in app_set])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_with_no_default_app_insights(self, resource_group, storage_account):
functionapp_name = self.create_random_name(
'functionappwithappinsights', 40)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type Windows --disable-app-insights'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
app_set = self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group,
functionapp_name)).get_output_in_json()
self.assertTrue('APPINSIGHTS_INSTRUMENTATIONKEY' not in [
kp['name'] for kp in app_set])
self.assertTrue('AzureWebJobsDashboard' in [
kp['name'] for kp in app_set])
class FunctionAppOnLinux(ScenarioTest):
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcapplinplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
        self.cmd('appservice plan create -g {} -n {} --sku S1 --is-linux'.format(resource_group, plan), checks=[
            # the 'reserved' flag is how App Service marks a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime node'.format(resource_group, functionapp, plan, storage_account), checks=[
JMESPathCheck('name', functionapp)
])
result = self.cmd('functionapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('length([])', 1),
JMESPathCheck('[0].name', functionapp)
]).get_output_in_json()
self.assertTrue('functionapp,linux' in result[0]['kind'])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'Node|10')])
self.cmd('functionapp delete -g {} -n {}'.format(resource_group, functionapp))
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_version(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcapplinplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1 --is-linux'.format(resource_group, plan), checks=[
            # the 'reserved' flag is how App Service marks a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime node --runtime-version 10'
.format(resource_group, functionapp, plan, storage_account),
checks=[
JMESPathCheck('name', functionapp)
])
result = self.cmd('functionapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('length([])', 1),
JMESPathCheck('[0].name', functionapp)
]).get_output_in_json()
self.assertTrue('functionapp,linux' in result[0]['kind'])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'Node|10')])
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_version_consumption(self, resource_group, storage_account):
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --os-type linux --runtime python --runtime-version 3.7'
.format(resource_group, functionapp, LINUX_ASP_LOCATION_FUNCTIONAPP, storage_account), checks=[
JMESPathCheck('name', functionapp)
])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'Python|3.7')])
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_version_error(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcapplinplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1 --is-linux'.format(resource_group, plan), checks=[
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime python --runtime-version 3.8'
.format(resource_group, functionapp, plan, storage_account), expect_failure=True)
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_functions_version(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcapplinplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
        self.cmd('appservice plan create -g {} -n {} --sku S1 --is-linux'.format(resource_group, plan), checks=[
            # the 'reserved' flag is how App Service marks a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1')
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --functions-version 3 --runtime node'
.format(resource_group, functionapp, plan, storage_account), checks=[
JMESPathCheck('name', functionapp)
])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'Node|12')
])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp)).assert_with_checks([
JMESPathCheck(
"[?name=='FUNCTIONS_EXTENSION_VERSION'].value|[0]", '~3')
])
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_custom_handler(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcapplinplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
        self.cmd('appservice plan create -g {} -n {} --sku S1 --is-linux'.format(resource_group, plan), checks=[
            # the 'reserved' flag is how App Service marks a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'S1')
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --functions-version 3 --runtime custom'
.format(resource_group, functionapp, plan, storage_account), checks=[
JMESPathCheck('name', functionapp)
])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp)).assert_with_checks([
JMESPathCheck("[?name=='FUNCTIONS_WORKER_RUNTIME'].value|[0]", 'custom')])
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_functions_version_consumption(self, resource_group, storage_account):
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --functions-version 3 --runtime node --os-type linux'
.format(resource_group, functionapp, LINUX_ASP_LOCATION_FUNCTIONAPP, storage_account), checks=[
JMESPathCheck('name', functionapp)
])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'Node|12')
])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp)).assert_with_checks([
JMESPathCheck(
"[?name=='FUNCTIONS_EXTENSION_VERSION'].value|[0]", '~3')
])
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_on_linux_dotnet_consumption(self, resource_group, storage_account):
functionapp = self.create_random_name(
prefix='functionapp-linux', length=24)
self.cmd('functionapp create -g {} -n {} -c {} -s {} --functions-version 3 --runtime dotnet --os-type linux'
.format(resource_group, functionapp, LINUX_ASP_LOCATION_FUNCTIONAPP, storage_account), checks=[
JMESPathCheck('name', functionapp)
])
self.cmd('functionapp config show -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck('linuxFxVersion', 'dotnet|3.1')
])
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp)).assert_with_checks([
JMESPathCheck(
"[?name=='FUNCTIONS_EXTENSION_VERSION'].value|[0]", '~3')
])
class FunctionAppServicePlan(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
def test_functionapp_app_service_plan(self, resource_group):
plan = self.create_random_name(prefix='funcappplan', length=24)
        self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'S1')
])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
def test_functionapp_elastic_plan(self, resource_group):
plan = self.create_random_name(prefix='funcappplan', length=24)
        self.cmd('functionapp plan create -g {} -n {} --sku EP1 --min-instances 4 --max-burst 12'.format(resource_group, plan), checks=[
JMESPathCheck('maximumElasticWorkerCount', 12),
JMESPathCheck('sku.name', 'EP1'),
JMESPathCheck('sku.capacity', 4)
])
        self.cmd('functionapp plan update -g {} -n {} --min-instances 5 --max-burst 11'.format(resource_group, plan), checks=[
JMESPathCheck('maximumElasticWorkerCount', 11),
JMESPathCheck('sku.name', 'EP1'),
JMESPathCheck('sku.capacity', 5)
])
        self.cmd('functionapp plan show -g {} -n {}'.format(resource_group, plan), checks=[
JMESPathCheck('maximumElasticWorkerCount', 11),
JMESPathCheck('sku.name', 'EP1'),
JMESPathCheck('sku.capacity', 5)
])
        self.cmd('functionapp plan delete -g {} -n {}'.format(resource_group, plan))
class FunctionAppServicePlanLinux(ScenarioTest):
@ResourceGroupPreparer(location=LINUX_ASP_LOCATION_FUNCTIONAPP)
def test_functionapp_app_service_plan_linux(self, resource_group):
plan = self.create_random_name(prefix='funcappplan', length=24)
        self.cmd('functionapp plan create -g {} -n {} --sku S1 --is-linux'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'S1'),
JMESPathCheck('kind', 'linux')
])
class FunctionAppSlotTests(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_slot_creation(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcappplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-slot', length=24)
slotname = self.create_random_name(prefix='slotname', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime node'.format(resource_group, functionapp, plan,
storage_account), checks=[
JMESPathCheck('name', functionapp)
])
self.cmd('functionapp deployment slot create -g {} -n {} --slot {}'.format(resource_group, functionapp, slotname),
checks=[
JMESPathCheck('name', slotname),
JMESPathCheck('type', 'Microsoft.Web/sites/slots'),
])
pre_slot_list = self.cmd('functionapp deployment slot list -g {} -n {}'.format(resource_group, functionapp),
checks=[
JMESPathCheck("[?name=='{}'].type|[0]".format(
slotname), 'Microsoft.Web/sites/slots')
]).get_output_in_json()
self.assertEqual(len(pre_slot_list), 1)
self.cmd('functionapp deployment slot delete -g {} -n {} --slot {}'.format(
resource_group, functionapp, slotname))
deleted_slot_list = self.cmd('functionapp deployment slot list -g {} -n {}'.format(
resource_group, functionapp)).get_output_in_json()
self.assertEqual(len(deleted_slot_list), 0)
self.cmd('functionapp delete -g {} -n {}'.format(resource_group, functionapp))
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_slot_appsetting_update(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcappplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-slot', length=24)
slotname = self.create_random_name(prefix='slotname', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime node'.format(resource_group, functionapp, plan,
storage_account), checks=[
JMESPathCheck('name', functionapp)
])
self.cmd('functionapp deployment slot create -g {} -n {} --slot {}'.format(resource_group, functionapp, slotname), checks=[
JMESPathCheck('name', slotname)
])
self.cmd('functionapp config appsettings set -g {} -n {} --slot {} --slot-settings FOO=BAR'.format(resource_group, functionapp,
slotname), checks=[
JMESPathCheck("[?name=='FOO'].value|[0]", 'BAR'),
JMESPathCheck("[?name=='FOO'].slotSetting|[0]", True)
])
self.cmd('functionapp config appsettings list -g {} -n {} --slot {}'.format(resource_group, functionapp, slotname), checks=[
JMESPathCheck("[?name=='FOO'].value|[0]", 'BAR'),
JMESPathCheck("[?name=='FOO'].slotSetting|[0]", True)
])
self.cmd('functionapp delete -g {} -n {}'.format(resource_group, functionapp))
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_slot_swap(self, resource_group, storage_account):
plan = self.create_random_name(prefix='funcappplan', length=24)
functionapp = self.create_random_name(
prefix='functionapp-slot', length=24)
slotname = self.create_random_name(prefix='slotname', length=24)
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan), checks=[
JMESPathCheck('sku.name', 'S1'),
])
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime node'.format(resource_group, functionapp,
plan,
storage_account), checks=[
JMESPathCheck('name', functionapp)
])
self.cmd('functionapp deployment slot create -g {} -n {} --slot {}'.format(resource_group, functionapp,
slotname), checks=[
JMESPathCheck('name', slotname)
])
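        # set an app setting on the slot, swap the slot into production, and verify the setting moved with it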
self.cmd('functionapp config appsettings set -g {} -n {} --slot {} --settings FOO=BAR'.format(resource_group, functionapp,
slotname), checks=[
JMESPathCheck("[?name=='FOO'].value|[0]", 'BAR')
])
self.cmd('functionapp deployment slot swap -g {} -n {} --slot {} --action swap'.format(
resource_group, functionapp, slotname))
self.cmd('functionapp config appsettings list -g {} -n {}'.format(resource_group, functionapp), checks=[
JMESPathCheck("[?name=='FOO'].value|[0]", 'BAR')
])
self.cmd('functionapp delete -g {} -n {}'.format(resource_group, functionapp))
class FunctionAppKeysTests(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_keys_set(self, resource_group, storage_account):
functionapp_name = self.create_random_name('functionappkeys', 40)
key_name = "keyname1"
key_value = "keyvalue1"
key_type = "functionKeys"
self.cmd('functionapp create -g {} -n {} -c {} -s {}'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp keys set -g {} -n {} --key-name {} --key-value {} --key-type {}'
.format(resource_group, functionapp_name, key_name, key_value, key_type)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value),
JMESPathCheck('type', 'Microsoft.Web/sites/host/functionKeys')])
key_value = "keyvalue1_changed"
self.cmd('functionapp keys set -g {} -n {} --key-name {} --key-value {} --key-type {}'
.format(resource_group, functionapp_name, key_name, key_value, key_type)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value),
JMESPathCheck('type', 'Microsoft.Web/sites/host/functionKeys')])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_keys_list(self, resource_group, storage_account):
functionapp_name = self.create_random_name('functionappkeys', 40)
key_name = "keyname1"
key_value = "keyvalue1"
key_type = "functionKeys"
self.cmd('functionapp create -g {} -n {} -c {} -s {}'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp keys set -g {} -n {} --key-name {} --key-value {} --key-type {}'
.format(resource_group, functionapp_name, key_name, key_value, key_type)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value),
JMESPathCheck('type', 'Microsoft.Web/sites/host/functionKeys')])
self.cmd('functionapp keys list -g {} -n {}'
.format(resource_group, functionapp_name)).assert_with_checks([
JMESPathCheck('functionKeys.{}'.format(key_name), key_value)])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_keys_delete(self, resource_group, storage_account):
functionapp_name = self.create_random_name('functionappkeys', 40)
key_name = "keyname1"
key_value = "keyvalue1"
key_type = "functionKeys"
self.cmd('functionapp create -g {} -n {} -c {} -s {}'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp keys set -g {} -n {} --key-name {} --key-value {} --key-type {}'
.format(resource_group, functionapp_name, key_name, key_value, key_type)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value),
JMESPathCheck('type', 'Microsoft.Web/sites/host/functionKeys')])
self.cmd('functionapp keys delete -g {} -n {} --key-name {} --key-type {}'
.format(resource_group, functionapp_name, key_name, key_type))
self.cmd('functionapp keys list -g {} -n {}'
.format(resource_group, functionapp_name)).assert_with_checks([
JMESPathCheck('functionKeys.{}'.format(key_name), None)])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_keys_set_slot(self, resource_group, storage_account):
functionapp_name = self.create_random_name('functionappkeys', 40)
slot_name = self.create_random_name(prefix='slotname', length=24)
key_name = "keyname1"
key_value = "keyvalue1"
key_type = "functionKeys"
self.cmd('functionapp create -g {} -n {} -c {} -s {}'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp deployment slot create -g {} -n {} --slot {}'
.format(resource_group, functionapp_name, slot_name)).assert_with_checks([
JMESPathCheck('name', slot_name),
JMESPathCheck('type', 'Microsoft.Web/sites/slots')])
self.cmd('functionapp keys set -g {} -n {} -s {} --key-name {} --key-value {} --key-type {}'
.format(resource_group, functionapp_name, slot_name, key_name, key_value, key_type)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value),
JMESPathCheck('type', 'Microsoft.Web/sites/host/functionKeys')])
key_value = "keyvalue1_changed"
self.cmd('functionapp keys set -g {} -n {} -s {} --key-name {} --key-value {} --key-type {}'
.format(resource_group, functionapp_name, slot_name, key_name, key_value, key_type)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value),
JMESPathCheck('type', 'Microsoft.Web/sites/host/functionKeys')])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_keys_list_slot(self, resource_group, storage_account):
functionapp_name = self.create_random_name('functionappkeys', 40)
slot_name = self.create_random_name(prefix='slotname', length=24)
key_name = "keyname1"
key_value = "keyvalue1"
key_type = "functionKeys"
self.cmd('functionapp create -g {} -n {} -c {} -s {}'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp deployment slot create -g {} -n {} --slot {}'
.format(resource_group, functionapp_name, slot_name)).assert_with_checks([
JMESPathCheck('name', slot_name),
JMESPathCheck('type', 'Microsoft.Web/sites/slots')])
self.cmd('functionapp keys set -g {} -n {} -s {} --key-name {} --key-value {} --key-type {}'
.format(resource_group, functionapp_name, slot_name, key_name, key_value, key_type)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value),
JMESPathCheck('type', 'Microsoft.Web/sites/host/functionKeys')])
self.cmd('functionapp keys list -g {} -n {} -s {}'
.format(resource_group, functionapp_name, slot_name)).assert_with_checks([
JMESPathCheck('functionKeys.{}'.format(key_name), key_value)])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_keys_delete_slot(self, resource_group, storage_account):
functionapp_name = self.create_random_name('functionappkeys', 40)
slot_name = self.create_random_name(prefix='slotname', length=24)
key_name = "keyname1"
key_value = "keyvalue1"
key_type = "functionKeys"
self.cmd('functionapp create -g {} -n {} -c {} -s {}'
.format(resource_group, functionapp_name, WINDOWS_ASP_LOCATION_FUNCTIONAPP, storage_account)).assert_with_checks([
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', functionapp_name),
JMESPathCheck('kind', 'functionapp'),
JMESPathCheck('hostNames[0]', functionapp_name + '.azurewebsites.net')])
self.cmd('functionapp deployment slot create -g {} -n {} --slot {}'
.format(resource_group, functionapp_name, slot_name)).assert_with_checks([
JMESPathCheck('name', slot_name),
JMESPathCheck('type', 'Microsoft.Web/sites/slots')])
self.cmd('functionapp keys set -g {} -n {} -s {} --key-name {} --key-value {} --key-type {}'
.format(resource_group, functionapp_name, slot_name, key_name, key_value, key_type)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value),
JMESPathCheck('type', 'Microsoft.Web/sites/host/functionKeys')])
self.cmd('functionapp keys delete -g {} -n {} -s {} --key-name {} --key-type {}'
.format(resource_group, functionapp_name, slot_name, key_name, key_type))
self.cmd('functionapp keys list -g {} -n {} -s {}'
.format(resource_group, functionapp_name, slot_name)).assert_with_checks([
JMESPathCheck('functionKeys.{}'.format(key_name), None)])
# LiveScenarioTest due to issue https://github.com/Azure/azure-cli/issues/10705
class FunctionAppFunctionKeysTests(LiveScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_function_keys_set(self, resource_group, storage_account):
zip_file = os.path.join(TEST_DIR, 'sample_csx_function_httptrigger/sample_csx_function_httptrigger.zip')
functionapp_name = self.create_random_name('functionappkeys', 40)
plan_name = self.create_random_name(prefix='functionappkeysplan', length=40)
function_name = "HttpTrigger"
key_name = "keyname1"
key_value = "keyvalue1"
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime dotnet'.format(resource_group, functionapp_name, plan_name, storage_account))
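        # warm up the Kudu (scm) site before zip deployment; the sleep gives it time to become responsive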
requests.get('http://{}.scm.azurewebsites.net'.format(functionapp_name), timeout=240)
time.sleep(30)
self.cmd('functionapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, functionapp_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('complete', True)])
        # ping the function endpoint so we know it is ready
requests.get('http://{}.azurewebsites.net/api/{}'.format(functionapp_name, function_name), timeout=240)
time.sleep(30)
self.cmd('functionapp function keys set -g {} -n {} --function-name {} --key-name {} --key-value {}'
.format(resource_group, functionapp_name, function_name, key_name, key_value)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value)])
key_value = "keyvalue1_changed"
self.cmd('functionapp function keys set -g {} -n {} --function-name {} --key-name {} --key-value {}'
.format(resource_group, functionapp_name, function_name, key_name, key_value)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value)])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_function_keys_list(self, resource_group, storage_account):
zip_file = os.path.join(TEST_DIR, 'sample_csx_function_httptrigger/sample_csx_function_httptrigger.zip')
functionapp_name = self.create_random_name('functionappkeys', 40)
plan_name = self.create_random_name(prefix='functionappkeysplan', length=40)
function_name = "HttpTrigger"
key_name = "keyname1"
key_value = "keyvalue1"
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime dotnet'.format(resource_group, functionapp_name, plan_name, storage_account))
requests.get('http://{}.scm.azurewebsites.net'.format(functionapp_name), timeout=240)
time.sleep(30)
self.cmd('functionapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, functionapp_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('complete', True)])
        # ping the function endpoint so we know it is ready
requests.get('http://{}.azurewebsites.net/api/{}'.format(functionapp_name, function_name), timeout=240)
time.sleep(30)
self.cmd('functionapp function keys set -g {} -n {} --function-name {} --key-name {} --key-value {}'
.format(resource_group, functionapp_name, function_name, key_name, key_value)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value)])
self.cmd('functionapp function keys list -g {} -n {} --function-name {}'
.format(resource_group, functionapp_name, function_name)).assert_with_checks([
JMESPathCheck('{}'.format(key_name), key_value)])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_function_keys_delete(self, resource_group, storage_account):
zip_file = os.path.join(TEST_DIR, 'sample_csx_function_httptrigger/sample_csx_function_httptrigger.zip')
functionapp_name = self.create_random_name('functionappkeys', 40)
plan_name = self.create_random_name(prefix='functionappkeysplan', length=40)
function_name = "HttpTrigger"
key_name = "keyname1"
key_value = "keyvalue1"
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime dotnet'.format(resource_group, functionapp_name, plan_name, storage_account))
requests.get('http://{}.scm.azurewebsites.net'.format(functionapp_name), timeout=240)
time.sleep(30)
self.cmd('functionapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, functionapp_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('complete', True)])
        # ping the function endpoint so we know it is ready
requests.get('http://{}.azurewebsites.net/api/{}'.format(functionapp_name, function_name), timeout=240)
time.sleep(30)
self.cmd('functionapp function keys set -g {} -n {} --function-name {} --key-name {} --key-value {}'
.format(resource_group, functionapp_name, function_name, key_name, key_value)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value)])
self.cmd('functionapp function keys delete -g {} -n {} --function-name {} --key-name {}'
.format(resource_group, functionapp_name, function_name, key_name))
self.cmd('functionapp function keys list -g {} -n {} --function-name {}'
.format(resource_group, functionapp_name, function_name)).assert_with_checks([
JMESPathCheck('{}'.format(key_name), None)])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_function_keys_set_slot(self, resource_group, storage_account):
zip_file = os.path.join(TEST_DIR, 'sample_csx_function_httptrigger/sample_csx_function_httptrigger.zip')
functionapp_name = self.create_random_name('functionappkeys', 40)
plan_name = self.create_random_name(prefix='functionappkeysplan', length=40)
slot_name = self.create_random_name(prefix='slotname', length=24)
function_name = "HttpTrigger"
key_name = "keyname1"
key_value = "keyvalue1"
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime dotnet'.format(resource_group, functionapp_name, plan_name, storage_account))
self.cmd('functionapp deployment slot create -g {} -n {} --slot {}'
.format(resource_group, functionapp_name, slot_name)).assert_with_checks([
JMESPathCheck('name', slot_name),
JMESPathCheck('type', 'Microsoft.Web/sites/slots')])
requests.get('http://{}.scm.azurewebsites.net'.format(functionapp_name), timeout=240)
time.sleep(30)
self.cmd('functionapp deployment source config-zip -g {} -n {} -s {} --src "{}"'.format(resource_group, functionapp_name, slot_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('complete', True)])
# ping function so you know it's ready
requests.get('http://{}.azurewebsites.net/api/{}'.format(functionapp_name, function_name), timeout=240)
time.sleep(30)
self.cmd('functionapp function keys set -g {} -n {} --function-name {} -s {} --key-name {} --key-value {}'
.format(resource_group, functionapp_name, function_name, slot_name, key_name, key_value)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value)])
key_value = "keyvalue1_changed"
self.cmd('functionapp function keys set -g {} -n {} --function-name {} -s {} --key-name {} --key-value {}'
.format(resource_group, functionapp_name, function_name, slot_name, key_name, key_value)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value)])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_function_keys_list_slot(self, resource_group, storage_account):
zip_file = os.path.join(TEST_DIR, 'sample_csx_function_httptrigger/sample_csx_function_httptrigger.zip')
functionapp_name = self.create_random_name('functionappkeys', 40)
plan_name = self.create_random_name(prefix='functionappkeysplan', length=40)
slot_name = self.create_random_name(prefix='slotname', length=24)
function_name = "HttpTrigger"
key_name = "keyname1"
key_value = "keyvalue1"
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime dotnet'.format(resource_group, functionapp_name, plan_name, storage_account))
self.cmd('functionapp deployment slot create -g {} -n {} --slot {}'
.format(resource_group, functionapp_name, slot_name)).assert_with_checks([
JMESPathCheck('name', slot_name),
JMESPathCheck('type', 'Microsoft.Web/sites/slots')])
requests.get('http://{}.scm.azurewebsites.net'.format(functionapp_name), timeout=240)
time.sleep(30)
self.cmd('functionapp deployment source config-zip -g {} -n {} -s {} --src "{}"'.format(resource_group, functionapp_name, slot_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('complete', True)])
# ping function so you know it's ready
requests.get('http://{}.azurewebsites.net/api/{}'.format(functionapp_name, function_name), timeout=240)
time.sleep(30)
self.cmd('functionapp function keys set -g {} -n {} --function-name {} -s {} --key-name {} --key-value {}'
.format(resource_group, functionapp_name, function_name, slot_name, key_name, key_value)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value)])
self.cmd('functionapp function keys list -g {} -n {} --function-name {} -s {}'
.format(resource_group, functionapp_name, function_name, slot_name)).assert_with_checks([
JMESPathCheck('{}'.format(key_name), key_value)])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_function_keys_delete_slot(self, resource_group, storage_account):
zip_file = os.path.join(TEST_DIR, 'sample_csx_function_httptrigger/sample_csx_function_httptrigger.zip')
functionapp_name = self.create_random_name('functionappkeys', 40)
plan_name = self.create_random_name(prefix='functionappkeysplan', length=40)
slot_name = self.create_random_name(prefix='slotname', length=24)
function_name = "HttpTrigger"
key_name = "keyname1"
key_value = "keyvalue1"
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime dotnet'.format(resource_group, functionapp_name, plan_name, storage_account))
self.cmd('functionapp deployment slot create -g {} -n {} --slot {}'
.format(resource_group, functionapp_name, slot_name)).assert_with_checks([
JMESPathCheck('name', slot_name),
JMESPathCheck('type', 'Microsoft.Web/sites/slots')])
requests.get('http://{}.scm.azurewebsites.net'.format(functionapp_name), timeout=240)
time.sleep(30)
self.cmd('functionapp deployment source config-zip -g {} -n {} -s {} --src "{}"'.format(resource_group, functionapp_name, slot_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('complete', True)])
# ping function so you know it's ready
requests.get('http://{}.azurewebsites.net/api/{}'.format(functionapp_name, function_name), timeout=240)
time.sleep(30)
self.cmd('functionapp function keys set -g {} -n {} --function-name {} -s {} --key-name {} --key-value {}'
.format(resource_group, functionapp_name, function_name, slot_name, key_name, key_value)).assert_with_checks([
JMESPathCheck('name', key_name),
JMESPathCheck('value', key_value)])
self.cmd('functionapp function keys delete -g {} -n {} --function-name {} -s {} --key-name {}'
.format(resource_group, functionapp_name, function_name, slot_name, key_name))
self.cmd('functionapp function keys list -g {} -n {} --function-name {} -s {}'
.format(resource_group, functionapp_name, function_name, slot_name)).assert_with_checks([
JMESPathCheck('{}'.format(key_name), None)])
# LiveScenarioTest due to issue https://github.com/Azure/azure-cli/issues/10705
class FunctionAppFunctionTests(LiveScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_function_show(self, resource_group, storage_account):
zip_file = os.path.join(TEST_DIR, 'sample_csx_function_httptrigger/sample_csx_function_httptrigger.zip')
functionapp_name = self.create_random_name('functionappkeys', 40)
plan_name = self.create_random_name(prefix='functionappkeysplan', length=40)
function_name = "HttpTrigger"
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime dotnet'.format(resource_group, functionapp_name, plan_name, storage_account))
requests.get('http://{}.scm.azurewebsites.net'.format(functionapp_name), timeout=240)
time.sleep(30)
self.cmd('functionapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, functionapp_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('complete', True)])
self.cmd('functionapp function show -g {} -n {} --function-name {}'.format(resource_group, functionapp_name, function_name)).assert_with_checks([
JMESPathCheck('name', '{}/{}'.format(functionapp_name, function_name)),
JMESPathCheck('resourceGroup', resource_group),
JMESPathCheck('scriptHref', 'https://{}.azurewebsites.net/admin/vfs/site/wwwroot/{}/run.csx'.format(functionapp_name, function_name))])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_function_delete(self, resource_group, storage_account):
zip_file = os.path.join(TEST_DIR, 'sample_csx_function_httptrigger/sample_csx_function_httptrigger.zip')
functionapp_name = self.create_random_name('functionappkeys', 40)
plan_name = self.create_random_name(prefix='functionappkeysplan', length=40)
function_name = "HttpTrigger"
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime dotnet'.format(resource_group, functionapp_name, plan_name, storage_account))
requests.get('http://{}.scm.azurewebsites.net'.format(functionapp_name), timeout=240)
time.sleep(30)
self.cmd('functionapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, functionapp_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('complete', True)])
self.cmd('functionapp function delete -g {} -n {} --function-name {}'.format(resource_group, functionapp_name, function_name))
self.cmd('functionapp function show -g {} -n {} --function-name {}'.format(resource_group, functionapp_name, function_name)).assert_with_checks([
JMESPathCheck('config', {})])
class WebappAuthenticationTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_webapp_authentication', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_authentication(self, resource_group):
webapp_name = self.create_random_name('webapp-authentication-test', 40)
plan_name = self.create_random_name('webapp-authentication-plan', 40)
self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
# testing show command for newly created app and initial fields
self.cmd('webapp auth show -g {} -n {}'.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck('unauthenticatedClientAction', None),
JMESPathCheck('defaultProvider', None),
JMESPathCheck('enabled', False),
JMESPathCheck('tokenStoreEnabled', None),
JMESPathCheck('allowedExternalRedirectUrls', None),
JMESPathCheck('tokenRefreshExtensionHours', None),
JMESPathCheck('runtimeVersion', None),
JMESPathCheck('clientId', None),
JMESPathCheck('clientSecret', None),
JMESPathCheck('clientSecretCertificateThumbprint', None),
JMESPathCheck('allowedAudiences', None),
JMESPathCheck('issuer', None),
JMESPathCheck('facebookAppId', None),
JMESPathCheck('facebookAppSecret', None),
JMESPathCheck('facebookOauthScopes', None)
])
# update and verify
result = self.cmd('webapp auth update -g {} -n {} --enabled true --action LoginWithFacebook '
'--token-store false --token-refresh-extension-hours 7.2 --runtime-version 1.2.8 '
'--aad-client-id aad_client_id --aad-client-secret aad_secret --aad-client-secret-certificate-thumbprint aad_thumbprint '
'--aad-allowed-token-audiences https://audience1 --aad-token-issuer-url https://issuer_url '
'--facebook-app-id facebook_id --facebook-app-secret facebook_secret '
'--facebook-oauth-scopes public_profile email'
.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck(
'unauthenticatedClientAction', 'RedirectToLoginPage'),
JMESPathCheck('defaultProvider', 'Facebook'),
JMESPathCheck('enabled', True),
JMESPathCheck('tokenStoreEnabled', False),
JMESPathCheck('tokenRefreshExtensionHours', 7.2),
JMESPathCheck('runtimeVersion', '1.2.8'),
JMESPathCheck('clientId', 'aad_client_id'),
JMESPathCheck('clientSecret', 'aad_secret'),
JMESPathCheck('clientSecretCertificateThumbprint', 'aad_thumbprint'),
JMESPathCheck('issuer', 'https://issuer_url'),
JMESPathCheck('facebookAppId', 'facebook_id'),
JMESPathCheck('facebookAppSecret', 'facebook_secret')]).get_output_in_json()
self.assertIn('https://audience1', result['allowedAudiences'])
self.assertIn('email', result['facebookOauthScopes'])
self.assertIn('public_profile', result['facebookOauthScopes'])
class WebappUpdateTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_update(self, resource_group):
webapp_name = self.create_random_name('webapp-update-test', 40)
plan_name = self.create_random_name('webapp-update-plan', 40)
self.cmd('appservice plan create -g {} -n {} --sku S1'
.format(resource_group, plan_name))
self.cmd('webapp create -g {} -n {} --plan {}'
.format(resource_group, webapp_name, plan_name)).assert_with_checks([
JMESPathCheck('clientAffinityEnabled', True)])
# testing update command with --set
self.cmd('webapp update -g {} -n {} --client-affinity-enabled false --set tags.foo=bar'
.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck('name', webapp_name),
JMESPathCheck('tags.foo', 'bar'),
JMESPathCheck('clientAffinityEnabled', False)])
# try out on slots
self.cmd(
'webapp deployment slot create -g {} -n {} -s s1'.format(resource_group, webapp_name))
self.cmd('webapp update -g {} -n {} -s s1 --client-affinity-enabled true'.format(resource_group, webapp_name), checks=[
self.check('clientAffinityEnabled', True)
])
class WebappZipDeployScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_webapp_zipDeploy', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_deploy_zip(self, resource_group):
webapp_name = self.create_random_name('webapp-zipDeploy-test', 40)
plan_name = self.create_random_name('webapp-zipDeploy-plan', 40)
zip_file = os.path.join(TEST_DIR, 'test.zip')
self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
self.cmd('webapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, webapp_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('message', 'Created via a push deployment'),
JMESPathCheck('complete', True)
])
class WebappImplictIdentityTest(ScenarioTest):
@AllowLargeResponse(8192)
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_assign_system_identity(self, resource_group):
scope = '/subscriptions/{}/resourcegroups/{}'.format(
self.get_subscription_id(), resource_group)
role = 'Reader'
plan_name = self.create_random_name('web-msi-plan', 20)
webapp_name = self.create_random_name('web-msi', 20)
self.cmd(
'appservice plan create -g {} -n {}'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
with mock.patch('azure.cli.core.commands.arm._gen_guid', side_effect=self.create_guid):
result = self.cmd('webapp identity assign -g {} -n {} --role {} --scope {}'.format(
resource_group, webapp_name, role, scope)).get_output_in_json()
self.cmd('webapp identity show -g {} -n {}'.format(resource_group, webapp_name), checks=[
self.check('principalId', result['principalId'])
])
self.cmd('role assignment list -g {} --assignee {}'.format(resource_group, result['principalId']), checks=[
JMESPathCheck('length([])', 1),
JMESPathCheck('[0].roleDefinitionName', role)
])
self.cmd('webapp identity show -g {} -n {}'.format(resource_group,
webapp_name), checks=self.check('principalId', result['principalId']))
self.cmd(
'webapp identity remove -g {} -n {}'.format(resource_group, webapp_name))
self.cmd('webapp identity show -g {} -n {}'.format(resource_group,
webapp_name), checks=self.is_empty())
@AllowLargeResponse(8192)
@ResourceGroupPreparer(random_name_length=24)
def test_webapp_assign_user_identity(self, resource_group):
plan_name = self.create_random_name('web-msi-plan', 20)
webapp_name = self.create_random_name('web-msi', 20)
identity_name = self.create_random_name('id1', 8)
msi_result = self.cmd('identity create -g {} -n {}'.format(resource_group, identity_name), checks=[
self.check('name', identity_name)]).get_output_in_json()
self.cmd(
'appservice plan create -g {} -n {}'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
self.cmd('webapp identity assign -g {} -n {}'.format(resource_group, webapp_name))
result = self.cmd('webapp identity assign -g {} -n {} --identities {}'.format(
resource_group, webapp_name, msi_result['id'])).get_output_in_json()
self.cmd('webapp identity show -g {} -n {}'.format(resource_group, webapp_name), checks=[
self.check('principalId', result['principalId']),
self.check('userAssignedIdentities."{}".clientId'.format(msi_result['id']), msi_result['clientId']),
])
self.cmd('webapp identity remove -g {} -n {} --identities {}'.format(
resource_group, webapp_name, msi_result['id']))
self.cmd('webapp identity show -g {} -n {}'.format(resource_group, webapp_name), checks=[
self.check('principalId', result['principalId']),
self.check('userAssignedIdentities', None),
])
@AllowLargeResponse(8192)
@ResourceGroupPreparer(random_name_length=24)
def test_webapp_remove_identity(self, resource_group):
plan_name = self.create_random_name('web-msi-plan', 20)
webapp_name = self.create_random_name('web-msi', 20)
identity_name = self.create_random_name('id1', 8)
identity2_name = self.create_random_name('id1', 8)
msi_result = self.cmd('identity create -g {} -n {}'.format(resource_group, identity_name), checks=[
self.check('name', identity_name)]).get_output_in_json()
msi2_result = self.cmd('identity create -g {} -n {}'.format(
resource_group, identity2_name)).get_output_in_json()
self.cmd(
'appservice plan create -g {} -n {}'.format(resource_group, plan_name))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
self.cmd('webapp identity assign -g {} -n {} --identities [system] {} {}'.format(
resource_group, webapp_name, msi_result['id'], msi2_result['id']))
result = self.cmd('webapp identity remove -g {} -n {} --identities {}'.format(
resource_group, webapp_name, msi2_result['id'])).get_output_in_json()
self.cmd('webapp identity show -g {} -n {}'.format(resource_group, webapp_name), checks=[
self.check('principalId', result['principalId']),
self.check('userAssignedIdentities."{}".clientId'.format(msi_result['id']), msi_result['clientId']),
])
self.cmd('webapp identity remove -g {} -n {}'.format(resource_group, webapp_name))
self.cmd('webapp identity show -g {} -n {}'.format(resource_group, webapp_name), checks=[
self.check('principalId', None),
self.check('userAssignedIdentities."{}".clientId'.format(msi_result['id']), msi_result['clientId']),
])
self.cmd('webapp identity remove -g {} -n {} --identities [system] {}'.format(
resource_group, webapp_name, msi_result['id']))
self.cmd('webapp identity show -g {} -n {}'.format(
resource_group, webapp_name), checks=self.is_empty())
class WebappListLocationsFreeSKUTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_webapp_list-locations-free-sku-test')
def test_webapp_list_locations_free_sku(self, resource_group):
asp_F1 = self.cmd(
'appservice list-locations --sku F1').get_output_in_json()
result = self.cmd(
'appservice list-locations --sku Free').get_output_in_json()
self.assertEqual(asp_F1, result)
class WebappTriggeredWebJobListTest(ScenarioTest):
@record_only()
@ResourceGroupPreparer(random_name_length=24)
def test_webapp_triggeredWebjob_list(self, resource_group):
        # testing this using a webjob that was already created
        # since there is no create command, in order to re-record please create a webjob before
        # recording this. Once the create command is available, please remove the "record_only" flag
resource_group_name = 'cliTestApp'
webapp_name = 'cliTestApp'
webjob_name = 'test-triggered'
# list test
self.cmd('webapp webjob triggered list -g {} -n {}'
.format(resource_group_name, webapp_name)).assert_with_checks([
JMESPathCheck('length(@)', 1),
JMESPathCheck(
'[0].name', '{}/{}'.format(webapp_name, webjob_name)),
JMESPathCheck('[0].type', 'Microsoft.Web/sites/triggeredwebjobs')])
class WebappContinuousWebJobE2ETest(ScenarioTest):
@ResourceGroupPreparer(random_name_length=24)
@record_only()
def test_webapp_continuousWebjob_e2e(self, resource_group):
        # testing this using a webjob that was already created
        # since there is no create command, in order to re-record please create a webjob before
        # recording this. Once the create command is available, please remove the "record_only" flag
resource_group_name = 'cliTestApp'
webapp_name = 'cliTestApp'
webjob_name = 'test-continuous'
# list test
self.cmd('webapp webjob continuous list -g {} -n {}'
.format(resource_group_name, webapp_name)).assert_with_checks([
JMESPathCheck('length(@)', 1),
JMESPathCheck(
'[0].name', '{}/{}'.format(webapp_name, webjob_name)),
JMESPathCheck('[0].type', 'Microsoft.Web/sites/continuouswebjobs')])
# start
self.cmd('webapp webjob continuous start -g {} -n {} -w {}'
.format(resource_group_name, webapp_name, webjob_name)).assert_with_checks([
JMESPathCheck('status', 'Running')])
# stop
self.cmd('webapp webjob continuous stop -g {} -n {} -w {}'
.format(resource_group_name, webapp_name, webjob_name)).assert_with_checks([
JMESPathCheck('status', 'Disabling')])
class WebappWindowsContainerBasicE2ETest(ScenarioTest):
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='webapp_hyperv_e2e', location='westus2')
def test_webapp_hyperv_e2e(self, resource_group):
webapp_name = self.create_random_name(
prefix='webapp-hyperv-e2e', length=24)
plan = self.create_random_name(prefix='webapp-hyperv-plan', length=24)
self.cmd('appservice plan create -g {} -n {} --hyper-v --sku P1V3'.format(resource_group, plan))
self.cmd('appservice plan list -g {}'.format(resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', plan),
JMESPathCheck('[0].sku.tier', 'PremiumV3'),
JMESPathCheck('[0].sku.name', 'P1v3')
])
self.cmd('appservice plan list -g {}'.format(resource_group), checks=[
JMESPathCheck("length([?name=='{}' && resourceGroup=='{}'])".format(
plan, resource_group), 1)
])
self.cmd('appservice plan show -g {} -n {}'.format(resource_group, plan), checks=[
JMESPathCheck('name', plan)
])
self.cmd('webapp create -g {} -n {} --plan {} --deployment-container-image-name "DOCKER|microsoft/iis:nanoserver-sac2016"'.format(resource_group, webapp_name, plan), checks=[
JMESPathCheck('state', 'Running'),
JMESPathCheck('name', webapp_name),
JMESPathCheck('hostNames[0]', webapp_name + '.azurewebsites.net')
])
self.cmd('webapp list -g {}'.format(resource_group), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', webapp_name),
JMESPathCheck('[0].hostNames[0]', webapp_name +
'.azurewebsites.net')
])
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('windowsFxVersion',
"DOCKER|microsoft/iis:nanoserver-sac2016"),
JMESPathCheck('linuxFxVersion', "")
])
self.cmd('webapp config set -g {} -n {} --windows-fx-version "DOCKER|microsoft/iis"'.format(
resource_group, webapp_name))
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('windowsFxVersion', "DOCKER|microsoft/iis"),
JMESPathCheck('linuxFxVersion', "")
])
    # Always On is not supported on all SKUs; this tests that create does not fail while trying to enable AlwaysOn
@ResourceGroupPreparer(name_prefix='cli_test_webapp_alwaysOn', location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_create_noAlwaysOn(self, resource_group):
webapp_name = self.create_random_name('webapp-create-alwaysOn-e2e', 44)
plan = self.create_random_name('plan-create-alwaysOn-e2e', 44)
self.cmd(
'appservice plan create -g {} -n {} --sku SHARED'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
# verify alwaysOn
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck('alwaysOn', False)])
@ResourceGroupPreparer(name_prefix='cli_test_webapp_linux_free', location=LINUX_ASP_LOCATION_WEBAPP)
def test_webapp_create_linux_free(self, resource_group):
webapp_name = self.create_random_name('webapp-linux-free', 24)
plan = self.create_random_name('plan-linux-free', 24)
self.cmd('appservice plan create -g {} -n {} --sku F1 --is-linux'.format(resource_group, plan), checks=[
            # the 'reserved' field set to true indicates a Linux plan
JMESPathCheck('reserved', True),
JMESPathCheck('sku.name', 'F1')])
self.cmd('webapp create -g {} -n {} --plan {} -u {} -r "node|10.14"'.format(resource_group, webapp_name, plan,
TEST_REPO_URL))
# verify alwaysOn
self.cmd('webapp config show -g {} -n {}'.format(resource_group, webapp_name)).assert_with_checks([
JMESPathCheck('alwaysOn', False)])
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='rg', random_name_length=6)
def test_webapp_create_with_msi(self, resource_group):
scope = '/subscriptions/{}/resourcegroups/{}'.format(
self.get_subscription_id(), resource_group)
role = 'Reader'
webapp_name = self.create_random_name('webapp-with-msi', 26)
plan = self.create_random_name('plan-create-with-msi', 26)
identity_name = self.create_random_name('app-create', 16)
msi_result = self.cmd('identity create -g {} -n {}'.format(
resource_group, identity_name)).get_output_in_json()
self.cmd('appservice plan create -g {} -n {}'.format(resource_group, plan))
with mock.patch('azure.cli.core.commands.arm._gen_guid', side_effect=self.create_guid):
result = self.cmd('webapp create -g {} -n {} --plan {} --assign-identity [system] {} --role {} --scope {}'.format(
resource_group, webapp_name, plan, msi_result['id'], role, scope)).get_output_in_json()
self.cmd('webapp identity show -g {} -n {}'.format(resource_group, webapp_name), checks=[
self.check('principalId', result['identity']['principalId']),
self.check('userAssignedIdentities."{}".clientId'.format(msi_result['id']), msi_result['clientId']),
])
self.cmd('role assignment list -g {} --assignee {}'.format(resource_group, result['identity']['principalId']), checks=[
self.check('length([])', 1),
self.check('[0].roleDefinitionName', role)
])
class WebappNetworkConnectionTests(ScenarioTest):
@AllowLargeResponse()
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_hybridconnectionE2E(self, resource_group):
webapp_name = self.create_random_name('hcwebapp', 24)
plan = self.create_random_name('hcplan', 24)
namespace_name = self.create_random_name('hcnamespace', 24)
hyco_name = self.create_random_name('hcname', 24)
um = "[{{\\\"key\\\":\\\"endpoint\\\",\\\"value\\\":\\\"vmsq1:80\\\"}}]"
self.cmd(
'appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
self.cmd(
'relay namespace create -g {} --name {}'.format(resource_group, namespace_name))
self.cmd('relay hyco create -g {} --namespace-name {} --name {} --user-metadata {}'.format(
resource_group, namespace_name, hyco_name, um))
self.cmd('webapp hybrid-connection add -g {} -n {} --namespace {} --hybrid-connection {}'.format(
resource_group, webapp_name, namespace_name, hyco_name))
self.cmd('webapp hybrid-connection list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', hyco_name)
])
self.cmd('webapp hybrid-connection remove -g {} -n {} --namespace {} --hybrid-connection {}'.format(
resource_group, webapp_name, namespace_name, hyco_name))
self.cmd('webapp hybrid-connection list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
@AllowLargeResponse()
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_vnetE2E(self, resource_group):
webapp_name = self.create_random_name('swiftwebapp', 24)
plan = self.create_random_name('swiftplan', 24)
subnet_name = self.create_random_name('swiftsubnet', 24)
vnet_name = self.create_random_name('swiftname', 24)
self.cmd('network vnet create -g {} -n {} --address-prefix 10.0.0.0/16 --subnet-name {} --subnet-prefix 10.0.0.0/24'.format(
resource_group, vnet_name, subnet_name))
self.cmd(
'appservice plan create -g {} -n {} --sku P1V2'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
self.cmd('webapp vnet-integration add -g {} -n {} --vnet {} --subnet {}'.format(
resource_group, webapp_name, vnet_name, subnet_name))
self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', subnet_name)
])
self.cmd(
'webapp vnet-integration remove -g {} -n {}'.format(resource_group, webapp_name))
self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
@AllowLargeResponse()
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_vnetDelegation(self, resource_group):
webapp_name = self.create_random_name('swiftwebapp', 24)
plan = self.create_random_name('swiftplan', 24)
subnet_name = self.create_random_name('swiftsubnet', 24)
vnet_name = self.create_random_name('swiftname', 24)
self.cmd('network vnet create -g {} -n {} --address-prefix 10.0.0.0/16 --subnet-name {} --subnet-prefix 10.0.0.0/24'.format(
resource_group, vnet_name, subnet_name))
self.cmd('network vnet subnet update -g {} --vnet {} --name {} --delegations Microsoft.Web/serverfarms --service-endpoints Microsoft.Storage'.format(
resource_group, vnet_name, subnet_name))
self.cmd(
'appservice plan create -g {} -n {} --sku P1V2'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
self.cmd('webapp vnet-integration add -g {} -n {} --vnet {} --subnet {}'.format(
resource_group, webapp_name, vnet_name, subnet_name))
self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', subnet_name)
])
self.cmd(' network vnet subnet show -g {} -n {} --vnet-name {}'.format(resource_group, subnet_name, vnet_name), checks=[
JMESPathCheck('serviceEndpoints[0].service', "Microsoft.Storage")
])
self.cmd(
'webapp vnet-integration remove -g {} -n {}'.format(resource_group, webapp_name))
self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
@AllowLargeResponse()
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_vnetSameName(self, resource_group):
resource_group_2 = self.create_random_name('swiftwebapp', 24)
webapp_name = self.create_random_name('swiftwebapp', 24)
plan = self.create_random_name('swiftplan', 24)
subnet_name = self.create_random_name('swiftsubnet', 24)
subnet_name_2 = self.create_random_name('swiftsubnet', 24)
vnet_name = self.create_random_name('swiftname', 24)
self.cmd('network vnet create -g {} -n {} --address-prefix 10.0.0.0/16 --subnet-name {} --subnet-prefix 10.0.0.0/24'.format(
resource_group, vnet_name, subnet_name))
self.cmd('group create -n {} -l {}'.format(resource_group_2, WINDOWS_ASP_LOCATION_WEBAPP))
vnet = self.cmd('network vnet create -g {} -n {} --address-prefix 10.0.0.0/16 --subnet-name {} --subnet-prefix 10.0.0.0/24'.format(
resource_group_2, vnet_name, subnet_name_2)).get_output_in_json()
self.cmd(
'appservice plan create -g {} -n {} --sku P1V2'.format(resource_group, plan))
self.cmd(
'webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan))
        # Add vnet integration where there are two vnets of the same name. The chosen vnet should default to the one in the same RG
self.cmd('webapp vnet-integration add -g {} -n {} --vnet {} --subnet {}'.format(
resource_group, webapp_name, vnet_name, subnet_name))
self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', subnet_name)
])
self.cmd(
'webapp vnet-integration remove -g {} -n {}'.format(resource_group, webapp_name))
self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
# Add vnet integration using vnet resource ID
self.cmd('webapp vnet-integration add -g {} -n {} --vnet {} --subnet {}'.format(
resource_group, webapp_name, vnet['newVNet']['id'], subnet_name_2))
self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].name', subnet_name_2)
])
# self.cmd(
# 'webapp vnet-integration remove -g {} -n {}'.format(resource_group, webapp_name))
# self.cmd('webapp vnet-integration list -g {} -n {}'.format(resource_group, webapp_name), checks=[
# JMESPathCheck('length(@)', 0)
# ])
# LiveScenarioTest due to issue https://github.com/Azure/azure-cli/issues/10705
class FunctionappDeploymentLogsScenarioTest(LiveScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_show_deployment_logs(self, resource_group, storage_account):
functionapp_name = self.create_random_name(prefix='show-deployment-functionapp', length=40)
plan_name = self.create_random_name(prefix='show-deployment-functionapp', length=40)
zip_file = os.path.join(TEST_DIR, 'sample_dotnet_function/sample_dotnet_function.zip')
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime dotnet'.format(resource_group, functionapp_name, plan_name, storage_account))
self.cmd('functionapp log deployment show -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
requests.get('http://{}.scm.azurewebsites.net'.format(functionapp_name), timeout=240)
time.sleep(30)
deployment_1 = self.cmd('functionapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, functionapp_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('complete', True)
]).get_output_in_json()
self.cmd('functionapp log deployment show -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('length(@) > `0`', True)
])
self.cmd('functionapp log deployment show -g {} -n {} --deployment-id={}'.format(resource_group, functionapp_name, deployment_1['id']), checks=[
JMESPathCheck('length(@) > `0`', True)
])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_list_deployment_logs(self, resource_group, storage_account):
functionapp_name = self.create_random_name(prefix='show-deployment-funcapp', length=40)
plan_name = self.create_random_name(prefix='show-deployment-funcapp', length=40)
zip_file = os.path.join(TEST_DIR, 'sample_dotnet_function/sample_dotnet_function.zip')
self.cmd('functionapp plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('functionapp create -g {} -n {} --plan {} -s {} --runtime dotnet'.format(resource_group, functionapp_name, plan_name, storage_account))
self.cmd('functionapp log deployment list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
requests.get('http://{}.scm.azurewebsites.net'.format(functionapp_name), timeout=240)
time.sleep(30)
deployment_1 = self.cmd('functionapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, functionapp_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('complete', True)
]).get_output_in_json()
self.cmd('functionapp log deployment list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].id', deployment_1['id']),
])
requests.get('http://{}.scm.azurewebsites.net'.format(functionapp_name), timeout=240)
time.sleep(30)
self.cmd('functionapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, functionapp_name, zip_file)).assert_with_checks([
JMESPathCheck('status', 4),
JMESPathCheck('deployer', 'ZipDeploy'),
JMESPathCheck('complete', True)
]).get_output_in_json()
self.cmd('functionapp log deployment list -g {} -n {}'.format(resource_group, functionapp_name), checks=[
JMESPathCheck('length(@)', 2)
])
class WebappDeploymentLogsScenarioTest(ScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_show_deployment_logs(self, resource_group):
webapp_name = self.create_random_name('show-deployment-webapp', 40)
plan_name = self.create_random_name('show-deployment-plan', 40)
zip_file = os.path.join(TEST_DIR, 'test.zip')
self.cmd('appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
self.cmd('webapp log deployment show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
deployment_1 = self.cmd('webapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, webapp_name, zip_file)).get_output_in_json()
self.cmd('webapp log deployment show -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@) > `0`', True),
])
self.cmd('webapp log deployment show -g {} -n {} --deployment-id={}'.format(resource_group, webapp_name, deployment_1['id']), checks=[
JMESPathCheck('length(@) > `0`', True),
])
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_list_deployment_logs(self, resource_group):
webapp_name = self.create_random_name('list-deployment-webapp', 40)
plan_name = self.create_random_name('list-deployment-plan', 40)
zip_file = os.path.join(TEST_DIR, 'test.zip')
self.cmd('appservice plan create -g {} -n {} --sku S1'.format(resource_group, plan_name))
self.cmd('webapp create -g {} -n {} --plan {}'.format(resource_group, webapp_name, plan_name))
self.cmd('webapp log deployment list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 0)
])
deployment_1 = self.cmd('webapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, webapp_name, zip_file)).get_output_in_json()
self.cmd('webapp log deployment list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 1),
JMESPathCheck('[0].id', deployment_1['id']),
])
self.cmd('webapp deployment source config-zip -g {} -n {} --src "{}"'.format(resource_group, webapp_name, zip_file)).get_output_in_json()
self.cmd('webapp log deployment list -g {} -n {}'.format(resource_group, webapp_name), checks=[
JMESPathCheck('length(@)', 2)
])
class WebappLocalContextScenarioTest(LocalContextScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_WEBAPP)
def test_webapp_local_context(self, resource_group):
from knack.util import CLIError
self.kwargs.update({
'plan_name': self.create_random_name(prefix='webapp-plan-', length=24),
'webapp_name': self.create_random_name(prefix='webapp-', length=24)
})
self.cmd('appservice plan create -g {rg} -n {plan_name}')
self.cmd('appservice plan show')
with self.assertRaises(CLIError):
self.cmd('appservice plan delete')
self.cmd('webapp create -n {webapp_name}')
self.cmd('webapp show')
with self.assertRaises(CLIError):
self.cmd('webapp delete')
self.cmd('webapp delete -n {webapp_name}')
self.cmd('appservice plan delete -n {plan_name} -y')
class FunctionappLocalContextScenarioTest(LocalContextScenarioTest):
@ResourceGroupPreparer(location=WINDOWS_ASP_LOCATION_FUNCTIONAPP)
@StorageAccountPreparer()
def test_functionapp_local_context(self, resource_group, storage_account):
from knack.util import CLIError
self.kwargs.update({
'plan_name': self.create_random_name(prefix='functionapp-plan-', length=24),
'functionapp_name': self.create_random_name(prefix='functionapp-', length=24),
'storage_account': storage_account
})
self.cmd('functionapp plan create -g {rg} -n {plan_name} --sku B2')
self.cmd('functionapp plan show')
with self.assertRaises(CLIError):
self.cmd('functionapp plan delete')
self.cmd('functionapp create -n {functionapp_name} --storage-account {storage_account}')
self.cmd('functionapp show')
with self.assertRaises(CLIError):
self.cmd('functionapp delete')
self.cmd('functionapp delete -n {functionapp_name}')
self.cmd('functionapp plan delete -n {plan_name} -y')
if __name__ == '__main__':
unittest.main()
| 57.639553
| 239
| 0.628223
|
02fc56f292604959ff994c67f2d8d154db9bc21c
| 1,217
|
py
|
Python
|
flask_blog_api/public/forms.py
|
dannydabbles/flask_blog_api
|
a2eda67d8ea1b35a3a1263ec288565b6254af34c
|
[
"MIT"
] | 1
|
2020-03-04T16:02:47.000Z
|
2020-03-04T16:02:47.000Z
|
flask_blog_api/public/forms.py
|
dannydabbles/flask_blog_api
|
a2eda67d8ea1b35a3a1263ec288565b6254af34c
|
[
"MIT"
] | null | null | null |
flask_blog_api/public/forms.py
|
dannydabbles/flask_blog_api
|
a2eda67d8ea1b35a3a1263ec288565b6254af34c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Public forms."""
from flask_wtf import FlaskForm
from wtforms import PasswordField, StringField
from wtforms.validators import DataRequired
from flask_blog_api.user.models import User
class LoginForm(FlaskForm):
"""Login form."""
username = StringField("Username", validators=[DataRequired()])
password = PasswordField("Password", validators=[DataRequired()])
def __init__(self, *args, **kwargs):
"""Create instance."""
super(LoginForm, self).__init__(*args, **kwargs)
self.user = None
def validate(self):
"""Validate the form."""
initial_validation = super(LoginForm, self).validate()
if not initial_validation:
return False
self.user = User.query.filter_by(username=self.username.data).first()
if not self.user:
self.username.errors.append("Unknown username")
return False
if not self.user.check_password(self.password.data):
self.password.errors.append("Invalid password")
return False
if not self.user.active:
self.username.errors.append("User not activated")
return False
return True
| 30.425
| 77
| 0.645029
|
c17e589c7bbba1061575d4168defde78e2c7a042
| 7,077
|
py
|
Python
|
Question_model/answers/vgg19_chainer.py
|
KuKuXia/DeepLearningMugenKnock
|
979cf05e65e352da36453337380a418a2a2fdccb
|
[
"MIT"
] | null | null | null |
Question_model/answers/vgg19_chainer.py
|
KuKuXia/DeepLearningMugenKnock
|
979cf05e65e352da36453337380a418a2a2fdccb
|
[
"MIT"
] | null | null | null |
Question_model/answers/vgg19_chainer.py
|
KuKuXia/DeepLearningMugenKnock
|
979cf05e65e352da36453337380a418a2a2fdccb
|
[
"MIT"
] | null | null | null |
import chainer
import chainer.links as L
import chainer.functions as F
import argparse
import cv2
import numpy as np
from glob import glob
num_classes = 2
img_height, img_width = 224, 224
GPU = -1
class Mynet(chainer.Chain):
def __init__(self, train=True):
self.train = train
super(Mynet, self).__init__()
with self.init_scope():
self.conv1_1 = L.Convolution2D(None, 64, ksize=3, pad=1, stride=1, nobias=False)
self.conv1_2 = L.Convolution2D(None, 64, ksize=3, pad=1, stride=1, nobias=False)
self.conv2_1 = L.Convolution2D(None, 128, ksize=3, pad=1, stride=1, nobias=False)
self.conv2_2 = L.Convolution2D(None, 128, ksize=3, pad=1, stride=1, nobias=False)
self.conv3_1 = L.Convolution2D(None, 256, ksize=3, pad=1, stride=1, nobias=False)
self.conv3_2 = L.Convolution2D(None, 256, ksize=3, pad=1, stride=1, nobias=False)
self.conv3_3 = L.Convolution2D(None, 256, ksize=3, pad=1, stride=1, nobias=False)
self.conv3_4 = L.Convolution2D(None, 256, ksize=3, pad=1, stride=1, nobias=False)
self.conv4_1 = L.Convolution2D(None, 512, ksize=3, pad=1, stride=1, nobias=False)
self.conv4_2 = L.Convolution2D(None, 512, ksize=3, pad=1, stride=1, nobias=False)
self.conv4_3 = L.Convolution2D(None, 512, ksize=3, pad=1, stride=1, nobias=False)
self.conv4_4 = L.Convolution2D(None, 512, ksize=3, pad=1, stride=1, nobias=False)
self.conv5_1 = L.Convolution2D(None, 512, ksize=3, pad=1, stride=1, nobias=False)
self.conv5_2 = L.Convolution2D(None, 512, ksize=3, pad=1, stride=1, nobias=False)
self.conv5_3 = L.Convolution2D(None, 512, ksize=3, pad=1, stride=1, nobias=False)
self.conv5_4 = L.Convolution2D(None, 512, ksize=3, pad=1, stride=1, nobias=False)
self.fc1 = L.Linear(None, 4096, nobias=False)
self.fc2 = L.Linear(None, 4096, nobias=False)
self.fc_out = L.Linear(None, num_classes, nobias=False)
def __call__(self, x):
x = F.relu(self.conv1_1(x))
x = F.relu(self.conv1_2(x))
x = F.max_pooling_2d(x, ksize=2, stride=2)
x = F.relu(self.conv2_1(x))
x = F.relu(self.conv2_2(x))
x = F.max_pooling_2d(x, ksize=2, stride=2)
x = F.relu(self.conv3_1(x))
x = F.relu(self.conv3_2(x))
x = F.relu(self.conv3_3(x))
x = F.relu(self.conv3_4(x))
x = F.max_pooling_2d(x, ksize=2, stride=2)
x = F.relu(self.conv4_1(x))
x = F.relu(self.conv4_2(x))
x = F.relu(self.conv4_3(x))
x = F.relu(self.conv4_4(x))
x = F.max_pooling_2d(x, ksize=2, stride=2)
x = F.relu(self.conv5_1(x))
x = F.relu(self.conv5_2(x))
x = F.relu(self.conv5_3(x))
x = F.relu(self.conv5_4(x))
x = F.max_pooling_2d(x, ksize=2, stride=2)
x = F.relu(self.fc1(x))
x = F.dropout(x)
x = F.relu(self.fc2(x))
x = F.dropout(x)
x = self.fc_out(x)
return x
CLS = ['akahara', 'madara']
# get train data
def data_load(path, hf=False, vf=False):
xs = []
ts = []
paths = []
for dir_path in glob(path + '/*'):
for path in glob(dir_path + '/*'):
x = cv2.imread(path)
x = cv2.resize(x, (img_width, img_height)).astype(np.float32)
x /= 255.
xs.append(x)
for i, cls in enumerate(CLS):
if cls in path:
t = i
ts.append(t)
paths.append(path)
if hf:
xs.append(x[:, ::-1])
ts.append(t)
paths.append(path)
if vf:
xs.append(x[::-1])
ts.append(t)
paths.append(path)
if hf and vf:
xs.append(x[::-1, ::-1])
ts.append(t)
paths.append(path)
xs = np.array(xs, dtype=np.float32)
ts = np.array(ts, dtype=np.int)
xs = xs.transpose(0,3,1,2)
return xs, ts, paths
# train
def train():
# model
model = Mynet(train=True)
if GPU >= 0:
chainer.cuda.get_device(GPU).use()
model.to_gpu()
opt = chainer.optimizers.MomentumSGD(0.01, momentum=0.9)
opt.setup(model)
opt.add_hook(chainer.optimizer.WeightDecay(0.0005))
xs, ts, _ = data_load('../Dataset/train/images/', hf=True, vf=True)
# training
mb = 8
mbi = 0
train_ind = np.arange(len(xs))
np.random.seed(0)
np.random.shuffle(train_ind)
for i in range(500):
if mbi + mb > len(xs):
mb_ind = train_ind[mbi:]
np.random.shuffle(train_ind)
mb_ind = np.hstack((mb_ind, train_ind[:(mb-(len(xs)-mbi))]))
mbi = mb - (len(xs) - mbi)
else:
mb_ind = train_ind[mbi: mbi+mb]
mbi += mb
x = xs[mb_ind]
t = ts[mb_ind]
if GPU >= 0:
x = chainer.cuda.to_gpu(x)
t = chainer.cuda.to_gpu(t)
#else:
# x = chainer.Variable(x)
# t = chainer.Variable(t)
y = model(x)
loss = F.softmax_cross_entropy(y, t)
accu = F.accuracy(y, t)
model.cleargrads()
loss.backward()
opt.update()
loss = loss.data
accu = accu.data
if GPU >= 0:
loss = chainer.cuda.to_cpu(loss)
accu = chainer.cuda.to_cpu(accu)
print("iter >>", i+1, ',loss >>', loss.item(), ',accuracy >>', accu)
chainer.serializers.save_npz('cnn.npz', model)
# test
def test():
model = Mynet(train=False)
if GPU >= 0:
        chainer.cuda.get_device_from_id(GPU).use()
model.to_gpu()
## Load pretrained parameters
chainer.serializers.load_npz('cnn.npz', model)
xs, ts, paths = data_load('../Dataset/test/images/')
for i in range(len(paths)):
x = xs[i]
t = ts[i]
path = paths[i]
x = np.expand_dims(x, axis=0)
if GPU >= 0:
x = chainer.cuda.to_gpu(x)
pred = model(x).data
pred = F.softmax(pred)
if GPU >= 0:
pred = chainer.cuda.to_cpu(pred)
pred = pred[0].data
print("in {}, predicted probabilities >> {}".format(path, pred))
def arg_parse():
    parser = argparse.ArgumentParser(description='CNN implemented with Chainer')
parser.add_argument('--train', dest='train', action='store_true')
parser.add_argument('--test', dest='test', action='store_true')
args = parser.parse_args()
return args
# main
if __name__ == '__main__':
args = arg_parse()
if args.train:
train()
if args.test:
test()
if not (args.train or args.test):
print("please select train or test flag")
print("train: python main.py --train")
print("test: python main.py --test")
print("both: python main.py --train --test")
| 30.769565
| 93
| 0.544157
|
de950c9ef18c999d8c40fac9c83b8e707cbae9dc
| 668
|
py
|
Python
|
trees/symmetric_tree/recursion.py
|
sweeneyngo/algo
|
9c63cef40b97d51e018fbbf537c8f59b811b2d19
|
[
"MIT"
] | null | null | null |
trees/symmetric_tree/recursion.py
|
sweeneyngo/algo
|
9c63cef40b97d51e018fbbf537c8f59b811b2d19
|
[
"MIT"
] | null | null | null |
trees/symmetric_tree/recursion.py
|
sweeneyngo/algo
|
9c63cef40b97d51e018fbbf537c8f59b811b2d19
|
[
"MIT"
] | null | null | null |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def isSymmetric(self, root: TreeNode) -> bool:
return self.checkMirror(root, root)
def checkMirror(self, root1: TreeNode, root2: TreeNode):
if not root1 and not root2:
return True
if not root1 or not root2:
return False
return (
(root1.val == root2.val)
and self.checkMirror(root1.left, root2.right)
and self.checkMirror(root1.right, root2.left)
)
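# --- Illustrative usage sketch (not part of the original file) ---
# The snippet above assumes the TreeNode class from the commented-out LeetCode
# stub is supplied by the judge; it is redefined here only so the example can
# run on its own.
if __name__ == "__main__":
    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right

    symmetric = TreeNode(1,
                         TreeNode(2, TreeNode(3), TreeNode(4)),
                         TreeNode(2, TreeNode(4), TreeNode(3)))
    asymmetric = TreeNode(1,
                          TreeNode(2, None, TreeNode(3)),
                          TreeNode(2, None, TreeNode(3)))
    print(Solution().isSymmetric(symmetric))   # expected: True
    print(Solution().isSymmetric(asymmetric))  # expected: False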
| 27.833333
| 60
| 0.585329
|
b5bad4b74e54e4fbe1dee121b6aff88cb68e4039
| 2,738
|
py
|
Python
|
tests/api_resources/abstract/test_api_resource.py
|
timvisher/stripe-python
|
ae953fd0aa531f5b500e5e86eee5859df95a255d
|
[
"MIT"
] | null | null | null |
tests/api_resources/abstract/test_api_resource.py
|
timvisher/stripe-python
|
ae953fd0aa531f5b500e5e86eee5859df95a255d
|
[
"MIT"
] | null | null | null |
tests/api_resources/abstract/test_api_resource.py
|
timvisher/stripe-python
|
ae953fd0aa531f5b500e5e86eee5859df95a255d
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import pytest
import stripe
class TestAPIResource(object):
class MyResource(stripe.api_resources.abstract.APIResource):
OBJECT_NAME = 'myresource'
def test_retrieve_and_refresh(self, request_mock):
url = '/v1/myresources/foo%2A'
request_mock.stub_request(
'get',
url,
{
'id': 'foo2',
'bobble': 'scrobble',
},
rheaders={'request-id': 'req_id'}
)
res = self.MyResource.retrieve('foo*', myparam=5)
request_mock.assert_requested(
'get',
url,
{
'myparam': 5,
},
None
)
assert res.bobble == 'scrobble'
assert res.id == 'foo2'
assert res.api_key == 'sk_test_123'
assert res.last_response is not None
assert res.last_response.request_id == 'req_id'
url = '/v1/myresources/foo2'
request_mock.stub_request(
'get',
url,
{
'frobble': 5,
}
)
res = res.refresh()
request_mock.assert_requested(
'get',
url,
{
'myparam': 5,
},
None
)
assert res.frobble == 5
with pytest.raises(KeyError):
res['bobble']
def test_convert_to_stripe_object(self):
sample = {
'foo': 'bar',
'adict': {
'object': 'charge',
'id': 42,
'amount': 7,
},
'alist': [
{
'object': 'customer',
'name': 'chilango'
}
]
}
converted = stripe.util.convert_to_stripe_object(
sample, 'akey', None, None)
# Types
assert isinstance(converted, stripe.stripe_object.StripeObject)
assert isinstance(converted.adict, stripe.Charge)
assert len(converted.alist) == 1
assert isinstance(converted.alist[0], stripe.Customer)
# Values
assert converted.foo == 'bar'
assert converted.adict.id == 42
assert converted.alist[0].name == 'chilango'
# Stripping
# TODO: We should probably be stripping out this property
# self.assertRaises(AttributeError, getattr, converted.adict, 'object')
def test_raise_on_incorrect_id_type(self):
for obj in [None, 1, 3.14, dict(), list(), set(), tuple(), object()]:
with pytest.raises(stripe.error.InvalidRequestError):
self.MyResource.retrieve(obj)
| 26.843137
| 79
| 0.505844
|
37476b032f552d5fd4f43e57d4f5ab303374c7c4
| 2,180
|
py
|
Python
|
je_auto_control/windows/record/win32_record.py
|
JE-Chen/Python_JEAutoControl
|
477bf9612e28e9ab6d0a8e269db2f699e50a3744
|
[
"MIT"
] | 9
|
2020-10-12T06:33:36.000Z
|
2021-09-13T07:07:36.000Z
|
je_auto_control/windows/record/win32_record.py
|
JE-Chen/Python_JEAutoControl
|
477bf9612e28e9ab6d0a8e269db2f699e50a3744
|
[
"MIT"
] | 2
|
2021-11-19T13:45:37.000Z
|
2021-12-03T12:25:28.000Z
|
je_auto_control/windows/record/win32_record.py
|
JE-Chen/Python_JEAutoControl
|
477bf9612e28e9ab6d0a8e269db2f699e50a3744
|
[
"MIT"
] | null | null | null |
import sys
from je_auto_control.utils.exception.exception_tag import windows_import_error
from je_auto_control.utils.exception.exceptions import AutoControlException
if sys.platform not in ["win32", "cygwin", "msys"]:
raise AutoControlException(windows_import_error)
from je_auto_control.windows.listener.win32_keyboard_listener import Win32KeyboardListener
from je_auto_control.windows.listener.win32_mouse_listener import Win32MouseListener
from queue import Queue
class Win32Recorder(object):
def __init__(self):
self.mouse_record_listener = None
self.keyboard_record_listener = None
self.record_queue = None
self.result_queue = None
def record(self):
self.mouse_record_listener = Win32MouseListener()
self.keyboard_record_listener = Win32KeyboardListener()
self.record_queue = Queue()
self.mouse_record_listener.record(self.record_queue)
self.keyboard_record_listener.record(self.record_queue)
def stop_record(self):
self.result_queue = self.mouse_record_listener.stop_record()
self.result_queue = self.keyboard_record_listener.stop_record()
self.record_queue = None
return self.result_queue
def record_mouse(self):
self.mouse_record_listener = Win32MouseListener()
self.record_queue = Queue()
self.mouse_record_listener.record(self.record_queue)
def stop_record_mouse(self):
self.result_queue = self.mouse_record_listener.stop_record()
self.record_queue = None
return self.result_queue
def record_keyboard(self):
self.keyboard_record_listener = Win32KeyboardListener()
self.record_queue = Queue()
        self.keyboard_record_listener.record(self.record_queue)
def stop_record_keyboard(self):
self.result_queue = self.keyboard_record_listener.stop_record()
self.record_queue = None
return self.result_queue
win32_recorder = Win32Recorder()
if __name__ == "__main__":
win32_recorder = Win32Recorder()
win32_recorder.record()
from time import sleep
sleep(10)
for i in win32_recorder.stop_record().queue:
print(i)
| 32.537313
| 90
| 0.738532
|
d6474dc035ca8af63f2e1b7bbbbaae9a48656db7
| 99,712
|
py
|
Python
|
Dive-into-DL-paddlepaddle/docs/d2l/paddle.py
|
skywalk163/awesome-DeepLearning
|
277d796fc5e7d9b31431160c5652a8319885f908
|
[
"Apache-2.0"
] | 1
|
2022-03-14T09:03:15.000Z
|
2022-03-14T09:03:15.000Z
|
Dive-into-DL-paddlepaddle/docs/d2l/paddle.py
|
jameszhang-236/awesome-DeepLearning
|
422128eab5cff9c41b618280cbc1c0e064f45874
|
[
"Apache-2.0"
] | null | null | null |
Dive-into-DL-paddlepaddle/docs/d2l/paddle.py
|
jameszhang-236/awesome-DeepLearning
|
422128eab5cff9c41b618280cbc1c0e064f45874
|
[
"Apache-2.0"
] | null | null | null |
################# WARNING ################
# The below part is generated automatically through:
# d2lbook build lib
# Don't edit it directly
import collections
import hashlib
import math
import os
import random
import re
import shutil
import sys
import tarfile
import time
import zipfile
from collections import defaultdict
import pandas as pd
import requests
from IPython import display
from matplotlib import pyplot as plt
d2l = sys.modules[__name__]
import numpy as np
import paddle
from PIL import Image
from paddle import nn
from paddle.nn import functional as F
from paddle.vision import transforms, image_load
from paddle.io import Dataset, DataLoader
"""2.4"""
def use_svg_display():
"""ไฝฟ็จsvgๆ ผๅผๅจJupyterไธญๆพ็คบ็ปๅพ
Defined in :numref:`sec_calculus`"""
display.set_matplotlib_formats('svg')
def set_figsize(figsize=(3.5, 2.5)):
"""่ฎพ็ฝฎmatplotlib็ๅพ่กจๅคงๅฐ
Defined in :numref:`sec_calculus`"""
use_svg_display()
d2l.plt.rcParams['figure.figsize'] = figsize
def set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend):
"""่ฎพ็ฝฎmatplotlib็่ฝด
Defined in :numref:`sec_calculus`"""
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
axes.set_xscale(xscale)
axes.set_yscale(yscale)
axes.set_xlim(xlim)
axes.set_ylim(ylim)
if legend:
axes.legend(legend)
axes.grid()
def plot(X, Y=None, xlabel=None, ylabel=None, legend=None, xlim=None,
ylim=None, xscale='linear', yscale='linear',
fmts=('-', 'm--', 'g-.', 'r:'), figsize=(3.5, 2.5), axes=None):
"""็ปๅถๆฐๆฎ็น
Defined in :numref:`sec_calculus`"""
if legend is None:
legend = []
set_figsize(figsize)
axes = axes if axes else d2l.plt.gca()
    # Return True if X has only one axis
def has_one_axis(X):
return (hasattr(X, "ndim") and X.ndim == 1 or isinstance(X, list)
and not hasattr(X[0], "__len__"))
if has_one_axis(X):
X = [X]
if Y is None:
X, Y = [[]] * len(X), X
elif has_one_axis(Y):
Y = [Y]
if len(X) != len(Y):
X = X * len(Y)
axes.cla()
for x, y, fmt in zip(X, Y, fmts):
if len(x):
axes.plot(x, y, fmt)
else:
axes.plot(y, fmt)
    set_axes(axes, xlabel, ylabel, xlim, ylim, xscale, yscale, legend)

# Alias defined in config.ini
"""3.1"""
class Timer: #@save
"""่ฎฐๅฝๅคๆฌก่ฟ่กๆถ้ด"""
def __init__(self):
self.times = []
self.start()
def start(self):
"""ๅฏๅจ่ฎกๆถๅจ"""
self.tik = time.time()
def stop(self):
"""ๅๆญข่ฎกๆถๅจๅนถๅฐๆถ้ด่ฎฐๅฝๅจๅ่กจไธญ"""
self.times.append(time.time() - self.tik)
return self.times[-1]
def avg(self):
"""่ฟๅๅนณๅๆถ้ด"""
return sum(self.times) / len(self.times)
def sum(self):
"""่ฟๅๆถ้ดๆปๅ"""
return sum(self.times)
def cumsum(self):
"""่ฟๅ็ดฏ่ฎกๆถ้ด"""
return np.array(self.times).cumsum().tolist()
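# --- Illustrative usage sketch (not part of the original d2l source) ---
# A quick, hypothetical check of the Timer class above: time a small numpy
# matrix product a few times and report the average; the workload is arbitrary.
timer_demo = Timer()
for _ in range(3):
    timer_demo.start()
    np.dot(np.random.rand(100, 100), np.random.rand(100, 100))
    timer_demo.stop()
print(f'avg: {timer_demo.avg():.6f} sec, total: {timer_demo.sum():.6f} sec')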
"""3.2"""
def synthetic_data(w, b, num_examples): #@save
"""็ๆy=Xw+b+ๅชๅฃฐ"""
X = paddle.normal(0, 1, (num_examples, len(w)))
y = paddle.matmul(X, w) + b
y += paddle.normal(0, 0.01, y.shape)
return X, y.reshape((-1, 1))
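# --- Illustrative usage sketch (not part of the original d2l source) ---
# Generate a tiny synthetic linear-regression dataset with the helper above.
# The "true" weight vector and bias below are arbitrary example values.
true_w_demo = paddle.to_tensor([2.0, -3.4])
true_b_demo = 4.2
features_demo, labels_demo = synthetic_data(true_w_demo, true_b_demo, num_examples=10)
# features_demo has shape [10, 2]; labels_demo has shape [10, 1]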
def linreg(X, w, b): #@save
"""็บฟๆงๅๅฝๆจกๅ"""
return paddle.matmul(X, w) + b
def squared_loss(y_hat, y):
"""ๅๆนๆๅคฑใ"""
return (y_hat - y.reshape(y_hat.shape))**2 / 2
def sgd(params, lr, batch_size): #@save
"""ๅฐๆน้้ๆบๆขฏๅบฆไธ้"""
a=[]
with paddle.no_grad():
        for param in params:
            param -= lr * param.grad / batch_size
            param.stop_gradient = False
            a.append(param)
return a
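# --- Illustrative usage sketch (not part of the original d2l source) ---
# One hypothetical gradient step with the custom `sgd` updater above, reusing
# `synthetic_data`, `linreg` and `squared_loss`; the learning rate and batch
# size are placeholder values only.
w_demo = paddle.normal(0., 0.01, shape=[2, 1])
w_demo.stop_gradient = False
b_demo = paddle.zeros(shape=[1])
b_demo.stop_gradient = False
X_demo, y_demo = synthetic_data(paddle.to_tensor([2.0, -3.4]), 4.2, num_examples=8)
loss_demo = squared_loss(linreg(X_demo, w_demo, b_demo), y_demo)
loss_demo.sum().backward()
w_demo, b_demo = sgd([w_demo, b_demo], lr=0.03, batch_size=8)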
"""3.3"""
def load_array(data_arrays, batch_size, is_train=True):
"""ๆ้ ไธไธชPaddleๆฐๆฎ่ฟญไปฃๅจใ"""
dataset = paddle.io.TensorDataset(data_arrays)
return paddle.io.DataLoader(dataset, batch_size=batch_size, shuffle=is_train)
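# A minimal end-to-end sketch of the linear-regression helpers above
# (hyperparameter values are assumptions, not prescribed by the original code):
# true_w, true_b = paddle.to_tensor([2.0, -3.4]), 4.2
# features, labels = synthetic_data(true_w, true_b, 1000)
# data_iter = load_array((features, labels), batch_size=10)
# X, y = next(iter(data_iter))   # X: [10, 2], y: [10, 1]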
"""3.5"""
def get_fashion_mnist_labels(labels): #@save
"""่ฟๅFashion-MNISTๆฐๆฎ้็ๆๆฌๆ ็ญพ"""
text_labels = ['t-shirt', 'trouser', 'pullover', 'dress', 'coat',
'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
return [text_labels[int(i)] for i in labels]
def show_images(imgs, num_rows, num_cols, titles=None, scale=1.5):
"""Plot a list of images."""
figsize = (num_cols * scale, num_rows * scale)
_, axes = plt.subplots(num_rows, num_cols, figsize=figsize)
axes = axes.flatten()
for i, (ax, img) in enumerate(zip(axes, imgs)):
if paddle.is_tensor(img):
# ๅพ็ๅผ ้
ax.imshow(img.numpy())
else:
# PILๅพ็
ax.imshow(img)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
if titles:
ax.set_title(titles[i])
return axes
def get_dataloader_workers():
"""ไฝฟ็จ4ไธช่ฟ็จๆฅ่ฏปๅๆฐๆฎใ"""
return 4
def load_data_fashion_mnist(batch_size, resize=None): #@save
"""ไธ่ฝฝFashion-MNISTๆฐๆฎ้๏ผ็ถๅๅฐๅ
ถๅ ่ฝฝๅฐๅ
ๅญไธญ"""
trans = [transforms.ToTensor()]
if resize:
trans.insert(0, transforms.Resize(resize))
trans = transforms.Compose(trans)
mnist_train = paddle.vision.datasets.FashionMNIST(mode="train", transform=trans)
mnist_test = paddle.vision.datasets.FashionMNIST(mode="test", transform=trans)
return (paddle.io.DataLoader(dataset=mnist_train,
batch_size=batch_size,
shuffle=True,
num_workers=get_dataloader_workers()),
paddle.io.DataLoader(dataset=mnist_test,
batch_size=batch_size,
shuffle=True,
num_workers=get_dataloader_workers()))
"""3.6"""
def accuracy(y_hat, y): #@save
"""่ฎก็ฎ้ขๆตๆญฃ็กฎ็ๆฐ้"""
if len(y_hat.shape) > 1 and y_hat.shape[1] > 1:
y_hat = y_hat.argmax(axis=1)
"""
ไธบไบ้ฒๆญขๅบ็ฐy_hat.shape=[batch_size]่y.shape=[batch_size,1]็้ฎ้ขๅฏผ่ดๅคๆญ็ธ็ญ้่ฏฏ
"""
if len(y_hat.shape) < len(y.shape):
cmp = y_hat.astype(y.dtype) == y.squeeze()
else:
cmp = y_hat.astype(y.dtype) == y
return float(cmp.astype(y.dtype).sum())
def evaluate_accuracy(net, data_iter): #@save
"""่ฎก็ฎๅจๆๅฎๆฐๆฎ้ไธๆจกๅ็็ฒพๅบฆ"""
if isinstance(net, paddle.nn.Layer):
net.eval() # ๅฐๆจกๅ่ฎพ็ฝฎไธบ่ฏไผฐๆจกๅผ
metric = Accumulator(2) # ๆญฃ็กฎ้ขๆตๆฐใ้ขๆตๆปๆฐ
with paddle.no_grad():
for X, y in data_iter:
metric.add(accuracy(net(X), y), y.numel())
return metric[0] / metric[1]
class Accumulator: #@save
"""ๅจnไธชๅ้ไธ็ดฏๅ """
def __init__(self, n):
self.data = [0.0] * n
def add(self, *args):
self.data = [a + float(b) for a, b in zip(self.data, args)]
def reset(self):
self.data = [0.0] * len(self.data)
def __getitem__(self, idx):
return self.data[idx]
def train_epoch_ch3(net, train_iter, loss, updater): # @save
"""่ฎญ็ปๆจกๅไธไธช่ฟญไปฃๅจๆ๏ผๅฎไน่ง็ฌฌ3็ซ ๏ผ"""
# ๅฐๆจกๅ่ฎพ็ฝฎไธบ่ฎญ็ปๆจกๅผ
if isinstance(net, paddle.nn.Layer):
net.train()
# ่ฎญ็ปๆๅคฑๆปๅใ่ฎญ็ปๅ็กฎๅบฆๆปๅใๆ ทๆฌๆฐ
metric = Accumulator(3)
for X, y in train_iter():
# ่ฎก็ฎๆขฏๅบฆๅนถๆดๆฐๅๆฐ
y_hat = net(X)
l = loss(y_hat, y)
if isinstance(updater, paddle.optimizer.Optimizer):
# Use Paddle's built-in optimizer and loss function
updater.clear_grad()
l.mean().backward()
updater.step()
else:
# ไฝฟ็จๅฎๅถ็ไผๅๅจๅๆๅคฑๅฝๆฐ
l.sum().backward()
updater(X.shape[0])
metric.add(float(l.sum()), accuracy(y_hat, y), y.numel())
return metric[0] / metric[2], metric[1] / metric[2]
class Animator: #@save
"""ๅจๅจ็ปไธญ็ปๅถๆฐๆฎ"""
def __init__(self, xlabel=None, ylabel=None, legend=None, xlim=None,
ylim=None, xscale='linear', yscale='linear',
fmts=('-', 'm--', 'g-.', 'r:'), nrows=1, ncols=1,
figsize=(3.5, 2.5)):
# ๅข้ๅฐ็ปๅถๅคๆก็บฟ
if legend is None:
legend = []
d2l.use_svg_display()
self.fig, self.axes = d2l.plt.subplots(nrows, ncols, figsize=figsize)
if nrows * ncols == 1:
self.axes = [self.axes, ]
# ไฝฟ็จlambdaๅฝๆฐๆ่ทๅๆฐ
self.config_axes = lambda: d2l.set_axes(
self.axes[0], xlabel, ylabel, xlim, ylim, xscale, yscale, legend)
self.X, self.Y, self.fmts = None, None, fmts
def add(self, x, y):
# ๅๅพ่กจไธญๆทปๅ ๅคไธชๆฐๆฎ็น
if not hasattr(y, "__len__"):
y = [y]
n = len(y)
if not hasattr(x, "__len__"):
x = [x] * n
if not self.X:
self.X = [[] for _ in range(n)]
if not self.Y:
self.Y = [[] for _ in range(n)]
for i, (a, b) in enumerate(zip(x, y)):
if a is not None and b is not None:
self.X[i].append(a)
self.Y[i].append(b)
self.axes[0].cla()
for x, y, fmt in zip(self.X, self.Y, self.fmts):
self.axes[0].plot(x, y, fmt)
self.config_axes()
display.display(self.fig)
display.clear_output(wait=True)
def train_ch3(net, train_iter, test_iter, loss, num_epochs, updater): #@save
"""่ฎญ็ปๆจกๅ๏ผๅฎไน่ง็ฌฌ3็ซ ๏ผ"""
animator = Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0.3, 0.9],
legend=['train loss', 'train acc', 'test acc'])
for epoch in range(num_epochs):
train_metrics = train_epoch_ch3(net, train_iter, loss, updater)
test_acc = evaluate_accuracy(net, test_iter)
animator.add(epoch + 1, train_metrics + (test_acc,))
train_loss, train_acc = train_metrics
assert train_loss < 0.5, train_loss
assert train_acc <= 1 and train_acc > 0.7, train_acc
assert test_acc <= 1 and test_acc > 0.7, test_acc
def predict_ch3(net, test_iter, n=6): #@save
"""้ขๆตๆ ็ญพ๏ผๅฎไน่ง็ฌฌ3็ซ ๏ผ"""
for X, y in test_iter:
break
trues = d2l.get_fashion_mnist_labels(y)
preds = d2l.get_fashion_mnist_labels(net(X).argmax(axis=1))
titles = [true +'\n' + pred for true, pred in zip(trues, preds)]
d2l.show_images(
X[0:n].reshape((n, 28, 28)), 1, n, titles=titles[0:n])
"""4.4"""
def evaluate_loss(net, data_iter, loss): #@save
"""่ฏไผฐ็ปๅฎๆฐๆฎ้ไธๆจกๅ็ๆๅคฑใ"""
metric = d2l.Accumulator(2) # ๆๅคฑ็ๆปๅ, ๆ ทๆฌๆฐ้
for X, y in data_iter:
out = net(X)
y = y.reshape(out.shape)
l = loss(out, y)
metric.add(l.sum(), l.numel())
return metric[0] / metric[1]
"""4.10"""
DATA_HUB = dict()
DATA_URL = 'http://d2l-data.s3-accelerate.amazonaws.com/'
def download(name, cache_dir=os.path.join('./', 'data')): #@save
"""ไธ่ฝฝไธไธชDATA_HUBไธญ็ๆไปถ๏ผ่ฟๅๆฌๅฐๆไปถๅ"""
assert name in DATA_HUB, f"{name} ไธๅญๅจไบ {DATA_HUB}"
url, sha1_hash = DATA_HUB[name]
os.makedirs(cache_dir, exist_ok=True)
fname = os.path.join(cache_dir, url.split('/')[-1])
if os.path.exists(fname):
sha1 = hashlib.sha1()
with open(fname, 'rb') as f:
while True:
data = f.read(1048576)
if not data:
break
sha1.update(data)
if sha1.hexdigest() == sha1_hash:
return fname # ๅฝไธญ็ผๅญ
print(f'ๆญฃๅจไป{url}ไธ่ฝฝ{fname}...')
r = requests.get(url, stream=True, verify=True)
with open(fname, 'wb') as f:
f.write(r.content)
return fname
def download_extract(name, folder=None): #@save
"""ไธ่ฝฝๅนถ่งฃๅzip/tarๆไปถ"""
fname = download(name)
base_dir = os.path.dirname(fname)
data_dir, ext = os.path.splitext(fname)
if ext == '.zip':
fp = zipfile.ZipFile(fname, 'r')
elif ext in ('.tar', '.gz'):
fp = tarfile.open(fname, 'r')
else:
assert False, 'ๅชๆzip/tarๆไปถๅฏไปฅ่ขซ่งฃๅ็ผฉ'
fp.extractall(base_dir)
return os.path.join(base_dir, folder) if folder else data_dir
def download_all(): #@save
"""ไธ่ฝฝDATA_HUBไธญ็ๆๆๆไปถ"""
for name in DATA_HUB:
download(name)
DATA_HUB['kaggle_house_train'] = ( #@save
DATA_URL + 'kaggle_house_pred_train.csv',
'585e9cc93e70b39160e7921475f9bcd7d31219ce')
DATA_HUB['kaggle_house_test'] = ( #@save
DATA_URL + 'kaggle_house_pred_test.csv',
'fa19780a7b011d9b009e8bff8e99922a8ee2eb90')
"""5.6"""
# Modified the 5.6 helpers (2022-01-21): for now this may only handle GPUs numbered consecutively, e.g. gpu:0, gpu:1, gpu:2, ...
def try_gpu(i=0): #@save
"""ๅฆๆๅญๅจ๏ผๅ่ฟๅgpu(i)๏ผๅฆๅ่ฟๅcpu()ใ"""
if paddle.device.cuda.device_count() >= i + 1:
return paddle.CUDAPlace(i)
return paddle.CPUPlace()
def try_all_gpus(): #@save
"""่ฟๅๆๆๅฏ็จ็GPU๏ผๅฆๆๆฒกๆGPU๏ผๅ่ฟๅ[cpu(),]ใ"""
devices = [paddle.CUDAPlace(i)
for i in range(paddle.device.cuda.device_count())
]
return devices if devices else [paddle.CPUPlace()]
"""6.2"""
def corr2d(X, K):
"""่ฎก็ฎไบ็ปดไบ็ธๅ
ณ่ฟ็ฎใ"""
h, w = K.shape
Y = paddle.zeros((X.shape[0] - h + 1, X.shape[1] - w + 1))
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
Y[i, j] = (X[i:i + h, j:j + w] * K).sum()
return Y
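# Worked example (the classic hand-checkable case; values are assumptions):
# X = paddle.to_tensor([[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]])
# K = paddle.to_tensor([[0., 1.], [2., 3.]])
# corr2d(X, K)   # [[19., 25.], [37., 43.]]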
"""6.6"""
def evaluate_accuracy_gpu(net, data_iter, device=None): #@save
"""ไฝฟ็จGPU่ฎก็ฎๆจกๅๅจๆฐๆฎ้ไธ็็ฒพๅบฆ
Defined in :numref:`sec_lenet`"""
if isinstance(net, nn.Layer):
net.eval() # ่ฎพ็ฝฎไธบ่ฏไผฐๆจกๅผ
if not device:
device = next(iter(net.parameters())).place
# ๆญฃ็กฎ้ขๆต็ๆฐ้๏ผๆป้ขๆต็ๆฐ้
metric = d2l.Accumulator(2)
with paddle.no_grad():
for X, y in data_iter:
if isinstance(X, list):
# BERTๅพฎ่ฐๆ้็๏ผไนๅๅฐไป็ป๏ผ
X = [paddle.to_tensor(x, place=device) for x in X]
else:
X = paddle.to_tensor(X, place=device)
y = paddle.to_tensor(y, place=device)
metric.add(d2l.accuracy(net(X), y), d2l.size(y))
return metric[0] / metric[1]
def train_ch6(net, train_iter, test_iter, batch_size, optimi, num_epochs):
loss = nn.CrossEntropyLoss()
batch_count = 0
for epoch in range(num_epochs):
train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
for idx, (X, y) in enumerate(train_iter):
y_hat = net(X)
l = loss(y_hat, y)
optimi.clear_grad()
l.backward()
optimi.step()
train_l_sum += l.numpy()[0]
train_acc_sum += (y_hat.argmax(axis=1) == y.flatten()).astype('float32').sum().numpy()[0]
n += y.shape[0]
batch_count += 1
test_acc = evaluate_accuracy(net, test_iter)
print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'
% (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))
"""7.6"""
class Residual(nn.Layer):
def __init__(self, input_channels, num_channels, use_1x1conv=False,
strides=1):
super(Residual, self).__init__()
self.conv1 = nn.Conv2D(input_channels, num_channels, kernel_size=3,
padding=1, stride=strides)
self.conv2 = nn.Conv2D(num_channels, num_channels, kernel_size=3,
padding=1)
if use_1x1conv:
self.conv3 = nn.Conv2D(input_channels, num_channels,
kernel_size=1, stride=strides)
else:
self.conv3 = None
self.bn1 = nn.BatchNorm2D(num_channels)
self.bn2 = nn.BatchNorm2D(num_channels)
self.relu = nn.ReLU()
def forward(self, X):
Y = F.relu(self.bn1(self.conv1(X)))
Y = self.bn2(self.conv2(Y))
if self.conv3:
X = self.conv3(X)
Y += X
return F.relu(Y)
"""8.2"""
d2l.DATA_HUB['time_machine'] = (d2l.DATA_URL + 'timemachine.txt',
'090b5e7e70c295757f55df93cb0a180b9691891a')
def read_time_machine():
"""ๅฐๆถ้ดๆบๅจๆฐๆฎ้ๅ ่ฝฝๅฐๆๆฌ่ก็ๅ่กจไธญ
Defined in :numref:`sec_text_preprocessing`"""
with open(d2l.download('time_machine'), 'r') as f:
lines = f.readlines()
return [re.sub('[^A-Za-z]+', ' ', line).strip().lower() for line in lines]
def tokenize(lines, token='word'):
"""ๅฐๆๆฌ่กๆๅไธบๅ่ฏๆๅญ็ฌฆ่ฏๅ
Defined in :numref:`sec_text_preprocessing`"""
if token == 'word':
return [line.split() for line in lines]
elif token == 'char':
return [list(line) for line in lines]
else:
print('ERROR: unknown token type: ' + token)
class Vocab:
"""ๆๆฌ่ฏ่กจ"""
def __init__(self, tokens=None, min_freq=0, reserved_tokens=None):
"""Defined in :numref:`sec_text_preprocessing`"""
if tokens is None:
tokens = []
if reserved_tokens is None:
reserved_tokens = []
# ๆๅบ็ฐ้ข็ๆๅบ
counter = count_corpus(tokens)
self._token_freqs = sorted(counter.items(), key=lambda x: x[1],
reverse=True)
# The index of the unknown token is 0
self.idx_to_token = ['<unk>'] + reserved_tokens
self.token_to_idx = {token: idx
for idx, token in enumerate(self.idx_to_token)}
for token, freq in self._token_freqs:
if freq < min_freq:
break
if token not in self.token_to_idx:
self.idx_to_token.append(token)
self.token_to_idx[token] = len(self.idx_to_token) - 1
def __len__(self):
return len(self.idx_to_token)
def __getitem__(self, tokens):
if not isinstance(tokens, (list, tuple)):
return self.token_to_idx.get(tokens, self.unk)
return [self.__getitem__(token) for token in tokens]
def to_tokens(self, indices):
if not isinstance(indices, (list, tuple)):
return self.idx_to_token[indices]
return [self.idx_to_token[index] for index in indices]
@property
def unk(self):  # The index of the unknown token is 0
return 0
@property
def token_freqs(self):
return self._token_freqs
def count_corpus(tokens):
"""็ป่ฎก่ฏๅ
็้ข็
Defined in :numref:`sec_text_preprocessing`"""
# ่ฟ้็`tokens`ๆฏ1Dๅ่กจๆ2Dๅ่กจ
if len(tokens) == 0 or isinstance(tokens[0], list):
# Flatten a list of token lists into a single list of tokens
tokens = [token for line in tokens for token in line]
return collections.Counter(tokens)
def load_corpus_time_machine(max_tokens=-1):
"""่ฟๅๆถๅ
ๆบๅจๆฐๆฎ้็่ฏๅ
็ดขๅผๅ่กจๅ่ฏ่กจ
Defined in :numref:`sec_text_preprocessing`"""
lines = read_time_machine()
tokens = tokenize(lines, 'char')
vocab = Vocab(tokens)
# Since each text line in the time machine dataset is not necessarily a
# sentence or a paragraph, flatten all text lines into a single list
corpus = [vocab[token] for line in tokens for token in line]
if max_tokens > 0:
corpus = corpus[:max_tokens]
return corpus, vocab
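# Illustrative usage of the text-preprocessing helpers above (downloads the
# dataset on first call; the vocabulary size is what the book reports and is
# not re-verified here):
# corpus, vocab = load_corpus_time_machine()
# len(vocab)                   # 28 character-level tokens for this dataset
# vocab.to_tokens(corpus[:5])  # first few characters of the text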
"""8.3"""
def seq_data_iter_random(corpus, batch_size, num_steps):
"""ไฝฟ็จ้ๆบๆฝๆ ท็ๆไธไธชๅฐๆน้ๅญๅบๅ
Defined in :numref:`sec_language_model`"""
# Partition the sequence starting from a random offset; the random range includes num_steps - 1
corpus = corpus[random.randint(0, num_steps - 1):]
# ๅๅป1๏ผๆฏๅ ไธบๆไปฌ้่ฆ่่ๆ ็ญพ
num_subseqs = (len(corpus) - 1) // num_steps
# ้ฟๅบฆไธบ`num_steps`็ๅญๅบๅ็่ตทๅง็ดขๅผ
initial_indices = list(range(0, num_subseqs * num_steps, num_steps))
# ๅจ้ๆบๆฝๆ ท็่ฟญไปฃ่ฟ็จไธญ๏ผ
# ๆฅ่ชไธคไธช็ธ้ป็ใ้ๆบ็ใๅฐๆน้ไธญ็ๅญๅบๅไธไธๅฎๅจๅๅงๅบๅไธ็ธ้ป
random.shuffle(initial_indices)
def data(pos):
# ่ฟๅไป`pos`ไฝ็ฝฎๅผๅง็้ฟๅบฆไธบ`num_steps`็ๅบๅ
return corpus[pos: pos + num_steps]
num_batches = num_subseqs // batch_size
for i in range(0, batch_size * num_batches, batch_size):
# Here, initial_indices contains the random starting indices of the subsequences
initial_indices_per_batch = initial_indices[i: i + batch_size]
X = [data(j) for j in initial_indices_per_batch]
Y = [data(j + 1) for j in initial_indices_per_batch]
yield d2l.tensor(X), d2l.tensor(Y)
def seq_data_iter_sequential(corpus, batch_size, num_steps):
"""ไฝฟ็จ้กบๅบๅๅบ็ๆไธไธชๅฐๆน้ๅญๅบๅ
Defined in :numref:`sec_language_model`"""
# ไป้ๆบๅ็งป้ๅผๅงๅๅๅบๅ
offset = random.randint(0, num_steps)
num_tokens = ((len(corpus) - offset - 1) // batch_size) * batch_size
Xs = d2l.tensor(corpus[offset: offset + num_tokens])
Ys = d2l.tensor(corpus[offset + 1: offset + 1 + num_tokens])
Xs, Ys = Xs.reshape((batch_size, -1)), Ys.reshape((batch_size, -1))
num_batches = Xs.shape[1] // num_steps
for i in range(0, num_steps * num_batches, num_steps):
X = Xs[:, i: i + num_steps]
Y = Ys[:, i: i + num_steps]
yield X, Y
class SeqDataLoader:
"""ๅ ่ฝฝๅบๅๆฐๆฎ็่ฟญไปฃๅจ"""
def __init__(self, batch_size, num_steps, use_random_iter, max_tokens):
"""Defined in :numref:`sec_language_model`"""
if use_random_iter:
self.data_iter_fn = d2l.seq_data_iter_random
else:
self.data_iter_fn = d2l.seq_data_iter_sequential
self.corpus, self.vocab = d2l.load_corpus_time_machine(max_tokens)
self.batch_size, self.num_steps = batch_size, num_steps
def __iter__(self):
return self.data_iter_fn(self.corpus, self.batch_size, self.num_steps)
def load_data_time_machine(batch_size, num_steps,
use_random_iter=False, max_tokens=10000):
"""่ฟๅๆถๅ
ๆบๅจๆฐๆฎ้็่ฟญไปฃๅจๅ่ฏ่กจ
Defined in :numref:`sec_language_model`"""
data_iter = SeqDataLoader(
batch_size, num_steps, use_random_iter, max_tokens)
return data_iter, data_iter.vocab
"""8.5"""
class RNNModelScratch: #@save
"""ไป้ถๅผๅงๅฎ็ฐ็ๅพช็ฏ็ฅ็ป็ฝ็ปๆจกๅ"""
def __init__(self, vocab_size, num_hiddens, device,
get_params, init_state, forward_fn):
self.vocab_size, self.num_hiddens = vocab_size, num_hiddens
self.params = get_params(vocab_size, num_hiddens)
self.init_state, self.forward_fn = init_state, forward_fn
def __call__(self, X, state):
X = F.one_hot(X.T, self.vocab_size)
return self.forward_fn(X, state, self.params)
def begin_state(self, batch_size):
return self.init_state(batch_size, self.num_hiddens)
def grad_clipping(net, theta):#@save
"""่ฃๅชๆขฏๅบฆ
Defined in :numref:`sec_rnn_scratch`"""
if isinstance(net, nn.Layer):
params = [p for p in net.parameters() if not p.stop_gradient]
else:
params = net.params
norm = paddle.sqrt(sum(paddle.sum((p.grad ** 2)) for p in params))
if norm > theta:
for param in params:
param.grad.set_value(param.grad * theta / norm)
def predict_ch8(prefix, num_preds, net, vocab, device): #@save
"""ๅจprefixๅ้ข็ๆๆฐๅญ็ฌฆ"""
state = net.begin_state(batch_size=1)
outputs = [vocab[prefix[0]]]
get_input = lambda: d2l.reshape(d2l.tensor(outputs[-1], place=device), (1, 1))
for y in prefix[1:]: # ้ข็ญๆ
_, state = net(get_input(), state)
outputs.append(vocab[y])
for _ in range(num_preds): # ้ขๆตnum_predsๆญฅ
y, state = net(get_input(), state)
outputs.append(int(paddle.reshape(paddle.argmax(y, axis=1), shape=[1])))
return ''.join([vocab.idx_to_token[i] for i in outputs])
#@save
def train_epoch_ch8(net, train_iter, loss, updater, device, use_random_iter):
"""่ฎญ็ป็ฝ็ปไธไธช่ฟญไปฃๅจๆ๏ผๅฎไน่ง็ฌฌ8็ซ ๏ผ
Defined in :numref:`sec_rnn_scratch`"""
state, timer = None, d2l.Timer()
metric = d2l.Accumulator(2)  # Sum of training loss, number of tokens
for X, Y in train_iter:
if state is None or use_random_iter:
# ๅจ็ฌฌไธๆฌก่ฟญไปฃๆไฝฟ็จ้ๆบๆฝๆ ทๆถๅๅงๅ`state`
state = net.begin_state(batch_size=X.shape[0])
else:
if isinstance(net, nn.Layer) and not isinstance(state, tuple):
# `state`ๅฏนไบ`nn.GRU`ๆฏไธชๅผ ้
state.stop_gradient=True
else:
# `state`ๅฏนไบ`nn.LSTM`ๆๅฏนไบๆไปฌไป้ถๅผๅงๅฎ็ฐ็ๆจกๅๆฏไธชๅผ ้
for s in state:
s.stop_gradient=True
y = paddle.reshape(Y.T, shape=[-1])
X = paddle.to_tensor(X, place=device)
y = paddle.to_tensor(y, place=device)
y_hat, state = net(X, state)
l = loss(y_hat, y).mean()
if isinstance(updater, paddle.optimizer.Optimizer):
updater.clear_grad()
l.backward()
updater.step()
else:
l.backward()
grad_clipping(net, 1)
# ๅ ไธบๅทฒ็ป่ฐ็จไบ`mean`ๅฝๆฐ
net.params = updater(batch_size=1)
metric.add(l * d2l.size(y), d2l.size(y))
return math.exp(metric[0] / metric[1]), metric[1] / timer.stop()
#@save
def train_ch8(net, train_iter, vocab, lr, num_epochs, device, use_random_iter=False):
"""่ฎญ็ปๆจกๅ๏ผๅฎไน่ง็ฌฌ8็ซ ๏ผ"""
loss = nn.CrossEntropyLoss()
animator = d2l.Animator(xlabel='epoch', ylabel='perplexity',
legend=['train'], xlim=[10, num_epochs])
# ๅๅงๅ
if isinstance(net, nn.Layer):
clip = paddle.nn.ClipGradByNorm(clip_norm=1.0)
updater = paddle.optimizer.SGD(
learning_rate=lr, parameters=net.parameters(), grad_clip=clip)
else:
updater = lambda batch_size: d2l.sgd(net.params, lr, batch_size)
predict = lambda prefix: predict_ch8(prefix, 50, net, vocab, device)
# ่ฎญ็ปๅ้ขๆต
for epoch in range(num_epochs):
ppl, speed = train_epoch_ch8(
net, train_iter, loss, updater, device, use_random_iter)
if (epoch + 1) % 10 == 0:
print(predict('time traveller'))
animator.add(epoch + 1, [ppl])
print(f'perplexity {ppl:.1f}, {speed:.1f} tokens/sec on {str(device)}')
print(predict('time traveller'))
print(predict('traveller'))
"""8.6"""
class RNNModel(nn.Layer): #@save
"""ๅพช็ฏ็ฅ็ป็ฝ็ปๆจกๅ"""
def __init__(self, rnn_layer, vocab_size, **kwargs):
super(RNNModel, self).__init__(**kwargs)
self.rnn = rnn_layer
self.vocab_size = vocab_size
self.num_hiddens = self.rnn.hidden_size
# ๅฆๆRNNๆฏๅๅ็๏ผไนๅๅฐไป็ป๏ผ๏ผnum_directionsๅบ่ฏฅๆฏ2๏ผๅฆๅๅบ่ฏฅๆฏ1
if self.rnn.num_directions==1:
self.num_directions = 1
self.linear = nn.Linear(self.num_hiddens, self.vocab_size)
else:
self.num_directions = 2
self.linear = nn.Linear(self.num_hiddens * 2, self.vocab_size)
def forward(self, inputs, state):
X = F.one_hot(inputs.T, self.vocab_size) # paddle็ธๆฏtorch๏ผ่ฟ้ๆ ้ๅ็ฑปๅ่ฝฌๆข
Y, state = self.rnn(X, state)
# The fully connected layer first reshapes Y into
# (num_steps * batch_size, num_hiddens).
# Its output shape is (num_steps * batch_size, vocab_size).
output = self.linear(Y.reshape((-1, Y.shape[-1])))
return output, state
def begin_state(self, batch_size=1):
if not isinstance(self.rnn, nn.LSTM):
# nn.GRUไปฅๅผ ้ไฝไธบ้็ถๆ
return paddle.to_tensor(paddle.zeros(shape=[self.num_directions * self.rnn.num_layers,
batch_size, self.num_hiddens]))
else:
# nn.LSTM uses a tuple as the hidden state
return (paddle.to_tensor(paddle.zeros(
shape=[self.num_directions * self.rnn.num_layers,
batch_size, self.num_hiddens])),
paddle.to_tensor(paddle.zeros(
shape=[self.num_directions * self.rnn.num_layers,
batch_size, self.num_hiddens])))
"""9.5"""
d2l.DATA_HUB['fra-eng'] = (d2l.DATA_URL + 'fra-eng.zip',
'94646ad1522d915e7b0f9296181140edcf86a4f5')
def read_data_nmt():
"""่ฝฝๅ
ฅโ่ฑ่ฏญ๏ผๆณ่ฏญโๆฐๆฎ้"""
data_dir = d2l.download_extract('fra-eng')
with open(os.path.join(data_dir, 'fra.txt'), 'r',
encoding='utf-8') as f:
return f.read()
def preprocess_nmt(text):
"""้ขๅค็โ่ฑ่ฏญ๏ผๆณ่ฏญโๆฐๆฎ้"""
def no_space(char, prev_char):
return char in set(',.!?') and prev_char != ' '
# ไฝฟ็จ็ฉบๆ ผๆฟๆขไธ้ดๆญ็ฉบๆ ผ
# ไฝฟ็จๅฐๅๅญๆฏๆฟๆขๅคงๅๅญๆฏ
text = text.replace('\u202f', ' ').replace('\xa0', ' ').lower()
# Insert a space between words and punctuation marks
out = [' ' + char if i > 0 and no_space(char, text[i - 1]) else char
for i, char in enumerate(text)]
return ''.join(out)
def tokenize_nmt(text, num_examples=None):
"""่ฏๅ
ๅโ่ฑ่ฏญ๏ผๆณ่ฏญโๆฐๆฎๆฐๆฎ้"""
source, target = [], []
for i, line in enumerate(text.split('\n')):
if num_examples and i > num_examples:
break
parts = line.split('\t')
if len(parts) == 2:
source.append(parts[0].split(' '))
target.append(parts[1].split(' '))
return source, target
def truncate_pad(line, num_steps, padding_token):
"""ๆชๆญๆๅกซๅ
ๆๆฌๅบๅ"""
if len(line) > num_steps:
return line[:num_steps] # ๆชๆญ
return line + [padding_token] * (num_steps - len(line))  # Pad
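# Illustrative examples (values are assumptions):
# truncate_pad([1, 2, 3], num_steps=5, padding_token=0)           # [1, 2, 3, 0, 0]
# truncate_pad([1, 2, 3, 4, 5, 6], num_steps=5, padding_token=0)  # [1, 2, 3, 4, 5]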
def build_array_nmt(lines, vocab, num_steps):
"""ๅฐๆบๅจ็ฟป่ฏ็ๆๆฌๅบๅ่ฝฌๆขๆๅฐๆน้"""
lines = [vocab[l] for l in lines]
lines = [l + [vocab['<eos>']] for l in lines]
array = paddle.to_tensor([truncate_pad(
l, num_steps, vocab['<pad>']) for l in lines])
valid_len = (array != vocab['<pad>']).astype(paddle.int32).sum(1)
return array, valid_len
def load_data_nmt(batch_size, num_steps, num_examples=600):
"""่ฟๅ็ฟป่ฏๆฐๆฎ้็่ฟญไปฃๅจๅ่ฏ่กจ"""
text = preprocess_nmt(read_data_nmt())
source, target = tokenize_nmt(text, num_examples)
src_vocab = d2l.Vocab(source, min_freq=2,
reserved_tokens=['<pad>', '<bos>', '<eos>'])
tgt_vocab = d2l.Vocab(target, min_freq=2,
reserved_tokens=['<pad>', '<bos>', '<eos>'])
src_array, src_valid_len = build_array_nmt(source, src_vocab, num_steps)
tgt_array, tgt_valid_len = build_array_nmt(target, tgt_vocab, num_steps)
data_arrays = (src_array, src_valid_len, tgt_array, tgt_valid_len)
data_iter = d2l.load_array(data_arrays, batch_size)
return data_iter, src_vocab, tgt_vocab
"""9.6"""
class Encoder(nn.Layer):
"""็ผ็ ๅจ-่งฃ็ ๅจๆถๆ็ๅบๆฌ็ผ็ ๅจๆฅๅฃ"""
def __init__(self, **kwargs):
super(Encoder, self).__init__(**kwargs)
def forward(self, X, *args):
raise NotImplementedError
class Decoder(nn.Layer):
"""็ผ็ ๅจ-่งฃ็ ๅจๆถๆ็ๅบๆฌ่งฃ็ ๅจๆฅๅฃ"""
def __init__(self, **kwargs):
super(Decoder, self).__init__(**kwargs)
def init_state(self, enc_outputs, *args):
raise NotImplementedError
def forward(self, X, state):
raise NotImplementedError
class EncoderDecoder(nn.Layer):
"""็ผ็ ๅจ-่งฃ็ ๅจๆถๆ็ๅบ็ฑป"""
def __init__(self, encoder, decoder, **kwargs):
super(EncoderDecoder, self).__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
def forward(self, enc_X, dec_X, *args):
enc_outputs = self.encoder(enc_X, *args)
dec_state = self.decoder.init_state(enc_outputs, *args)
return self.decoder(dec_X, dec_state)
"""9.7"""
class Seq2SeqEncoder(d2l.Encoder):
"""็จไบๅบๅๅฐๅบๅๅญฆไน ็ๅพช็ฏ็ฅ็ป็ฝ็ป็ผ็ ๅจ"""
def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
dropout=0, **kwargs):
super(Seq2SeqEncoder, self).__init__(**kwargs)
weight_ih_attr = paddle.ParamAttr(initializer=nn.initializer.XavierUniform())
weight_hh_attr = paddle.ParamAttr(initializer=nn.initializer.XavierUniform())
# Embedding layer
self.embedding = nn.Embedding(vocab_size, embed_size)
self.rnn = nn.GRU(embed_size, num_hiddens, num_layers, dropout=dropout,
time_major=True, weight_ih_attr=weight_ih_attr, weight_hh_attr=weight_hh_attr)
def forward(self, X, *args):
# ่พๅบ'X'็ๅฝข็ถ๏ผ(batch_size,num_steps,embed_size)
X = self.embedding(X)
# ๅจๅพช็ฏ็ฅ็ป็ฝ็ปๆจกๅไธญ๏ผ็ฌฌไธไธช่ฝดๅฏนๅบไบๆถ้ดๆญฅ
X = X.transpose([1, 0, 2])
# ๅฆๆๆชๆๅ็ถๆ๏ผๅ้ป่ฎคไธบ0
output, state = self.rnn(X)
# PaddlePaddle็GRUๅฑoutput็ๅฝข็ถ:(batch_size,time_steps,num_directions * num_hiddens),
# ้่ฎพๅฎtime_major=True,ๆๅฎinput็็ฌฌไธไธช็ปดๅบฆไธบtime_steps
# state[0]็ๅฝข็ถ:(num_layers,batch_size,num_hiddens)
return output, state
def sequence_mask(X, valid_len, value=0):#@save
"""ๅจๅบๅไธญๅฑ่ฝไธ็ธๅ
ณ็้กน"""
maxlen = X.shape[1]
mask = paddle.arange((maxlen), dtype=paddle.float32)[None, :] < valid_len[:, None]
Xtype = X.dtype
X=X.astype(paddle.float32)
X[~mask] = float(value)
return X.astype(Xtype)
class MaskedSoftmaxCELoss(nn.CrossEntropyLoss):
"""ๅธฆ้ฎ่ฝ็softmaxไบคๅ็ตๆๅคฑๅฝๆฐ"""
# pred็ๅฝข็ถ๏ผ(batch_size,num_steps,vocab_size)
# label็ๅฝข็ถ๏ผ(batch_size,num_steps)
# valid_len็ๅฝข็ถ๏ผ(batch_size,)
def forward(self, pred, label, valid_len):
weights = paddle.ones_like(label)
weights = sequence_mask(weights, valid_len)
self.reduction='none'
unweighted_loss = super(MaskedSoftmaxCELoss, self).forward(
pred, label)
weighted_loss = (unweighted_loss * weights).mean(axis=1)
return weighted_loss
def train_seq2seq(net, data_iter, lr, num_epochs, tgt_vocab, device):
"""่ฎญ็ปๅบๅๅฐๅบๅๆจกๅ"""
optimizer = paddle.optimizer.Adam(learning_rate=lr, parameters=net.parameters())
loss = MaskedSoftmaxCELoss()
net.train()
animator = d2l.Animator(xlabel='epoch', ylabel='loss',
xlim=[10, num_epochs])
for epoch in range(num_epochs):
timer = d2l.Timer()
metric = d2l.Accumulator(2)  # Sum of training loss, number of tokens
for batch in data_iter:
optimizer.clear_grad()
X, X_valid_len, Y, Y_valid_len = [paddle.to_tensor(x, place=device) for x in batch]
bos = paddle.to_tensor([tgt_vocab['<bos>']] * Y.shape[0]).reshape([-1, 1])
dec_input = paddle.concat([bos, Y[:, :-1]], 1) # ๅผบๅถๆๅญฆ
Y_hat, _ = net(X, dec_input, X_valid_len)
l = loss(Y_hat, Y, Y_valid_len.squeeze())
l.backward() # ๆๅคฑๅฝๆฐ็ๆ ้่ฟ่กโๅๅไผ ๆญโ
d2l.grad_clipping(net, 1)
num_tokens = Y_valid_len.sum()
optimizer.step()
with paddle.no_grad():
metric.add(l.sum(), num_tokens)
if (epoch + 1) % 10 == 0:
animator.add(epoch + 1, (metric[0] / metric[1],))
print(f'loss {metric[0] / metric[1]:.3f}, {metric[1] / timer.stop():.1f} '
f'tokens/sec on {str(device)}')
def predict_seq2seq(net, src_sentence, src_vocab, tgt_vocab, num_steps,
device, save_attention_weights=False):
"""ๅบๅๅฐๅบๅๆจกๅ็้ขๆต"""
# ๅจ้ขๆตๆถๅฐnet่ฎพ็ฝฎไธบ่ฏไผฐๆจกๅผ
net.eval()
src_tokens = src_vocab[src_sentence.lower().split(' ')] + [
src_vocab['<eos>']]
enc_valid_len = paddle.to_tensor([len(src_tokens)], place=device)
src_tokens = d2l.truncate_pad(src_tokens, num_steps, src_vocab['<pad>'])
# ๆทปๅ ๆน้่ฝด
enc_X = paddle.unsqueeze(
paddle.to_tensor(src_tokens, dtype=paddle.int64, place=device), axis=0)
enc_outputs = net.encoder(enc_X, enc_valid_len)
dec_state = net.decoder.init_state(enc_outputs, enc_valid_len)
# ๆทปๅ ๆน้่ฝด
dec_X = paddle.unsqueeze(paddle.to_tensor(
[tgt_vocab['<bos>']], dtype=paddle.int64, place=device), axis=0)
output_seq, attention_weight_seq = [], []
for _ in range(num_steps):
Y, dec_state = net.decoder(dec_X, dec_state)
# Use the token with the highest prediction probability as the input
# of the decoder at the next time step
dec_X = Y.argmax(axis=2)
pred = dec_X.squeeze(axis=0).astype(paddle.int32).item()
# ไฟๅญๆณจๆๅๆ้๏ผ็จๅ่ฎจ่ฎบ๏ผ
if save_attention_weights:
attention_weight_seq.append(net.decoder.attention_weights)
# Once the end-of-sequence token is predicted, generation of the output sequence is complete
if pred == tgt_vocab['<eos>']:
break
output_seq.append(pred)
return ' '.join(tgt_vocab.to_tokens(output_seq)), attention_weight_seq
def bleu(pred_seq, label_seq, k): #@save
"""่ฎก็ฎBLEU"""
pred_tokens, label_tokens = pred_seq.split(' '), label_seq.split(' ')
len_pred, len_label = len(pred_tokens), len(label_tokens)
score = math.exp(min(0, 1 - len_label / len_pred))
for n in range(1, k + 1):
num_matches, label_subs = 0, collections.defaultdict(int)
for i in range(len_label - n + 1):
label_subs[' '.join(label_tokens[i: i + n])] += 1
for i in range(len_pred - n + 1):
if label_subs[' '.join(pred_tokens[i: i + n])] > 0:
num_matches += 1
label_subs[' '.join(pred_tokens[i: i + n])] -= 1
score *= math.pow(num_matches / (len_pred - n + 1), math.pow(0.5, n))
return score
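# Worked example (the sentences are assumptions): for
# bleu('he is a good man', 'he is a good man in town', k=2),
# len_pred=5 and len_label=7, so the brevity penalty is exp(1 - 7/5) ~= 0.67;
# every unigram and bigram of the prediction appears in the label, so the
# final score is also ~0.67.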
"""10.1"""
#@save
def show_heatmaps(matrices, xlabel, ylabel, titles=None, figsize=(2.5, 2.5),
cmap='Reds'):
"""ๆพ็คบ็ฉ้ต็ญๅพ"""
d2l.use_svg_display()
num_rows, num_cols = matrices.shape[0], matrices.shape[1]
fig, axes = d2l.plt.subplots(num_rows, num_cols, figsize=figsize,
sharex=True, sharey=True, squeeze=False)
for i, (row_axes, row_matrices) in enumerate(zip(axes, matrices)):
for j, (ax,matrix) in enumerate(zip(row_axes, row_matrices)):
pcm = ax.imshow(matrix.detach().numpy(), cmap=cmap)
if i == num_rows - 1:
ax.set_xlabel(xlabel)
if j == 0:
ax.set_ylabel(ylabel)
if titles:
ax.set_title(titles[j])
fig.colorbar(pcm, ax=axes, shrink=0.6);
"""10.3"""
#@save
def masked_softmax(X, valid_lens):
"""้่ฟๅจๆๅไธไธช่ฝดไธๆฉ่ฝๅ
็ด ๆฅๆง่กsoftmaxๆไฝ"""
# X:3Dๅผ ้๏ผvalid_lens:1Dๆ2Dๅผ ้
if valid_lens is None:
return nn.functional.softmax(X, axis=-1)
else:
shape = X.shape
if (valid_lens.dim() == 1) or (valid_lens.dim() == 2 and (valid_lens.shape)[1] == 1):
valid_lens = paddle.tile(valid_lens.reshape((valid_lens.shape[0], -1)), [shape[1]]).reshape((-1,))
else:
valid_lens = valid_lens.reshape((-1,))
# Replace masked elements on the last axis with a very large negative
# value, so that their softmax outputs become 0
X = d2l.sequence_mask(X.reshape((-1, shape[-1])), valid_lens,
value=-1e6)
return nn.functional.softmax(X.reshape(shape), axis=-1)
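# Illustrative call (shapes and lengths are assumptions): for a (2, 2, 4)
# score tensor with valid lengths [2, 3], only the first 2 (resp. 3) entries
# of each row receive non-zero softmax weight:
# masked_softmax(paddle.rand((2, 2, 4)), paddle.to_tensor([2, 3]))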
#@save
class AdditiveAttention(nn.Layer):
"""ๅ ๆงๆณจๆๅ"""
def __init__(self, key_size, query_size, num_hiddens, dropout, **kwargs):
super(AdditiveAttention, self).__init__(**kwargs)
self.W_k = nn.Linear(key_size, num_hiddens, bias_attr=False)
self.W_q = nn.Linear(query_size, num_hiddens, bias_attr=False)
self.w_v = nn.Linear(num_hiddens, 1, bias_attr=False)
self.dropout = nn.Dropout(dropout)
def forward(self, queries, keys, values, valid_lens):
queries, keys = self.W_q(queries), self.W_k(keys)
# ๅจ็ปดๅบฆๆฉๅฑๅ๏ผ
# queries็ๅฝข็ถ๏ผ(batch_size๏ผๆฅ่ฏข็ไธชๆฐ๏ผ1๏ผnum_hidden)
# key็ๅฝข็ถ๏ผ(batch_size๏ผ1๏ผโ้ฎ๏ผๅผโๅฏน็ไธชๆฐ๏ผnum_hiddens)
# ไฝฟ็จๅนฟๆญๆนๅผ่ฟ่กๆฑๅ
features = queries.unsqueeze(2) + keys.unsqueeze(1)
features = paddle.tanh(features)
# self.w_v has only one output, so remove that last dimension from the shape.
# scores็ๅฝข็ถ๏ผ(batch_size๏ผๆฅ่ฏข็ไธชๆฐ๏ผโ้ฎ-ๅผโๅฏน็ไธชๆฐ)
scores = self.w_v(features).squeeze(-1)
self.attention_weights = masked_softmax(scores, valid_lens)
# values็ๅฝข็ถ๏ผ(batch_size๏ผโ้ฎ๏ผๅผโๅฏน็ไธชๆฐ๏ผๅผ็็ปดๅบฆ)
return paddle.bmm(self.dropout(self.attention_weights), values)
#@save
class DotProductAttention(nn.Layer):
"""็ผฉๆพ็น็งฏๆณจๆๅ"""
def __init__(self, dropout, **kwargs):
super(DotProductAttention, self).__init__(**kwargs)
self.dropout = nn.Dropout(dropout)
# queries็ๅฝข็ถ๏ผ(batch_size๏ผๆฅ่ฏข็ไธชๆฐ๏ผd)
# keys็ๅฝข็ถ๏ผ(batch_size๏ผโ้ฎ๏ผๅผโๅฏน็ไธชๆฐ๏ผd)
# values็ๅฝข็ถ๏ผ(batch_size๏ผโ้ฎ๏ผๅผโๅฏน็ไธชๆฐ๏ผๅผ็็ปดๅบฆ)
# Shape of valid_lens: (batch_size,) or (batch_size, number of queries)
def forward(self, queries, keys, values, valid_lens=None):
d = queries.shape[-1]
# ่ฎพ็ฝฎtranspose_b=Trueไธบไบไบคๆขkeys็ๆๅไธคไธช็ปดๅบฆ
scores = paddle.bmm(queries, keys.transpose((0, 2, 1))) / math.sqrt(d)
self.attention_weights = masked_softmax(scores, valid_lens)
return paddle.bmm(self.dropout(self.attention_weights), values)
"""10.4"""
#@save
class AttentionDecoder(d2l.Decoder):
"""ๅธฆๆๆณจๆๅๆบๅถ่งฃ็ ๅจ็ๅบๆฌๆฅๅฃ"""
def __init__(self, **kwargs):
super(AttentionDecoder, self).__init__(**kwargs)
@property
def attention_weights(self):
raise NotImplementedError
"""10.5"""
#@save
class MultiHeadAttention(nn.Layer):
def __init__(self, key_size, query_size, value_size, num_hiddens,
num_heads, dropout, bias=False, **kwargs):
super(MultiHeadAttention, self).__init__(**kwargs)
self.num_heads = num_heads
self.attention = d2l.DotProductAttention(dropout)
self.W_q = nn.Linear(query_size, num_hiddens, bias_attr=bias)
self.W_k = nn.Linear(key_size, num_hiddens, bias_attr=bias)
self.W_v = nn.Linear(value_size, num_hiddens, bias_attr=bias)
self.W_o = nn.Linear(num_hiddens, num_hiddens, bias_attr=bias)
def forward(self, queries, keys, values, valid_lens):
# Shape of queries, keys, values:
# (batch_size, number of queries or key-value pairs, num_hiddens)
# Shape of valid_lens: (batch_size,) or (batch_size, number of queries)
# After transposing, the shape of the output queries, keys, values:
# (batch_size * num_heads, number of queries or key-value pairs,
# num_hiddens / num_heads)
queries = transpose_qkv(self.W_q(queries), self.num_heads)
keys = transpose_qkv(self.W_k(keys), self.num_heads)
values = transpose_qkv(self.W_v(values), self.num_heads)
if valid_lens is not None:
# On axis 0, copy the first item (scalar or vector) num_heads times,
# then copy the second item, and so on
valid_lens_np = valid_lens.numpy()
valid_lens_np = np.repeat(valid_lens_np, self.num_heads, axis=0)
valid_lens = paddle.to_tensor(valid_lens_np)
# output็ๅฝข็ถ:(batch_size*num_heads๏ผๆฅ่ฏข็ไธชๆฐ๏ผ
# num_hiddens/num_heads)
output = self.attention(queries, keys, values, valid_lens)
# output_concat็ๅฝข็ถ:(batch_size๏ผๆฅ่ฏข็ไธชๆฐ๏ผnum_hiddens)
output_concat = transpose_output(output, self.num_heads)
return self.W_o(output_concat)
#@save
def transpose_qkv(X, num_heads):
"""ไธบไบๅคๆณจๆๅๅคด็ๅนถ่ก่ฎก็ฎ่ๅๆขๅฝข็ถ"""
# Shape of input X:
# (batch_size, number of queries or key-value pairs, num_hiddens)
# Shape of output X:
# (batch_size, number of queries or key-value pairs, num_heads,
# num_hiddens / num_heads)
X = X.reshape((X.shape[0], X.shape[1], num_heads, -1))
# Shape of output X:
# (batch_size, num_heads, number of queries or key-value pairs,
# num_hiddens / num_heads)
X = X.transpose((0, 2, 1, 3))
# Final output shape:
# (batch_size * num_heads, number of queries or key-value pairs,
# num_hiddens / num_heads)
return X.reshape((-1, X.shape[2], X.shape[3]))
#@save
def transpose_output(X, num_heads):
"""้่ฝฌtranspose_qkvๅฝๆฐ็ๆไฝ"""
X = X.reshape((-1, num_heads, X.shape[1], X.shape[2]))
X = X.transpose((0, 2, 1, 3))
return X.reshape((X.shape[0], X.shape[1], -1))
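# Shape round trip (batch_size=2, 4 queries, num_hiddens=100, num_heads=5;
# all numbers are assumptions):
# X = paddle.ones((2, 4, 100))
# transpose_qkv(X, 5).shape                        # [10, 4, 20]
# transpose_output(transpose_qkv(X, 5), 5).shape   # [2, 4, 100]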
"""10.6"""
#@save
class PositionalEncoding(nn.Layer):
"""ไฝ็ฝฎ็ผ็ """
def __init__(self, num_hiddens, dropout, max_len=1000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(dropout)
# ๅๅปบไธไธช่ถณๅค้ฟ็P
self.P = paddle.zeros((1, max_len, num_hiddens))
X = paddle.arange(max_len, dtype=paddle.float32).reshape(
(-1, 1)) / paddle.pow(paddle.to_tensor([10000.0]), paddle.arange(
0, num_hiddens, 2, dtype=paddle.float32) / num_hiddens)
self.P[:, :, 0::2] = paddle.sin(X)
self.P[:, :, 1::2] = paddle.cos(X)
def forward(self, X):
X = X + self.P[:, :X.shape[1], :]
return self.dropout(X)
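# Illustrative usage (sizes are assumptions): positional encoding leaves the
# (batch, steps, hidden) shape unchanged:
# pe = PositionalEncoding(num_hiddens=32, dropout=0.0)
# pe.eval()
# pe(paddle.zeros((1, 60, 32))).shape   # [1, 60, 32]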
"""10.7"""
#@save
class PositionWiseFFN(nn.Layer):
"""ๅบไบไฝ็ฝฎ็ๅ้ฆ็ฝ็ป"""
def __init__(self, ffn_num_input, ffn_num_hiddens, ffn_num_outputs,
**kwargs):
super(PositionWiseFFN, self).__init__(**kwargs)
self.dense1 = nn.Linear(ffn_num_input, ffn_num_hiddens)
self.relu = nn.ReLU()
self.dense2 = nn.Linear(ffn_num_hiddens, ffn_num_outputs)
def forward(self, X):
return self.dense2(self.relu(self.dense1(X)))
#@save
class AddNorm(nn.Layer):
"""ๆฎๅทฎ่ฟๆฅๅ่ฟ่กๅฑ่ง่ๅ"""
def __init__(self, normalized_shape, dropout, **kwargs):
super(AddNorm, self).__init__(**kwargs)
self.dropout = nn.Dropout(dropout)
self.ln = nn.LayerNorm(normalized_shape)
def forward(self, X, Y):
return self.ln(self.dropout(Y) + X)
#@save
class EncoderBlock(nn.Layer):
"""transformer็ผ็ ๅจๅ"""
def __init__(self, key_size, query_size, value_size, num_hiddens,
norm_shape, ffn_num_input, ffn_num_hiddens, num_heads,
dropout, use_bias=False, **kwargs):
super(EncoderBlock, self).__init__(**kwargs)
self.attention = d2l.MultiHeadAttention(
key_size, query_size, value_size, num_hiddens, num_heads, dropout,
use_bias)
self.addnorm1 = AddNorm(norm_shape, dropout)
self.ffn = PositionWiseFFN(
ffn_num_input, ffn_num_hiddens, num_hiddens)
self.addnorm2 = AddNorm(norm_shape, dropout)
def forward(self, X, valid_lens):
Y = self.addnorm1(X, self.attention(X, X, X, valid_lens))
return self.addnorm2(Y, self.ffn(Y))
#@save
class TransformerEncoder(d2l.Encoder):
"""transformer็ผ็ ๅจ"""
def __init__(self, vocab_size, key_size, query_size, value_size,
num_hiddens, norm_shape, ffn_num_input, ffn_num_hiddens,
num_heads, num_layers, dropout, use_bias=False, **kwargs):
super(TransformerEncoder, self).__init__(**kwargs)
self.num_hiddens = num_hiddens
self.embedding = nn.Embedding(vocab_size, num_hiddens)
self.pos_encoding = d2l.PositionalEncoding(num_hiddens, dropout)
self.blks = nn.Sequential()
for i in range(num_layers):
self.blks.add_sublayer(str(i),
EncoderBlock(key_size, query_size, value_size, num_hiddens,
norm_shape, ffn_num_input, ffn_num_hiddens,
num_heads, dropout, use_bias))
def forward(self, X, valid_lens, *args):
# Since the positional encoding values are between -1 and 1, the embeddings
# are scaled by the square root of the embedding dimension before being
# added to the positional encoding.
X = self.pos_encoding(self.embedding(X) * math.sqrt(self.num_hiddens))
self.attention_weights = [None] * len(self.blks)
for i, blk in enumerate(self.blks):
X = blk(X, valid_lens)
self.attention_weights[
i] = blk.attention.attention.attention_weights
return X
"""11.1"""
def annotate(text, xy, xytext): #@save
d2l.plt.gca().annotate(text, xy=xy, xytext=xytext,
arrowprops=dict(arrowstyle='->'))
"""11.3"""
def train_2d(trainer, steps=20, f_grad=None): # @save
"""็จๅฎๅถ็่ฎญ็ปๆบไผๅ2D็ฎๆ ๅฝๆฐ"""
# s1 and s2 are internal state variables that will be used later
x1, x2, s1, s2 = -5, -2, 0, 0
results = [(x1, x2)]
for i in range(steps):
if f_grad:
x1, x2, s1, s2 = trainer(x1, x2, s1, s2, f_grad)
else:
x1, x2, s1, s2 = trainer(x1, x2, s1, s2)
results.append((x1, x2))
print(f'epoch {i + 1}, x1: {float(x1):f}, x2: {float(x2):f}')
return results
def show_trace_2d(f, results): # @save
"""ๆพ็คบไผๅ่ฟ็จไธญ2Dๅ้็่ฝจ่ฟน"""
d2l.set_figsize()
d2l.plt.plot(*zip(*results), '-o', color='#ff7f0e')
x1, x2 = paddle.meshgrid(
paddle.arange(-5.5, 1.0, 0.1, dtype='float32'), paddle.arange(-3.0, 1.0, 0.1, dtype='float32'))
d2l.plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')
d2l.plt.xlabel('x1')
d2l.plt.ylabel('x2')
"""11.5"""
#@save
d2l.DATA_HUB['airfoil'] = (d2l.DATA_URL + 'airfoil_self_noise.dat',
'76e5be1548fd8222e5074cf0faae75edff8cf93f')
#@save
def get_data_ch11(batch_size=10, n=1500):
data = np.genfromtxt(d2l.download('airfoil'),
dtype=np.float32, delimiter='\t')
data = d2l.tensor((data - data.mean(axis=0)) / data.std(axis=0))
data_iter = d2l.load_array((data[:n, :-1], data[:n, -1]),
batch_size, is_train=True)
return data_iter, data.shape[1]-1
#@save
def train_ch11(trainer_fn, states, hyperparams, data_iter,
feature_dim, num_epochs=2):
# ๅๅงๅๆจกๅ
w = d2l.tensor(d2l.normal(mean=0.0, std=0.01, shape=(feature_dim, 1),),stop_gradient=False)
b = d2l.tensor(d2l.zeros((1,)), stop_gradient=False)
net, loss = lambda X: d2l.linreg(X, w, b), d2l.squared_loss
# ่ฎญ็ปๆจกๅ
animator = d2l.Animator(xlabel='epoch', ylabel='loss',
xlim=[0, num_epochs], ylim=[0.22, 0.35])
n, timer = 0, d2l.Timer()
for _ in range(num_epochs):
for X, y in data_iter:
l = loss(net(X), y).mean()
l.backward()
w, b = trainer_fn([w, b], states, hyperparams)
n += X.shape[0]
if n % 200 == 0:
timer.stop()
animator.add(n/X.shape[0]/len(data_iter),
(d2l.evaluate_loss(net, data_iter, loss),))
timer.start()
print(f'loss: {animator.Y[0][-1]:.3f}, {timer.avg():.3f} sec/epoch')
return timer.cumsum(), animator.Y[0]
#@save
def train_concise_ch11(trainer_fn, hyperparams, data_iter, num_epochs=4):
# ๅๅงๅๆจกๅ
net = nn.Sequential(nn.Linear(5, 1))
def init_weights(m):
if type(m) == nn.Linear:
nn.initializer.Normal(mean=0.0, std=0.01)(m.weight)  # in-place init in dygraph mode
net.apply(init_weights)
optimizer = trainer_fn(parameters=net.parameters(), **hyperparams)
loss = nn.MSELoss(reduction='none')
animator = d2l.Animator(xlabel='epoch', ylabel='loss',
xlim=[0, num_epochs], ylim=[0.22, 0.35])
n, timer = 0, d2l.Timer()
for _ in range(num_epochs):
for X, y in data_iter:
optimizer.clear_grad()
out = net(X)
y = y.reshape(out.shape)
l = loss(out, y)
l.mean().backward()
optimizer.step()
n += X.shape[0]
if n % 200 == 0:
timer.stop()
# MSELoss่ฎก็ฎๅนณๆน่ฏฏๅทฎๆถไธๅธฆ็ณปๆฐ1/2
animator.add(n/X.shape[0]/len(data_iter),
(d2l.evaluate_loss(net, data_iter, loss) / 2,))
timer.start()
print(f'loss: {animator.Y[0][-1]:.3f}, {timer.avg():.3f} sec/epoch')
"""12.1"""
#@save
class Benchmark:
"""็จไบๆต้่ฟ่กๆถ้ด"""
def __init__(self, description='Done'):
self.description = description
def __enter__(self):
self.timer = d2l.Timer()
return self
def __exit__(self, *args):
print(f'{self.description}: {self.timer.stop():.4f} sec')
'''12.5'''
# ๅฎไน้ฃๆกจๅๅๅฝๆฐ๏ผๅฐๆฐๆฎๅๅๅๅๅ็ปๆๆGPU
def paddlescatter(XY, devices):
xy = int(XY.shape[0]/len(devices)) # ๆ นๆฎGPUๆฐ็ฎ่ฎก็ฎๅๅๅคงๅฐ
return [paddle.to_tensor(XY[i * xy:(i + 1) * xy], place=device) for i, device in enumerate(devices)]
#@save
# ๅฐXๅyๆๅๅฐๅคไธช่ฎพๅคไธ
def split_batch(X, y, devices):
"""ๅฐXๅyๆๅๅฐๅคไธช่ฎพๅคไธ"""
assert X.shape[0] == y.shape[0]
return (paddlescatter(X, devices),
paddlescatter(y, devices))
'''12.6'''
#@save
def resnet18(num_classes, in_channels=1):
"""็จๅ ไฟฎๆน็ResNet-18ๆจกๅ"""
def resnet_block(in_channels, out_channels, num_residuals,
first_block=False):
blk = []
for i in range(num_residuals):
if i == 0 and not first_block:
blk.append(d2l.Residual(in_channels, out_channels,
use_1x1conv=True, strides=2))
else:
blk.append(d2l.Residual(out_channels, out_channels))
return nn.Sequential(*blk)
# This model uses smaller convolution kernels, stride, and padding, and removes the max-pooling layer
net = nn.Sequential(
nn.Conv2D(in_channels, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2D(64),
nn.ReLU())
net.add_sublayer("resnet_block1", resnet_block(
64, 64, 2, first_block=True))
net.add_sublayer("resnet_block2", resnet_block(64, 128, 2))
net.add_sublayer("resnet_block3", resnet_block(128, 256, 2))
net.add_sublayer("resnet_block4", resnet_block(256, 512, 2))
net.add_sublayer("global_avg_pool", nn.AdaptiveAvgPool2D((1, 1)))
net.add_sublayer("fc", nn.Sequential(nn.Flatten(),
nn.Linear(512, num_classes)))
return net
"""13.1"""
def train_batch_ch13(net, X, y, loss, trainer, devices):
"""็จๅคGPU่ฟ่กๅฐๆน้่ฎญ็ป
Defined in :numref:`sec_image_augmentation`"""
if isinstance(X, list):
# ๅพฎ่ฐBERTไธญๆ้๏ผ็จๅ่ฎจ่ฎบ๏ผ
X = [paddle.to_tensor(x, place=devices[0]) for x in X]
else:
X = paddle.to_tensor(X, place=devices[0])
y = paddle.to_tensor(y, place=devices[0])
net.train()
trainer.clear_grad()
pred = net(X)
l = loss(pred, y)
l.sum().backward()
trainer.step()
train_loss_sum = l.sum()
train_acc_sum = d2l.accuracy(pred, y)
return train_loss_sum, train_acc_sum
def train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs,
devices=d2l.try_all_gpus()):
"""็จๅคGPU่ฟ่กๆจกๅ่ฎญ็ป
Defined in :numref:`sec_image_augmentation`"""
timer, num_batches = d2l.Timer(), len(train_iter)
animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs], ylim=[0, 1],
legend=['train loss', 'train acc', 'test acc'])
net = paddle.DataParallel(net)
for epoch in range(num_epochs):
# 4ไธช็ปดๅบฆ๏ผๅจๅญ่ฎญ็ปๆๅคฑ๏ผ่ฎญ็ปๅ็กฎๅบฆ๏ผๅฎไพๆฐ๏ผ็น็นๆฐ
metric = d2l.Accumulator(4)
for i, (features, labels) in enumerate(train_iter):
timer.start()
l, acc = train_batch_ch13(
net, features, labels, loss, trainer, devices)
metric.add(l, acc, labels.shape[0], labels.numel())
timer.stop()
if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
animator.add(epoch + (i + 1) / num_batches,
(metric[0] / metric[2], metric[1] / metric[3],
None))
test_acc = d2l.evaluate_accuracy_gpu(net, test_iter)
animator.add(epoch + 1, (None, None, test_acc))
print(f'loss {metric[0] / metric[2]:.3f}, train acc '
f'{metric[1] / metric[3]:.3f}, test acc {test_acc:.3f}')
print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec on '
f'{str(devices)}')
"""13.2"""
#@save
d2l.DATA_HUB['hotdog'] = (d2l.DATA_URL + 'hotdog.zip',
'fba480ffa8aa7e0febbb511d181409f899b9baa5')
"""13.3"""
#@save
def bbox_to_rect(bbox, color):
"""Defined in :numref:`sec_bbox`"""
# ๅฐ่พน็ๆก(ๅทฆไธx,ๅทฆไธy,ๅณไธx,ๅณไธy)ๆ ผๅผ่ฝฌๆขๆmatplotlibๆ ผๅผ๏ผ
# ((ๅทฆไธx,ๅทฆไธy),ๅฎฝ,้ซ)
return d2l.plt.Rectangle(
xy=(bbox[0], bbox[1]), width=bbox[2] - bbox[0], height=bbox[3] - bbox[1],
fill=False, edgecolor=color, linewidth=2)
#@save
def box_corner_to_center(boxes):
"""ไป๏ผๅทฆไธ๏ผๅณไธ๏ผ่ฝฌๆขๅฐ๏ผไธญ้ด๏ผๅฎฝๅบฆ๏ผ้ซๅบฆ๏ผ"""
x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
cx = (x1 + x2) / 2
cy = (y1 + y2) / 2
w = x2 - x1
h = y2 - y1
boxes = paddle.stack((cx, cy, w, h), axis=-1)
return boxes
#@save
def box_center_to_corner(boxes):
"""ไป๏ผไธญ้ด๏ผๅฎฝๅบฆ๏ผ้ซๅบฆ๏ผ่ฝฌๆขๅฐ๏ผๅทฆไธ๏ผๅณไธ๏ผ"""
cx, cy, w, h = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
x1 = cx - 0.5 * w
y1 = cy - 0.5 * h
x2 = cx + 0.5 * w
y2 = cy + 0.5 * h
boxes = paddle.stack((x1, y1, x2, y2), axis=-1)
return boxes
"""13.4"""
#@save
def multibox_prior(data, sizes, ratios):
"""็ๆไปฅๆฏไธชๅ็ด ไธบไธญๅฟๅ
ทๆไธๅๅฝข็ถ็้ๆก
Defined in :numref:`sec_anchor`"""
in_height, in_width = data.shape[-2:]
device, num_sizes, num_ratios = data.place, len(sizes), len(ratios)
boxes_per_pixel = (num_sizes + num_ratios - 1)
size_tensor = d2l.tensor(sizes, place=device)
ratio_tensor = d2l.tensor(ratios, place=device)
# ไธบไบๅฐ้็น็งปๅจๅฐๅ็ด ็ไธญๅฟ๏ผ้่ฆ่ฎพ็ฝฎๅ็งป้ใ
# ๅ ไธบไธไธชๅ็ด ็็้ซไธบ1ไธๅฎฝไธบ1๏ผๆไปฌ้ๆฉๅ็งปๆไปฌ็ไธญๅฟ0.5
offset_h, offset_w = 0.5, 0.5
steps_h = 1.0 / in_height # ๅจy่ฝดไธ็ผฉๆพๆญฅ้ฟ
steps_w = 1.0 / in_width # ๅจx่ฝดไธ็ผฉๆพๆญฅ้ฟ
# ็ๆ้ๆก็ๆๆไธญๅฟ็น
center_h = (paddle.arange(in_height) + offset_h) * steps_h
center_w = (paddle.arange(in_width) + offset_w) * steps_w
shift_y, shift_x = paddle.meshgrid(center_h, center_w)
shift_y, shift_x = paddle.reshape(shift_y, [-1]), paddle.reshape(shift_x, [-1])
# ็ๆโboxes_per_pixelโไธช้ซๅๅฎฝ๏ผ
# ไนๅ็จไบๅๅปบ้ๆก็ๅ่งๅๆ (xmin,xmax,ymin,ymax)
w = paddle.concat((size_tensor * paddle.sqrt(ratio_tensor[0]),
sizes[0] * paddle.sqrt(ratio_tensor[1:]))) \
* in_height / in_width  # Handle rectangular inputs
h = paddle.concat((size_tensor / paddle.sqrt(ratio_tensor[0]),
sizes[0] / paddle.sqrt(ratio_tensor[1:])))
# ้คไปฅ2ๆฅ่ทๅพๅ้ซๅๅๅฎฝ
anchor_manipulations = paddle.tile(paddle.stack((-w, -h, w, h)).T,
repeat_times=[in_height * in_width, 1]) / 2
# ๆฏไธชไธญๅฟ็น้ฝๅฐๆโboxes_per_pixelโไธช้ๆก๏ผ
# ๆไปฅ็ๆๅซๆๆ้ๆกไธญๅฟ็็ฝๆ ผ๏ผ้ๅคไบโboxes_per_pixelโๆฌก
out_grid = paddle.stack([shift_x, shift_y, shift_x, shift_y], axis=1)
out_grid = paddle.tile(out_grid, repeat_times=[boxes_per_pixel]).reshape((-1, out_grid.shape[1]))
output = out_grid + anchor_manipulations
return output.unsqueeze(0)
def show_bboxes(axes, bboxes, labels=None, colors=None):
"""ๆพ็คบๆๆ่พน็ๆก
Defined in :numref:`sec_anchor`"""
def _make_list(obj, default_values=None):
if obj is None:
obj = default_values
elif not isinstance(obj, (list, tuple)):
obj = [obj]
return obj
labels = _make_list(labels)
colors = _make_list(colors, ['b', 'g', 'r', 'm', 'c'])
for i, bbox in enumerate(bboxes):
color = colors[i % len(colors)]
rect = d2l.bbox_to_rect(d2l.numpy(bbox), color)
axes.add_patch(rect)
if labels and len(labels) > i:
text_color = 'k' if color == 'w' else 'w'
axes.text(rect.xy[0], rect.xy[1], labels[i],
va='center', ha='center', fontsize=9, color=text_color,
bbox=dict(facecolor=color, lw=0))
#@save
def box_iou(boxes1, boxes2):
"""่ฎก็ฎไธคไธช้ๆกๆ่พน็ๆกๅ่กจไธญๆๅฏน็ไบคๅนถๆฏ"""
box_area = lambda boxes: ((boxes[:, 2] - boxes[:, 0]) *
(boxes[:, 3] - boxes[:, 1]))
# boxes1,boxes2,areas1,areas2็ๅฝข็ถ:
# boxes1๏ผ(boxes1็ๆฐ้,4),
# boxes2๏ผ(boxes2็ๆฐ้,4),
# areas1๏ผ(boxes1็ๆฐ้,),
# areas2๏ผ(boxes2็ๆฐ้,)
areas1 = box_area(boxes1)
areas2 = box_area(boxes2)
# inter_upperlefts,inter_lowerrights,inters็ๅฝข็ถ:
# (boxes1็ๆฐ้,boxes2็ๆฐ้,2)
inter_upperlefts = paddle.maximum(boxes1[:, None, :2], boxes2[:, :2])
inter_lowerrights = paddle.minimum(boxes1[:, None, 2:], boxes2[:, 2:])
inters = (inter_lowerrights - inter_upperlefts).clip(min=0)
# inter_areasandunion_areas็ๅฝข็ถ:(boxes1็ๆฐ้,boxes2็ๆฐ้)
inter_areas = inters[:, :, 0] * inters[:, :, 1]
union_areas = areas1[:, None] + areas2 - inter_areas
return inter_areas / union_areas
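# Worked example (boxes are assumptions, in corner format x1, y1, x2, y2):
# b1 = paddle.to_tensor([[0., 0., 2., 2.]])
# b2 = paddle.to_tensor([[1., 1., 3., 3.]])
# box_iou(b1, b2)   # [[0.1429]]: intersection 1, union 4 + 4 - 1 = 7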
#@save
def assign_anchor_to_bbox(ground_truth, anchors, iou_threshold=0.5):
"""ๅฐๆๆฅ่ฟ็็ๅฎ่พน็ๆกๅ้
็ป้ๆก"""
num_anchors, num_gt_boxes = anchors.shape[0], ground_truth.shape[0]
# Element x_ij in row i and column j is the IoU of anchor box i and ground-truth bounding box j
jaccard = box_iou(anchors, ground_truth)
# Tensor holding the ground-truth bounding box assigned to each anchor box
anchors_bbox_map = paddle.full((num_anchors,), -1, dtype=paddle.int64)
# Assign ground-truth bounding boxes according to the threshold
indices = paddle.argmax(jaccard, axis=1)
max_ious = paddle.max(jaccard, axis=1)
anc_i = paddle.nonzero(max_ious >= 0.5).reshape([-1])
box_j = indices[max_ious >= 0.5]
anchors_bbox_map[anc_i] = box_j
col_discard = paddle.full((num_anchors,), -1)
row_discard = paddle.full((num_gt_boxes,), -1)
for _ in range(num_gt_boxes):
max_idx = paddle.argmax(jaccard)
box_idx = (max_idx % num_gt_boxes).astype(paddle.int64)
anc_idx = (max_idx / num_gt_boxes).astype(paddle.int64)
anchors_bbox_map[anc_idx] = box_idx
jaccard[:, box_idx] = col_discard
jaccard[anc_idx, :] = row_discard
return anchors_bbox_map
#@save
def offset_boxes(anchors, assigned_bb, eps=1e-6):
"""ๅฏน้ๆกๅ็งป้็่ฝฌๆข"""
c_anc = d2l.box_corner_to_center(anchors)
c_assigned_bb = d2l.box_corner_to_center(assigned_bb)
offset_xy = 10 * (c_assigned_bb[:, :2] - c_anc[:, :2]) / c_anc[:, 2:]
offset_wh = 5 * paddle.log(eps + c_assigned_bb[:, 2:] / c_anc[:, 2:])
offset = paddle.concat([offset_xy, offset_wh], axis=1)
return offset
#@save
def multibox_target(anchors, labels):
"""ไฝฟ็จ็ๅฎ่พน็ๆกๆ ่ฎฐ้ๆก"""
batch_size, anchors = labels.shape[0], anchors.squeeze(0)
batch_offset, batch_mask, batch_class_labels = [], [], []
num_anchors = anchors.shape[0]
for i in range(batch_size):
label = labels[i, :, :]
anchors_bbox_map = assign_anchor_to_bbox(
label[:, 1:], anchors)
bbox_mask = ((anchors_bbox_map >= 0).astype(paddle.float32).unsqueeze(-1)).tile(
[1, 4])
# Initialize the class labels and assigned bounding-box coordinates to zero
class_labels = paddle.zeros([num_anchors], dtype=paddle.int64)
assigned_bb = paddle.zeros((num_anchors, 4), dtype=paddle.float32)
# Label the classes of anchor boxes using their assigned ground-truth
# bounding boxes. If an anchor box is not assigned any, label it as
# background (value zero)
indices_true = paddle.nonzero(anchors_bbox_map >= 0)
bb_idx = anchors_bbox_map[indices_true]
class_labels[indices_true] = label[:, 0][bb_idx].astype(paddle.int64) + 1
assigned_bb[indices_true] = label[:, 1:][bb_idx]
# ๅ็งป้่ฝฌๆข
offset = offset_boxes(anchors, assigned_bb) * bbox_mask
batch_offset.append(offset.reshape([-1]))
batch_mask.append(bbox_mask.reshape([-1]))
batch_class_labels.append(class_labels)
bbox_offset = paddle.stack(batch_offset)
bbox_mask = paddle.stack(batch_mask)
class_labels = paddle.stack(batch_class_labels)
return (bbox_offset, bbox_mask, class_labels)
#@save
def offset_inverse(anchors, offset_preds):
"""ๆ นๆฎๅธฆๆ้ขๆตๅ็งป้็้ๆกๆฅ้ขๆต่พน็ๆก"""
anc = d2l.box_corner_to_center(anchors)
pred_bbox_xy = (offset_preds[:, :2] * anc[:, 2:] / 10) + anc[:, :2]
pred_bbox_wh = paddle.exp(offset_preds[:, 2:] / 5) * anc[:, 2:]
pred_bbox = paddle.concat((pred_bbox_xy, pred_bbox_wh), axis=1)
predicted_bbox = d2l.box_center_to_corner(pred_bbox)
return predicted_bbox
#@save
def nms(boxes, scores, iou_threshold):
"""ๅฏน้ขๆต่พน็ๆก็็ฝฎไฟกๅบฆ่ฟ่กๆๅบ"""
B = paddle.argsort(scores, axis=-1, descending=True)
keep = [] # ไฟ็้ขๆต่พน็ๆก็ๆๆ
while B.numel() > 0:
i = B.numpy()[0]
keep.append(i)
if B.numel() == 1: break
iou = box_iou(boxes[i].reshape([-1, 4]),
boxes[B[1:]].reshape([-1, 4])).reshape([-1])
inds = paddle.nonzero(iou <= iou_threshold).reshape([-1])
B = B[inds + 1]
return paddle.to_tensor(keep)
#@save
def multibox_detection(cls_probs, offset_preds, anchors, nms_threshold=0.5,
pos_threshold=0.009999999):
"""ไฝฟ็จ้ๆๅคงๅผๆๅถๆฅ้ขๆต่พน็ๆก"""
place, batch_size = cls_probs.place, cls_probs.shape[0]
anchors = anchors.squeeze(0)
num_classes, num_anchors = cls_probs.shape[1], cls_probs.shape[2]
out = []
for i in range(batch_size):
cls_prob, offset_pred = cls_probs[i], offset_preds[i].reshape([-1, 4])
conf = paddle.max(cls_prob[1:], 0)
class_id = paddle.argmax(cls_prob[1:], 0).numpy()
predicted_bb = offset_inverse(anchors, offset_pred)
keep = nms(predicted_bb, conf, nms_threshold)
# ๆพๅฐๆๆ็non_keep็ดขๅผ๏ผๅนถๅฐ็ฑป่ฎพ็ฝฎไธบ่ๆฏ
all_idx = paddle.arange(num_anchors, dtype='int64')
combined = paddle.concat((keep, all_idx))
uniques, counts = combined.unique(return_counts=True)
non_keep = uniques[counts == 1]
all_id_sorted = paddle.concat([keep, non_keep])
class_id[non_keep] = -1
class_id = class_id[all_id_sorted]
conf, predicted_bb = conf[all_id_sorted], predicted_bb[all_id_sorted]
# pos_thresholdๆฏไธไธช็จไบ้่ๆฏ้ขๆต็้ๅผ
below_min_idx = (conf < pos_threshold)
conf = conf.numpy()
class_id[below_min_idx.numpy()] = -1
conf[below_min_idx.numpy()] = 1 - conf[below_min_idx.numpy()]
pred_info = paddle.concat((paddle.to_tensor(class_id, dtype='float32').unsqueeze(1),
paddle.to_tensor(conf, dtype='float32').unsqueeze(1),
predicted_bb), axis=1)
out.append(pred_info)
return paddle.stack(out)
"""13.6"""
#@save
d2l.DATA_HUB['banana-detection'] = (
d2l.DATA_URL + 'banana-detection.zip',
'5de26c8fce5ccdea9f91267273464dc968d20d72')
#@save
def read_data_bananas(is_train=True):
"""่ฏปๅ้ฆ่ๆฃๆตๆฐๆฎ้ไธญ็ๅพๅๅๆ ็ญพ"""
data_dir = d2l.download_extract('banana-detection')
csv_fname = os.path.join(data_dir, 'bananas_train' if is_train
else 'bananas_val', 'label.csv')
csv_data = pd.read_csv(csv_fname)
csv_data = csv_data.set_index('img_name')
images, targets = [], []
for img_name, target in csv_data.iterrows():
paddle.vision.set_image_backend('cv2')
images.append(image_load(os.path.join(data_dir, 'bananas_train' if is_train else
'bananas_val', 'images', f'{img_name}'))[..., ::-1])
# The target here contains (class, upper-left x, upper-left y,
# lower-right x, lower-right y), where all images share the same
# banana class (index 0)
targets.append(list(target))
return images, paddle.to_tensor(targets).unsqueeze(1) / 256
#@save
class BananasDataset(paddle.io.Dataset):
"""ไธไธช็จไบๅ ่ฝฝ้ฆ่ๆฃๆตๆฐๆฎ้็่ชๅฎไนๆฐๆฎ้"""
def __init__(self, is_train):
self.features, self.labels = read_data_bananas(is_train)
print('read ' + str(len(self.features)) + (f' training examples' if
is_train else f' validation examples'))
def __getitem__(self, idx):
return (paddle.to_tensor(self.features[idx], dtype='float32').transpose([2, 0, 1]), self.labels[idx])
def __len__(self):
return len(self.features)
#@save
def load_data_bananas(batch_size):
"""ๅ ่ฝฝ้ฆ่ๆฃๆตๆฐๆฎ้"""
train_iter = paddle.io.DataLoader(BananasDataset(is_train=True),
batch_size=batch_size, shuffle=True)
val_iter = paddle.io.DataLoader(BananasDataset(is_train=False),
batch_size=batch_size)
return train_iter, val_iter
"""13.9"""
d2l.DATA_HUB['voc2012'] = (d2l.DATA_URL + 'VOCtrainval_11-May-2012.tar',
'4e443f8a2eca6b1dac8a6c57641b67dd40621a49')
def read_voc_images(voc_dir, is_train=True):
"""่ฏปๅๆๆVOCๅพๅๅนถๆ ๆณจ
Defined in :numref:`sec_semantic_segmentation`"""
txt_fname = os.path.join(voc_dir, 'ImageSets', 'Segmentation',
'train.txt' if is_train else 'val.txt')
with open(txt_fname, 'r') as f:
images = f.read().split()
features, labels = [], []
for i, fname in enumerate(images):
features.append(paddle.to_tensor(paddle.vision.image.image_load(os.path.join(
voc_dir, 'JPEGImages', f'{fname}.jpg'), backend='cv2')[..., ::-1], dtype=paddle.float32).transpose(
[2, 0, 1]))
labels.append(paddle.to_tensor(paddle.vision.image.image_load(os.path.join(
voc_dir, 'SegmentationClass', f'{fname}.png'), backend='cv2')[..., ::-1], dtype=paddle.float32).transpose(
[2, 0, 1]))
return features, labels
VOC_COLORMAP = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0],
[0, 0, 128], [128, 0, 128], [0, 128, 128], [128, 128, 128],
[64, 0, 0], [192, 0, 0], [64, 128, 0], [192, 128, 0],
[64, 0, 128], [192, 0, 128], [64, 128, 128], [192, 128, 128],
[0, 64, 0], [128, 64, 0], [0, 192, 0], [128, 192, 0],
[0, 64, 128]]
VOC_CLASSES = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'potted plant', 'sheep', 'sofa', 'train', 'tv/monitor']
def voc_colormap2label():
"""ๆๅปบไปRGBๅฐVOC็ฑปๅซ็ดขๅผ็ๆ ๅฐ
Defined in :numref:`sec_semantic_segmentation`"""
colormap2label = paddle.zeros([256 ** 3], dtype=paddle.int64)
for i, colormap in enumerate(VOC_COLORMAP):
colormap2label[
(colormap[0] * 256 + colormap[1]) * 256 + colormap[2]] = i
return colormap2label
def voc_label_indices(colormap, colormap2label):
"""ๅฐVOCๆ ็ญพไธญ็RGBๅผๆ ๅฐๅฐๅฎไปฌ็็ฑปๅซ็ดขๅผ
Defined in :numref:`sec_semantic_segmentation`"""
colormap = colormap.transpose([1, 2, 0]).astype('int32')
idx = ((colormap[:, :, 0] * 256 + colormap[:, :, 1]) * 256
+ colormap[:, :, 2])
return colormap2label[idx]
def voc_rand_crop(feature, label, height, width):
"""้ๆบ่ฃๅช็นๅพๅๆ ็ญพๅพๅ
Defined in :numref:`sec_semantic_segmentation`"""
rect = paddle.vision.transforms.RandomCrop((height, width))._get_param(
img=feature, output_size=(height, width))
feature = paddle.vision.transforms.crop(feature, *rect)
label = paddle.vision.transforms.crop(label, *rect)
return feature, label
class VOCSegDataset(paddle.io.Dataset):
"""ไธไธช็จไบๅ ่ฝฝVOCๆฐๆฎ้็่ชๅฎไนๆฐๆฎ้
Defined in :numref:`sec_semantic_segmentation`"""
def __init__(self, is_train, crop_size, voc_dir):
self.transform = paddle.vision.transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self.crop_size = crop_size
features, labels = read_voc_images(voc_dir, is_train=is_train)
self.features = [self.normalize_image(feature)
for feature in self.filter(features)]
self.labels = self.filter(labels)
self.colormap2label = voc_colormap2label()
print('read ' + str(len(self.features)) + ' examples')
def normalize_image(self, img):
return self.transform(img.astype(paddle.float32) / 255)
def filter(self, imgs):
return [img for img in imgs if (
img.shape[1] >= self.crop_size[0] and
img.shape[2] >= self.crop_size[1])]
def __getitem__(self, idx):
feature, label = voc_rand_crop(self.features[idx], self.labels[idx],
*self.crop_size)
return (feature, voc_label_indices(label, self.colormap2label))
def __len__(self):
return len(self.features)
def load_data_voc(batch_size, crop_size):
"""ๅ ่ฝฝVOC่ฏญไนๅๅฒๆฐๆฎ้
Defined in :numref:`sec_semantic_segmentation`"""
voc_dir = d2l.download_extract('voc2012', os.path.join(
'VOCdevkit', 'VOC2012'))
num_workers = d2l.get_dataloader_workers()
train_iter = paddle.io.DataLoader(
VOCSegDataset(True, crop_size, voc_dir), batch_size=batch_size,
shuffle=True, drop_last=True, num_workers=num_workers)
test_iter = paddle.io.DataLoader(
VOCSegDataset(False, crop_size, voc_dir), batch_size=batch_size,
drop_last=True, num_workers=num_workers)
return train_iter, test_iter
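# Minimal usage sketch (assumes the VOC archive has already been fetched via d2l.download_extract):
# train_iter, test_iter = load_data_voc(batch_size=64, crop_size=(320, 480))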
"""13.13"""
d2l.DATA_HUB['cifar10_tiny'] = (d2l.DATA_URL + 'kaggle_cifar10_tiny.zip',
'2068874e4b9a9f0fb07ebe0ad2b29754449ccacd')
def read_csv_labels(fname):
"""่ฏปๅfnameๆฅ็ปๆ ็ญพๅญๅ
ธ่ฟๅไธไธชๆไปถๅ"""
with open(fname, 'r') as f:
        # Skip the header line (column names)
lines = f.readlines()[1:]
tokens = [l.rstrip().split(',') for l in lines]
return dict(((name, label) for name, label in tokens))
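# Illustrative input/output: a CSV whose rows look like "id,label" (with a header
# line that is skipped), e.g. "1,frog" and "2,truck", yields {'1': 'frog', '2': 'truck'}.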
def copyfile(filename, target_dir):
"""ๅฐๆไปถๅคๅถๅฐ็ฎๆ ็ฎๅฝ"""
os.makedirs(target_dir, exist_ok=True)
shutil.copy(filename, target_dir)
def reorg_train_valid(data_dir, labels, valid_ratio):
"""ๅฐ้ช่ฏ้ไปๅๅง็่ฎญ็ป้ไธญๆๅๅบๆฅ"""
# ่ฎญ็ปๆฐๆฎ้ไธญๆ ทๆฌๆๅฐ็็ฑปๅซไธญ็ๆ ทๆฌๆฐ
n = collections.Counter(labels.values()).most_common()[-1][1]
    # The number of examples per class for the validation set
n_valid_per_label = max(1, math.floor(n * valid_ratio))
label_count = {}
for train_file in os.listdir(os.path.join(data_dir, 'train')):
label = labels[train_file.split('.')[0]]
fname = os.path.join(data_dir, 'train', train_file)
copyfile(fname, os.path.join(data_dir, 'train_valid_test',
'train_valid', label))
if label not in label_count or label_count[label] < n_valid_per_label:
copyfile(fname, os.path.join(data_dir, 'train_valid_test',
'valid', label))
label_count[label] = label_count.get(label, 0) + 1
else:
copyfile(fname, os.path.join(data_dir, 'train_valid_test',
'train', label))
return n_valid_per_label
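# Worked example: if the rarest class has n = 100 training examples and
# valid_ratio = 0.1, then n_valid_per_label = max(1, floor(100 * 0.1)) = 10,
# so up to 10 examples of every class are copied into 'valid'.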
def reorg_test(data_dir):
"""ๅจ้ขๆตๆ้ดๆด็ๆต่ฏ้๏ผไปฅๆนไพฟ่ฏปๅ"""
for test_file in os.listdir(os.path.join(data_dir, 'test')):
copyfile(os.path.join(data_dir, 'test', test_file),
os.path.join(data_dir, 'train_valid_test', 'test',
'unknown'))
"""13.14"""
d2l.DATA_HUB['dog_tiny'] = (d2l.DATA_URL + 'kaggle_dog_tiny.zip',
'0cb91d09b814ecdc07b50f31f8dcad3e81d6a86d')
"""14.3"""
d2l.DATA_HUB['ptb'] = (d2l.DATA_URL + 'ptb.zip',
'319d85e578af0cdc590547f26231e4e31cdf1e42')
def read_ptb():
"""ๅฐPTBๆฐๆฎ้ๅ ่ฝฝๅฐๆๆฌ่ก็ๅ่กจไธญ"""
data_dir = d2l.download_extract('ptb')
    # Read the training set.
with open(os.path.join(data_dir, 'ptb.train.txt')) as f:
raw_text = f.read()
return [line.split() for line in raw_text.split('\n')]
def subsample(sentences, vocab):
"""ไธ้ๆ ท้ซ้ข่ฏ"""
    # Exclude unknown tokens ('<unk>')
sentences = [[token for token in line if vocab[token] != vocab.unk]
for line in sentences]
counter = d2l.count_corpus(sentences)
num_tokens = sum(counter.values())
    # Return True if the token is kept during subsampling
def keep(token):
return(random.uniform(0, 1) <
math.sqrt(1e-4 / counter[token] * num_tokens))
return ([[token for token in line if keep(token)] for line in sentences],
counter)
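# Worked example of the keep probability: a token that accounts for 1% of all
# tokens is kept with probability sqrt(1e-4 / 0.01) = 0.1, so frequent words
# are aggressively subsampled while rare words are almost always kept.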
def get_centers_and_contexts(corpus, max_window_size):
"""่ฟๅ่ทณๅ
ๆจกๅไธญ็ไธญๅฟ่ฏๅไธไธๆ่ฏ"""
centers, contexts = [], []
for line in corpus:
        # To form a "center word - context word" pair, each sentence needs at least 2 words
if len(line) < 2:
continue
centers += line
        for i in range(len(line)):  # i is the center of the context window
window_size = random.randint(1, max_window_size)
indices = list(range(max(0, i - window_size),
min(len(line), i + 1 + window_size)))
            # Exclude the center word from the context words
indices.remove(i)
contexts.append([line[idx] for idx in indices])
return centers, contexts
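# Illustrative example: for the line [0, 1, 2, 3], if the window size drawn at
# i=2 happens to be 1, the center word 2 gets the context words [1, 3].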
class RandomGenerator:
"""ๆ นๆฎnไธช้ๆ ทๆ้ๅจ{1,...,n}ไธญ้ๆบๆฝๅ"""
def __init__(self, sampling_weights):
        # The sampling population is {1, ..., n}; index 0 is excluded
self.population = list(range(1, len(sampling_weights) + 1))
self.sampling_weights = sampling_weights
self.candidates = []
self.i = 0
def draw(self):
if self.i == len(self.candidates):
            # Cache k random sampling results
self.candidates = random.choices(
self.population, self.sampling_weights, k=10000)
self.i = 0
self.i += 1
return self.candidates[self.i - 1]
def get_negatives(all_contexts, vocab, counter, K):
"""่ฟๅ่ด้ๆ ทไธญ็ๅชๅฃฐ่ฏ"""
    # Indices are 1, 2, ... (index 0 is the excluded unknown token in the vocabulary)
sampling_weights = [counter[vocab.to_tokens(i)]**0.75
for i in range(1, len(vocab))]
all_negatives, generator = [], RandomGenerator(sampling_weights)
for contexts in all_contexts:
negatives = []
while len(negatives) < len(contexts) * K:
neg = generator.draw()
            # Noise words cannot be context words
if neg not in contexts:
negatives.append(neg)
all_negatives.append(negatives)
return all_negatives
def batchify(data):
"""่ฟๅๅธฆๆ่ด้ๆ ท็่ทณๅ
ๆจกๅ็ๅฐๆน้ๆ ทๆฌ"""
max_len = max(len(c) + len(n) for _, c, n in data)
centers, contexts_negatives, masks, labels = [], [], [], []
for center, context, negative in data:
cur_len = len(context) + len(negative)
centers += [center]
contexts_negatives += \
[context + negative + [0] * (max_len - cur_len)]
masks += [[1] * cur_len + [0] * (max_len - cur_len)]
labels += [[1] * len(context) + [0] * (max_len - len(context))]
return (paddle.to_tensor(centers).reshape((-1, 1)), paddle.to_tensor(
contexts_negatives), paddle.to_tensor(masks), paddle.to_tensor(labels))
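# Illustrative example: if one example has len(context) + len(negative) == 4 while
# the longest has 6, the shorter row is padded with two zeros, its mask ends in
# [..., 0, 0], and its labels mark only the true context positions with 1.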
def load_data_ptb(batch_size, max_window_size, num_noise_words):
"""ไธ่ฝฝPTBๆฐๆฎ้๏ผ็ถๅๅฐๅ
ถๅ ่ฝฝๅฐๅ
ๅญไธญ"""
num_workers = d2l.get_dataloader_workers()
sentences = read_ptb()
vocab = d2l.Vocab(sentences, min_freq=10)
subsampled, counter = subsample(sentences, vocab)
corpus = [vocab[line] for line in subsampled]
all_centers, all_contexts = get_centers_and_contexts(
corpus, max_window_size)
all_negatives = get_negatives(
all_contexts, vocab, counter, num_noise_words)
class PTBDataset(paddle.io.Dataset):
def __init__(self, centers, contexts, negatives):
assert len(centers) == len(contexts) == len(negatives)
self.centers = centers
self.contexts = contexts
self.negatives = negatives
def __getitem__(self, index):
return (self.centers[index], self.contexts[index],
self.negatives[index])
def __len__(self):
return len(self.centers)
dataset = PTBDataset(all_centers, all_contexts, all_negatives)
data_iter = paddle.io.DataLoader(
dataset, batch_size=batch_size, shuffle=True,
collate_fn=batchify, num_workers=num_workers)
return data_iter, vocab
generator = RandomGenerator([2, 3, 4])
[generator.draw() for _ in range(10)]
"""14.7"""
d2l.DATA_HUB['glove.6b.50d'] = (d2l.DATA_URL + 'glove.6B.50d.zip',
'0b8703943ccdb6eb788e6f091b8946e82231bc4d')
d2l.DATA_HUB['glove.6b.100d'] = (d2l.DATA_URL + 'glove.6B.100d.zip',
'cd43bfb07e44e6f27cbcc7bc9ae3d80284fdaf5a')
d2l.DATA_HUB['glove.42b.300d'] = (d2l.DATA_URL + 'glove.42B.300d.zip',
'b5116e234e9eb9076672cfeabf5469f3eec904fa')
d2l.DATA_HUB['wiki.en'] = (d2l.DATA_URL + 'wiki.en.zip',
'c1816da3821ae9f43899be655002f6c723e91b88')
class TokenEmbedding:
"""GloVeๅตๅ
ฅ"""
def __init__(self, embedding_name):
"""Defined in :numref:`sec_synonyms`"""
self.idx_to_token, self.idx_to_vec = self._load_embedding(
embedding_name)
self.unknown_idx = 0
self.token_to_idx = {token: idx for idx, token in
enumerate(self.idx_to_token)}
def _load_embedding(self, embedding_name):
idx_to_token, idx_to_vec = ['<unk>'], []
data_dir = d2l.download_extract(embedding_name)
        # GloVe website: https://nlp.stanford.edu/projects/glove/
        # fastText website: https://fasttext.cc/
with open(os.path.join(data_dir, 'vec.txt'), 'r') as f:
for line in f:
elems = line.rstrip().split(' ')
token, elems = elems[0], [float(elem) for elem in elems[1:]]
                # Skip header information, such as the top row in fastText
if len(elems) > 1:
idx_to_token.append(token)
idx_to_vec.append(elems)
idx_to_vec = [[0] * len(idx_to_vec[0])] + idx_to_vec
return idx_to_token, d2l.tensor(idx_to_vec)
def __getitem__(self, tokens):
indices = [self.token_to_idx.get(token, self.unknown_idx)
for token in tokens]
vecs = self.idx_to_vec[d2l.tensor(indices)]
return vecs
def __len__(self):
return len(self.idx_to_token)
"""14.8"""
def get_tokens_and_segments(tokens_a, tokens_b=None):
"""่ทๅ่พๅ
ฅๅบๅ็่ฏๅ
ๅๅ
ถ็ๆฎต็ดขๅผ"""
tokens = ['<cls>'] + tokens_a + ['<sep>']
    # 0 and 1 mark segments A and B, respectively
segments = [0] * (len(tokens_a) + 2)
if tokens_b is not None:
tokens += tokens_b + ['<sep>']
segments += [1] * (len(tokens_b) + 1)
return tokens, segments
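# Illustrative example: tokens_a=['hello', 'world'], tokens_b=['nice'] gives
# tokens = ['<cls>', 'hello', 'world', '<sep>', 'nice', '<sep>'] and
# segments = [0, 0, 0, 0, 1, 1].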
#@save
class BERTEncoder(nn.Layer):
"""BERT็ผ็ ๅจ"""
def __init__(self, vocab_size, num_hiddens, norm_shape, ffn_num_input,
ffn_num_hiddens, num_heads, num_layers, dropout,
max_len=1000, key_size=768, query_size=768, value_size=768,
**kwargs):
super(BERTEncoder, self).__init__(**kwargs)
self.token_embedding = nn.Embedding(vocab_size, num_hiddens)
self.segment_embedding = nn.Embedding(2, num_hiddens)
self.blks = nn.Sequential()
for i in range(num_layers):
self.blks.add_sublayer(f"{i}", d2l.EncoderBlock(
key_size, query_size, value_size, num_hiddens, norm_shape,
ffn_num_input, ffn_num_hiddens, num_heads, dropout, True))
        # In BERT, positional embeddings are learnable, so we create a
        # positional embedding parameter that is long enough
x = paddle.randn([1, max_len, num_hiddens])
self.pos_embedding = paddle.create_parameter(shape=x.shape, dtype=str(x.numpy().dtype),
default_initializer=paddle.nn.initializer.Assign(x))
def forward(self, tokens, segments, valid_lens):
        # In the following code, the shape of X stays unchanged: (batch size, max sequence length, num_hiddens)
X = self.token_embedding(tokens) + self.segment_embedding(segments)
X = X + self.pos_embedding[:, :X.shape[1], :]
for blk in self.blks:
X = blk(X, valid_lens)
return X
#@save
def paddletile(x, n) :
    # A Paddle helper that reproduces torch.repeat_interleave; it only handles 1D data.
x = x.reshape([-1, 1])
out = paddle.tile(x, repeat_times=n)
return out.reshape([-1])
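# Illustrative example: paddletile(paddle.to_tensor([0, 1]), [3]) returns a
# tensor with values [0, 0, 0, 1, 1, 1].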
#@save
class MaskLM(nn.Layer):
"""BERT็ๆฉ่ฝ่ฏญ่จๆจกๅไปปๅก"""
def __init__(self, vocab_size, num_hiddens, num_inputs=768, **kwargs):
super(MaskLM, self).__init__(**kwargs)
self.mlp = nn.Sequential(nn.Linear(num_inputs, num_hiddens),
nn.ReLU(),
nn.LayerNorm(num_hiddens),
nn.Linear(num_hiddens, vocab_size))
def forward(self, X, pred_positions):
num_pred_positions = pred_positions.shape[1]
pred_positions = pred_positions.reshape([-1])
batch_size = X.shape[0]
batch_idx = paddle.arange(0, batch_size) # torch.arange()
        # Suppose batch_size=2 and num_pred_positions=3;
        # then batch_idx is np.array([0, 0, 0, 1, 1, 1])
batch_idx = paddletile(batch_idx, [num_pred_positions])
masked_X = X[batch_idx, pred_positions]
masked_X = masked_X.reshape((batch_size, num_pred_positions, -1))
mlm_Y_hat = self.mlp(masked_X)
return mlm_Y_hat
#@save
class NextSentencePred(nn.Layer):
"""BERT็ไธไธๅฅ้ขๆตไปปๅก"""
def __init__(self, num_inputs, **kwargs):
super(NextSentencePred, self).__init__(**kwargs)
self.output = nn.Linear(num_inputs, 2)
def forward(self, X):
        # The shape of X: (batch size, num_hiddens)
return self.output(X)
#@save
class BERTModel(nn.Layer):
"""BERTๆจกๅ"""
def __init__(self, vocab_size, num_hiddens, norm_shape, ffn_num_input,
ffn_num_hiddens, num_heads, num_layers, dropout,
max_len=1000, key_size=768, query_size=768, value_size=768,
hid_in_features=768, mlm_in_features=768,
nsp_in_features=768):
super(BERTModel, self).__init__()
self.encoder = BERTEncoder(vocab_size, num_hiddens, norm_shape,
ffn_num_input, ffn_num_hiddens, num_heads, num_layers,
dropout, max_len=max_len, key_size=key_size,
query_size=query_size, value_size=value_size)
self.hidden = nn.Sequential(nn.Linear(hid_in_features, num_hiddens),
nn.Tanh())
self.mlm = MaskLM(vocab_size, num_hiddens, mlm_in_features)
self.nsp = NextSentencePred(nsp_in_features)
def forward(self, tokens, segments, valid_lens=None,
pred_positions=None):
encoded_X = self.encoder(tokens, segments, valid_lens)
if pred_positions is not None:
mlm_Y_hat = self.mlm(encoded_X, pred_positions)
else:
mlm_Y_hat = None
        # The hidden layer of the MLP classifier for next sentence prediction; 0 is the index of the '<cls>' token
nsp_Y_hat = self.nsp(self.hidden(encoded_X[:, 0, :]))
return encoded_X, mlm_Y_hat, nsp_Y_hat
"""14.9"""
#@save
d2l.DATA_HUB['wikitext-2'] = (
'https://s3.amazonaws.com/research.metamind.io/wikitext/'
'wikitext-2-v1.zip', '3c914d17d80b1459be871a5039ac23e752a53cbe')
#@save
def _read_wiki(data_dir):
file_name = os.path.join(data_dir, 'wiki.train.tokens')
with open(file_name, 'r') as f:
lines = f.readlines()
    # Convert uppercase letters into lowercase
paragraphs = [line.strip().lower().split(' . ')
for line in lines if len(line.split(' . ')) >= 2]
random.shuffle(paragraphs)
return paragraphs
#@save
def _get_next_sentence(sentence, next_sentence, paragraphs):
if random.random() < 0.5:
is_next = True
else:
        # paragraphs is a triply nested list (a list of lists of lists)
next_sentence = random.choice(random.choice(paragraphs))
is_next = False
return sentence, next_sentence, is_next
#@save
def _get_nsp_data_from_paragraph(paragraph, paragraphs, vocab, max_len):
nsp_data_from_paragraph = []
for i in range(len(paragraph) - 1):
tokens_a, tokens_b, is_next = _get_next_sentence(
paragraph[i], paragraph[i + 1], paragraphs)
        # Account for 1 '<cls>' token and 2 '<sep>' tokens
if len(tokens_a) + len(tokens_b) + 3 > max_len:
continue
tokens, segments = d2l.get_tokens_and_segments(tokens_a, tokens_b)
nsp_data_from_paragraph.append((tokens, segments, is_next))
return nsp_data_from_paragraph
#@save
def _replace_mlm_tokens(tokens, candidate_pred_positions, num_mlm_preds,
vocab):
    # Make a new copy of tokens for the input of the masked language model,
    # where the input may contain replaced '<mask>' or random tokens
mlm_input_tokens = [token for token in tokens]
pred_positions_and_labels = []
    # Shuffle for getting 15% random tokens for prediction in the masked language model task
random.shuffle(candidate_pred_positions)
for mlm_pred_position in candidate_pred_positions:
if len(pred_positions_and_labels) >= num_mlm_preds:
break
masked_token = None
        # 80% of the time: replace the word with the '<mask>' token
if random.random() < 0.8:
masked_token = '<mask>'
else:
            # 10% of the time: keep the word unchanged
if random.random() < 0.5:
masked_token = tokens[mlm_pred_position]
            # 10% of the time: replace the word with a random word
else:
masked_token = random.choice(vocab.idx_to_token)
mlm_input_tokens[mlm_pred_position] = masked_token
pred_positions_and_labels.append(
(mlm_pred_position, tokens[mlm_pred_position]))
return mlm_input_tokens, pred_positions_and_labels
#@save
def _get_mlm_data_from_tokens(tokens, vocab):
candidate_pred_positions = []
    # tokens is a list of strings
for i, token in enumerate(tokens):
        # Special tokens are not predicted in the masked language model task
if token in ['<cls>', '<sep>']:
continue
candidate_pred_positions.append(i)
    # 15% of random tokens are predicted in the masked language model task
num_mlm_preds = max(1, round(len(tokens) * 0.15))
mlm_input_tokens, pred_positions_and_labels = _replace_mlm_tokens(
tokens, candidate_pred_positions, num_mlm_preds, vocab)
pred_positions_and_labels = sorted(pred_positions_and_labels,
key=lambda x: x[0])
pred_positions = [v[0] for v in pred_positions_and_labels]
mlm_pred_labels = [v[1] for v in pred_positions_and_labels]
return vocab[mlm_input_tokens], pred_positions, vocab[mlm_pred_labels]
#@save
def _pad_bert_inputs(examples, max_len, vocab):
max_num_mlm_preds = round(max_len * 0.15)
all_token_ids, all_segments, valid_lens, = [], [], []
all_pred_positions, all_mlm_weights, all_mlm_labels = [], [], []
nsp_labels = []
for (token_ids, pred_positions, mlm_pred_label_ids, segments,
is_next) in examples:
all_token_ids.append(paddle.to_tensor(token_ids + [vocab['<pad>']] * (
max_len - len(token_ids)), dtype=paddle.int64))
all_segments.append(paddle.to_tensor(segments + [0] * (
max_len - len(segments)), dtype=paddle.int64))
        # valid_lens excludes counts of '<pad>' tokens
valid_lens.append(paddle.to_tensor(len(token_ids), dtype=paddle.float32))
all_pred_positions.append(paddle.to_tensor(pred_positions + [0] * (
max_num_mlm_preds - len(pred_positions)), dtype=paddle.int64))
        # Predictions of padded tokens will be filtered out of the loss
        # by multiplying them by 0 weights
all_mlm_weights.append(
paddle.to_tensor([1.0] * len(mlm_pred_label_ids) + [0.0] * (
max_num_mlm_preds - len(pred_positions)),
dtype=paddle.float32))
all_mlm_labels.append(paddle.to_tensor(mlm_pred_label_ids + [0] * (
max_num_mlm_preds - len(mlm_pred_label_ids)), dtype=paddle.int64))
nsp_labels.append(paddle.to_tensor(is_next, dtype=paddle.int64))
return (all_token_ids, all_segments, valid_lens, all_pred_positions,
all_mlm_weights, all_mlm_labels, nsp_labels)
#@save
class _WikiTextDataset(paddle.io.Dataset):
def __init__(self, paragraphs, max_len):
        # The input paragraphs[i] is a list of sentence strings representing a paragraph,
        # while the output paragraphs[i] is a list of sentences representing a paragraph,
        # where each sentence is a list of tokens
paragraphs = [d2l.tokenize(
paragraph, token='word') for paragraph in paragraphs]
sentences = [sentence for paragraph in paragraphs
for sentence in paragraph]
self.vocab = d2l.Vocab(sentences, min_freq=5, reserved_tokens=[
'<pad>', '<mask>', '<cls>', '<sep>'])
        # Get data for the next sentence prediction task
examples = []
for paragraph in paragraphs:
examples.extend(_get_nsp_data_from_paragraph(
paragraph, paragraphs, self.vocab, max_len))
        # Get data for the masked language model task
examples = [(_get_mlm_data_from_tokens(tokens, self.vocab)
+ (segments, is_next))
for tokens, segments, is_next in examples]
        # Pad the inputs
(self.all_token_ids, self.all_segments, self.valid_lens,
self.all_pred_positions, self.all_mlm_weights,
self.all_mlm_labels, self.nsp_labels) = _pad_bert_inputs(
examples, max_len, self.vocab)
def __getitem__(self, idx):
return (self.all_token_ids[idx], self.all_segments[idx],
self.valid_lens[idx], self.all_pred_positions[idx],
self.all_mlm_weights[idx], self.all_mlm_labels[idx],
self.nsp_labels[idx])
def __len__(self):
return len(self.all_token_ids)
#@save
def load_data_wiki(batch_size, max_len):
"""ๅ ่ฝฝWikiText-2ๆฐๆฎ้"""
num_workers = d2l.get_dataloader_workers()
num_workers = 0
data_dir = d2l.download_extract('wikitext-2', 'wikitext-2')
paragraphs = _read_wiki(data_dir)
train_set = _WikiTextDataset(paragraphs, max_len)
train_iter = paddle.io.DataLoader(dataset=train_set, batch_size=batch_size,
shuffle=True, num_workers=num_workers)
return train_iter, train_set.vocab
'''14.10'''
#@save
def _get_batch_loss_bert(net, loss, vocab_size, tokens_X,
segments_X, valid_lens_x,
pred_positions_X, mlm_weights_X,
mlm_Y, nsp_y):
    # Forward pass
_, mlm_Y_hat, nsp_Y_hat = net(tokens_X, segments_X,
                                  valid_lens_x.reshape([-1]),  # reshape expects a list or tuple
pred_positions_X)
    # Compute the masked language model loss
mlm_l = loss(mlm_Y_hat.reshape([-1, vocab_size]), mlm_Y.reshape([-1])) *\
mlm_weights_X.reshape([-1, 1])
mlm_l = mlm_l.sum() / (mlm_weights_X.sum() + 1e-8)
    # Compute the next sentence prediction loss
nsp_l = loss(nsp_Y_hat, nsp_y)
l = mlm_l + nsp_l
return mlm_l, nsp_l, l
"""15.1"""
d2l.DATA_HUB['aclImdb'] = (
'http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz',
'01ada507287d82875905620988597833ad4e0903')
def read_imdb(data_dir, is_train):
"""่ฏปๅIMDb่ฏ่ฎบๆฐๆฎ้ๆๆฌๅบๅๅๆ ็ญพ"""
data, labels = [], []
for label in ('pos', 'neg'):
folder_name = os.path.join(data_dir, 'train' if is_train else 'test',
label)
for file in os.listdir(folder_name):
with open(os.path.join(folder_name, file), 'rb') as f:
review = f.read().decode('utf-8').replace('\n', '')
data.append(review)
labels.append(1 if label == 'pos' else 0)
return data, labels
def load_data_imdb(batch_size, num_steps=500):
"""่ฟๅๆฐๆฎ่ฟญไปฃๅจๅIMDb่ฏ่ฎบๆฐๆฎ้็่ฏ่กจ"""
data_dir = d2l.download_extract('aclImdb', 'aclImdb')
train_data = read_imdb(data_dir, True)
test_data = read_imdb(data_dir, False)
train_tokens = d2l.tokenize(train_data[0], token='word')
test_tokens = d2l.tokenize(test_data[0], token='word')
vocab = d2l.Vocab(train_tokens, min_freq=5)
train_features = d2l.tensor([d2l.truncate_pad(
vocab[line], num_steps, vocab['<pad>']) for line in train_tokens])
test_features = d2l.tensor([d2l.truncate_pad(
vocab[line], num_steps, vocab['<pad>']) for line in test_tokens])
train_iter = d2l.load_array((train_features, d2l.tensor(train_data[1])),
batch_size)
test_iter = d2l.load_array((test_features, d2l.tensor(test_data[1])),
batch_size,
is_train=False)
return train_iter, test_iter, vocab
"""15.2"""
def predict_sentiment(net, vocab, sequence):
"""้ขๆตๆๆฌๅบๅ็ๆ
ๆ"""
sequence = paddle.to_tensor(vocab[sequence.split()], place=d2l.try_gpu())
label = paddle.argmax(net(sequence.reshape((1, -1))), axis=1)
return 'positive' if label == 1 else 'negative'
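# Minimal usage sketch (assumes a trained `net` and its `vocab`):
# predict_sentiment(net, vocab, 'this movie is so great')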
"""15.4"""
d2l.DATA_HUB['SNLI'] = (
'https://nlp.stanford.edu/projects/snli/snli_1.0.zip',
'9fcde07509c7e87ec61c640c1b2753d9041758e4')
def read_snli(data_dir, is_train):
"""ๅฐSNLIๆฐๆฎ้่งฃๆไธบๅๆใๅ่ฎพๅๆ ็ญพ"""
def extract_text(s):
        # Remove information that we will not use
s = re.sub('\\(', '', s)
s = re.sub('\\)', '', s)
        # Replace two or more consecutive spaces with a single space
s = re.sub('\\s{2,}', ' ', s)
return s.strip()
label_set = {'entailment': 0, 'contradiction': 1, 'neutral': 2}
file_name = os.path.join(data_dir, 'snli_1.0_train.txt'
if is_train else 'snli_1.0_test.txt')
with open(file_name, 'r') as f:
rows = [row.split('\t') for row in f.readlines()[1:]]
premises = [extract_text(row[1]) for row in rows if row[0] in label_set]
hypotheses = [extract_text(row[2]) for row in rows if row[0] \
in label_set]
labels = [label_set[row[0]] for row in rows if row[0] in label_set]
return premises, hypotheses, labels
class SNLIDataset(paddle.io.Dataset):
"""็จไบๅ ่ฝฝSNLIๆฐๆฎ้็่ชๅฎไนๆฐๆฎ้"""
def __init__(self, dataset, num_steps, vocab=None):
self.num_steps = num_steps
all_premise_tokens = d2l.tokenize(dataset[0])
all_hypothesis_tokens = d2l.tokenize(dataset[1])
if vocab is None:
self.vocab = d2l.Vocab(all_premise_tokens + \
all_hypothesis_tokens, min_freq=5, reserved_tokens=['<pad>'])
else:
self.vocab = vocab
self.premises = self._pad(all_premise_tokens)
self.hypotheses = self._pad(all_hypothesis_tokens)
self.labels = paddle.to_tensor(dataset[2])
print('read ' + str(len(self.premises)) + ' examples')
def _pad(self, lines):
return paddle.to_tensor([d2l.truncate_pad(
self.vocab[line], self.num_steps, self.vocab['<pad>'])
for line in lines])
def __getitem__(self, idx):
return (self.premises[idx], self.hypotheses[idx]), self.labels[idx]
def __len__(self):
return len(self.premises)
def load_data_snli(batch_size, num_steps=50):
"""ไธ่ฝฝSNLIๆฐๆฎ้ๅนถ่ฟๅๆฐๆฎ่ฟญไปฃๅจๅ่ฏ่กจ"""
num_workers = d2l.get_dataloader_workers()
data_dir = d2l.download_extract('SNLI')
train_data = read_snli(data_dir, True)
test_data = read_snli(data_dir, False)
train_set = SNLIDataset(train_data, num_steps)
test_set = SNLIDataset(test_data, num_steps, train_set.vocab)
train_iter = paddle.io.DataLoader(train_set, batch_size=batch_size,
shuffle=True,
return_list=True
)
test_iter = paddle.io.DataLoader(test_set, batch_size=batch_size,
shuffle=False,
return_list=True
)
return train_iter, test_iter, train_set.vocab
"""15.5"""
def predict_snli(net, vocab, premise, hypothesis):
"""้ขๆตๅๆๅๅ่ฎพไน้ด็้ป่พๅ
ณ็ณป"""
net.eval()
premise = paddle.to_tensor(vocab[premise], place=d2l.try_gpu())
hypothesis = paddle.to_tensor(vocab[hypothesis], place=d2l.try_gpu())
label = paddle.argmax(net([premise.reshape((1, -1)),
hypothesis.reshape((1, -1))]), axis=1)
return 'entailment' if label == 0 else 'contradiction' if label == 1 \
else 'neutral'
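# Minimal usage sketch (assumes a trained `net` and its `vocab`):
# predict_snli(net, vocab, ['he', 'is', 'good', '.'], ['he', 'is', 'bad', '.'])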
ones = paddle.ones
zeros = paddle.zeros
tensor = paddle.to_tensor
arange = paddle.arange
meshgrid = paddle.meshgrid
sin = paddle.sin
sinh = paddle.sinh
cos = paddle.cos
cosh = paddle.cosh
tanh = paddle.tanh
linspace = paddle.linspace
exp = paddle.exp
log = paddle.log
normal = paddle.normal
rand = paddle.rand
randn = paddle.randn
matmul = paddle.matmul
int32 = paddle.int32
float32 = paddle.float32
concat = paddle.concat
stack = paddle.stack
abs = paddle.abs
eye = paddle.eye
numpy = lambda x, *args, **kwargs: x.detach().numpy(*args, **kwargs)
size = lambda x, *args, **kwargs: x.numel(*args, **kwargs)
reshape = lambda x, *args, **kwargs: x.reshape(*args, **kwargs)
to = lambda x, *args, **kwargs: x.to(*args, **kwargs)
reduce_sum = lambda x, *args, **kwargs: x.sum(*args, **kwargs)
argmax = lambda x, *args, **kwargs: x.argmax(*args, **kwargs)
astype = lambda x, *args, **kwargs: x.type(*args, **kwargs)
transpose = lambda x, *args, **kwargs: x.t(*args, **kwargs)
reduce_mean = lambda x, *args, **kwargs: x.mean(*args, **kwargs)
"""่กฅๅ
ๅฝๆฐ14.3้่ฆ็จๅฐ"""
def show_list_len_pair_hist(legend, xlabel, ylabel, xlist, ylist):
"""Plot the histogram for list length pairs.
Defined in :numref:`sec_machine_translation`"""
d2l.set_figsize()
_, _, patches = d2l.plt.hist(
[[len(l) for l in xlist], [len(l) for l in ylist]])
d2l.plt.xlabel(xlabel)
d2l.plt.ylabel(ylabel)
for patch in patches[1].patches:
patch.set_hatch('/')
d2l.plt.legend(legend)
"""bert้ฃๆกจ้ข่ฎญ็ปๆจกๅ"""
d2l.DATA_HUB['bert_small'] = ('https://paddlenlp.bj.bcebos.com/models/bert.small.paddle.zip', '9fcde07509c7e87ec61c640c1b277509c7e87ec6153d9041758e4')
d2l.DATA_HUB['bert_base'] = ('https://paddlenlp.bj.bcebos.com/models/bert.base.paddle.zip', '9fcde07509c7e87ec61c640c1b27509c7e87ec61753d9041758e4')
| 39.041504
| 151
| 0.596177
|
a9206e5af5513cdc862300ca5b3cde2e68b2dd51
| 1,794
|
py
|
Python
|
backend/server/apps/ml/tests.py
|
BlooAM/ML-WebService
|
f331ad24b5b43bccfcdd062aedcd9413f31d9097
|
[
"MIT"
] | null | null | null |
backend/server/apps/ml/tests.py
|
BlooAM/ML-WebService
|
f331ad24b5b43bccfcdd062aedcd9413f31d9097
|
[
"MIT"
] | null | null | null |
backend/server/apps/ml/tests.py
|
BlooAM/ML-WebService
|
f331ad24b5b43bccfcdd062aedcd9413f31d9097
|
[
"MIT"
] | null | null | null |
import inspect
from django.test import TestCase
from apps.ml.registry import MLRegistry
from apps.ml.income_classifier.random_forest import RandomForestClassifier
class MLTests(TestCase):
def test_rf_algorithm(self):
input_data = {
"age": 37,
"workclass": "Private",
"fnlwgt": 34146,
"education": 'HS-grad',
"education-num": 9,
"marital-status": "Married-civ-spouse",
"occupation": "Craft-repair",
"relationship": "Husband",
"race": "White",
"sex": "Male",
"capital-gain": 0,
"capital-loss": 0,
"hours-per-week": 68,
"native-country": "United-States"
}
my_alg = RandomForestClassifier()
response = my_alg.compute_prediction(input_data)
self.assertEqual('OK', response['status'])
self.assertTrue('label' in response)
self.assertEqual('<=50K', response['label'])
def test_registry(self):
registry = MLRegistry()
self.assertEqual(len(registry.endpoints), 0)
endpoint_name = "income_classifier"
algorithm_object = RandomForestClassifier()
algorithm_name = "random forest"
algorithm_status = "production"
algorithm_version = "0.0.1"
algorithm_owner = "Piotr"
algorithm_description = "Random Forest with simple pre- and post-processing"
algorithm_code = inspect.getsource(RandomForestClassifier)
registry.add_algorithm(endpoint_name, algorithm_object, algorithm_name, algorithm_status,
algorithm_version, algorithm_owner, algorithm_description,
algorithm_code)
self.assertEqual(len(registry.endpoints), 1)
| 38.170213
| 97
| 0.61204
|
842cf6109379393d11e4922ae6f2a723a613c55b
| 942
|
py
|
Python
|
problem solutions/error_calc.py
|
suhailnajeeb/numerical-methods
|
b5f6189e5072407004e97d37edc83356e43449e9
|
[
"MIT"
] | null | null | null |
problem solutions/error_calc.py
|
suhailnajeeb/numerical-methods
|
b5f6189e5072407004e97d37edc83356e43449e9
|
[
"MIT"
] | null | null | null |
problem solutions/error_calc.py
|
suhailnajeeb/numerical-methods
|
b5f6189e5072407004e97d37edc83356e43449e9
|
[
"MIT"
] | 1
|
2020-02-12T09:12:50.000Z
|
2020-02-12T09:12:50.000Z
|
import numpy as np
a0 = 3.56
a1 = 1.4859
a2 = 2.025
x = np.array([1,2,3,4,5])
y = np.array([7.7, 14.5, 26, 40, 62])
xm = np.mean(x).round(4)
ym = np.mean(y).round(4)
print('mean of x: ' + str(xm))
print('mean of y: ' + str(ym))
st = lambda yi : np.square(yi - ym).round(4)
sr = lambda xi,yi : np.square(yi-a0-a1*xi-a2*np.square(xi)).round(4)
def printline(): print('----------------------------------------------------------------------')
ST = 0
SR = 0
printline()
print('xi\t\tyi\t\t(yi-ym)^2\t(yi-a0-a1xi-a2xi^2)^2')
printline()
for i in range(len(x)):
St = st(y[i])
Sr = sr(x[i],y[i])
ST = ST + St
SR = SR + Sr
print(str(x[i]) + '\t\t' + str(y[i]) + '\t\t' + str(St) + '\t\t' + str(Sr))
printline()
print('\t\t\t\t' + str(ST) + '\t\t' + str(SR))
n = len(x)
#S_yx = np.sqrt((SR/(n-2)))
S_y = np.sqrt((SR/(n-3)))
r2 = (ST-SR)/ST
print('Standard error: %f' %S_y)
print('Co-efficient of determination: %f' %r2)
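# Note: the fit y = a0 + a1*x + a2*x^2 has 3 coefficients, so the standard error
# uses n - 3 degrees of freedom, S_y/x = sqrt(SR / (n - 3)), and the coefficient
# of determination is r^2 = (ST - SR) / ST.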
| 20.933333
| 96
| 0.504246
|
1b0c11892ef3aa133d021c6ddd9006dc0cf33060
| 2,367
|
py
|
Python
|
Knil Dungeon/Dialog.py
|
WexyR/KNIL
|
b81c5d4025d1f4e1607b0e948c4611cff0fdbc2e
|
[
"MIT"
] | null | null | null |
Knil Dungeon/Dialog.py
|
WexyR/KNIL
|
b81c5d4025d1f4e1607b0e948c4611cff0fdbc2e
|
[
"MIT"
] | null | null | null |
Knil Dungeon/Dialog.py
|
WexyR/KNIL
|
b81c5d4025d1f4e1607b0e948c4611cff0fdbc2e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import Menu
import Game
import Tools
def dialog_box_create(ROWS, COLS, percent_size_rows=0.25):
"""Initialise une boite de dialogue"""
assert isinstance(percent_size_rows, float) and 0 <= percent_size_rows
d = dict()
x = 1
y = int((ROWS-1) * (1-percent_size_rows))+1
d["ylen"] = int((ROWS-1)*percent_size_rows)
d["xlen"] = COLS - 4
d["COLS"] = COLS
d["ROWS"] = ROWS
d["x"], d["y"] = x, y
return d
def run_dialog(d, txt, speaker=''):
"""Lance un texte dans la boite de dialogue, en tenant compte de sa taille (
ajoute des retours ร la ligne si elle est trop longue, et des pages si il y a
trop de lignes)"""
ROWS, COLS = d["ROWS"], d["COLS"]
    pages = txt.split('\n\n')  # First split the text into pages
for page in pages:
resized_txt_lst = [resized_line for line in page.splitlines() for resized_line in resize_line(line, d["xlen"], '\n')]
for t in range(0, len(resized_txt_lst), d["ylen"]-int(bool(speaker))):
text = "".join(resized_txt_lst[t:t+d["ylen"]-int(bool(speaker))])
if speaker:
                text = Tools.reformat('<bold><underlined>{0} :</>\n'.format(speaker) + text)  # If a speaker is given, display it at the top of each page in bold and underlined
            m = Menu.create([[(text, lambda: None)]], d["x"], d["y"], COLS, d['ylen']+2, text_align="left")  # Use a single-cell menu to display each page
Menu.run(m)
def resize_line(line, size, carac='', pile=None):
"""Fonction rรฉcursive qui sรฉpare une chaรฎne de caractรจre en blocs de taille donnรฉe, en ajoutant
un caractรจre entre chaque bloc si besoin"""
if pile is None:
        pile = []  # Cannot use a mutable list as a default argument value
assert isinstance(line, (str, unicode))
assert isinstance(size, int) and size > 3
assert isinstance(pile, list)
if len(line) > size:
line1, space, remainder = line[:size+1].rpartition(' ')
if space:
line1 += carac
pile.append(line1)
line2 = remainder + line[size+1:]
else:
line1 = line[:size-1] + "-" + carac
pile.append(line1)
line2 = "-" + line[size-1:]
resize_line(line2, size, carac, pile)
else:
pile.append(line + carac)
return pile
return pile
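# Illustrative example: resize_line("hello world", 8) splits on the last space
# that fits and returns ['hello', 'world'] (no separator is added since carac='').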
| 38.177419
| 173
| 0.606253
|
ad3130808f810af7366ef698ce92654fa1e16855
| 524
|
py
|
Python
|
zinc/migrations/0004_zone_cached_ns_records.py
|
PressLabs/zinc
|
9e1dc852f31f9897e7759962cf0f3e6d42fbe637
|
[
"Apache-2.0"
] | 29
|
2017-06-29T15:03:49.000Z
|
2018-01-30T14:07:26.000Z
|
zinc/migrations/0004_zone_cached_ns_records.py
|
presslabs/zinc
|
94146e5203fc93ee0e8bb011a4db0ffcd4b0096e
|
[
"Apache-2.0"
] | 9
|
2019-01-11T09:07:17.000Z
|
2022-02-03T12:50:21.000Z
|
zinc/migrations/0004_zone_cached_ns_records.py
|
PressLabs/zinc
|
9e1dc852f31f9897e7759962cf0f3e6d42fbe637
|
[
"Apache-2.0"
] | 1
|
2020-08-09T18:17:25.000Z
|
2020-08-09T18:17:25.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-15 16:27
from __future__ import unicode_literals
from django.db import migrations, models
import zinc.models
class Migration(migrations.Migration):
dependencies = [
('zinc', '0003_zone_ns_propagated'),
]
operations = [
migrations.AddField(
model_name='zone',
name='cached_ns_records',
field=models.TextField(default=None, null=True, validators=[zinc.models.validate_json]),
),
]
| 23.818182
| 100
| 0.645038
|
e5c3b0c077867dca9511022145c9dbbd6a137532
| 2,406
|
py
|
Python
|
sheets/snippets/sheets_batch_update.py
|
himanshupr2627/python-samples
|
4a04e3aee1068dc1f1402e9e9c90044ff101a6c8
|
[
"Apache-2.0"
] | 479
|
2018-03-16T16:45:11.000Z
|
2020-10-13T11:32:02.000Z
|
sheets/snippets/sheets_batch_update.py
|
himanshupr2627/python-samples
|
4a04e3aee1068dc1f1402e9e9c90044ff101a6c8
|
[
"Apache-2.0"
] | 159
|
2018-03-28T20:03:56.000Z
|
2020-10-13T06:00:08.000Z
|
sheets/snippets/sheets_batch_update.py
|
himanshupr2627/python-samples
|
4a04e3aee1068dc1f1402e9e9c90044ff101a6c8
|
[
"Apache-2.0"
] | 493
|
2018-03-21T01:07:21.000Z
|
2020-10-14T10:31:00.000Z
|
"""
Copyright 2022 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# [START sheets_batch_update]
from __future__ import print_function
import google.auth
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
def sheets_batch_update(spreadsheet_id, title, find, replacement):
"""
Update the sheet details in batch, the user has access to.
Load pre-authorized user credentials from the environment.
TODO(developer) - See https://developers.google.com/identity
for guides on implementing OAuth2 for the application.
"""
creds, _ = google.auth.default()
# pylint: disable=maybe-no-member
try:
        service = build('sheets', 'v4', credentials=creds)
requests = []
# Change the spreadsheet's title.
requests.append({
'updateSpreadsheetProperties': {
'properties': {
'title': title
},
'fields': 'title'
}
})
# Find and replace text
requests.append({
'findReplace': {
'find': find,
'replacement': replacement,
'allSheets': True
}
})
# Add additional requests (operations) ...
body = {
'requests': requests
}
response = service.spreadsheets().batchUpdate(
spreadsheetId=spreadsheet_id,
body=body).execute()
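        # Note: batchUpdate replies come back in the same order as the requests,
        # so replies[1] below should correspond to the findReplace request.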
find_replace_response = response.get('replies')[1].get('findReplace')
print('{0} replacements made.'.format(
find_replace_response.get('occurrencesChanged')))
return response
except HttpError as error:
print(f"An error occurred: {error}")
return error
if __name__ == '__main__':
sheets_batch_update('spreadsheet_id', 'title', 'find', 'replacement')
# [END sheets_batch_update]
| 29.703704
| 77
| 0.640482
|
7fd2dd25859ec86ed4da264c4dfe89e82882c723
| 5,841
|
py
|
Python
|
Processor/diode.py
|
george200150/Licenta
|
9c3f7d86abf3cc5d90204db0acc956eb8bee26dc
|
[
"MIT"
] | null | null | null |
Processor/diode.py
|
george200150/Licenta
|
9c3f7d86abf3cc5d90204db0acc956eb8bee26dc
|
[
"MIT"
] | null | null | null |
Processor/diode.py
|
george200150/Licenta
|
9c3f7d86abf3cc5d90204db0acc956eb8bee26dc
|
[
"MIT"
] | null | null | null |
import os.path as osp
from itertools import chain
import json
from torch.utils.data import Dataset
import numpy as np
from PIL import Image
import random
import matplotlib.pyplot as plt
'''
The json metadata for DIODE is laid out as follows:
train:
outdoor:
scene_000xx:
scan_00yyy:
- 000xx_00yyy_indoors_300_010
- 000xx_00yyy_indoors_300_020
- 000xx_00yyy_indoors_300_030
scene_000kk:
_analogous_
val:
_analogous_
test:
_analogous_
'''
_VALID_SPLITS = ('train', 'val', 'test')
_VALID_SCENE_TYPES = ('indoors', 'outdoor')
def check_and_tuplize_tokens(tokens, valid_tokens):
if not isinstance(tokens, (tuple, list)):
tokens = (tokens,)
for split in tokens:
assert split in valid_tokens
return tokens
def enumerate_paths(src):
"""flatten out a nested dictionary into an iterable
DIODE metadata is a nested dictionary;
One could easily query a particular scene and scan, but sequentially
enumerating files in a nested dictionary is troublesome. This function
recursively traces out and aggregates the leaves of a tree.
"""
if isinstance(src, list):
return src
elif isinstance(src, dict):
acc = []
for k, v in src.items():
_sub_paths = enumerate_paths(v)
_sub_paths = list(map(lambda x: osp.join(k, x), _sub_paths))
acc.append(_sub_paths)
return list(chain.from_iterable(acc))
else:
raise ValueError('do not accept data type {}'.format(type(src)))
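# Illustrative example (POSIX paths): enumerate_paths({'scan_00001': ['a', 'b']})
# returns ['scan_00001/a', 'scan_00001/b'].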
def save_plot_dm(dm, validity_mask):
validity_mask = validity_mask > 0
MIN_DEPTH = 0.5
MAX_DEPTH = min(300, np.percentile(dm, 99))
dm = np.clip(dm, MIN_DEPTH, MAX_DEPTH)
dm = np.log(dm, where=validity_mask)
dm = np.ma.masked_where(~validity_mask, dm)
cmap = plt.cm.jet
cmap.set_bad(color='black')
plt.gca().set_axis_off()
plt.imshow(dm, cmap=cmap, vmax=np.log(MAX_DEPTH))
# plt.savefig("AAA.png", bbox_inches='tight', pad_inches=0, format='png', dpi=1200)
plt.savefig("AAA.png", bbox_inches='tight', pad_inches=0, format='png', dpi=200)
def plot_depth_map(dm, validity_mask):
validity_mask = validity_mask > 0
MIN_DEPTH = 0.5
MAX_DEPTH = min(300, np.percentile(dm, 99))
dm = np.clip(dm, MIN_DEPTH, MAX_DEPTH)
dm = np.log(dm, where=validity_mask)
dm = np.ma.masked_where(~validity_mask, dm)
cmap = plt.cm.jet
cmap.set_bad(color='black')
plt.gca().set_axis_off()
plt.imshow(dm, cmap=cmap, vmax=np.log(MAX_DEPTH))
# fig = plt.figure(figsize=(10.24, 7.68))
# plt.savefig("AAA", bbox_inches='tight', pad_inches=0, format='eps')
# plt.savefig("AAA.png", bbox_inches='tight', pad_inches=0, format='png', dpi=1200)
plt.show()
def plot_normal_map(normal_map):
normal_viz = normal_map[:, ::, :]
normal_viz = normal_viz + np.equal(np.sum(normal_viz, 2,
keepdims=True), 0.).astype(np.float32) * np.min(normal_viz)
normal_viz = (normal_viz - np.min(normal_viz)) / 2.
plt.axis('off')
plt.imshow(normal_viz)
class DIODE(Dataset):
def __init__(self, meta_fname, data_root, splits, scene_types, num_images):
self.data_root = data_root
self.splits = check_and_tuplize_tokens(
splits, _VALID_SPLITS
)
self.scene_types = check_and_tuplize_tokens(
scene_types, _VALID_SCENE_TYPES
)
with open(meta_fname, 'r') as f:
self.meta = json.load(f)
imgs = []
for split in self.splits:
for scene_type in self.scene_types:
_curr = enumerate_paths(self.meta[split][scene_type])
_curr = map(lambda x: osp.join(split, scene_type, x), _curr)
imgs.extend(list(_curr))
self.imgs = imgs
# -----------------------------------------------------------------------------
num_images = min(num_images, len(self.imgs))
self.imgs = random.sample(self.imgs, num_images) # only use num_images images
self.classes = list(set([x.split("\\")[3] for x in self.imgs]))
# -----------------------------------------------------------------------------
def __len__(self):
return len(self.imgs)
def __getitem__(self, index):
im = self.imgs[index]
im_fname = osp.join(self.data_root, '{}.png'.format(im))
de_fname = osp.join(self.data_root, '{}_depth.npy'.format(im))
de_mask_fname = osp.join(self.data_root, '{}_depth_mask.npy'.format(im))
# -----------------------------------------------------------------------------
image_path = osp.join(self.data_root, im_fname)
im = Image.open(image_path)
# cls = image_path.split("\\")[3] # get scene name from image path
cls = image_path.split("\\")[1] # get indoors / outdoors
newsize = (384, 512)
# newsize = (192, 256)
# newsize = (3, 4)
im = im.resize(newsize) # limited GPU resources
im = np.array(im)
# -----------------------------------------------------------------------------
# im = np.array(Image.open(osp.join(self.data_root, im_fname)))
de = np.load(de_fname).squeeze()
de_mask = np.load(de_mask_fname)
# return im, de, de_mask
return image_path, cls, im, de, de_mask # TODO: this is for t-SNE only
# (instead of returning a dict, we return a tuple)
#
# """
# The intrinsic parameters of the camera are:
# [fx, fy, cx, cy] = [886.81, 927.06, 512, 384]
# These are the parameters of the computational camera used to generate RGBD crops from the scans as described in Section
# 3.2 of the paper; please note that fx and fy are slightly different.
# """
| 33.568966
| 121
| 0.594248
|
ca4dd67ec0aed2f8bf00d6183e24f2e58f704b39
| 8,639
|
py
|
Python
|
far_ws/src/follow_ahead_rl/scripts/move_test.py
|
alik604/ra
|
6058a9adb47db93bb86bcb2c224930c5731d663d
|
[
"Unlicense"
] | null | null | null |
far_ws/src/follow_ahead_rl/scripts/move_test.py
|
alik604/ra
|
6058a9adb47db93bb86bcb2c224930c5731d663d
|
[
"Unlicense"
] | 5
|
2021-03-26T01:30:13.000Z
|
2021-04-22T22:19:03.000Z
|
far_ws/src/follow_ahead_rl/scripts/move_test.py
|
alik604/ra
|
6058a9adb47db93bb86bcb2c224930c5731d663d
|
[
"Unlicense"
] | 1
|
2021-05-05T00:57:43.000Z
|
2021-05-05T00:57:43.000Z
|
<<<<<<< HEAD
import gym
import gym_gazeboros_ac
from time import sleep
=======
import pickle
import math
import gym
import gym_gazeboros_ac
import numpy as np
import matplotlib.pyplot as plt
from time import sleep
import cv2 as cv
>>>>>>> MCTS
ENV_NAME = 'gazeborosAC-v0'
EPISODE_LEN = 15
# Robot Chase Simulator 2021
# How to use:
# Terminal 1: Launch turtlebot.launch
# Terminal 2: run `python tf_node.py in old_scripts`
# Terminal 3: Launch navigation.launch
# Terminal 4: run this file
#
# * DON'T FORGET TO SOURCE THE WORKSPACE IN EACH FILE <3
# ie: cd .../far_ws && source devel/setup.bash
if __name__ == '__main__':
print('START Move Test')
<<<<<<< HEAD
env = gym.make(ENV_NAME).unwrapped
env.set_agent(0)
mode = 4
while True:
env.set_person_mode(mode % 5)
mode += 1
state = env.reset()
# Prints out x y position of person
# print(f"person pose = {env.get_person_pos()}")
c = 0
for i in range(EPISODE_LEN):
action = [0.5, 0]
state, reward, done, _ = env.step(action)
# print(state)
sleep(1)
# if done:
# break
c += 1
print("END")
=======
# between pose and pose. where pose is position and orientation, and the 2nd pose is the "center"
def get_relative_pose(pos_goal, orientation_goal, pos_center, orientation_center):
center_pos = np.asarray(pos_center)
center_orientation = orientation_center
relative_pos = np.asarray(pos_goal)
relative_pos2 = np.asarray([relative_pos[0] + math.cos(orientation_goal),
relative_pos[1] + math.sin(orientation_goal)]).T
        # transform the relative position into the center's coordinate frame
rotation_matrix = np.array([[np.cos(center_orientation), np.sin(center_orientation)], # TODO Try both with viz. Ali: I think this is a bug. it should be -center_orientation, like in other `rotation_matrix`s
[-np.sin(center_orientation), np.cos(center_orientation)]])
relative_pos = np.matmul(relative_pos, rotation_matrix)
relative_pos2 = np.matmul(relative_pos2, rotation_matrix)
global_pos = np.asarray(relative_pos + center_pos)
global_pos2 = np.asarray(relative_pos2 + center_pos)
new_orientation = np.arctan2(global_pos2[1]-global_pos[1], global_pos2[0]-global_pos[0])
return global_pos[0], global_pos[1], new_orientation
# def compute_action_set(orientation_rad):
# pi = np.pi
# numb_tickers = 16
# phase_shift = 2*pi/numb_tickers
# velocity_ratios = [1/(1.6*1.6), 1/1.6, 1] # 1.66 or 1.625 or 1.6
# action_set = []
# action_set.append([0, 0]) # do nothing
# for velocity_ratio in velocity_ratios:
# angle = orientation_rad - phase_shift
# for i in range(3): # 3 is hardcoded, if changed, reorientation & plot will be needed
# # (velocity_ratio*np.cos(angle), velocity_ratio*np.sin(angle))
# action_set.append([velocity_ratio, angle]) # [linear_velocity, angular_velocity]
# angle += phase_shift # TODO was angle += phase_shift
# return action_set # 10 actions
def compute_action_set_from_TEB():
trajectories = []
with open('discrete_action_space.pickle', 'rb') as handle:
x = pickle.load(handle)
x, y, theta = list(zip(*x))
for i in range(len(x)):
# print(f'\t{x[i]}, {y[i]}')
# plt.plot(x[i], y[i])
trajectories.extend([[x[i], y[i], theta[i]]])
return trajectories
# action_set = compute_action_set(0)
trajectories = compute_action_set_from_TEB()
trajectories = trajectories[:10]
# for i in range(len(trajectories)): # look like the first elem is indeed the first (meaning its not flipped)
# for ii in range(len(trajectories[i])):
# print(f'trajectories[i][0] {trajectories[i][0]}\n')
# print(f' {abs(trajectories[i][0][0]) < abs(trajectories[i][0][-1])}')
# exit()
mode = 4
env = gym.make(ENV_NAME).unwrapped
env.set_agent(0)
action = [0.5, 0] # linear_velocity, angular_velocity. from 0 to 1, a % of the max_linear_vel (0.8) & max_angular_vel (1.8)
counter = 0
# while False:
while True:
# env.set_person_mode(mode % 5)
mode += 1
state = env.reset()
# env.person.pause() # weird side effect for ending episode (path finished)
# env.person.resume()
counter += 1
# counter = counter % 10
print(f'counter is {counter}')
images = []
for i in range(len(trajectories)):# EPISODE_LEN
# dx_dt, dy_dt, da_dt = env.get_system_velocities() # best to see code. (dx_dt, dy_dt, da_dt)
# print(f'X: {dx_dt} | Y: {dy_dt} | Angular V: {da_dt}')
# Prints out x y heading position of person
# person_state = env.get_person_pos() # [xy[0], xy[1], theta] where theta is orientation
# print(f'Person state is {person_state}')
# print(f'State is {state}') # shape is 47
# print(f"Robot state \n\t position is {env.robot.state_['position']} \n\t orientation is {env.robot.state_['orientation']} \n\t velocity lin & angular is {env.robot.state_['velocity']}")
# print(f'Person state\n\t position is {env.person.state_["position"]}\n\t orientation is {env.person.state_["orientation"]}\n\t velocity lin & angular is {env.person.state_["velocity"]}')
rel_pos = env.get_relative_position(env.person.get_pos(), env.robot)
distance = np.hypot(rel_pos[0], rel_pos[1])
# print(f'get relative position. person.pos()-robot.pos(): {rel_pos} | with a distance of {distance}')
rel_heading = env.get_relative_heading_position(env.robot, env.person)[1]
orientation_rad = np.arctan2(rel_heading[1], rel_heading[0])
orientation = np.rad2deg(orientation_rad)
# print(f'get relative heading: {rel_heading} | orientation_rad {orientation_rad} | orientation {orientation}')
# i is from range(len(trajectories))
recommended_move = i # np.random.choice(len(trajectories))
print(f'will plot {recommended_move}')
path_to_simulate = trajectories[recommended_move].copy()
current_robot_pos = env.robot.state_['position']
# print(f'path_to_simulate is {path_to_simulate[:2]}')
for idx in range(len(path_to_simulate[0])): # TODO this is wrong
path_to_simulate[0][idx] += current_robot_pos[0]
path_to_simulate[1][idx] += current_robot_pos[1]
path_to_simulate = np.around(path_to_simulate, 2)
print(f'current_robot_pos is {current_robot_pos}\npath_to_simulate is {path_to_simulate[:2]}')
# exit()
#### option a Direct #####
cords = path_to_simulate[-1]
x, y = cords[0], cords[1]
# x, y, theta = get_relative_pose([x, y], cords[2], [current_robot_pos[0], current_robot_pos[1]], env.robot.state_['orientation'])
state_rel_person, reward, done, _ = env.step([x, y])
#### option b Micto steps #####
# NUMBER_SUB_STEPS = len(path_to_simulate)
# for idx in range(NUMBER_SUB_STEPS):
# robot_state = {}
# x, y, theta = path_to_simulate[0][idx], path_to_simulate[1][idx], path_to_simulate[2][idx]
# last_x, last_y, last_theta = current_robot_pos[0], current_robot_pos[1], env.robot.state_['orientation']
# x, y, theta = get_relative_pose([x, y], theta, [last_x, last_y], last_theta)
# state_rel_person, reward, done, _ = env.step([x, y])
# state, reward, done, _ = env.step(action)
# action_set = compute_action_set(orientation_rad)
# print(f'action_set {action_set}')
# action = action_set[c]
# image = env.get_current_observation_image()
# images.append(image) # image
images.append((x, y)) # points
sleep(0.5)
# if done:
# break
# c += 1
print(images)
# exit()
for img in images:
# plt.imshow(img, cmap='gray') # image
plt.plot(img, 'ro') # points
plt.show()
print("END")
>>>>>>> MCTS
| 39.447489
| 214
| 0.593008
|
c89b2a10e803867fa6cc6af293cb6128f9e950fd
| 7,220
|
py
|
Python
|
hubspot/crm/extensions/calling/models/error_detail.py
|
cclauss/hubspot-api-python
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
[
"Apache-2.0"
] | null | null | null |
hubspot/crm/extensions/calling/models/error_detail.py
|
cclauss/hubspot-api-python
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
[
"Apache-2.0"
] | null | null | null |
hubspot/crm/extensions/calling/models/error_detail.py
|
cclauss/hubspot-api-python
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Calling Extensions API
Provides a way for apps to add custom calling options to a contact record. This works in conjunction with the [Calling SDK](#), which is used to build your phone/calling UI. The endpoints here allow your service to appear as an option to HubSpot users when they access the *Call* action on a contact record. Once accessed, your custom phone/calling UI will be displayed in an iframe at the specified URL with the specified dimensions on that record. # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.extensions.calling.configuration import Configuration
class ErrorDetail(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'message': 'str',
'_in': 'str',
'code': 'str',
'sub_category': 'str',
'context': 'dict(str, list[str])'
}
attribute_map = {
'message': 'message',
'_in': 'in',
'code': 'code',
'sub_category': 'subCategory',
'context': 'context'
}
def __init__(self, message=None, _in=None, code=None, sub_category=None, context=None, local_vars_configuration=None): # noqa: E501
"""ErrorDetail - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._message = None
self.__in = None
self._code = None
self._sub_category = None
self._context = None
self.discriminator = None
self.message = message
if _in is not None:
self._in = _in
if code is not None:
self.code = code
if sub_category is not None:
self.sub_category = sub_category
if context is not None:
self.context = context
@property
def message(self):
"""Gets the message of this ErrorDetail. # noqa: E501
A human readable message describing the error along with remediation steps where appropriate # noqa: E501
:return: The message of this ErrorDetail. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this ErrorDetail.
A human readable message describing the error along with remediation steps where appropriate # noqa: E501
:param message: The message of this ErrorDetail. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and message is None: # noqa: E501
raise ValueError("Invalid value for `message`, must not be `None`") # noqa: E501
self._message = message
@property
def _in(self):
"""Gets the _in of this ErrorDetail. # noqa: E501
The name of the field or parameter in which the error was found. # noqa: E501
:return: The _in of this ErrorDetail. # noqa: E501
:rtype: str
"""
return self.__in
@_in.setter
def _in(self, _in):
"""Sets the _in of this ErrorDetail.
The name of the field or parameter in which the error was found. # noqa: E501
:param _in: The _in of this ErrorDetail. # noqa: E501
:type: str
"""
self.__in = _in
@property
def code(self):
"""Gets the code of this ErrorDetail. # noqa: E501
The status code associated with the error detail # noqa: E501
:return: The code of this ErrorDetail. # noqa: E501
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this ErrorDetail.
The status code associated with the error detail # noqa: E501
:param code: The code of this ErrorDetail. # noqa: E501
:type: str
"""
self._code = code
@property
def sub_category(self):
"""Gets the sub_category of this ErrorDetail. # noqa: E501
A specific category that contains more specific detail about the error # noqa: E501
:return: The sub_category of this ErrorDetail. # noqa: E501
:rtype: str
"""
return self._sub_category
@sub_category.setter
def sub_category(self, sub_category):
"""Sets the sub_category of this ErrorDetail.
A specific category that contains more specific detail about the error # noqa: E501
:param sub_category: The sub_category of this ErrorDetail. # noqa: E501
:type: str
"""
self._sub_category = sub_category
@property
def context(self):
"""Gets the context of this ErrorDetail. # noqa: E501
Context about the error condition # noqa: E501
:return: The context of this ErrorDetail. # noqa: E501
:rtype: dict(str, list[str])
"""
return self._context
@context.setter
def context(self, context):
"""Sets the context of this ErrorDetail.
Context about the error condition # noqa: E501
:param context: The context of this ErrorDetail. # noqa: E501
:type: dict(str, list[str])
"""
self._context = context
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ErrorDetail):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ErrorDetail):
return True
return self.to_dict() != other.to_dict()
| 30.59322
| 467
| 0.597091
|
300ac6b1f8141c26ff68562048669d787c7fd579
| 74,952
|
py
|
Python
|
theano/tensor/subtensor.py
|
mrgloom/Theano
|
018c7fa9a292858486b92e03a5d0a36cb3e45e1f
|
[
"BSD-3-Clause"
] | 1
|
2020-12-27T13:50:59.000Z
|
2020-12-27T13:50:59.000Z
|
theano/tensor/subtensor.py
|
ynd/Theano
|
5c89596df9e5d8ecafa7d4c0aa8f0f4eb393cd57
|
[
"BSD-3-Clause"
] | null | null | null |
theano/tensor/subtensor.py
|
ynd/Theano
|
5c89596df9e5d8ecafa7d4c0aa8f0f4eb393cd57
|
[
"BSD-3-Clause"
] | null | null | null |
from copy import copy
from itertools import izip
import sys
from textwrap import dedent
import warnings
import logging
_logger = logging.getLogger("theano.tensor.subtensor")
import numpy
import theano
from theano.gradient import DisconnectedType
from theano import gof
from theano.gof import Apply, Constant, hashtype, Op, Type, MethodNotDefined
from theano.gof.python25 import maxsize
from theano.printing import pprint
from theano import scalar as scal
from theano.tensor.basic import (addbroadcast, clip, get_scalar_constant_value,
ARange, TensorType)
from theano.tensor.elemwise import DimShuffle
from theano.tensor.type_other import NoneConst, SliceType, make_slice
from theano import config
inplace_increment = None
if config.cxx:
import theano.gof.cutils # needed to import cutils_ext
try:
from cutils_ext.cutils_ext import inplace_increment
except ImportError:
pass
# Do a lazy import of the sparse module
sparse_module_ref = None
class AdvancedIndexingError(TypeError):
"""
Raised when Subtensor is asked to perform advanced indexing.
"""
def __init__(self, *args):
TypeError.__init__(self, *args)
##########
# Helpful functions to deal with Subtensor and IncSubtensor
##########
def make_constant(args):
"""
Convert python litterals to theano constants in subtensor arguments.
"""
def conv(a):
if a is None:
return a
elif isinstance(a, slice):
return slice(conv(a.start),
conv(a.stop),
conv(a.step))
elif isinstance(a, (int, long, numpy.integer)):
return scal.ScalarConstant(scal.int64, a)
else:
return a
return tuple(map(conv, args))
def get_idx_list(inputs, idx_list):
'''
Given a list of inputs to the subtensor and its idx_list reorders
the inputs according to the idx list to get the right values
'''
# The subtensor (or idx_list) does not depend on the inputs.
if len(inputs) == 1:
return tuple(idx_list)
indices = list(reversed(list(inputs[1:])))
# General case
def convert(entry):
if isinstance(entry, gof.Type):
return indices.pop()
elif isinstance(entry, slice):
return slice(convert(entry.start),
convert(entry.stop),
convert(entry.step))
else:
return entry
cdata = tuple(map(convert, idx_list))
return cdata
def get_canonical_form_slice(theslice, length):
'''
Given a slice [start:stop:step] transform it into a canonical form
that respects the conventions imposed by python and numpy.
In a canonical form a slice is represented by a canonical form slice,
in which 0 <= start <= stop <= length and step > 0, and a flag which says
if the resulting set of numbers needs to be reversed or not.
'''
from theano.tensor import switch, lt, ge, sgn
if isinstance(theslice, slice):
def analyze(x):
try:
x_constant = get_scalar_constant_value(x)
is_constant = True
except theano.tensor.NotScalarConstantError:
x_constant = theano.tensor.extract_constant(x)
is_constant = False
return x_constant, is_constant
start, is_start_constant = analyze(theslice.start)
stop, is_stop_constant = analyze(theslice.stop)
step, is_step_constant = analyze(theslice.step)
length, is_length_constant = analyze(length)
if step is None:
step = 1
is_step_constant = True
# First handle the easier and common case where `step` is 1 and
# either `start` or `stop` is a range boundary. More specializations
# could be added later. This makes the resulting graph smaller than
# in the generic case below.
if step == 1:
is_start_0 = (
start in [None, 0] or
(is_start_constant and is_length_constant and
start < 0 and start + length <= 0))
is_stop_length = (
stop in [None, length, maxsize] or
(is_stop_constant and is_length_constant and
stop >= length))
if is_start_0:
# 0:stop:1
if is_stop_length:
# Full slice.
return slice(0, length, 1), 1
if is_stop_constant and stop >= 0:
return (slice(0, switch(lt(stop, length), stop, length),
1), 1)
stop_plus_len = stop + length
stop = switch(
lt(stop, 0),
# stop < 0
switch(
lt(stop_plus_len, 0),
# stop + len < 0
0,
# stop + len >= 0
stop_plus_len),
# stop >= 0: use min(stop, length)
switch(lt(stop, length), stop, length))
return slice(0, stop, 1), 1
elif is_stop_length:
# start:length:1
if is_start_constant and start >= 0:
return slice(switch(lt(start, length), start, length),
length, 1), 1
start_plus_len = start + length
start = switch(
lt(start, 0),
# start < 0
switch(
lt(start_plus_len, 0),
# start + len < 0
0,
# start + len >= 0
start_plus_len),
# start >= 0: use min(start, length)
switch(lt(start, length), start, length))
return slice(start, length, 1), 1
# This is the generic case.
if is_step_constant:
# When we know the sign of `step`, the graph can be made simpler.
assert step != 0
if step > 0:
def switch_neg_step(a, b):
return b
abs_step = step
sgn_step = 1
else:
def switch_neg_step(a, b):
return a
abs_step = -step
sgn_step = -1
else:
is_step_neg = lt(step, 0)
def switch_neg_step(a, b):
return switch(is_step_neg, a, b)
abs_step = abs(step)
sgn_step = sgn(step)
defstart = switch_neg_step(length - 1, 0)
defstop = switch_neg_step(-1, length)
if start is None:
start = defstart
else:
start = switch(lt(start, 0), start + length, start)
start = switch(lt(start, 0), switch_neg_step(-1, 0), start)
start = switch(ge(start, length),
switch_neg_step(length - 1, length),
start)
if stop in [None, maxsize]:
# The special "maxsize" case is probably not needed here,
# as slices containing maxsize are not generated by
# __getslice__ anymore.
stop = defstop
else:
stop = switch(lt(stop, 0), stop + length, stop)
stop = switch(lt(stop, 0), -1, stop)
stop = switch(ge(stop, length), length, stop)
nw_stop = switch_neg_step(start + 1, stop)
slice_len = (start - stop - 1) // abs_step + 1
slice_len = switch(lt(slice_len, 0), 0, slice_len)
neg_start = nw_stop - (slice_len - 1) * abs_step - 1
neg_start = switch(lt(neg_start, 0), (nw_stop - 1), neg_start)
nw_start = switch_neg_step(neg_start, start)
nw_start = switch(lt(nw_start, 0), 0, nw_start)
nw_stop = switch(lt(nw_stop, 0), 0, nw_stop)
# Ensure start <= stop.
nw_start = switch(lt(nw_start, nw_stop), nw_start, nw_stop)
nw_step = abs_step
if step != 1:
reverse = sgn_step
return slice(nw_start, nw_stop, nw_step), reverse
else:
return slice(nw_start, nw_stop, nw_step), 1
else:
value = theano.tensor.extract_constant(theslice)
value = switch(lt(value, 0), (value + length), value)
return value, 1
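# --- Editor's illustration (not part of the original Theano source) ---
# A plain-Python sketch of the invariant get_canonical_form_slice encodes:
# a negative-step slice selects the same elements as its canonical
# positive-step slice, read in reverse.  Tracing the generic case above for
# x[8:2:-2] on a length-10 sequence gives the canonical slice(4, 9, 2) with
# reverse flag -1 (read backwards):
_x = list(range(10))
assert _x[8:2:-2] == _x[4:9:2][::-1] == [8, 6, 4]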
class Subtensor(Op):
"""Return a subtensor view
The inputs array is the tensor x, followed by scalar integer types.
TODO: WRITEME: how are the scalar integer variables formatted?
This class uses a relatively complex internal representation of the inputs
to remember how the input tensor x should be sliced.
idx_list: instance variable TODO: WRITEME: is this a list or a tuple?
(old docstring gives two conflicting
descriptions)
elements are either integers, theano scalar types, or slices.
one element per "explicitly named dimension"
TODO: WRITEME: what is an "explicitly named dimension" ?
if integer:
indexes into the inputs array
if slice:
start/stop/step members of each slice are integer indices
into the inputs array or None
            integer indices may be actual integers or theano scalar types
Note that the idx_list defines the Op, so two Subtensor instances are
considered to be different Ops if they have different idx_list fields.
This means that the entries in it are theano Types, not theano Variables.
@todo: add support for advanced tensor indexing (in Subtensor_dx too).
"""
e_invalid = ('The index list is longer (size %d) than the number of '
'dimensions of the tensor(namely %d). You are asking for '
'a dimension of the tensor that does not exist! You might '
'need to use dimshuffle to add extra dimension to your '
'tensor.')
e_subslice = 'nested slicing is not supported'
e_indextype = "Invalid index type or slice for Subtensor"
debug = 0
view_map = {0: [0]}
@staticmethod
def collapse(idxs, cond):
"""
idxs: a list of indices or slices.
cond: a callable that returns a bool
returns: idxs, with the slices flattened out into a list.
if cond is true for an entry, does not flatten it.
"""
ret = []
def helper(entry):
if cond(entry):
ret.append(entry)
elif isinstance(entry, slice):
helper(entry.start)
helper(entry.stop)
helper(entry.step)
for idx in idxs:
helper(idx)
return ret
@staticmethod
def convert(entry, slice_ok=True):
"""
The "idx_list" field is unique to each Subtensor instance.
It is not unique to each Apply node, so it should not refer to
specific Variables. This method changes references to Variables
into references to Types.
TODO: WRITEME: This method also accepts "entry" already being a Type;
when would that happen?
"""
invalid_scal_types = [scal.float64, scal.float32]
scal_types = [scal.int64, scal.int32, scal.int16, scal.int8]
tensor_types = [theano.tensor.lscalar, theano.tensor.iscalar,
theano.tensor.wscalar, theano.tensor.bscalar]
invalid_tensor_types = [theano.tensor.fscalar, theano.tensor.dscalar,
theano.tensor.cscalar, theano.tensor.zscalar]
if (isinstance(entry, gof.Variable)
and (entry.type in invalid_scal_types
or entry.type in invalid_tensor_types)):
raise TypeError("Expected an integer")
if isinstance(entry, gof.Variable) and entry.type in scal_types:
return entry.type
elif isinstance(entry, gof.Type) and entry in scal_types:
return entry
if (isinstance(entry, gof.Variable)
and entry.type in tensor_types
and numpy.all(entry.type.broadcastable)):
return scal.get_scalar_type(entry.type.dtype)
elif (isinstance(entry, gof.Type)
and entry in tensor_types
and numpy.all(entry.broadcastable)):
return scal.get_scalar_type(entry.dtype)
elif slice_ok and isinstance(entry, slice):
a = entry.start
b = entry.stop
c = entry.step
if a is not None:
slice_a = Subtensor.convert(a, False)
else:
slice_a = None
if b is not None and b != maxsize:
# The special "maxsize" case is probably not needed here,
# as slices containing maxsize are not generated by
# __getslice__ anymore.
slice_b = Subtensor.convert(b, False)
else:
slice_b = None
if c is not None:
slice_c = Subtensor.convert(c, False)
else:
slice_c = None
return slice(slice_a, slice_b, slice_c)
elif isinstance(entry, (int, long, numpy.integer)):
# Disallow the use of python scalars in idx_list
raise TypeError("Python scalar in idx_list."
"Please report this error to theano-dev.")
else:
raise AdvancedIndexingError(Subtensor.e_indextype, entry)
def get_constant_idx(self, inputs, allow_partial=False):
"""
Return the idx_list with constant inputs replaced by their
python scalar equivalent. May raise
`theano.tensor.NotScalarConstantError` if the idx contains
non-constant entries.
If allow_partial is True, then entries that are not constant
will stay as their input variable rather than raising an
exception.
None entries are always left as-is.
Example usage (where v, a are appropriately typed theano variables):
>>> b = a[v, 1:3]
>>> b.owner.op.idx_list
(Scalar(int64), slice(Scalar(int64), Scalar(int64), None))
>>> b.owner.op.get_constant_idx(b.owner.inputs, allow_partial=True)
[v, slice(1, 3, None)]
>>> b.owner.op.get_constant_idx(b.owner.inputs)
NotScalarConstantError: v
"""
real_idx = get_idx_list(inputs, self.idx_list)
def conv(val):
if val is None:
return None
elif isinstance(val, slice):
return slice(conv(val.start),
conv(val.stop),
conv(val.step))
else:
try:
return get_scalar_constant_value(val)
except theano.tensor.NotScalarConstantError:
if allow_partial:
return val
else:
raise
return map(conv, real_idx)
def __init__(self, idx_list):
self.idx_list = tuple(map(self.convert, idx_list))
@staticmethod
def my_as_scalar(a):
# Since scal.as_scalar does not know about tensor types (it would
        # create a circular import), this method converts either a
# TensorVariable or a ScalarVariable to a scalar.
if isinstance(a, gof.Variable) and isinstance(a.type, TensorType):
return theano.tensor.scalar_from_tensor(a)
else:
return scal.as_scalar(a)
def make_node(self, x, *inputs):
"""
x: the tensor to take a subtensor of
inputs: a list of theano Scalars
"""
x = theano.tensor.as_tensor_variable(x)
inputs = tuple(self.my_as_scalar(a) for a in inputs)
idx_list = list(self.idx_list)
if len(idx_list) > x.type.ndim:
exception = ValueError(Subtensor.e_invalid % (
len(idx_list), x.type.ndim))
exception.subtensor_invalid = True
raise exception
input_types = Subtensor.collapse(idx_list,
lambda entry: isinstance(entry, gof.Type))
if len(inputs) != len(input_types):
raise IndexError(
"Not enough inputs to fill in the Subtensor template.",
inputs, idx_list)
for input, expected_type in izip(inputs, input_types):
if input.type != expected_type:
raise TypeError(
"Wrong type for Subtensor template. Expected %s, got %s."
% (input.type, expected_type))
# infer the broadcasting pattern
padded = (self.get_constant_idx((None,)+inputs, allow_partial=True)
+ [slice(None, None, None)] * (x.type.ndim - len(idx_list)))
broadcastable = []
for i, (p, bc) in enumerate(izip(padded, x.type.broadcastable)):
if isinstance(p, slice):
if bc and p.start in [None, 0]:
start = p.start
if start is None:
start = 0
if (p.stop is None or
(isinstance(p.stop, (int, numpy.integer)) and
p.stop > start)):
broadcastable.append(True)
continue
broadcastable.append(False)
return gof.Apply(self,
(x, ) + inputs,
[theano.tensor.tensor(dtype=x.type.dtype,
broadcastable=broadcastable)])
def perform(self, node, inputs, out_):
out, = out_
x = inputs[0]
cdata = get_idx_list(inputs, self.idx_list)
if len(cdata) == 1:
cdata = cdata[0]
out[0] = numpy.asarray(x.__getitem__(cdata))
def infer_shape(self, node, shapes):
xshp = shapes[0]
assert len(xshp) == node.inputs[0].ndim
outshp = []
actual_idx_list = list(get_idx_list(node.inputs, self.idx_list))
padded = (actual_idx_list +
[slice(None, None, None)] * (len(xshp) - len(self.idx_list)))
i = 0
for idx, xl in izip(padded, xshp):
if isinstance(idx, slice):
# If it is the default (None, None, None) slice, or a variant,
# the shape will be xl
if ((idx.start in [None, 0])
and (idx.stop in [None, maxsize])
and (idx.step is None or idx.step == 1)):
outshp.append(xl)
else:
cnf = get_canonical_form_slice(idx, xl)[0]
if cnf.step == 1:
length = cnf.stop - cnf.start
else:
length = (cnf.stop - cnf.start - 1) // cnf.step + 1
outshp.append(length)
i += 1
else:
# That dimension is dropped
pass
assert i == node.outputs[0].ndim
assert len(outshp) == node.outputs[0].ndim
return [outshp]
def grad(self, inputs, grads):
gz, = grads
x = inputs[0]
rest = inputs[1:]
output = self(*inputs)
if output.dtype.find('int') != -1:
first = x.zeros_like().astype(theano.config.floatX)
else:
first = IncSubtensor(self.idx_list)(x.zeros_like(), gz, *rest)
return ([first]
+ [DisconnectedType()()] * len(rest))
def connection_pattern(self, node):
rval = [[True]]
for ipt in node.inputs[1:]:
rval.append([False])
return rval
def __eq__(self, other):
return type(self) == type(other) and self.idx_list == other.idx_list
def __hash__(self):
# TODO: optimize by cache this hash value
msg = []
for entry in self.idx_list:
if isinstance(entry, slice):
msg += [(entry.start, entry.stop, entry.step)]
else:
msg += [entry]
idx_list = tuple(msg)
# backport
# idx_list = tuple((entry.start, entry.stop, entry.step)
# if isinstance(entry, slice)
# else entry
# for entry in self.idx_list)
return hash(idx_list)
@staticmethod
def str_from_slice(entry):
msg = []
for x in [entry.start, entry.stop, entry.step]:
if x is None:
msg.append("")
else:
msg.append(str(x))
return ":".join(msg)
def __str__(self):
indices = []
for entry in self.idx_list:
if isinstance(entry, slice):
indices.append(self.str_from_slice(entry))
else:
indices.append(str(entry))
return "%s{%s}" % (self.__class__.__name__, ", ".join(indices))
@staticmethod
def default_helper_c_code_args():
"""
Returns a dictionary of default arguments to
helper_c_code
"""
return {
"c_prefix": "PyArray",
"strides_mul": 1,
}
@staticmethod
def helper_c_code(node, name, inputs, outputs, sub, idx_list, view_ndim,
c_prefix=None,
strides_mul=None,
):
"""
        The c_prefix parameter is there to allow reusing this
        function on PyArray and CudaNdarray objects.
        This function takes x as input.
"""
default_args = Subtensor.default_helper_c_code_args()
if strides_mul is None:
strides_mul = default_args['strides_mul']
if c_prefix is None:
c_prefix = default_args['c_prefix']
#
# two arrays are created in C code:
# is_slice: len == ndim, 0 means int, 1 means slice
# subtensor_spec: len = n_ints + 3 * n_slices
#
fail = sub['fail']
init_cmds = [] # initialization for subtensor_spec
is_slice = []
# TODO: change that, it might lead to unexpected results,
# see assembla-#767
NONE_CODE = maxsize - 1
pos = [0, 1] # annoying version of global variable for init_entry
def inc_spec_pos(amt):
pos[0] += amt
def inc_input_pos(amt):
pos[1] += amt
def spec_pos():
return pos[0]
def input_pos():
return pos[1]
def init_entry(entry, depth=0):
if isinstance(entry, (numpy.integer, int)):
init_cmds.append(
"subtensor_spec[%i] = %i;" % (spec_pos(),
entry))
inc_spec_pos(1)
if depth == 0:
is_slice.append(0)
elif isinstance(entry, Type):
init_cmds.append(
"subtensor_spec[%i] = %s;" % (spec_pos(),
inputs[input_pos()]))
inc_spec_pos(1)
inc_input_pos(1)
if depth == 0:
is_slice.append(0)
elif entry is None:
init_cmds.append(
"subtensor_spec[%i] = %i;" % (spec_pos(),
NONE_CODE))
inc_spec_pos(1)
if depth == 0:
is_slice.append(0)
elif depth == 0 and isinstance(entry, slice):
init_entry(entry.start, depth + 1)
init_entry(entry.stop, depth + 1)
init_entry(entry.step, depth + 1)
is_slice.append(1)
else:
assert 0, entry
for entry in idx_list:
init_entry(entry)
# make sure we used all inputs
assert input_pos() == len(inputs), input_pos()
assert len(is_slice) <= node.inputs[0].ndim, node.inputs[0].ndim
len_is_slice = len(is_slice)
len_subtensor_spec = spec_pos()
subensor_spec = "npy_intp subtensor_spec[%(len_subtensor_spec)s];" % locals()
if len_subtensor_spec == 0:
subensor_spec = "npy_intp * subtensor_spec = NULL;"
if is_slice:
is_slice_init = "int is_slice[] = {" + ",".join([str(s) for s in is_slice]) + "};"
else:
is_slice_init = "int* is_slice = NULL;"
subtensor_init = "\n".join(init_cmds)
x, = inputs[:1]
z, = outputs
if view_ndim:
rval = """
// Argument of the view
npy_intp xview_dims[%(view_ndim)s];
npy_intp xview_strides[%(view_ndim)s];
"""% locals()
else:
rval = """
// Argument of the view
npy_intp* xview_dims = NULL;
npy_intp* xview_strides = NULL;
"""
rval += """
// One more argument of the view
npy_intp xview_offset = 0;
// The subtensor is created by iterating over the dimensions
// and updating stride, shape, and data pointers
%(is_slice_init)s
%(subensor_spec)s
%(subtensor_init)s;
int spec_pos = 0; //position in subtensor_spec
int inner_ii = 0; // the current dimension of zview
int outer_ii = 0; // current dimension of z
for (; outer_ii < %(len_is_slice)s; ++outer_ii)
{
if (is_slice[outer_ii])
{
npy_intp length = %(c_prefix)s_DIMS(%(x)s)[outer_ii];
npy_intp slicelength;
npy_intp start = subtensor_spec[spec_pos+0];
npy_intp stop = subtensor_spec[spec_pos+1];
npy_intp step = subtensor_spec[spec_pos+2];
if (step == %(NONE_CODE)s) step = 1;
npy_intp defstart = step < 0 ? length-1 : 0;
npy_intp defstop = step < 0 ? -1 : length;
// logic adapted from
// PySlice_GetIndicesEx in python source
if (!step)
{
PyErr_Format(PyExc_ValueError,
"slice step cannot be zero");
%(fail)s;
}
if (start == %(NONE_CODE)s)
{
start = defstart;
}
else
{
if (start < 0) start += length;
if (start < 0) start = (step < 0) ? -1 : 0;
if (start >= length)
start = (step < 0) ? length - 1 : length;
}
if (stop == %(NONE_CODE)s)
{
stop = defstop;
}
else
{
if (stop < 0) stop += length;
if (stop < 0) stop = (step < 0) ? -1 : 0;
if (stop >= length)
stop = (step < 0) ? length - 1 : length;
}
if ((step < 0 && stop >= start)
|| (step > 0 && start >= stop)) {
slicelength = 0;
}
else if (step < 0) {
slicelength = (stop-start+1)/step+1;
}
else {
slicelength = (stop-start-1)/step+1;
}
if (0){
fprintf(stdout, "start %%zi\\n", start);
fprintf(stdout, "stop %%zi\\n", stop);
fprintf(stdout, "step %%zi\\n", step);
fprintf(stdout, "length %%zi\\n", length);
fprintf(stdout, "slicelength %%zi\\n", slicelength);
}
assert (slicelength <= length);
xview_offset += %(c_prefix)s_STRIDES(%(x)s)[outer_ii] * start *
%(strides_mul)s;
xview_dims[inner_ii] = slicelength;
xview_strides[inner_ii] = %(c_prefix)s_STRIDES(%(x)s)[outer_ii] * step;
inner_ii += 1;
spec_pos += 3;
}
else // tuple coord `outer_ii` is an int
{
int idx = subtensor_spec[spec_pos];
if (idx < 0) idx += %(c_prefix)s_DIMS(%(x)s)[outer_ii];
if (idx >= 0)
{
if (idx < %(c_prefix)s_DIMS(%(x)s)[outer_ii])
{
xview_offset += %(c_prefix)s_STRIDES(%(x)s)[outer_ii] * idx *
%(strides_mul)s;
}
else
{
PyErr_Format(PyExc_IndexError,"index out of bounds");
%(fail)s;
}
}
else
{
PyErr_Format(PyExc_IndexError,"index out of bounds");
%(fail)s;
}
spec_pos += 1;
}
}
assert (inner_ii <= %(view_ndim)s);
while (inner_ii < %(view_ndim)s)
{
assert (outer_ii < %(c_prefix)s_NDIM(%(x)s));
xview_dims[inner_ii] = %(c_prefix)s_DIMS(%(x)s)[outer_ii];
xview_strides[inner_ii] = %(c_prefix)s_STRIDES(%(x)s)[outer_ii];
inner_ii += 1;
outer_ii += 1;
}
""" % locals()
# print rval
return rval
@staticmethod
def helper_c_code_cache_version():
return (8,)
def c_code(self, node, name, inputs, outputs, sub): # DEBUG
if not isinstance(node.inputs[0].type, theano.tensor.TensorType):
raise NotImplementedError()
x = inputs[0]
z, = outputs
view_ndim = node.outputs[0].ndim
fail = sub['fail']
decl = "PyArrayObject * xview = NULL;"
get_xview = self.helper_c_code(node, name, inputs, outputs, sub,
self.idx_list, view_ndim)
build_view = """
//TODO: give this Op a second output so that this view can be cached
//TODO: alternatively, fix the memory leak on failure
Py_INCREF(PyArray_DESCR(%(x)s));
xview = (PyArrayObject*)PyArray_NewFromDescr(
&PyArray_Type,
PyArray_DESCR(%(x)s),
%(view_ndim)s,
xview_dims,
xview_strides,
PyArray_BYTES(%(x)s) + xview_offset,
PyArray_FLAGS(%(x)s),
NULL);
assert (PyArray_NDIM(xview) == %(view_ndim)s);
if (!xview)
{
%(fail)s;
}
""" % locals()
finish_view = """
//This is needed for NumPy 1.5, but not 1.7.2
PyArray_UpdateFlags(xview, NPY_ARRAY_C_CONTIGUOUS| NPY_ARRAY_F_CONTIGUOUS);
Py_XDECREF(%(z)s);
Py_INCREF(py_%(x)s);
#if NPY_API_VERSION < 0x00000007
PyArray_BASE(xview) = py_%(x)s;
#else
PyArray_SetBaseObject(xview, py_%(x)s);
#endif
assert(py_%(x)s == (PyObject*)%(x)s);
%(z)s = xview;
""" % locals()
return decl + get_xview + build_view + finish_view
def c_code_cache_version(self):
hv = self.helper_c_code_cache_version()
# If `helper_c_code_cache_version` is not versioned we do not want to
# have a versioned version of this op's C code.
if len(hv) == 0:
return ()
return (2, hv)
def R_op(self, inputs, eval_points):
        # Subtensor is not differentiable wrt its indices, therefore we
# do not even need to consider the eval_points provided for those
# (they should be defaulted to zeros_like by the global R_op)
if eval_points[0] is None:
return [None]
return self(eval_points[0], *inputs[1:], **dict(return_list=True))
class SubtensorPrinter:
def process(self, r, pstate):
if r.owner is None:
raise TypeError("Can only print Subtensor.")
elif isinstance(r.owner.op, Subtensor):
idxs = r.owner.op.idx_list
inputs = list(r.owner.inputs)
input = inputs.pop()
sidxs = []
inbrack_pstate = pstate.clone(precedence=-1000)
for entry in idxs:
if isinstance(entry, int):
sidxs.append(str(entry))
elif isinstance(entry, scal.Scalar):
sidxs.append(inbrack_pstate.pprinter.process(inputs.pop()))
elif isinstance(entry, slice):
if entry.start is None or entry.start == 0:
msg1 = ""
else:
msg1 = entry.start
if entry.stop is None or entry.stop == maxsize:
msg2 = ""
else:
msg2 = entry.stop
if entry.step is None:
msg3 = ""
else:
msg3 = ":%s" % entry.step
sidxs.append("%s:%s%s" % (msg1, msg2, msg3))
return "%s[%s]" % (pstate.pprinter.process(
input,
pstate.clone(precedence=1000)),
", ".join(sidxs))
else:
raise TypeError("Can only print Subtensor.")
pprint.assign(lambda pstate, r: r.owner and isinstance(r.owner.op, Subtensor),
SubtensorPrinter())
def set_subtensor(x, y, inplace=False,
tolerate_inplace_aliasing=False):
"""Return x with the given subtensor overwritten by y.
Example: To replicate the numpy expression "r[10:] = 5", type
>>> new_r = set_subtensor(r[10:], 5)
:param x: symbolic variable for the lvalue of = operation
:param y: symbolic variable for the rvalue of = operation
:param tolerate_inplace_aliasing: see inc_subtensor for documentation.
"""
return inc_subtensor(x, y, inplace, set_instead_of_inc=True,
tolerate_inplace_aliasing=tolerate_inplace_aliasing)
def inc_subtensor(x, y, inplace=False, set_instead_of_inc=False,
tolerate_inplace_aliasing=False):
"""Return x with the given subtensor incremented by y.
:param x: the symbolic result of a Subtensor operation.
    :param y: the amount by which to increment the subtensor in question
:param tolerate_inplace_aliasing: allow x and y to be views of a single
underlying array even while working inplace. For correct results,
x and y must not be overlapping views; if they overlap, the result
of this Op will generally be incorrect. This value has no effect if
inplace=False.
Example: To replicate the numpy expression "r[10:] += 5", type
>>> new_r = inc_subtensor(r[10:], 5)
"""
# First of all, y cannot have a higher dimension than x,
# nor have non-broadcastable dimensions where x is broadcastable.
x = theano.tensor.as_tensor_variable(x)
y = theano.tensor.as_tensor_variable(y)
if y.ndim > x.ndim:
raise TypeError(("Trying to increment a %d-dimensional "
"subtensor with a %d-dimensional value.") % (x.ndim, y.ndim))
for dim in range(y.ndim):
dim_offset = x.ndim - y.ndim
if (x.broadcastable[dim + dim_offset]
and not y.broadcastable[dim]):
# It is acceptable to try to increment a subtensor with a
# broadcastable dim with a tensor that is not broadcastable
# on that dimension. However, its length must then be 1.
# We insert a Rebroadcast Op to make sure it is the case.
y = addbroadcast(y, dim)
if not x.owner:
raise TypeError('x must be the result of a subtensor operation')
# retrieve idx_list from x.owner
if isinstance(x.owner.op, Subtensor):
if tolerate_inplace_aliasing:
destroyhandler_tolerate_aliased = [[0, 1]]
else:
destroyhandler_tolerate_aliased = []
the_op = IncSubtensor(x.owner.op.idx_list, inplace, set_instead_of_inc,
destroyhandler_tolerate_aliased=destroyhandler_tolerate_aliased
)
real_x = x.owner.inputs[0]
real_idxargs = x.owner.inputs[1:]
return the_op(real_x, y, *real_idxargs)
elif isinstance(x.owner.op, AdvancedSubtensor1):
real_x = x.owner.inputs[0]
ilist = x.owner.inputs[1]
the_op = AdvancedIncSubtensor1(inplace,
set_instead_of_inc=set_instead_of_inc)
return the_op(real_x, y, ilist)
elif isinstance(x.owner.op, AdvancedSubtensor):
real_x = x.owner.inputs[0]
ilist = x.owner.inputs[1:]
the_op = AdvancedIncSubtensor(inplace,
set_instead_of_inc=set_instead_of_inc)
return the_op(real_x, y, *ilist)
elif isinstance(x.owner.op, DimShuffle):
inner_x = x.owner.inputs[0]
# In the dimshuffle case, there are in fact two dimshuffles:
# one to make the indexed dimension the last one,
# and one to put it back where it was. So, in the case where we have
# inc_subtensor(x[:,i], y), the graph is actually
# inc_subtensor((x.T)[i].T, y).
# We could get all the way to x, and then get rid of the dimshuffles
# completely, but the problem is that advanced_inc_subtensor1 can only
# work on the first (outer-most, left-most) dimension of x,
# just like advanced_subtensor1.
# So we call advanced_inc_subtensor1(x.T, i, y), but then we need to
# return something that has the same shape as x, not as x.T (inner_x).
# So re-apply the outer dimshuffle on the new inc_subtensor,
# and return advanced_inc_subtensor1(x.T, i, y).T.
inner_incsubtensor = inc_subtensor(inner_x, y,
inplace=inplace,
set_instead_of_inc=set_instead_of_inc,
tolerate_inplace_aliasing=tolerate_inplace_aliasing)
return x.owner.op(inner_incsubtensor, *x.owner.inputs[1:])
elif isinstance(x.owner.op, theano.tensor.Reshape):
inner_x = x.owner.inputs[0]
# Try to apply inc_subtensor on inner_x.
# If it works, there is no need to reshape, as the inc_subtensor
# will have the same shape as inner_x, which is what we want.
inner_incsubtensor = inc_subtensor(inner_x, y.flatten(),
inplace=inplace,
set_instead_of_inc=set_instead_of_inc,
tolerate_inplace_aliasing=tolerate_inplace_aliasing)
return inner_incsubtensor
else:
raise TypeError('x must be the result of a subtensor operation')
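# --- Editor's illustration (not part of the original Theano source) ---
# The NumPy statements that set_subtensor/inc_subtensor emulate, on a plain
# array.  The Theano versions are functional: they return a new variable
# instead of mutating r, unless an inplace optimization applies.
_r = numpy.arange(12)
_r_set = _r.copy(); _r_set[10:] = 5    # what set_subtensor(r[10:], 5) computes
_r_inc = _r.copy(); _r_inc[10:] += 5   # what inc_subtensor(r[10:], 5) computes
assert list(_r_set[10:]) == [5, 5]
assert list(_r_inc[10:]) == [15, 16]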
class IncSubtensor(Op):
"""Increment a subtensor.
This is like numpy's
x[i,j,k] += y
It is used internally to implement the gradient on SubTensor.
:param set_instead_of_inc: if True set the subtensor to the value instead
of incrementing it by that value.
"""
def __init__(self, idx_list, inplace=False, set_instead_of_inc=False,
destroyhandler_tolerate_aliased=None):
if destroyhandler_tolerate_aliased is None:
destroyhandler_tolerate_aliased = []
self.idx_list = map(Subtensor.convert, idx_list)
self.inplace = inplace
if inplace:
self.destroy_map = {0: [0]}
self.destroyhandler_tolerate_aliased = list(
destroyhandler_tolerate_aliased)
self.set_instead_of_inc = set_instead_of_inc
def __eq__(self, other):
return type(self) == type(other) \
and self.idx_list == other.idx_list \
and self.inplace == other.inplace \
and self.set_instead_of_inc == other.set_instead_of_inc
def __hash__(self):
msg = []
for entry in self.idx_list:
if isinstance(entry, slice):
msg += [(entry.start, entry.stop, entry.step)]
else:
msg += [entry]
idx_list = tuple(msg)
# backport
#idx_list = tuple((entry.start, entry.stop, entry.step)
# if isinstance(entry, slice)
# else entry
# for entry in self.idx_list)
return hashtype(self) ^ hash(idx_list) ^ hash(self.inplace) \
^ hash(self.set_instead_of_inc)
def __str__(self):
indices = []
for entry in self.idx_list:
if isinstance(entry, slice):
indices.append(Subtensor.str_from_slice(entry))
else:
indices.append(str(entry))
if self.inplace:
msg = 'Inplace'
else:
msg = ''
if not self.set_instead_of_inc:
msg += 'Inc'
else:
msg += 'Set'
return "%s{%s;%s}" % (
self.__class__.__name__,
msg,
", ".join(indices))
def make_node(self, x, y, *inputs):
"""
x: the tensor to increment
y: the value to increment by
inputs: TODO WRITEME
"""
x, y = map(theano.tensor.as_tensor_variable, [x, y])
if y.ndim > x.ndim:
raise ValueError(("Trying to increment a %d-dimensional "
"subtensor with a %d-dimensional value.") % (x.ndim,
y.ndim))
inputs = tuple(map(Subtensor.my_as_scalar, inputs))
idx_list = list(self.idx_list)
if len(idx_list) > x.type.ndim:
exception = ValueError(
Subtensor.e_invalid % (
len(idx_list),
x.type.ndim))
exception.subtensor_invalid = True
raise exception
input_types = Subtensor.collapse(idx_list,
lambda entry: isinstance(entry, gof.Type))
if len(inputs) != len(input_types):
raise IndexError(
"Not enough inputs to fill in the Subtensor template.",
inputs, idx_list)
for input, expected_type in izip(inputs, input_types):
if input.type != expected_type:
raise TypeError(
"Wrong type for Subtensor template. Expected %s, got %s."
% (input.type, expected_type))
return gof.Apply(self,
(x, y) + inputs,
[x.type()])
def decl_view(self):
return "PyArrayObject * zview = NULL;"
def perform(self, node, inputs, out_):
out, = out_
x, y = inputs[:2]
indices = list(reversed(inputs[2:]))
def convert(entry):
if isinstance(entry, gof.Type):
rval = indices.pop()
if sys.version_info < (2, 5):
# Before Python 2.5, PySlice_GetIndicesEx requires
# Python int to be passed.
rval_ = int(rval)
if rval_ != rval:
raise IndexError((
"Invalid value for indexing: %s. "
"That value may be too big.") % rval)
return rval_
return rval
elif isinstance(entry, slice):
return slice(convert(entry.start),
convert(entry.stop),
convert(entry.step))
else:
return entry
cdata = tuple(map(convert, self.idx_list))
if len(cdata) == 1:
cdata = cdata[0]
if not self.inplace:
x = x.copy()
sub_x = x.__getitem__(cdata)
if sub_x.shape:
# we've sliced out an N-D tensor with N > 0
if not self.set_instead_of_inc:
sub_x += y
else:
#sub_x += -sub_x + y
x.__setitem__(cdata, y)
else:
# scalar case
if not self.set_instead_of_inc:
x.__setitem__(cdata, sub_x + y)
else:
x.__setitem__(cdata, y)
out[0] = x
def c_code(self, node, name, inputs, outputs, sub):
# This method delegates much of the work to helper
# methods. This method implements the main logic
# but subclasses may override the helper methods
# to change the particulars, e.g. GpuIncSubtensor
# turns the view/copy operations on numpy arrays
# into the same operations on cuda arrays.
self.do_type_checking(node)
if self.inplace: # convert bool to int
inplace = 1
else:
inplace = 0
x = inputs[0]
y = inputs[1]
z, = outputs
if self.set_instead_of_inc: # convert bool to int
op_is_set = 1
else:
op_is_set = 0
fail = sub['fail']
view_ndim = (node.inputs[0].ndim -
numpy.sum([not isinstance(idx, slice)
for idx in self.idx_list]))
copy_of_x = self.copy_of_x(x)
copy_input_if_necessary = """
if (%(inplace)s)
{
if (%(x)s != %(z)s)
{
Py_XDECREF(%(z)s);
Py_INCREF(%(x)s);
%(z)s = %(x)s;
}
}
else
{
Py_XDECREF(%(z)s);
%(z)s = %(copy_of_x)s;
}
""" % locals()
# get info needed to make zview: a view of %(z)s
helper_args = self.get_helper_c_code_args()
get_zview = Subtensor.helper_c_code(
node=node,
name=name,
inputs=outputs[:1] + inputs[2:],
outputs=outputs,
sub=sub,
idx_list=self.idx_list,
view_ndim=view_ndim,
** helper_args
)
#Make a view on the output, as we will write into it.
alloc_zview = self.make_view_array(z, view_ndim)
build_view = """
//TODO: give this Op a second output so that this view can be cached
//TODO: alternatively, fix the memory leak on failure
%(alloc_zview)s;
if (!zview)
{
%(fail)s;
}
""" % locals()
copy_into = self.copy_into("zview", y)
add_to_zview = self.add_to_zview(name, y, fail)
make_modification = """
if (%(op_is_set)s)
{
if (%(copy_into)s) // does broadcasting
{
Py_DECREF(zview);
%(fail)s;
}
}
else
{
%(add_to_zview)s
}
""" % locals()
return (self.decl_view() +
copy_input_if_necessary +
get_zview +
build_view +
make_modification +
"Py_DECREF(zview);"
)
def do_type_checking(self, node):
""" Should raise NotImplementedError if c_code does not support
the types involved in this node.
"""
if not isinstance(node.inputs[0].type, theano.tensor.TensorType):
raise NotImplementedError()
def c_code_cache_version(self):
hv = Subtensor.helper_c_code_cache_version()
if hv:
return (1, hv)
else:
return ()
def copy_of_x(self, x):
"""
:param x: a string giving the name of a C variable
pointing to an array
:return: C code expression to make a copy of x
Base class uses PyArrayObject *, subclasses may override for
different types of arrays.
"""
        # Parameters of PyArray_FromAny are:
# array
# dtype: we pass NULL to say any dtype is acceptable, so the existing
# dtype will be copied
# min_depth: we pass 0 to have this parameter ignored
# max_depth: we pass 0 to have this parameter ignored
# requirements: here we pass NPY_ARRAY_ENSURECOPY to force a copy
# context: this is almost always NULL, I'm not sure what it's used for
return """(PyArrayObject*)PyArray_FromAny(py_%(x)s, NULL, 0, 0,
NPY_ARRAY_ENSURECOPY, NULL)""" % locals()
def make_view_array(self, x, view_ndim):
"""
:param x: a string identifying an array to be viewed
:param view_ndim: a string specifying the number of dimensions
to have in the view
This doesn't need to actually set up the view with the
right indexing; we'll do that manually later.
"""
return """Py_INCREF(PyArray_DESCR(%(x)s));
zview = (PyArrayObject*)PyArray_NewFromDescr(
&PyArray_Type,
PyArray_DESCR(%(x)s),
%(view_ndim)s,
xview_dims, //PyArray_DIMS(%(x)s),
xview_strides, //PyArray_STRIDES(%(x)s),
PyArray_BYTES(%(x)s) + xview_offset, //PyArray_DATA(%(x)s),
PyArray_FLAGS(%(x)s),
NULL);
//This is needed for NumPy 1.5, but not 1.7.2
PyArray_UpdateFlags(zview, NPY_ARRAY_C_CONTIGUOUS| NPY_ARRAY_F_CONTIGUOUS);
""" % locals()
def get_helper_c_code_args(self):
""" Return a dictionary of arguments to pass to helper_c_code."""
return Subtensor.default_helper_c_code_args()
def copy_into(self, view, source):
"""
view: string, C code expression for an array
source: string, C code expression for an array
returns a C code expression to copy source into view, and
return 0 on success
"""
return """PyArray_CopyInto(%(view)s, %(source)s)""" % locals()
def add_to_zview(self, name, x, fail):
""" Return C code to add x to zview. Should DECREF zview if the
add fails."""
return """
PyArrayObject * add_rval = (PyArrayObject*)PyNumber_InPlaceAdd(
(PyObject*)zview, py_%(x)s);
if (add_rval)
{
assert (PyArray_Check((PyObject*)add_rval));
assert (PyArray_DATA(add_rval) == PyArray_DATA(zview));
Py_DECREF(add_rval);
}
else
{
Py_DECREF(zview);
%(fail)s;
}""" % locals()
def infer_shape(self, node, shapes):
return [shapes[0]]
def R_op(self, inputs, eval_points):
if eval_points[0] is None or eval_points[1] is None:
return [None]
# Again we ignore eval points for indices because incsubtensor is
        # not differentiable wrt those
return self(eval_points[0], eval_points[1], *inputs[2:],
**dict(return_list=True))
def connection_pattern(self, node):
rval = [[True], [True]]
for ipt in node.inputs[2:]:
rval.append([False])
return rval
def grad(self, inputs, grads):
g_output, = grads
x, y = inputs[:2]
idx_list = inputs[2:]
if self.set_instead_of_inc:
gx = set_subtensor(
Subtensor(idx_list=self.idx_list)(g_output, *idx_list),
theano.tensor.zeros_like(y))
else:
gx = g_output
gy = Subtensor(idx_list=self.idx_list)(g_output, *idx_list)
return [gx, gy] + [DisconnectedType()()] * len(idx_list)
#########################
# Advanced indexing
#########################
#
# Should reproduce numpy's behaviour, see url:
# docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
class AdvancedSubtensor1(Op):
"""Implement x[ilist] where ilist is a vector of integers."""
def __init__(self, sparse_grad=False):
self.sparse_grad = sparse_grad
def __hash__(self):
return hash(type(self))
def __eq__(self, other):
        # Don't check the sparse_grad attribute, as it
        # doesn't change the output of this op,
        # so we want the merge optimizer to merge two ops
        # that differ only in their sparse_grad attribute.
return type(self) == type(other)
def __str__(self):
return self.__class__.__name__
def make_node(self, x, ilist):
x_ = theano.tensor.as_tensor_variable(x)
ilist_ = theano.tensor.as_tensor_variable(ilist)
if ilist_.type.dtype[:3] not in ('int', 'uin'):
raise TypeError('index must be integers')
if ilist_.type.ndim != 1:
raise TypeError('index must be vector')
if x_.type.ndim == 0:
raise TypeError('cannot index into a scalar')
return Apply(self, [x_, ilist_], [x_.type()])
def perform(self, node, inp, out_):
x, i = inp
out, = out_
# Copy always implied by numpy advanced indexing semantic.
if out[0] is not None and out[0].shape == (len(i),) + x.shape[1:]:
o = out[0]
else:
o = None
# If i.dtype is more precise than numpy.intp (int32 on 32-bit machines,
# int64 on 64-bit machines), numpy may raise the following error:
# TypeError: array cannot be safely cast to required type.
# We need to check if values in i can fit in numpy.intp, because
# if they don't, that should be an error (no array can have that
# many elements on a 32-bit arch).
if i.dtype != numpy.intp:
i_ = theano._asarray(i, dtype=numpy.intp)
if not numpy.can_cast(i.dtype, numpy.intp):
# Check if there was actually an incorrect conversion
if numpy.any(i != i_):
raise IndexError('index contains values that are bigger '
'than the maximum array size on this system.', i)
i = i_
out[0] = x.take(i, axis=0, out=o)
def connection_pattern(self, node):
rval = [[True]]
for ipt in node.inputs[1:]:
rval.append([False])
return rval
def grad(self, inputs, grads):
global sparse_module_ref
x, ilist = inputs
gz, = grads
assert len(inputs) == 2
sparse = False
if getattr(x.type, 'sparse_grad', False):
sparse = True
warnings.warn(
"DEPRECATION WARNING: AdvancedSubtensor1, you are using"
" an old interface to the sparse grad. You should use"
" theano.sparse_grad(a_tensor[an_int_vector]). ")
if sparse or self.sparse_grad:
if x.type.ndim != 2:
raise TypeError(
"AdvancedSubtensor1: you can't take the sparse grad"
" from a tensor with ndim != 2. ndim is " +
str(x.type.ndim))
if sparse_module_ref is None:
import theano.sparse as sparse_module_ref
rval1 = [sparse_module_ref.construct_sparse_from_list(x, gz,
ilist)]
else:
rval1 = [advanced_inc_subtensor1(x.zeros_like(), gz, ilist)]
return rval1 + [DisconnectedType()()] * (len(inputs) - 1)
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return [None]
return self.make_node(eval_points[0], *inputs[1:]).outputs
def infer_shape(self, node, ishapes):
x, ilist = ishapes
return [ilist + x[1:]]
def c_support_code(self):
# In some versions of numpy, NPY_MIN_INTP is defined as MIN_LONG,
# which is not defined. It should be NPY_MIN_LONG instead in that case.
return dedent("""\
#ifndef MIN_LONG
#define MIN_LONG NPY_MIN_LONG
#endif""")
def c_code(self, node, name, input_names, output_names, sub):
if self.__class__ is not AdvancedSubtensor1:
raise MethodNotDefined(
"c_code defined for AdvancedSubtensor1,"
" not for child class", type(self))
a_name, i_name = input_names[0], input_names[1]
output_name = output_names[0]
fail = sub['fail']
return """
PyArrayObject *indices;
int i_type = PyArray_TYPE(%(i_name)s);
if (i_type != NPY_INTP) {
// Cast %(i_name)s to NPY_INTP (expected by PyArray_TakeFrom),
// if all values fit.
if (!PyArray_CanCastSafely(i_type, NPY_INTP)) {
npy_int64 min_val, max_val;
PyObject* py_min_val = PyArray_Min(%(i_name)s, NPY_MAXDIMS,
NULL);
if (py_min_val == NULL) {
%(fail)s;
}
min_val = PyLong_AsLongLong(py_min_val);
Py_DECREF(py_min_val);
if (min_val == -1 && PyErr_Occurred()) {
%(fail)s;
}
PyObject* py_max_val = PyArray_Max(%(i_name)s, NPY_MAXDIMS,
NULL);
if (py_max_val == NULL) {
%(fail)s;
}
max_val = PyLong_AsLongLong(py_max_val);
Py_DECREF(py_max_val);
if (max_val == -1 && PyErr_Occurred()) {
%(fail)s;
}
if (min_val < NPY_MIN_INTP || max_val > NPY_MAX_INTP) {
PyErr_SetString(PyExc_IndexError,
"Index contains values "
"that are bigger than the maximum array "
"size on this system.");
%(fail)s;
}
}
indices = (PyArrayObject*) PyArray_Cast(%(i_name)s, NPY_INTP);
if (indices == NULL) {
%(fail)s;
}
}
else {
indices = %(i_name)s;
Py_INCREF(indices);
}
if (%(output_name)s != NULL) {
npy_intp nd, i, *shape;
nd = PyArray_NDIM(%(a_name)s) + PyArray_NDIM(indices) - 1;
if (PyArray_NDIM(%(output_name)s) != nd) {
Py_CLEAR(%(output_name)s);
}
else {
shape = PyArray_DIMS(%(output_name)s);
for (i = 0; i < PyArray_NDIM(indices); i++) {
if (shape[i] != PyArray_DIMS(indices)[i]) {
Py_CLEAR(%(output_name)s);
break;
}
}
if (%(output_name)s != NULL) {
for (; i < nd; i++) {
if (shape[i] != PyArray_DIMS(%(a_name)s)[
i-PyArray_NDIM(indices)+1]) {
Py_CLEAR(%(output_name)s);
break;
}
}
}
}
}
%(output_name)s = (PyArrayObject*)PyArray_TakeFrom(
%(a_name)s, (PyObject*)indices, 0, %(output_name)s, NPY_RAISE);
Py_DECREF(indices);
if (%(output_name)s == NULL) %(fail)s;
""" % locals()
def c_code_cache_version(self):
return (0, 1, 1)
advanced_subtensor1 = AdvancedSubtensor1()
class AdvancedIncSubtensor1(Op):
"""Increments a subtensor using advanced slicing (list of index)"""
def __init__(self, inplace=False, set_instead_of_inc=False):
self.inplace = inplace
self.set_instead_of_inc = set_instead_of_inc
if inplace:
self.destroy_map = {0: [0]}
def __hash__(self):
return hash((type(self), self.inplace, self.set_instead_of_inc))
def __eq__(self, other):
return (type(self) == type(other)
and self.inplace == other.inplace
and self.set_instead_of_inc == other.set_instead_of_inc)
def __str__(self):
if self.inplace:
msg = "inplace"
else:
msg = "no_inplace"
if self.set_instead_of_inc:
msg += ",set"
else:
msg += ",inc"
return self.__class__.__name__ + "{%s}" % msg
def make_node(self, x, y, ilist):
x_ = theano.tensor.as_tensor_variable(x)
y_ = theano.tensor.as_tensor_variable(y)
ilist_ = theano.tensor.as_tensor_variable(ilist)
if ilist_.type.dtype[:3] not in ('int', 'uin'):
raise TypeError('index must be integers')
if ilist_.type.ndim != 1:
raise TypeError('index must be vector')
if x_.type.ndim == 0:
raise TypeError('cannot index into a scalar')
if y_.type.ndim > x_.type.ndim:
if self.set_instead_of_inc:
opname = 'set'
else:
opname = 'increment'
            raise TypeError('cannot %s x subtensor with ndim=%s'
            ' by y with ndim=%s' % (
            opname, x_.type.ndim, y_.type.ndim))
return Apply(self, [x_, y_, ilist_], [x_.type()])
def perform(self, node, inp, out_):
# TODO opt to make this inplace
x, y, idx = inp
out, = out_
if not self.inplace:
x = x.copy()
        # In NumPy, x[idx] += y does not accumulate when the same index is
        # present many times: it increments only once. For this reason we
        # implement our own 'inc' iteration.
if self.set_instead_of_inc:
x[idx] = y
else:
increment = inplace_increment
if increment is None:
increment = self.inplace_increment1d_slow
increment(x, idx, y)
out[0] = x
def inplace_increment1d_slow(self, x, idx, y):
# If `y` has as many dimensions as `x`, then we want to iterate
# jointly on `x` and `y`. Otherwise, it means `y` should be
# broadcasted to fill all relevant rows of `x`.
assert y.ndim <= x.ndim # Should be guaranteed by `make_node`
if y.ndim == x.ndim:
assert len(y) == len(idx)
for (j, i) in enumerate(idx):
x[i] += y[j]
else:
for i in idx:
x[i] += y
def infer_shape(self, node, ishapes):
x, y, ilist = ishapes
return [x]
def R_op(self, inputs, eval_points):
if None in eval_points[:2]:
return [None]
return self.make_node(eval_points[0], eval_points[1],
*inputs[2:]).outputs
def connection_pattern(self, node):
rval = [[True], [True], [False]]
return rval
def grad(self, inputs, grads):
g_output, = grads
x, y = inputs[:2]
idx_list = inputs[2:]
gx = g_output
gy = advanced_subtensor1(g_output, *idx_list)
return [gx, gy] + [DisconnectedType()()] * len(idx_list)
advanced_inc_subtensor1 = AdvancedIncSubtensor1()
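# --- Editor's illustration (not part of the original Theano source) ---
# The NumPy pitfall mentioned in AdvancedIncSubtensor1.perform: fancy-indexed
# in-place addition is buffered, so a duplicated index is incremented only
# once, while an unbuffered increment (numpy.add.at, available in NumPy >= 1.8,
# or inplace_increment above) accumulates every occurrence.
_v = numpy.zeros(3)
_v[[0, 0, 1]] += 1
assert list(_v) == [1.0, 1.0, 0.0]   # duplicate index 0 counted once
_w = numpy.zeros(3)
numpy.add.at(_w, [0, 0, 1], 1)
assert list(_w) == [2.0, 1.0, 0.0]   # every occurrence counted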
def as_index_variable(idx):
if idx is None:
return NoneConst.clone()
if isinstance(idx, slice):
return make_slice(idx)
idx = theano.tensor.as_tensor_variable(idx)
if idx.type.dtype[:3] not in ('int', 'uin'):
raise TypeError('index must be integers')
return idx
def adv_index_broadcastable_pattern(a, idx):
"""
This function is only used to determine the broadcast pattern for
AdvancedSubtensor output variable.
    For this, we build a fake ndarray and a fake idx, index the array, and ask
    numpy for the output shape. From this, we find the output broadcast pattern.
"""
def replace_slice(v):
if isinstance(v, gof.Apply):
if len(v.outputs) != 1:
raise ValueError(
"It is ambiguous which output of a multi-output Op has"
" to be fetched.", v)
else:
v = v.outputs[0]
if NoneConst.equals(v):
return None
if isinstance(v.type, SliceType):
return slice(None, None)
return numpy.zeros((2,) * v.ndim, int)
newidx = tuple(map(replace_slice, idx))
#2 - True = 1; 2 - False = 2
fakeshape = [2 - bc for bc in a.broadcastable]
retshape = numpy.empty(fakeshape)[newidx].shape
return tuple([dim == 1 for dim in retshape])
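# --- Editor's illustration (not part of the original Theano source) ---
# The fake-index trick above in plain NumPy: broadcastable dims get length 1,
# the others get length 2, integer indices are replaced by zero arrays, and
# the output broadcast pattern is read off the resulting shape (1 -> True).
_fake = numpy.empty((2, 1))             # stands for x with broadcastable (False, True)
_ret = _fake[numpy.zeros((2,), int)]    # stands for a 1-d integer index
assert _ret.shape == (2, 1)
assert tuple(d == 1 for d in _ret.shape) == (False, True)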
class AdvancedSubtensor(Op):
"""Return a subtensor copy, using advanced indexing.
"""
# Should be used by __getitem__ and __getslice__, as follow:
# AdvancedSubtensor()(self, *args),
# if args contains and advanced indexing pattern
def __eq__(self, other):
return self.__class__ == other.__class__
def __hash__(self):
return hash(self.__class__)
def __str__(self):
return self.__class__.__name__
def make_node(self, x, *index):
x = theano.tensor.as_tensor_variable(x)
index = tuple(map(as_index_variable, index))
bcast = adv_index_broadcastable_pattern(x, index)
return gof.Apply(self,
(x,) + index,
[theano.tensor.tensor(dtype=x.type.dtype,
broadcastable=bcast)])
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return [None]
return self.make_node(eval_points[0], *inputs[1:]).outputs
def infer_shape(self, node, ishapes):
# Really special case
if len(ishapes) == 3:
xshp, ind1shp, ind2shp = ishapes
if len(xshp) == 2 and len(ind1shp) == 1 and len(ind2shp) == 1:
# if the graph is correct, we can assume ind1shp[0] and
# ind2shp[0] will have the same value.
# Try to return the one closest to the graph input.
if node.inputs[2].owner is None:
return [ind2shp]
else:
return [ind1shp]
# Default case, we don't know
return node.fgraph.shape_feature.default_infer_shape(node, ishapes)
def perform(self, node, inputs, out_):
out, = out_
# TODO: in general, we need to re-pack the inputs into a valid
# index, just like subtensor
out[0] = inputs[0].__getitem__(inputs[1:])
if (numpy.__version__ <= '1.6.1' and
out[0].size != numpy.uint32(out[0].size)):
warnings.warn(
'Numpy versions 1.6.1 and below have a bug preventing '
'advanced indexing from correctly filling arrays that '
'are too big (>= 2^32 elements). It is possible that '
'out[0] (%s), with shape %s, is not correctly filled.'
% (out[0], out[0].shape))
# return
#raise NotImplementedError()
def connection_pattern(self, node):
rval = [[True]]
for ipt in node.inputs[1:]:
rval.append([False])
return rval
def grad(self, inputs, grads):
gz, = grads
x = inputs[0]
rest = inputs[1:]
return [advanced_inc_subtensor(theano.tensor.zeros_like(x), gz,
*rest)] + \
[DisconnectedType()()] * len(rest)
advanced_subtensor = AdvancedSubtensor()
class AdvancedIncSubtensor(Op):
"""Increments a subtensor using advanced indexing.
    :note: We need the numpy.inplace_increment() function (currently in
    numpy's PR 326) to be able to make an inplace version of this
    op.
"""
def __init__(self, inplace=False, set_instead_of_inc=False):
self.inplace = inplace
self.set_instead_of_inc = set_instead_of_inc
        # The assert is needed as in the past the first argument was
        # something else that was not used.
assert isinstance(inplace, bool)
if self.inplace:
raise NotImplementedError('In place computation is not'
' implemented')
self.allow_legacy_perform = False
def __hash__(self):
return hash((type(self), self.inplace, self.set_instead_of_inc))
def __eq__(self, other):
return (type(self) == type(other)
and self.inplace == other.inplace
and self.set_instead_of_inc == other.set_instead_of_inc)
def __str__(self):
return "%s{%s, %s}" % (self.__class__.__name__,
"inplace=" + str(self.inplace),
" set_instead_of_inc=" + str(self. set_instead_of_inc))
def make_node(self, x, y, *inputs):
x = theano.tensor.as_tensor_variable(x)
y = theano.tensor.as_tensor_variable(y)
op = self
# If we are incrementing, but the increment compiled function is not
# available, we need to support legacy cases.
if not self.set_instead_of_inc and inplace_increment is None:
legacy_conditions = False
if x.ndim == 2 and y.ndim == 1 and len(inputs) == 2:
ind1 = theano.tensor.as_tensor_variable(inputs[0])
ind2 = theano.tensor.as_tensor_variable(inputs[1])
if ind1.ndim == 1 and ind2.ndim == 1:
if ind1.owner and isinstance(ind1.owner.op, ARange):
legacy_conditions = True
elif isinstance(ind1, Constant):
# Make sure no index is duplicated
val = ind1.value
if numpy.unique(val).size == val.size:
legacy_conditions = True
elif ind2.owner and isinstance(ind2.owner.op, ARange):
legacy_conditions = True
elif isinstance(ind2, Constant):
# Make sure no index is duplicated
val = ind2.value
if numpy.unique(val).size == val.size:
legacy_conditions = True
if legacy_conditions:
op = copy(self)
op.allow_legacy_perform = True
else:
raise NotImplementedError(
'Could not import inplace_increment, so some advanced '
'indexing features are disabled. They will be '
'available if you update NumPy to version 1.8 or '
'later, or to the latest development version. '
'You may need to clear the cache (theano-cache clear) '
'afterwards.')
return gof.Apply(op,
(x, y) + inputs,
[theano.tensor.tensor(dtype=x.type.dtype,
broadcastable=x.type.broadcastable)])
def perform(self, node, inputs, out_):
# TODO: 1. opt to make this in place 2. generalize as described in
# AdvancedSubtensor's perform TODO
out, = out_
if not self.inplace:
out[0] = inputs[0].copy()
else:
out[0] = inputs[0]
if self.set_instead_of_inc:
out[0][inputs[2:]] = inputs[1]
elif inplace_increment is not None:
inplace_increment(out[0], tuple(inputs[2:]), inputs[1])
elif self.allow_legacy_perform:
out[0][inputs[2:]] += inputs[1]
else:
raise NotImplementedError(
'Could not import inplace_increment, so some advanced '
'indexing features are disabled. They will be '
'available if you update NumPy to version 1.8 or '
'later, or to the latest development version. '
'You may need to clear the cache (theano-cache clear) '
'afterwards.')
if (numpy.__version__ <= '1.6.1' and
out[0].size != numpy.uint32(out[0].size)):
warnings.warn(
'Numpy versions 1.6.1 and below have a bug preventing '
'advanced indexing from correctly filling arrays that '
'are too big (>= 2^32 elements). It is possible that '
'out[0] (%s), with shape %s, is not correctly filled.'
% (out[0], out[0].shape))
def infer_shape(self, node, ishapes):
return [ishapes[0]]
def connection_pattern(self, node):
rval = [[True], [True]]
for ipt in node.inputs[2:]:
rval.append([False])
return rval
def grad(self, inpt, output_gradients):
x, y = inpt[:2]
idxs = inpt[2:]
outgrad, = output_gradients
d_x_wrt_C = outgrad
d_y_wrt_C = AdvancedSubtensor()(outgrad, *idxs)
return [d_x_wrt_C, d_y_wrt_C] + \
[DisconnectedType()() for _ in idxs]
def R_op(self, inputs, eval_points):
if None in eval_points[:2]:
return [None]
return self.make_node(eval_points[0], eval_points[1],
*inputs[2:]).outputs
advanced_inc_subtensor = AdvancedIncSubtensor()
def take(a, indices, axis=None, mode='raise'):
a = theano.tensor.as_tensor_variable(a)
indices = theano.tensor.as_tensor_variable(indices)
# Reuse advanced_subtensor1 if indices is a vector
if indices.ndim == 1:
if mode == 'clip':
indices = clip(indices, 0, a.shape[axis] - 1)
elif mode == 'wrap':
indices = indices % a.shape[axis]
if axis is None:
return advanced_subtensor1(a.flatten(), indices)
elif axis == 0:
return advanced_subtensor1(a, indices)
else:
if axis < 0:
axis += a.ndim
assert axis >= 0
shuffle = range(a.ndim)
shuffle[0] = axis
shuffle[axis] = 0
return advanced_subtensor1(
a.dimshuffle(shuffle), indices).dimshuffle(shuffle)
if axis is None:
shape = indices.shape
ndim = indices.ndim
else:
shape = theano.tensor.concatenate(
[a.shape[:axis], indices.shape, a.shape[axis + 1:]])
ndim = a.ndim + indices.ndim - 1
return take(a, indices.flatten(), axis, mode).reshape(shape, ndim)
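# --- Editor's illustration (not part of the original Theano source) ---
# Plain-NumPy reference for the semantics reimplemented by `take` above:
# axis=None works on the flattened array, 'wrap' folds indices modulo the
# length and 'clip' clamps them into range.
_a = numpy.arange(6).reshape(2, 3)
assert list(numpy.take(_a, [0, 7], mode='wrap')) == [0, 1]
assert list(numpy.take(_a, [0, 7], mode='clip')) == [0, 5]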
| 36.384466
| 94
| 0.5305
|
79052816aec8b76957540d72b3609cbf4bb76b0c
| 1,528
|
py
|
Python
|
src/commercetools/platform/client/matching_cart/by_project_key_shipping_methods_matching_cart_request_builder.py
|
lime-green/commercetools-python-sdk
|
63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6
|
[
"MIT"
] | 1
|
2021-04-07T20:01:30.000Z
|
2021-04-07T20:01:30.000Z
|
src/commercetools/platform/client/matching_cart/by_project_key_shipping_methods_matching_cart_request_builder.py
|
lime-green/commercetools-python-sdk
|
63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6
|
[
"MIT"
] | null | null | null |
src/commercetools/platform/client/matching_cart/by_project_key_shipping_methods_matching_cart_request_builder.py
|
lime-green/commercetools-python-sdk
|
63b77f6e5abe43e2b3ebbf3cdbbe00c7cf80dca6
|
[
"MIT"
] | null | null | null |
# Generated file, please do not change!!!
import typing
from ...models.error import ErrorResponse
from ...models.shipping_method import ShippingMethodPagedQueryResponse
if typing.TYPE_CHECKING:
from ...base_client import BaseClient
class ByProjectKeyShippingMethodsMatchingCartRequestBuilder:
_client: "BaseClient"
_project_key: str
def __init__(
self,
project_key: str,
client: "BaseClient",
):
self._project_key = project_key
self._client = client
def get(
self,
*,
cart_id: str,
expand: typing.List["str"] = None,
headers: typing.Dict[str, str] = None,
options: typing.Dict[str, typing.Any] = None,
) -> typing.Optional["ShippingMethodPagedQueryResponse"]:
headers = {} if headers is None else headers
response = self._client._get(
endpoint=f"/{self._project_key}/shipping-methods/matching-cart",
params={"cartId": cart_id, "expand": expand},
headers=headers,
options=options,
)
if response.status_code == 200:
return ShippingMethodPagedQueryResponse.deserialize(response.json())
elif response.status_code in (400, 401, 403, 500, 503):
obj = ErrorResponse.deserialize(response.json())
raise self._client._create_exception(obj, response)
elif response.status_code == 404:
return None
raise ValueError("Unhandled status code %s", response.status_code)
| 32.510638
| 80
| 0.643979
|
f426afc0fcdbe383d46815ade7f64023f4dfe6ce
| 1,917
|
py
|
Python
|
test/test_get_transaction_details_by_transaction_id_from_callback_ribsl.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 5
|
2021-05-17T04:45:03.000Z
|
2022-03-23T12:51:46.000Z
|
test/test_get_transaction_details_by_transaction_id_from_callback_ribsl.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | null | null | null |
test/test_get_transaction_details_by_transaction_id_from_callback_ribsl.py
|
Crypto-APIs/Crypto_APIs_2.0_SDK_Python
|
c59ebd914850622b2c6500c4c30af31fb9cecf0e
|
[
"MIT"
] | 2
|
2021-06-02T07:32:26.000Z
|
2022-02-12T02:36:23.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.get_transaction_details_by_transaction_idribsl_vin import GetTransactionDetailsByTransactionIDRIBSLVin
from cryptoapis.model.get_transaction_details_by_transaction_idribsl_vout import GetTransactionDetailsByTransactionIDRIBSLVout
globals()['GetTransactionDetailsByTransactionIDRIBSLVin'] = GetTransactionDetailsByTransactionIDRIBSLVin
globals()['GetTransactionDetailsByTransactionIDRIBSLVout'] = GetTransactionDetailsByTransactionIDRIBSLVout
from cryptoapis.model.get_transaction_details_by_transaction_id_from_callback_ribsl import GetTransactionDetailsByTransactionIDFromCallbackRIBSL
class TestGetTransactionDetailsByTransactionIDFromCallbackRIBSL(unittest.TestCase):
"""GetTransactionDetailsByTransactionIDFromCallbackRIBSL unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetTransactionDetailsByTransactionIDFromCallbackRIBSL(self):
"""Test GetTransactionDetailsByTransactionIDFromCallbackRIBSL"""
# FIXME: construct object with mandatory attributes with example values
# model = GetTransactionDetailsByTransactionIDFromCallbackRIBSL() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 46.756098
| 484
| 0.82264
|
f7977280429846a6a1a24bb202ae2f2a6ce767dd
| 17,415
|
py
|
Python
|
dvc/repo/experiments/executor/base.py
|
Sayanta66/dvc
|
0d484d45f710b39c6baf541ac4d23aa983792aae
|
[
"Apache-2.0"
] | 1
|
2021-07-20T05:33:18.000Z
|
2021-07-20T05:33:18.000Z
|
dvc/repo/experiments/executor/base.py
|
Sayanta66/dvc
|
0d484d45f710b39c6baf541ac4d23aa983792aae
|
[
"Apache-2.0"
] | null | null | null |
dvc/repo/experiments/executor/base.py
|
Sayanta66/dvc
|
0d484d45f710b39c6baf541ac4d23aa983792aae
|
[
"Apache-2.0"
] | 1
|
2021-08-05T07:15:30.000Z
|
2021-08-05T07:15:30.000Z
|
import logging
import os
import pickle
from abc import ABC, abstractmethod
from contextlib import contextmanager
from dataclasses import dataclass
from functools import partial
from typing import (
TYPE_CHECKING,
Callable,
Iterable,
NamedTuple,
Optional,
Union,
)
from funcy import cached_property
from dvc.exceptions import DvcException
from dvc.path_info import PathInfo
from dvc.repo.experiments.base import (
EXEC_BASELINE,
EXEC_BRANCH,
EXEC_CHECKPOINT,
EXEC_HEAD,
EXEC_MERGE,
EXEC_NAMESPACE,
EXPS_NAMESPACE,
EXPS_STASH,
CheckpointExistsError,
ExperimentExistsError,
ExpRefInfo,
UnchangedExperimentError,
)
from dvc.scm import SCM
from dvc.stage import PipelineStage
from dvc.stage.monitor import CheckpointKilledError
from dvc.stage.serialize import to_lockfile
from dvc.utils import dict_sha256
from dvc.utils.fs import remove
if TYPE_CHECKING:
from multiprocessing import Queue
from dvc.scm.git import Git
logger = logging.getLogger(__name__)
class ExecutorResult(NamedTuple):
exp_hash: Optional[str]
ref_info: Optional["ExpRefInfo"]
force: bool
@dataclass
class ExecutorInfo:
PARAM_PID = "pid"
PARAM_GIT_URL = "git"
PARAM_BASELINE_REV = "baseline"
PARAM_LOCATION = "location"
pid: Optional[int]
git_url: Optional[str]
baseline_rev: Optional[str]
location: Optional[str]
@classmethod
def from_dict(cls, d):
return cls(
d.get(cls.PARAM_PID),
d.get(cls.PARAM_GIT_URL),
d.get(cls.PARAM_BASELINE_REV),
d.get(cls.PARAM_LOCATION),
)
def to_dict(self):
return {
self.PARAM_PID: self.pid,
self.PARAM_GIT_URL: self.git_url,
self.PARAM_BASELINE_REV: self.baseline_rev,
self.PARAM_LOCATION: self.location,
}
class BaseExecutor(ABC):
"""Base class for executing experiments in parallel.
Args:
src: source Git SCM instance.
dvc_dir: relpath to DVC root from SCM root.
Optional keyword args:
branch: Existing git branch for this experiment.
"""
PACKED_ARGS_FILE = "repro.dat"
WARN_UNTRACKED = False
QUIET = False
PIDFILE_EXT = ".run"
DEFAULT_LOCATION: Optional[str] = "workspace"
def __init__(
self,
src: "Git",
dvc_dir: str,
root_dir: Optional[Union[str, PathInfo]] = None,
branch: Optional[str] = None,
name: Optional[str] = None,
**kwargs,
):
assert root_dir is not None
self._dvc_dir = dvc_dir
self.root_dir = root_dir
self._init_git(src, branch)
self.name = name
def _init_git(self, scm: "Git", branch: Optional[str] = None):
"""Init git repo and collect executor refs from the specified SCM."""
from dulwich.repo import Repo as DulwichRepo
DulwichRepo.init(os.fspath(self.root_dir))
cwd = os.getcwd()
os.chdir(self.root_dir)
try:
refspec = f"{EXEC_NAMESPACE}/"
scm.push_refspec(self.git_url, refspec, refspec)
if branch:
scm.push_refspec(self.git_url, branch, branch)
self.scm.set_ref(EXEC_BRANCH, branch, symbolic=True)
elif self.scm.get_ref(EXEC_BRANCH):
self.scm.remove_ref(EXEC_BRANCH)
if self.scm.get_ref(EXEC_CHECKPOINT):
self.scm.remove_ref(EXEC_CHECKPOINT)
# checkout EXEC_HEAD and apply EXEC_MERGE on top of it without
# committing
head = EXEC_BRANCH if branch else EXEC_HEAD
self.scm.checkout(head, detach=True)
merge_rev = self.scm.get_ref(EXEC_MERGE)
self.scm.merge(merge_rev, squash=True, commit=False)
finally:
os.chdir(cwd)
@cached_property
def scm(self):
return SCM(self.root_dir)
@property
@abstractmethod
def git_url(self) -> str:
pass
@property
def dvc_dir(self) -> str:
return os.path.join(self.root_dir, self._dvc_dir)
@staticmethod
def hash_exp(stages: Iterable["PipelineStage"]) -> str:
exp_data = {}
for stage in stages:
if isinstance(stage, PipelineStage):
exp_data.update(to_lockfile(stage))
return dict_sha256(exp_data)
def cleanup(self):
self.scm.close()
del self.scm
# TODO: come up with better way to stash repro arguments
@staticmethod
def pack_repro_args(path, *args, fs=None, extra=None, **kwargs):
dpath = os.path.dirname(path)
if fs:
open_func = fs.open
fs.makedirs(dpath)
else:
from dvc.utils.fs import makedirs
open_func = open
makedirs(dpath, exist_ok=True)
data = {"args": args, "kwargs": kwargs}
if extra is not None:
data["extra"] = extra
with open_func(path, "wb") as fobj:
pickle.dump(data, fobj)
@staticmethod
def unpack_repro_args(path):
with open(path, "rb") as fobj:
data = pickle.load(fobj)
return data["args"], data["kwargs"]
@classmethod
def fetch_exps(
cls,
dest_scm: "Git",
url: str,
force: bool = False,
on_diverged: Callable[[str, bool], None] = None,
) -> Iterable[str]:
"""Fetch reproduced experiments into the specified SCM.
Args:
dest_scm: Destination Git instance.
url: Git remote URL to fetch from.
force: If True, diverged refs will be overwritten
on_diverged: Callback in the form on_diverged(ref, is_checkpoint)
to be called when an experiment ref has diverged.
"""
refs = []
has_checkpoint = False
for ref in dest_scm.iter_remote_refs(url, base=EXPS_NAMESPACE):
if ref == EXEC_CHECKPOINT:
has_checkpoint = True
elif not ref.startswith(EXEC_NAMESPACE) and ref != EXPS_STASH:
refs.append(ref)
def on_diverged_ref(orig_ref: str, new_rev: str):
if force:
logger.debug("Replacing existing experiment '%s'", orig_ref)
return True
cls._raise_ref_conflict(
dest_scm, orig_ref, new_rev, has_checkpoint
)
if on_diverged:
on_diverged(orig_ref, has_checkpoint)
logger.debug("Reproduced existing experiment '%s'", orig_ref)
return False
# fetch experiments
dest_scm.fetch_refspecs(
url,
[f"{ref}:{ref}" for ref in refs],
on_diverged=on_diverged_ref,
force=force,
)
# update last run checkpoint (if it exists)
if has_checkpoint:
dest_scm.fetch_refspecs(
url,
[f"{EXEC_CHECKPOINT}:{EXEC_CHECKPOINT}"],
force=True,
)
return refs
@classmethod
def reproduce(
cls,
dvc_dir: Optional[str],
rev: str,
queue: Optional["Queue"] = None,
rel_cwd: Optional[str] = None,
name: Optional[str] = None,
log_errors: bool = True,
log_level: Optional[int] = None,
**kwargs,
) -> "ExecutorResult":
"""Run dvc repro and return the result.
Returns tuple of (exp_hash, exp_ref, force) where exp_hash is the
experiment hash (or None on error), exp_ref is the experiment ref,
and force is a bool specifying whether or not this experiment
should force overwrite any existing duplicates.
"""
from dvc.repo.checkout import checkout as dvc_checkout
from dvc.repo.reproduce import reproduce as dvc_reproduce
unchanged = []
if queue is not None:
queue.put((rev, os.getpid()))
if log_errors and log_level is not None:
cls._set_log_level(log_level)
def filter_pipeline(stages):
unchanged.extend(
[stage for stage in stages if isinstance(stage, PipelineStage)]
)
exp_hash: Optional[str] = None
exp_ref: Optional["ExpRefInfo"] = None
repro_force: bool = False
with cls._repro_dvc(
dvc_dir,
rel_cwd,
log_errors,
**kwargs,
) as dvc:
args, kwargs = cls._repro_args(dvc)
if args:
targets: Optional[Union[list, str]] = args[0]
else:
targets = kwargs.get("targets")
repro_force = kwargs.get("force", False)
logger.trace( # type: ignore[attr-defined]
"Executor repro with force = '%s'", str(repro_force)
)
repro_dry = kwargs.get("dry")
# NOTE: checkpoint outs are handled as a special type of persist
# out:
#
# - checkpoint out may not yet exist if this is the first time this
# experiment has been run, this is not an error condition for
# experiments
# - if experiment was run with --reset, the checkpoint out will be
# removed at the start of the experiment (regardless of any
# dvc.lock entry for the checkpoint out)
# - if run without --reset, the checkpoint out will be checked out
# using any hash present in dvc.lock (or removed if no entry
# exists in dvc.lock)
checkpoint_reset: bool = kwargs.pop("reset", False)
if not repro_dry:
dvc_checkout(
dvc,
targets=targets,
with_deps=targets is not None,
force=True,
quiet=True,
allow_missing=True,
checkpoint_reset=checkpoint_reset,
)
checkpoint_func = partial(
cls.checkpoint_callback,
dvc.scm,
name,
repro_force or checkpoint_reset,
)
stages = dvc_reproduce(
dvc,
*args,
on_unchanged=filter_pipeline,
checkpoint_func=checkpoint_func,
**kwargs,
)
exp_hash = cls.hash_exp(stages)
if not repro_dry:
try:
is_checkpoint = any(
stage.is_checkpoint for stage in stages
)
if is_checkpoint and checkpoint_reset:
# For reset checkpoint stages, we need to force
# overwriting existing checkpoint refs even though
# repro may not have actually been run with --force
repro_force = True
cls.commit(
dvc.scm,
exp_hash,
exp_name=name,
force=repro_force,
checkpoint=is_checkpoint,
)
except UnchangedExperimentError:
pass
ref = dvc.scm.get_ref(EXEC_BRANCH, follow=False)
if ref:
exp_ref = ExpRefInfo.from_ref(ref)
if cls.WARN_UNTRACKED:
untracked = dvc.scm.untracked_files()
if untracked:
logger.warning(
"The following untracked files were present in "
"the experiment directory after reproduction but "
"will not be included in experiment commits:\n"
"\t%s",
", ".join(untracked),
)
# ideally we would return stages here like a normal repro() call, but
# stages is not currently picklable and cannot be returned across
# multiprocessing calls
return ExecutorResult(exp_hash, exp_ref, repro_force)
@classmethod
@contextmanager
def _repro_dvc(
cls,
dvc_dir: Optional[str],
rel_cwd: Optional[str],
log_errors: bool,
pidfile: Optional[str] = None,
git_url: Optional[str] = None,
**kwargs,
):
from dvc.repo import Repo
from dvc.utils.serialize import modify_yaml
dvc = Repo(dvc_dir)
if cls.QUIET:
dvc.scm.quiet = cls.QUIET
if dvc_dir is not None:
old_cwd: Optional[str] = os.getcwd()
if rel_cwd:
os.chdir(os.path.join(dvc.root_dir, rel_cwd))
else:
os.chdir(dvc.root_dir)
else:
old_cwd = None
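        # When a pidfile is given, record this executor's pid, git URL, baseline
        # rev and location in it (written as YAML via modify_yaml).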
if pidfile is not None:
info = ExecutorInfo(
os.getpid(),
git_url,
dvc.scm.get_rev(),
cls.DEFAULT_LOCATION,
)
with modify_yaml(pidfile) as d:
d.update(info.to_dict())
logger.debug("Running repro in '%s'", os.getcwd())
try:
yield dvc
except CheckpointKilledError:
raise
except DvcException:
if log_errors:
logger.exception("")
raise
except Exception:
if log_errors:
logger.exception("unexpected error")
raise
finally:
if pidfile is not None:
remove(pidfile)
dvc.close()
if old_cwd:
os.chdir(old_cwd)
@classmethod
def _repro_args(cls, dvc):
args_path = os.path.join(dvc.tmp_dir, cls.PACKED_ARGS_FILE)
if os.path.exists(args_path):
args, kwargs = cls.unpack_repro_args(args_path)
remove(args_path)
# explicitly git rm/unstage the args file
dvc.scm.add([args_path])
else:
args = []
kwargs = {}
return args, kwargs
@classmethod
def checkpoint_callback(
cls,
scm: "Git",
name: Optional[str],
force: bool,
unchanged: Iterable["PipelineStage"],
stages: Iterable["PipelineStage"],
):
try:
exp_hash = cls.hash_exp(list(stages) + list(unchanged))
exp_rev = cls.commit(
scm, exp_hash, exp_name=name, force=force, checkpoint=True
)
logger.info("Checkpoint experiment iteration '%s'.", exp_rev[:7])
except UnchangedExperimentError:
pass
@classmethod
def commit(
cls,
scm: "Git",
exp_hash: str,
exp_name: Optional[str] = None,
force: bool = False,
checkpoint: bool = False,
):
"""Commit stages as an experiment and return the commit SHA."""
rev = scm.get_rev()
if not scm.is_dirty():
logger.debug("No changes to commit")
raise UnchangedExperimentError(rev)
check_conflict = False
branch = scm.get_ref(EXEC_BRANCH, follow=False)
if branch:
old_ref = rev
logger.debug("Commit to current experiment branch '%s'", branch)
else:
baseline_rev = scm.get_ref(EXEC_BASELINE)
name = exp_name if exp_name else f"exp-{exp_hash[:5]}"
ref_info = ExpRefInfo(baseline_rev, name)
branch = str(ref_info)
old_ref = None
if scm.get_ref(branch):
if not force:
check_conflict = True
logger.debug(
"%s existing experiment branch '%s'",
"Replace" if force else "Reuse",
branch,
)
else:
logger.debug("Commit to new experiment branch '%s'", branch)
scm.add([], update=True)
scm.commit(f"dvc: commit experiment {exp_hash}", no_verify=True)
new_rev = scm.get_rev()
if check_conflict:
new_rev = cls._raise_ref_conflict(scm, branch, new_rev, checkpoint)
else:
scm.set_ref(branch, new_rev, old_ref=old_ref)
scm.set_ref(EXEC_BRANCH, branch, symbolic=True)
if checkpoint:
scm.set_ref(EXEC_CHECKPOINT, new_rev)
return new_rev
@staticmethod
def _raise_ref_conflict(scm, ref, new_rev, checkpoint):
# If this commit is a duplicate of the existing commit at 'ref', return
# the existing commit. Otherwise, error out and require user to re-run
# with --force as needed
orig_rev = scm.get_ref(ref)
if scm.diff(orig_rev, new_rev):
if checkpoint:
raise CheckpointExistsError(ref)
raise ExperimentExistsError(ref)
return orig_rev
@staticmethod
def _set_log_level(level):
from dvc.logger import disable_other_loggers
# When executor.reproduce is run in a multiprocessing child process,
# dvc.main will not be called for that child process so we need to
# setup logging ourselves
dvc_logger = logging.getLogger("dvc")
disable_other_loggers()
if level is not None:
dvc_logger.setLevel(level)
| 32.071823
| 79
| 0.558599
|
75b99fb192196d9b6bf4f8907baf56184748d30b
| 6,334
|
py
|
Python
|
gooey/tests/test_integration.py
|
jwaschkau/Gooey
|
57b4f2e79b9d1e72325ee02179ead4a46b1d97a6
|
[
"MIT"
] | null | null | null |
gooey/tests/test_integration.py
|
jwaschkau/Gooey
|
57b4f2e79b9d1e72325ee02179ead4a46b1d97a6
|
[
"MIT"
] | null | null | null |
gooey/tests/test_integration.py
|
jwaschkau/Gooey
|
57b4f2e79b9d1e72325ee02179ead4a46b1d97a6
|
[
"MIT"
] | null | null | null |
import json
import time
import unittest
from concurrent import futures
from gooey.gui import application
from gooey.gui.lang.i18n import _
class TestGooeyIntegration(unittest.TestCase):
"""
A few quick integration tests that exercise Gooey's various run modes
WX Python needs to control the main thread. So, in order to simulate a user
running through the system, we have to execute the actual assertions in a
different thread
"""
def performTest(self, configPath, assertionFunction):
"""
Primary test harness.
Instantiates the WX App, and spawns the threads
required to make assertions against it
"""
with open(configPath, 'r') as f:
build_spec = json.loads(f.read())
app = application.build_app(build_spec=build_spec)
executor = futures.ThreadPoolExecutor(max_workers=1)
testResult = executor.submit(assertionFunction, app, build_spec)
app.MainLoop()
testResult.result()
# some extra padding time between starting/stopping the wx App
app.Destroy()
time.sleep(1)
def test_gooeyNormalRun(self):
""" Tests the happy path through the default run mode of Gooey """
self.performTest('./gooey/tests/gooey_config__normal.json', self.gooeySanityTest)
def test_gooeySubparserMode(self):
""" Tests the happy path through the subparser run mode of Gooey """
self.performTest('./gooey/tests/gooey_config__subparser.json', self.gooeySanityTest)
def test__gooeyAutoStart(self):
"""Verifies that issue #201 doesn't regress and auto_start skips the config
screen and hops right into the client's program"""
self.performTest('./gooey/tests/gooey_config__autostart.json', self.verifyAutoStart)
def test__gooeyValidation(self):
"""Verifies that custom validation routines supplied via gooey_options prevents
the user from advancing past the configuration page when they fail"""
self.performTest('./gooey/tests/gooey_config__autostart.json', self.verifyValidators)
def verifyValidators(self, app, buildSpec):
time.sleep(1)
try:
app.TopWindow.onStart()
# we should still be on the configuration page due to a validation fail
title = app.TopWindow.header._header.GetLabel()
subtitle = app.TopWindow.header._subheader.GetLabel()
self.assertNotEqual(title, buildSpec['program_name'])
self.assertNotEqual(subtitle, buildSpec['program_description'])
except:
app.TopWindow.Destroy()
raise
else:
app.TopWindow.Destroy()
return None
def verifyAutoStart(self, app, buildSpec):
"""
When the auto_start flag == True Gooey should skip the
configuration screen
"""
time.sleep(1)
try:
# Gooey should NOT be showing the name/description headers
# present on the config page
title = app.TopWindow.header._header.GetLabel()
subtitle = app.TopWindow.header._subheader.GetLabel()
self.assertNotEqual(title, buildSpec['program_name'])
self.assertNotEqual(subtitle, buildSpec['program_description'])
# Gooey should be showing the console messages straight away
# without manually starting the program
title = app.TopWindow.header._header.GetLabel()
subtitle = app.TopWindow.header._subheader.GetLabel()
self.assertEqual(title,_("running_title"))
self.assertEqual(subtitle, _('running_msg'))
# Wait for Gooey to swap the header to the final screen
while app.TopWindow.header._header.GetLabel() == _("running_title"):
time.sleep(.1)
# verify that we've landed on the success screen
title = app.TopWindow.header._header.GetLabel()
subtitle = app.TopWindow.header._subheader.GetLabel()
self.assertEqual(title, _("finished_title"))
self.assertEqual(subtitle, _('finished_msg'))
# and that output was actually written to the console
self.assertIn("Success", app.TopWindow.console.textbox.GetValue())
except:
app.TopWindow.Destroy()
raise
else:
app.TopWindow.Destroy()
return None
def gooeySanityTest(self, app, buildSpec):
time.sleep(1)
try:
            # Check that our header is present and showing data
title = app.TopWindow.header._header.GetLabel()
subtitle = app.TopWindow.header._subheader.GetLabel()
self.assertEqual(title, buildSpec['program_name'])
self.assertEqual(subtitle, buildSpec['program_description'])
# switch to the run screen
app.TopWindow.onStart()
            # Should find the expected text in the header
title = app.TopWindow.header._header.GetLabel()
subtitle = app.TopWindow.header._subheader.GetLabel()
self.assertEqual(title,_("running_title"))
self.assertEqual(subtitle, _('running_msg'))
# Wait for Gooey to swap the header to the final screen
while app.TopWindow.header._header.GetLabel() == _("running_title"):
time.sleep(.1)
# verify that we've landed on the success screen
title = app.TopWindow.header._header.GetLabel()
subtitle = app.TopWindow.header._subheader.GetLabel()
self.assertEqual(title, _("finished_title"))
self.assertEqual(subtitle, _('finished_msg'))
# and that output was actually written to the console
self.assertIn("Success", app.TopWindow.console.textbox.GetValue())
except:
app.TopWindow.Destroy()
raise
else:
app.TopWindow.Destroy()
return None
if __name__ == '__main__':
unittest.main()
| 37.47929
| 94
| 0.618251
|
800199a38d8cf1c5886f8347e4eb868d640f1b93
| 2,553
|
py
|
Python
|
tests/test_indicator.py
|
alexcwyu/python-trading
|
a494f602411a3ebfdecae002a16a5ea93fc7a046
|
[
"Apache-2.0"
] | 17
|
2016-03-30T21:52:30.000Z
|
2021-05-01T18:21:48.000Z
|
tests/test_indicator.py
|
ajmal017/python-trading
|
a494f602411a3ebfdecae002a16a5ea93fc7a046
|
[
"Apache-2.0"
] | 2
|
2016-10-04T19:29:05.000Z
|
2017-02-01T19:24:39.000Z
|
tests/test_indicator.py
|
ajmal017/python-trading
|
a494f602411a3ebfdecae002a16a5ea93fc7a046
|
[
"Apache-2.0"
] | 9
|
2016-04-24T05:05:26.000Z
|
2020-05-03T13:01:34.000Z
|
from unittest import TestCase
from algotrader.trading.context import ApplicationContext
from algotrader.utils.indicator import parse_series, get_or_create_indicator
from algotrader.technical.ma import SMA
class IndicatorTest(TestCase):
def setUp(self):
self.app_context = ApplicationContext()
def test_reuse(self):
close = self.app_context.inst_data_mgr.get_series("bar")
close.start(self.app_context)
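        # Indicators requested with identical parameters should resolve to the
        # same cached instance; a different length yields a separate indicator.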
sma1 = get_or_create_indicator(self.app_context.inst_data_mgr, cls=SMA, inputs='bar', input_keys='close',
length=3)
sma1.start(self.app_context)
sma2 = get_or_create_indicator(self.app_context.inst_data_mgr, cls=SMA, inputs='bar', input_keys='close',
length=3)
sma2.start(self.app_context)
sma3 = get_or_create_indicator(self.app_context.inst_data_mgr, cls=SMA, inputs='bar', input_keys='close',
length=10)
sma3.start(self.app_context)
self.assertEquals(sma1, sma2)
self.assertNotEquals(sma2, sma3)
self.assertNotEquals(sma1, sma3)
sma4 = get_or_create_indicator(self.app_context.inst_data_mgr, cls=SMA, inputs=sma3, length=10)
sma4.start(self.app_context)
self.assertEquals(sma4.input_series[0], sma3)
# def test_parse(self):
# bar = parse_series(self.app_context.inst_data_mgr, "bar")
# bar.start(self.app_context)
#
# sma1 = parse_series(self.app_context.inst_data_mgr, "SMA(bar[close],length=3)")
# sma1.start(self.app_context)
#
# sma2 = parse_series(self.app_context.inst_data_mgr, "SMA(SMA(bar[close],length=3)[value],length=10)")
# sma2.start(self.app_context)
#
# rsi = parse_series(self.app_context.inst_data_mgr, "RSI(SMA(SMA('bar',close,3),value,10),value,14, 9)")
# rsi.start(self.app_context)
#
# self.assertEquals(sma1.input, bar)
# self.assertEquals(3, sma1.length)
#
# self.assertEquals(sma2.input, sma1)
# self.assertEquals(10, sma2.length)
#
# self.assertEquals(rsi.input, sma2)
# self.assertEquals(14, rsi.length)
#
# def test_fail_parse(self):
# with self.assertRaises(AssertionError):
# parse_series(self.app_context.inst_data_mgr, "SMA('Bar.Close',3")
#
# with self.assertRaises(AssertionError):
# parse_series(self.app_context.inst_data_mgr, "RSI(SMA(SMA('Bar.Close',3,10),14)")
| 39.276923
| 113
| 0.64669
|
232db6b8d3e673ec4e8d04319bbcaec1fad1c5d5
| 8,918
|
py
|
Python
|
cryptoapis/model/list_transactions_by_block_height_response_item_blockchain_specific_dash_vin.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
cryptoapis/model/list_transactions_by_block_height_response_item_blockchain_specific_dash_vin.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
cryptoapis/model/list_transactions_by_block_height_response_item_blockchain_specific_dash_vin.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | 1
|
2021-07-21T03:35:18.000Z
|
2021-07-21T03:35:18.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from cryptoapis.model.list_transactions_by_block_height_response_item_blockchain_specific_dash_script_sig import ListTransactionsByBlockHeightResponseItemBlockchainSpecificDashScriptSig
globals()['ListTransactionsByBlockHeightResponseItemBlockchainSpecificDashScriptSig'] = ListTransactionsByBlockHeightResponseItemBlockchainSpecificDashScriptSig
class ListTransactionsByBlockHeightResponseItemBlockchainSpecificDashVin(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'addresses': ([str],), # noqa: E501
'coinbase': (str,), # noqa: E501
'script_sig': (ListTransactionsByBlockHeightResponseItemBlockchainSpecificDashScriptSig,), # noqa: E501
'sequence': (str,), # noqa: E501
'txinwitness': ([str],), # noqa: E501
'vout': (int,), # noqa: E501
'txid': (str,), # noqa: E501
'value': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'addresses': 'addresses', # noqa: E501
'coinbase': 'coinbase', # noqa: E501
'script_sig': 'scriptSig', # noqa: E501
'sequence': 'sequence', # noqa: E501
'txinwitness': 'txinwitness', # noqa: E501
'vout': 'vout', # noqa: E501
'txid': 'txid', # noqa: E501
'value': 'value', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, addresses, coinbase, script_sig, sequence, txinwitness, vout, *args, **kwargs): # noqa: E501
"""ListTransactionsByBlockHeightResponseItemBlockchainSpecificDashVin - a model defined in OpenAPI
Args:
addresses ([str]):
coinbase (str): Represents the coinbase hex.
script_sig (ListTransactionsByBlockHeightResponseItemBlockchainSpecificDashScriptSig):
sequence (str): Represents the script sequence number.
txinwitness ([str]):
vout (int): It refers to the index of the output address of this transaction. The index starts from 0.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
txid (str): Represents the reference transaction identifier.. [optional] # noqa: E501
value (str): Represents the sent/received amount.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.addresses = addresses
self.coinbase = coinbase
self.script_sig = script_sig
self.sequence = sequence
self.txinwitness = txinwitness
self.vout = vout
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| 44.148515
| 484
| 0.6146
|
7ea4878b4ff0813016985329087ff75c6805e2cc
| 2,371
|
py
|
Python
|
Solutions/VMX2-VoicemailExpress/Code/sub_other.py
|
cbgandhi-code/amazon-connect-salesforce-scv
|
fc5da5445b01295e530b50aa774598e91087c57a
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
Solutions/VMX2-VoicemailExpress/Code/sub_other.py
|
cbgandhi-code/amazon-connect-salesforce-scv
|
fc5da5445b01295e530b50aa774598e91087c57a
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
Solutions/VMX2-VoicemailExpress/Code/sub_other.py
|
cbgandhi-code/amazon-connect-salesforce-scv
|
fc5da5445b01295e530b50aa774598e91087c57a
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
# Version: 2022.03.23
"""
**********************************************************************************************************************
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated *
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation *
* the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and *
* to permit persons to whom the Software is furnished to do so. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO *
* THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF *
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS *
* IN THE SOFTWARE. *
**********************************************************************************************************************
"""
# Import the necessary modules for this flow to work
import json
import os
import logging
import boto3
logger = logging.getLogger()
logger.setLevel(logging.getLevelName(os.getenv('lambda_logging_level', 'DEBUG')))
def vm_to_other(writer_payload):
logger.info('Beginning Voicemail to other')
logger.debug(writer_payload)
try:
# TODO Implement
        logger.info('Record {0} successfully executed.'.format(writer_payload['loop_counter']))
return 'success'
except Exception as e:
logger.error(e)
logger.error('Record {0} Failed to execute other.'.format(writer_payload['loop_counter']))
return 'fail'
| 56.452381
| 119
| 0.518768
|
bc9d84411558123a05436a09a0577c76e61e4e5b
| 140
|
py
|
Python
|
scorecard/templatetags/jsonify.py
|
desafinadude/municipal-data
|
1c86c55bbb59f9c8087f6920fae3585dd90d5d43
|
[
"MIT"
] | 19
|
2018-01-09T10:54:15.000Z
|
2022-01-25T13:10:55.000Z
|
wazimap/templatetags/jsonify.py
|
ChrisAchinga/wazimap
|
a66a1524030a8b98e7ea0dfb270d1946ca75b3b2
|
[
"MIT"
] | 66
|
2016-02-15T08:59:29.000Z
|
2017-09-21T14:00:43.000Z
|
wazimap/templatetags/jsonify.py
|
ChrisAchinga/wazimap
|
a66a1524030a8b98e7ea0dfb270d1946ca75b3b2
|
[
"MIT"
] | 18
|
2017-10-06T12:26:37.000Z
|
2021-08-30T01:38:37.000Z
|
import json
from django import template
register = template.Library()
@register.filter
def jsonify(value):
return json.dumps(value)
| 12.727273
| 29
| 0.757143
|
7fb03a28b0bb7be23f9ae112637c66d6ec603515
| 2,539
|
py
|
Python
|
src/tox/session/commands/show_config.py
|
snsnlou/tox
|
036dfaca03a8202be77ccc3ce70e1f1f17ece57c
|
[
"MIT"
] | 1
|
2021-07-07T01:46:02.000Z
|
2021-07-07T01:46:02.000Z
|
src/tox/session/commands/show_config.py
|
snsnlou/tox
|
036dfaca03a8202be77ccc3ce70e1f1f17ece57c
|
[
"MIT"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
src/tox/session/commands/show_config.py
|
snsnlou/tox
|
036dfaca03a8202be77ccc3ce70e1f1f17ece57c
|
[
"MIT"
] | 3
|
2021-04-15T06:17:40.000Z
|
2021-09-14T04:17:59.000Z
|
import sys
from collections import OrderedDict
from packaging.requirements import Requirement
from packaging.utils import canonicalize_name
from six import StringIO
from six.moves import configparser
from tox import reporter
from tox.util.stdlib import importlib_metadata
DO_NOT_SHOW_CONFIG_ATTRIBUTES = (
"interpreters",
"envconfigs",
"envlist",
"pluginmanager",
"envlist_explicit",
)
def show_config(config):
parser = configparser.RawConfigParser()
if not config.envlist_explicit or reporter.verbosity() >= reporter.Verbosity.INFO:
tox_info(config, parser)
version_info(parser)
tox_envs_info(config, parser)
content = StringIO()
parser.write(content)
value = content.getvalue().rstrip()
reporter.verbosity0(value)
def tox_envs_info(config, parser):
if config.envlist_explicit:
env_list = config.envlist
elif config.option.listenvs:
env_list = config.envlist_default
else:
env_list = list(config.envconfigs.keys())
for name in env_list:
env_config = config.envconfigs[name]
values = OrderedDict(
(attr.name, str(getattr(env_config, attr.name)))
for attr in config._parser._testenv_attr
)
section = "testenv:{}".format(name)
set_section(parser, section, values)
def tox_info(config, parser):
info = OrderedDict(
(i, str(getattr(config, i)))
for i in sorted(dir(config))
if not i.startswith("_") and i not in DO_NOT_SHOW_CONFIG_ATTRIBUTES
)
info["host_python"] = sys.executable
set_section(parser, "tox", info)
def version_info(parser):
versions = OrderedDict()
to_visit = {"tox"}
while to_visit:
current = to_visit.pop()
current_dist = importlib_metadata.distribution(current)
current_name = canonicalize_name(current_dist.metadata["name"])
versions[current_name] = current_dist.version
if current_dist.requires is not None:
for require in current_dist.requires:
pkg = Requirement(require)
pkg_name = canonicalize_name(pkg.name)
if (
pkg.marker is None or pkg.marker.evaluate({"extra": ""})
) and pkg_name not in versions:
to_visit.add(pkg_name)
set_section(parser, "tox:versions", versions)
def set_section(parser, section, values):
parser.add_section(section)
for key, value in values.items():
parser.set(section, key, value)
| 29.870588
| 86
| 0.664829
|
01fb516c83f0e4393acb7127cb904fddb3bd860a
| 770
|
py
|
Python
|
Raspberry_Pi_Pico/button_test.py
|
jckantor/cbe61622
|
bdc08e6c4f0674c5e991617945cafd1b121d6b4b
|
[
"MIT"
] | 2
|
2021-11-22T20:36:35.000Z
|
2021-12-07T07:52:10.000Z
|
Raspberry_Pi_Pico/button_test.py
|
jckantor/cbe-virtual-laboratory
|
bdc08e6c4f0674c5e991617945cafd1b121d6b4b
|
[
"MIT"
] | null | null | null |
Raspberry_Pi_Pico/button_test.py
|
jckantor/cbe-virtual-laboratory
|
bdc08e6c4f0674c5e991617945cafd1b121d6b4b
|
[
"MIT"
] | 1
|
2021-12-11T20:39:32.000Z
|
2021-12-11T20:39:32.000Z
|
from machine import Pin
from rp2 import PIO, StateMachine, asm_pio
from time import sleep
import sys
@asm_pio(set_init=(PIO.OUT_LOW,) * 4)
def prog():
wrap_target()
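    # Each step sets the 4-pin group to a one-hot pattern (8, 4, 2, 1) and holds
    # it for 7 instructions x 32 cycles before moving to the next pin.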
set(pins, 8) [31] # 8
nop() [31]
nop() [31]
nop() [31]
nop() [31]
nop() [31]
nop() [31]
set(pins, 4) [31] # 4
nop() [31]
nop() [31]
nop() [31]
nop() [31]
nop() [31]
nop() [31]
set(pins, 2) [31] # 2
nop() [31]
nop() [31]
nop() [31]
nop() [31]
nop() [31]
nop() [31]
set(pins, 1) [31] # 1
nop() [31]
nop() [31]
nop() [31]
nop() [31]
nop() [31]
nop() [31]
wrap()
sm = StateMachine(0, prog, freq=100000, set_base=Pin(2))
sm.active(1)
sleep(10)
sm.active(0)
sm.exec("set(pins,0)")
| 16.041667
| 56
| 0.487013
|
8b2d760e2abd047fba953c34620f1f66ae437ef9
| 263
|
py
|
Python
|
tests/artificial/transf_Fisher/trend_MovingMedian/cycle_7/ar_/test_artificial_128_Fisher_MovingMedian_7__0.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/artificial/transf_Fisher/trend_MovingMedian/cycle_7/ar_/test_artificial_128_Fisher_MovingMedian_7__0.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/artificial/transf_Fisher/trend_MovingMedian/cycle_7/ar_/test_artificial_128_Fisher_MovingMedian_7__0.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 7, transform = "Fisher", sigma = 0.0, exog_count = 0, ar_order = 0);
| 37.571429
| 163
| 0.730038
|
34d7fa6a9721c48429b22da3bbfef67aa499bb5a
| 9,282
|
py
|
Python
|
pims/formats/utils/abstract.py
|
Cytomine-ULiege/pims
|
3c13f054be3ce9b6755428ccd9c5e0c1a8fb02d4
|
[
"Apache-2.0"
] | 2
|
2022-01-19T08:58:12.000Z
|
2022-01-28T14:40:41.000Z
|
pims/formats/utils/abstract.py
|
Cytomine-ULiege/pims
|
3c13f054be3ce9b6755428ccd9c5e0c1a8fb02d4
|
[
"Apache-2.0"
] | 18
|
2021-09-20T08:47:11.000Z
|
2022-03-14T15:51:37.000Z
|
pims/formats/utils/abstract.py
|
Cytomine-ULiege/pims
|
3c13f054be3ce9b6755428ccd9c5e0c1a8fb02d4
|
[
"Apache-2.0"
] | null | null | null |
# * Copyright (c) 2020-2021. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from __future__ import annotations
import logging
import re
from abc import ABC
from functools import cached_property
from typing import Any, Dict, List, Optional, TYPE_CHECKING, Type
from pims.cache import SimpleDataCache
from pims.formats.utils.checker import AbstractChecker
from pims.formats.utils.convertor import AbstractConvertor
from pims.formats.utils.histogram import AbstractHistogramReader
from pims.formats.utils.parser import AbstractParser
from pims.formats.utils.reader import AbstractReader
from pims.formats.utils.structures.annotations import ParsedMetadataAnnotation
from pims.formats.utils.structures.metadata import ImageMetadata, MetadataStore
from pims.formats.utils.structures.planes import PlanesInfo
from pims.formats.utils.structures.pyramid import Pyramid
if TYPE_CHECKING:
from pims.files.file import Path
log = logging.getLogger("pims.formats")
_CAMEL_TO_SPACE_PATTERN = re.compile(r'((?<=[a-z])[A-Z]|(?<!\A)[A-Z](?=[a-z]))')
class CachedDataPath(SimpleDataCache):
"""
A cache associated to a path.
    Technical details: It would be more meaningful to have `CachedDataPath` inherit
    from both `SimpleDataCache` and `Path`, as Python allows multiple inheritance.
    Another meaningful implementation would be to have `CachedDataPath` extend `Path`
    and carry a `cache` attribute. However, both solutions are impossible because
    they cause circular imports.
"""
def __init__(self, path: Path):
super().__init__()
self.path = path
class AbstractFormat(ABC, SimpleDataCache):
"""
Base format. All image formats must extend this class.
"""
checker_class: Type[AbstractChecker] = None
parser_class: Type[AbstractParser] = None
reader_class: Type[AbstractReader] = None
convertor_class: Type[AbstractConvertor] = None
histogram_reader_class: Type[AbstractHistogramReader] = None
def __init__(self, path: Path, existing_cache: Dict[str, Any] = None):
"""
Initialize an image in this format. It does nothing until some
parsing or reading methods are called.
Parameters
----------
path
The image path
existing_cache
A cache of data related to the image that have been previously
computed and that could be used again in the future.
In practice, it is used to collect data computed during matching
(format identification) that can be used again in parser or reader.
"""
self._path = path
super(AbstractFormat, self).__init__(existing_cache)
self._enabled = False
self.parser = self.parser_class(self)
self.reader = self.reader_class(self)
self.convertor = self.convertor_class(self) if self.convertor_class else None
self.histogram_reader = self.histogram_reader_class(self)
@classmethod
def init(cls):
"""
Initialize the format, such that all third-party libs are ready.
"""
pass
@classmethod
def _get_identifier(cls):
"""
Get the format identifier. It must be unique across all formats.
"""
return cls.__name__.replace('Format', '')
@classmethod
def get_identifier(cls, uppercase: bool = True) -> str:
"""
Get the format identifier. It must be unique across all formats.
Parameters
----------
uppercase: bool
            Whether the identifier must be returned in uppercase.
            In practice, comparisons are always done using the uppercase identifier.
Returns
-------
identifier: str
The format identifier
"""
identifier = cls._get_identifier()
if uppercase:
return identifier.upper()
return identifier
@classmethod
def get_name(cls) -> str:
"""Get the format name in a human-readable way."""
return re.sub(_CAMEL_TO_SPACE_PATTERN, r' \1', cls.get_identifier(False))
@classmethod
def get_remarks(cls) -> str:
"""Get format remarks in a human-readable way."""
return str()
@classmethod
def get_plugin_name(cls) -> str:
"""Get PIMS format plugin name adding this format."""
return '.'.join(cls.__module__.split('.')[:-1])
@classmethod
def is_readable(cls) -> bool:
"""Whether PIMS can read images in this format."""
return cls.reader_class is not None
@classmethod
def is_writable(cls): # TODO
return False
@classmethod
def is_convertible(cls) -> bool:
"""Whether PIMS can convert images in this format into another one."""
return cls.convertor_class is not None
@classmethod
def is_spatial(cls) -> bool:
"""Whether this format is adapted for spatial data requests."""
return False
@classmethod
def is_spectral(cls) -> bool:
"""Whether this format is adapted for spectral data requests."""
return False
@classmethod
def match(cls, cached_path: CachedDataPath) -> bool:
"""
Identify if it is this format or not.
Parameters
----------
cached_path : CachedDataPath
The path, proxied with some useful results across formats.
Returns
-------
match: boolean
Whether it is this format
"""
if cls.checker_class:
return cls.checker_class.match(cached_path)
return False
@classmethod
def from_proxy(cls, cached_path: CachedDataPath) -> AbstractFormat:
return cls(path=cached_path.path, existing_cache=cached_path.cache)
@classmethod
def from_path(cls, path: Path) -> AbstractFormat:
return cls(path=path)
@property
def enabled(self):
return self._enabled
@enabled.setter
def enabled(self, value):
self._enabled = value
@property
def path(self) -> Path:
return self._path
@property
def media_type(self) -> str:
return "image"
# Conversion
@cached_property
def need_conversion(self) -> bool:
"""
Whether the image in this format needs to be converted to another one.
Decision can be made based on the format metadata.
"""
return True
def conversion_format(self) -> Optional[Type[AbstractFormat]]:
"""
Get the format to which the image in this format will be converted,
if needed.
"""
if self.convertor:
return self.convertor.conversion_format()
else:
return None
def convert(self, dest_path: Path) -> bool:
"""
Convert the image in this format to another one at a given destination
path.
Returns
-------
result
Whether the conversion succeeded or not
"""
if self.convertor:
return self.convertor.convert(dest_path)
else:
raise NotImplementedError()
# Metadata parsing
@cached_property
def main_imd(self) -> ImageMetadata:
"""
Get main image metadata, that is, required metadata to process
any request.
It is possible that other non-required metadata have been populated.
"""
return self.parser.parse_main_metadata()
@cached_property
def full_imd(self) -> ImageMetadata:
"""
Get full image metadata, that is, all known and standardised metadata.
`self.full_imd.is_complete` should be true.
"""
return self.parser.parse_known_metadata()
@cached_property
def raw_metadata(self) -> MetadataStore:
"""
Get all raw metadata in a generic store. Raw metadata are not
standardised and highly depend on underlying parsed format.
Raw metadata MUST NOT be used by PIMS for processing.
"""
return self.parser.parse_raw_metadata()
@cached_property
def pyramid(self) -> Pyramid:
"""
Get image format pyramid. There is always at least one tier (the
pyramid basis).
"""
return self.parser.parse_pyramid()
@cached_property
def planes_info(self) -> PlanesInfo:
"""
Information about each plane.
"""
return self.parser.parse_planes()
@cached_property
def annotations(self) -> List[ParsedMetadataAnnotation]:
"""
Get annotations stored in image format metadata.
"""
return self.parser.parse_annotations()
@cached_property
def histogram(self):
return self.histogram_reader
| 30.94
| 87
| 0.648028
|
01c9e895234998512a96bc9e6a171dae8990d6ad
| 12,240
|
py
|
Python
|
beastbot/utility/vec.py
|
NicEastvillage/RLBot-Beast
|
1d178cd79a22a4d2d9b22341906bb810c3f8659b
|
[
"MIT"
] | 8
|
2019-04-24T12:16:30.000Z
|
2021-12-23T14:35:57.000Z
|
beastbot/utility/vec.py
|
NicEastvillage/RLBot-Beast
|
1d178cd79a22a4d2d9b22341906bb810c3f8659b
|
[
"MIT"
] | null | null | null |
beastbot/utility/vec.py
|
NicEastvillage/RLBot-Beast
|
1d178cd79a22a4d2d9b22341906bb810c3f8659b
|
[
"MIT"
] | 2
|
2018-12-16T16:59:00.000Z
|
2021-12-23T14:36:04.000Z
|
import math
from utility.rlmath import clip
class Vec3:
def __init__(self, x: float or 'Vec3'=0.0, y: float=0.0, z: float=0.0):
if hasattr(x, 'x'):
# We have been given a vector. Copy it
self.x = float(x.x)
self.y = float(x.y) if hasattr(x, 'y') else 0
self.z = float(x.z) if hasattr(x, 'z') else 0
else:
self.x = float(x)
self.y = float(y)
self.z = float(z)
def __getitem__(self, item: int):
return (self.x, self.y, self.z)[item]
def __add__(self, other: 'Vec3') -> 'Vec3':
return Vec3(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other: 'Vec3') -> 'Vec3':
return Vec3(self.x - other.x, self.y - other.y, self.z - other.z)
def __neg__(self) -> 'Vec3':
return Vec3(-self.x, -self.y, -self.z)
def __mul__(self, scale: float) -> 'Vec3':
return Vec3(self.x * scale, self.y * scale, self.z * scale)
def __rmul__(self, scale: float) -> 'Vec3':
return self * scale
def __truediv__(self, scale: float) -> 'Vec3':
scale = 1 / float(scale)
return self * scale
def __abs__(self) -> 'Vec3':
return Vec3(abs(self.x), abs(self.y), abs(self.z))
def __str__(self):
return "Vec3(" + str(self.x) + ", " + str(self.y) + ", " + str(self.z) + ")"
class Mat33:
def __init__(self, xx: float or Vec3 or 'Mat33'=0.0, xy: float or Vec3=0.0, xz: float or Vec3=0.0,
yx: float=0.0, yy: float=0.0, yz: float=0.0, zx: float=0.0, zy: float=0.0, zz: float=0.0):
"""
Mat33(xx, xy, xz, yx, yy, yz, zx, zy, zz)
Mat33(mat)
"""
if hasattr(xx, "data"):
self.data = xx.data.copy()
else:
self.data = [xx, xy, xz, yx, yy, yz, zx, zy, zz]
    # Element accessors; each setter forwards the assigned value to set(row, col, value).
    xx = property(lambda self: self.get(0, 0), lambda self, v: self.set(0, 0, v), None)
    xy = property(lambda self: self.get(0, 1), lambda self, v: self.set(0, 1, v), None)
    xz = property(lambda self: self.get(0, 2), lambda self, v: self.set(0, 2, v), None)
    yx = property(lambda self: self.get(1, 0), lambda self, v: self.set(1, 0, v), None)
    yy = property(lambda self: self.get(1, 1), lambda self, v: self.set(1, 1, v), None)
    yz = property(lambda self: self.get(1, 2), lambda self, v: self.set(1, 2, v), None)
    zx = property(lambda self: self.get(2, 0), lambda self, v: self.set(2, 0, v), None)
    zy = property(lambda self: self.get(2, 1), lambda self, v: self.set(2, 1, v), None)
    zz = property(lambda self: self.get(2, 2), lambda self, v: self.set(2, 2, v), None)
def __getitem__(self, item: int):
return self.data[item]
def __setitem__(self, key: int, value: float):
self.data[key] = value
def get(self, row: int, col: int) -> float:
return self.data[row * 3 + col]
def set(self, row: int, col: int, val: float):
self.data[row * 3 + col] = val
def __add__(self, other: 'Mat33') -> 'Mat33':
mat = Mat33()
for i in range(9):
mat[i] = self[i] + other[i]
return mat
def __sub__(self, other: 'Mat33') -> 'Mat33':
mat = Mat33()
for i in range(9):
mat[i] = self[i] - other[i]
return mat
def __neg__(self):
mat = Mat33()
for i in range(9):
mat[i] = -self[i]
return mat
def __mul__(self, scale: float or 'Mat33') -> 'Mat33':
mat = Mat33()
if hasattr(scale, "data"):
for i in range(9):
mat[i] = self[i] * scale[i]
else:
for i in range(9):
mat[i] = self[i] * scale
return mat
def __rmul__(self, scale):
return self * scale
def __truediv__(self, scale: float) -> 'Mat33':
scale = 1 / float(scale)
return self * scale
def __str__(self):
return "Mat33(" + str(self.xx) + ", " + str(self.xy) + ", " + str(self.xz) + ", " \
+ str(self.yx) + ", " + str(self.yy) + ", " + str(self.yz) + ", " \
+ str(self.zx) + ", " + str(self.zy) + ", " + str(self.zz) + ")"
def col(self, n: int) -> Vec3:
return Vec3(self.get(0, n), self.get(1, n), self.get(2, n))
def row(self, n: int) -> Vec3:
return Vec3(self.get(n, 0), self.get(n, 1), self.get(n, 2))
@staticmethod
def of(v: float) -> 'Mat33':
return Mat33(v, v, v, v, v, v, v, v, v)
@staticmethod
def from_rows(row_a: Vec3, row_b: Vec3, row_c: Vec3) -> 'Mat33':
return Mat33(
row_a.x, row_a.y, row_a.z,
row_b.x, row_b.y, row_b.z,
row_c.x, row_c.y, row_c.z
)
@staticmethod
def from_columns(col_a: Vec3, col_b: Vec3, col_c: Vec3) -> 'Mat33':
return Mat33(
col_a.x, col_b.x, col_c.x,
col_a.y, col_b.y, col_c.y,
col_a.z, col_b.z, col_c.z
)
@staticmethod
def identity():
return Mat33(1, 0, 0, 0, 1, 0, 0, 0, 1)
def xy(vec: Vec3) -> Vec3:
return Vec3(vec.x, vec.y, 0.0)
def norm(vec: Vec3) -> float:
return math.sqrt(vec.x**2 + vec.y**2 + vec.z**2)
def normalize(vec: Vec3) -> Vec3:
return vec / norm(vec)
def dot(mat1: Vec3 or Mat33, mat2: Vec3 or Mat33) -> float or Vec3 or Mat33:
if hasattr(mat1, "data") and hasattr(mat2, "data"):
# Mat dot Mat -> Mat
res = Mat33()
for i in range(3):
for j in range(3):
for k in range(3):
v = res.get(i, j) + mat1.get(i, k) * mat2.get(k, j)
res.set(i, j, v)
return res
elif hasattr(mat1, "data") and hasattr(mat2, "x"):
# Mat dot Vec -> Vec
return Vec3(
mat1.xx * mat2.x + mat1.xy * mat2.y + mat1.xz * mat2.z,
mat1.yx * mat2.x + mat1.yy * mat2.y + mat1.yz * mat2.z,
mat1.zx * mat2.x + mat1.zy * mat2.y + mat1.zz * mat2.z
)
elif hasattr(mat1, "x") and hasattr(mat2, "data"):
# Vec dot Mat -> Vec
return Vec3(
mat1.x * mat2.xx + mat1.y * mat2.yx + mat1.z * mat2.zx,
mat1.x * mat2.xy + mat1.y * mat2.yy + mat1.z * mat2.zy,
mat1.x * mat2.xz + mat1.y * mat2.yz + mat1.z * mat2.zz
)
else:
# Vec dot Vec
return mat1.x * mat2.x + mat1.y * mat2.y + mat1.z * mat2.z
def cross(vecA: Vec3, vecB: Vec3) -> Vec3:
return Vec3(
vecA.y * vecB.z - vecA.z * vecB.y,
vecA.z * vecB.x - vecA.x * vecB.z,
vecA.x * vecB.y - vecA.y * vecB.x
)
def transpose(mat: Mat33) -> Mat33:
matT = Mat33()
for i in range(3):
for j in range(3):
matT.set(j, i, mat.get(i, j))
return matT
def fnorm(mat: Mat33) -> float:
    # Frobenius norm: square root of the sum of squared entries
    total = 0.0
    for i in range(9):
        total += mat[i] * mat[i]
    return math.sqrt(total)
def tr(mat: Mat33) -> float:
return mat.xx + mat.yy + mat.zz
def det(mat: Mat33) -> float:
return mat.get(0, 0) * mat.get(1, 1) * mat.get(2, 2) + mat.get(0, 1) * mat.get(1, 2) * mat.get(2, 0) + \
mat.get(0, 2) * mat.get(1, 0) * mat.get(2, 1) - mat.get(0, 0) * mat.get(1, 2) * mat.get(2, 1) - \
mat.get(0, 1) * mat.get(1, 0) * mat.get(2, 2) - mat.get(0, 2) * mat.get(1, 1) * mat.get(2, 0)
def inv(mat: Mat33) -> Mat33:
invm = Mat33()
invdet = 1.0 / det(mat)
invm.set(0, 0, (mat.get(1, 1) * mat.get(2, 2) - mat.get(1, 2) * mat.get(2, 1)) * invdet)
invm.set(0, 1, (mat.get(0, 2) * mat.get(2, 1) - mat.get(0, 1) * mat.get(2, 2)) * invdet)
invm.set(0, 2, (mat.get(0, 1) * mat.get(1, 2) - mat.get(0, 2) * mat.get(1, 1)) * invdet)
invm.set(1, 0, (mat.get(1, 2) * mat.get(2, 0) - mat.get(1, 0) * mat.get(2, 2)) * invdet)
invm.set(1, 1, (mat.get(0, 0) * mat.get(2, 2) - mat.get(0, 2) * mat.get(2, 0)) * invdet)
invm.set(1, 2, (mat.get(0, 2) * mat.get(1, 0) - mat.get(0, 0) * mat.get(1, 2)) * invdet)
invm.set(2, 0, (mat.get(1, 0) * mat.get(2, 1) - mat.get(1, 1) * mat.get(2, 0)) * invdet)
invm.set(2, 1, (mat.get(0, 1) * mat.get(2, 0) - mat.get(0, 0) * mat.get(2, 1)) * invdet)
invm.set(2, 2, (mat.get(0, 0) * mat.get(1, 1) - mat.get(0, 1) * mat.get(1, 0)) * invdet)
return invm
def vec_max(a: Vec3, b: Vec3) -> Vec3:
return Vec3(max(a.x, b.x), max(a.y, b.y), max(a.z, b.z))
def max_comp(vec: Vec3) -> float:
return max(vec.x, vec.y, vec.z)
def angle_between(v: Vec3, u: Vec3) -> float:
return math.acos(dot(normalize(v), normalize(u)))
def axis_to_rotation(axis: Vec3) -> Mat33:
radians = norm(axis)
if abs(radians) < 0.000001:
return Mat33.identity()
else:
axis = normalize(axis)
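        # Rodrigues' rotation formula: R = I + sin(a) * K + (1 - cos(a)) * K^2,
        # where K is the skew-symmetric cross-product matrix of the unit axis.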
K = Mat33(
0.0, -axis[2], axis[1],
axis[2], 0.0, -axis[0],
-axis[1], axis[0], 0.0
)
return Mat33.identity() + math.sin(radians) * K + (1.0 - math.cos(radians)) * dot(K, K)
"""
u = axis / radians
c = math.cos(radians)
s = math.sin(radians)
return Mat33(
u[0] * u[0] * (1.0 - c) + c,
u[0] * u[1] * (1.0 - c) - u[2] * s,
u[0] * u[2] * (1.0 - c) + u[1] * s,
u[1] * u[0] * (1.0 - c) + u[2] * s,
u[1] * u[1] * (1.0 - c) + c,
u[1] * u[2] * (1.0 - c) - u[0] * s,
u[2] * u[0] * (1.0 - c) - u[1] * s,
u[2] * u[1] * (1.0 - c) + u[0] * s,
u[2] * u[2] * (1.0 - c) + c
)
"""
def rotation_to_axis(rot: Mat33) -> Vec3:
ang = math.acos(clip(0.5 * (tr(rot) - 1.0), -1.0, 1.0))
# For small angles, prefer series expansion to division by sin(theta) ~ 0
if abs(ang) < 0.00001:
scale = 0.5 + ang * ang / 12.0
else:
scale = 0.5 * ang / math.sin(ang)
return Vec3(
rot.get(2, 1) - rot.get(1, 2),
rot.get(0, 2) - rot.get(2, 0),
rot.get(1, 0) - rot.get(0, 1)
) * scale
def euler_to_rotation(pitch_yaw_roll: Vec3) -> Mat33:
cp = math.cos(pitch_yaw_roll[0])
sp = math.sin(pitch_yaw_roll[0])
cy = math.cos(pitch_yaw_roll[1])
sy = math.sin(pitch_yaw_roll[1])
cr = math.cos(pitch_yaw_roll[2])
sr = math.sin(pitch_yaw_roll[2])
rotation = Mat33()
# front direction
rotation.set(0, 0, cp * cy)
rotation.set(1, 0, cp * sy)
rotation.set(2, 0, sp)
# left direction
rotation.set(0, 1, cy * sp * sr - cr * sy)
rotation.set(1, 1, sy * sp * sr + cr * cy)
rotation.set(2, 1, -cp * sr)
# up direction
rotation.set(0, 2, -cr * cy * sp - sr * sy)
rotation.set(1, 2, -cr * sy * sp + sr * cy)
rotation.set(2, 2, cp * cr)
return rotation
def rotation_to_euler(rotation: Mat33) -> Vec3:
return Vec3(
math.atan2(rotation.get(2, 0), norm(Vec3(rotation.get(0, 0), rotation.get(1, 0)))),
math.atan2(rotation.get(1, 0), rotation.get(0, 0)),
math.atan2(-rotation.get(2, 1), rotation.get(2, 2))
)
def rotate2d(vec: Vec3, ang: float) -> Vec3:
c = math.cos(ang)
s = math.sin(ang)
return Vec3(c * vec.x - s * vec.y,
s * vec.x + c * vec.y)
def proj_onto(src: Vec3, dir: Vec3) -> Vec3:
"""
Returns the vector component of src that is parallel with dir, i.e. the projection of src onto dir.
"""
try:
return (dot(src, dir) / dot(dir, dir)) * dir
except ZeroDivisionError:
return Vec3()
def proj_onto_size(src: Vec3, dir: Vec3) -> float:
"""
    Returns the (signed) length of the projection of src onto dir
"""
try:
dir_n = normalize(dir)
return dot(src, dir_n) / dot(dir_n, dir_n) # can be negative!
except ZeroDivisionError:
return norm(src)
# Unit tests
if __name__ == "__main__":
assert angle_between(Vec3(x=1), Vec3(y=1)) == math.pi / 2
assert angle_between(Vec3(y=1), Vec3(y=-1, z=1)) == 0.75 * math.pi
assert norm(dot(axis_to_rotation(Vec3(x=-math.pi)), Vec3(y=1)) - Vec3(y=-1)) < 0.000001
assert norm(dot(axis_to_rotation(Vec3(y=0.5*math.pi)), Vec3(z=1)) - Vec3(x=-1)) < 0.000001
assert norm(dot(axis_to_rotation(Vec3(z=math.pi)), Vec3(x=1)) - Vec3(x=-1)) < 0.000001
pyr = Vec3(0.5, 0.2, -0.4)
assert norm(rotation_to_euler(euler_to_rotation(pyr)) - pyr) < 0.000001
| 31.465296
| 109
| 0.51781
|
d257e5f3201f4d6c4cb8b2ce8869459c6f1d8b63
| 1,030
|
py
|
Python
|
paprika/repositories/SilverpopMailingRepository.py
|
thunder-/paprika
|
af262407ec9c195dbb5a7c205510e6ad2fb65f36
|
[
"MIT"
] | null | null | null |
paprika/repositories/SilverpopMailingRepository.py
|
thunder-/paprika
|
af262407ec9c195dbb5a7c205510e6ad2fb65f36
|
[
"MIT"
] | null | null | null |
paprika/repositories/SilverpopMailingRepository.py
|
thunder-/paprika
|
af262407ec9c195dbb5a7c205510e6ad2fb65f36
|
[
"MIT"
] | null | null | null |
from paprika.repositories.Repository import Repository
class SilverpopMailingRepository(Repository):
def __init__(self, connector):
Repository.__init__(self, connector)
def insert(self, silverpop_mailing):
connection = self.get_connection()
cursor = connection.cursor()
params = dict()
params['mailing_id'] = silverpop_mailing['mailing_id']
params['mailing_name'] = silverpop_mailing['mailing_name']
statement = "insert into silverpop_mailings(mailing_id, mailing_name) values (:mailing_id, :mailing_name)"
statement, parameters = self.statement(statement, params)
cursor.execute(statement, parameters)
if self.has_lastrowid():
silverpop_mailing['id'] = cursor.lastrowid
connection.commit()
return silverpop_mailing
def clean(self):
connection = self.get_connection()
cursor = connection.cursor()
statement = "delete from silverpop_mailings"
cursor.execute(statement)
| 31.212121
| 114
| 0.683495
|
59e71258eec76674dd2b21f097643b0f9f9ab2ff
| 38,105
|
py
|
Python
|
test/test_fapi.py
|
niooss-ledger/tpm2-pytss
|
f748de7993ff6b39900ba4a0332f893eba19194f
|
[
"BSD-2-Clause"
] | null | null | null |
test/test_fapi.py
|
niooss-ledger/tpm2-pytss
|
f748de7993ff6b39900ba4a0332f893eba19194f
|
[
"BSD-2-Clause"
] | null | null | null |
test/test_fapi.py
|
niooss-ledger/tpm2-pytss
|
f748de7993ff6b39900ba4a0332f893eba19194f
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python3 -u
"""
SPDX-License-Identifier: BSD-2
"""
import binascii
import json
import random
import string
import sys
import pytest
pytestmark = pytest.mark.skipif(
"tpm2_pytss.FAPI" not in sys.modules, reason="FAPI Not Detected"
)
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec, padding
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PSS
from tpm2_pytss import *
from tpm2_pytss.internal.utils import is_bug_fixed
from .TSS2_BaseTest import TpmSimulator, TSS2_BaseTest
@pytest.fixture(scope="module")
def simulator():
tpm = TpmSimulator.getSimulator()
tpm.start()
yield tpm
tpm.close()
@pytest.fixture(scope="class")
def fapi_config_ecc(simulator):
with FAPIConfig(
temp_dirs=True,
tcti=simulator.tcti_name_conf,
ek_cert_less="yes",
profile_name="P_ECCP256SHA256",
) as fapi_config:
yield fapi_config
@pytest.fixture(scope="class")
def fapi_config_rsa(simulator):
with FAPIConfig(
temp_dirs=True,
tcti=simulator.tcti_name_conf,
ek_cert_less="yes",
profile_name="P_RSA2048SHA256",
) as fapi_config:
yield fapi_config
@pytest.fixture(scope="class")
def fapi_ecc(fapi_config_ecc):
with FAPI() as fapi:
fapi.provision(is_provisioned_ok=False)
yield fapi
fapi.delete("/")
@pytest.fixture(scope="class")
def fapi_rsa(fapi_config_rsa):
with FAPI() as fapi:
fapi.provision(is_provisioned_ok=False)
yield fapi
fapi.delete("/")
def random_uid() -> str:
"""Generate a random id which can be used e.g. for unique key names."""
return "".join(random.choices(string.digits, k=10))
def sha256(data: bytes) -> bytes:
"""Calculate the SHA256 digest of given data."""
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(data)
digest = digest.finalize()
return digest
# TODO unprovisioned tests
@pytest.fixture(scope="class")
def init_fapi_ecc(request, fapi_ecc):
request.cls.fapi = fapi_ecc
request.cls.profile_name = request.cls.fapi.config.profile_name
yield request.cls.fapi
@pytest.fixture(scope="class")
def init_fapi_rsa(request, fapi_rsa):
request.cls.fapi = fapi_rsa
request.cls.profile_name = request.cls.fapi.config.profile_name
yield request.cls.fapi
class Common:
@pytest.fixture
def esys(self):
with ESAPI(tcti=self.fapi.tcti) as esys:
yield esys
@pytest.fixture
def cryptography_key(self):
key = ec.generate_private_key(ec.SECP256R1(), backend=default_backend())
key_public_pem = (
key.public_key()
.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
)
.decode()
)
return key, key_public_pem
@pytest.fixture
def sign_key(self):
profile_name = self.fapi.config.profile_name
key_path = f"/{profile_name}/HS/SRK/key_{random_uid()}"
self.fapi.create_key(path=key_path, type_="sign, exportable")
yield key_path
self.fapi.delete(path=key_path)
@pytest.fixture
def decrypt_key(self):
profile_name = self.fapi.config.profile_name
key_path = f"/{profile_name}/HS/SRK/key_{random_uid()}"
self.fapi.create_key(path=key_path, type_="decrypt")
yield key_path
self.fapi.delete(path=key_path)
@pytest.fixture
def seal(self):
profile_name = self.fapi.config.profile_name
seal_path = f"/{profile_name}/HS/SRK/seal_{random_uid()}"
seal_data = random_uid().encode()
self.fapi.create_seal(path=seal_path, data=seal_data)
yield seal_path, seal_data
self.fapi.delete(path=seal_path)
@pytest.fixture
def ext_key(self, cryptography_key):
key, key_public_pem = cryptography_key
key_path = f"/ext/key_{random_uid()}"
self.fapi.import_object(path=key_path, import_data=key_public_pem)
yield key_path, key
self.fapi.delete(path=key_path)
@pytest.fixture
def nv_ordinary(self):
nv_path = f"/nv/Owner/nv_{random_uid()}" # TODO Owner should be case insensitive (fix upstream)?
self.fapi.create_nv(path=nv_path, size=10)
yield nv_path
self.fapi.delete(path=nv_path)
@pytest.fixture
def nv_increment(self):
nv_path = f"/nv/Owner/nv_{random_uid()}"
self.fapi.create_nv(path=nv_path, size=10, type_="counter")
yield nv_path
self.fapi.delete(path=nv_path)
@pytest.fixture
def nv_pcr(self):
nv_path = f"/nv/Owner/nv_{random_uid()}"
self.fapi.create_nv(path=nv_path, size=32, type_="pcr")
yield nv_path
self.fapi.delete(path=nv_path)
@pytest.fixture
def nv_bitfield(self):
nv_path = f"/nv/Owner/nv_{random_uid()}"
self.fapi.create_nv(path=nv_path, size=32, type_="bitfield")
yield nv_path
self.fapi.delete(path=nv_path)
def test_provision_ok(self):
provisioned = self.fapi.provision()
assert provisioned is False
def test_provision_fail(self):
with pytest.raises(TSS2_Exception):
self.fapi.provision(is_provisioned_ok=False)
def test_get_random(self):
random_bytes = self.fapi.get_random(42)
assert type(random_bytes) == bytes
assert len(random_bytes) == 42
def test_get_random_zero(self):
random_bytes = self.fapi.get_random(0)
assert type(random_bytes) == bytes
assert len(random_bytes) == 0
def test_get_random_large(self):
with pytest.raises(OverflowError):
self.fapi.get_random(0xFFFFFFFFFFFFFFFF + 1)
def test_get_random_negative(self):
with pytest.raises(OverflowError):
self.fapi.get_random(-1)
def test_get_info(self):
info = self.fapi.get_info()
assert type(info) is str
json.loads(info)
assert "capabilities" in info
def test_list(self):
profile_name = self.fapi.config.profile_name
path_list = self.fapi.list()
assert type(path_list) is list
assert len(path_list) > 0
assert type(path_list[0]) is str
assert f"/{profile_name}/HS" in path_list
def test_list_search_path(self):
profile_name = self.fapi.config.profile_name
search_path = f"/{profile_name}/HE"
path_list = self.fapi.list(search_path)
assert type(path_list) is list
assert len(path_list) > 0
assert type(path_list[0]) is str
assert all(path.startswith(search_path) for path in path_list)
def test_list_bad_search_path(self):
with pytest.raises(TSS2_Exception):
self.fapi.list("/nonexistent")
def test_create_key(self):
profile_name = self.fapi.config.profile_name
key_path = f"/{profile_name}/HS/key_{random_uid()}"
created = self.fapi.create_key(path=key_path)
assert created is True
assert key_path in self.fapi.list()
def test_create_key_double_ok(self):
profile_name = self.fapi.config.profile_name
key_path = f"/{profile_name}/HS/key_{random_uid()}"
created = self.fapi.create_key(path=key_path)
assert created is True
assert key_path in self.fapi.list()
created = self.fapi.create_key(path=key_path, exists_ok=True)
assert created is False
def test_create_key_double_fail(self):
profile_name = self.fapi.config.profile_name
key_path = f"/{profile_name}/HS/key_{random_uid()}"
created = self.fapi.create_key(path=key_path)
assert created is True
assert key_path in self.fapi.list()
with pytest.raises(TSS2_Exception):
self.fapi.create_key(path=key_path)
def test_get_esys_blob_contextload(self, esys, sign_key):
blob_data, blob_type = self.fapi.get_esys_blob(path=sign_key)
assert blob_type == lib.FAPI_ESYSBLOB_CONTEXTLOAD
esys_handle = esys.load_blob(blob_data, blob_type)
esys.read_public(esys_handle)
esys.flush_context(esys_handle)
def test_get_esys_blob_deserialize(self, esys, nv_ordinary):
blob_data, blob_type = self.fapi.get_esys_blob(path=nv_ordinary)
assert blob_type == lib.FAPI_ESYSBLOB_DESERIALIZE
esys_handle = esys.load_blob(blob_data, blob_type)
esys.nv_read_public(esys_handle)
def test_verify(self, ext_key):
# create signature externally
key_path, key = ext_key
message = b"Hello World"
signature = key.sign(message, ec.ECDSA(hashes.SHA256()))
# verify signature via fapi
self.fapi.verify_signature(key_path, sha256(message), signature)
def test_verify_fail(self, ext_key):
key_path, key = ext_key
with pytest.raises(TSS2_Exception):
self.fapi.verify_signature(
key_path, digest=b"A" * 32, signature=b"bad signature"
)
# TODO test encrypt with RSA profile. Needs to be provisioned separately.
@pytest.mark.skipif(
not is_bug_fixed(fixed_in="3.2"), reason="tpm2-tss bug, see #2028"
)
def test_import_key_double_ok(self, cryptography_key):
key, key_public_pem = cryptography_key
key_path = f"/ext/key_{random_uid()}"
imported = self.fapi.import_object(path=key_path, import_data=key_public_pem)
assert imported is True
assert key_path in self.fapi.list()
imported = self.fapi.import_object(
path=key_path, import_data=key_public_pem, exists_ok=True
)
assert imported is False
@pytest.mark.skipif(
not is_bug_fixed(fixed_in="3.2"), reason="tpm2-tss bug, see #2028"
)
def test_import_key_double_fail(self, cryptography_key):
key, key_public_pem = cryptography_key
key_path = f"/ext/key_{random_uid()}"
imported = self.fapi.import_object(path=key_path, import_data=key_public_pem)
assert imported is True
assert key_path in self.fapi.list()
with pytest.raises(TSS2_Exception):
self.fapi.import_object(path=key_path, import_data=key_public_pem)
@pytest.mark.skipif(
not is_bug_fixed(fixed_in="3.2"), reason="tpm2-tss bug, see #2028"
)
def test_import_policy_double_ok(self):
policy = """
{
"description":"Description of this policy",
"policy":[{"type": "POLICYAUTHVALUE"}]
}
"""
policy_path = f"/policy/policy_{random_uid()}"
imported = self.fapi.import_object(path=policy_path, import_data=policy)
assert imported is True
assert policy_path in self.fapi.list()
imported = self.fapi.import_object(
path=policy_path, import_data=policy, exists_ok=True
)
assert imported is False
@pytest.mark.skipif(
not is_bug_fixed(fixed_in="3.2"), reason="tpm2-tss bug, see #2028"
)
def test_import_policy_double_fail(self):
policy = """
{
"description":"Description of this policy",
"policy":[{"type": "POLICYAUTHVALUE"}]
}
"""
policy_path = f"/policy/policy_{random_uid()}"
imported = self.fapi.import_object(path=policy_path, import_data=policy)
assert imported is True
assert policy_path in self.fapi.list()
with pytest.raises(TSS2_Exception):
self.fapi.import_object(path=policy_path, import_data=policy)
def test_import_exported_key(self, sign_key):
exported_data = self.fapi.export_key(path=sign_key)
profile_name = self.fapi.config.profile_name
new_path = f"/{profile_name}/HS/SRK/key_{random_uid()}"
self.fapi.import_object(path=new_path, import_data=exported_data)
def test_export_imported_policy(self):
policy = """
{
"description":"Description of this policy",
"policy":[{"type": "POLICYAUTHVALUE"}]
}
"""
policy_path = f"/policy/policy_{random_uid()}"
self.fapi.import_object(path=policy_path, import_data=policy)
exported_policy = self.fapi.export_policy(path=policy_path)
assert type(exported_policy) == str
assert "Description of this policy" in exported_policy
def test_create_seal(self):
profile_name = self.fapi.config.profile_name
seal_path = f"/{profile_name}/HS/SRK/seal_{random_uid()}"
seal_data = "Hello World"
created = self.fapi.create_seal(path=seal_path, data=seal_data)
assert created is True
assert seal_path in self.fapi.list()
def test_create_seal_double_ok(self):
profile_name = self.fapi.config.profile_name
seal_path = f"/{profile_name}/HS/SRK/seal_{random_uid()}"
seal_data = "Hello World"
created = self.fapi.create_seal(path=seal_path, data=seal_data)
assert created is True
assert seal_path in self.fapi.list()
created = self.fapi.create_seal(path=seal_path, data=seal_data, exists_ok=True)
assert created is False
def test_create_seal_double_fail(self):
profile_name = self.fapi.config.profile_name
seal_path = f"/{profile_name}/HS/SRK/seal_{random_uid()}"
seal_data = "Hello World"
created = self.fapi.create_seal(path=seal_path, data=seal_data)
assert created is True
assert seal_path in self.fapi.list()
with pytest.raises(TSS2_Exception):
self.fapi.create_seal(path=seal_path, data=seal_data)
def test_create_seal_random(self):
profile_name = self.fapi.config.profile_name
seal_path = f"/{profile_name}/HS/SRK/seal_{random_uid()}"
seal_len = 12
created = self.fapi.create_seal(path=seal_path, size=seal_len)
assert created is True
assert seal_path in self.fapi.list()
unseal_data = self.fapi.unseal(path=seal_path)
assert type(unseal_data) is bytes
assert len(unseal_data) == seal_len
def test_create_seal_both_data_and_size_fail(self):
profile_name = self.fapi.config.profile_name
seal_path = f"/{profile_name}/HS/SRK/seal_{random_uid()}"
with pytest.raises(ValueError):
self.fapi.create_seal(path=seal_path, data="Hello World", size=11)
def test_create_seal_neither_data_nor_size_fail(self):
profile_name = self.fapi.config.profile_name
seal_path = f"/{profile_name}/HS/SRK/seal_{random_uid()}"
with pytest.raises(ValueError):
self.fapi.create_seal(path=seal_path)
def test_unseal(self, seal):
seal_path, seal_data = seal
unseal_data = self.fapi.unseal(path=seal_path)
assert type(unseal_data) is bytes
assert seal_data == unseal_data
def test_quote_verify(self, sign_key):
info, signature, pcr_log, certificate = self.fapi.quote(
path=sign_key, pcrs=[7, 9]
)
info_json = json.loads(info)
assert info_json["attest"]["type"] == "ATTEST_QUOTE"
assert type(signature) is bytes
pcr_log_json = json.loads(pcr_log)
assert pcr_log_json == []
assert certificate == ""
# TODO verify via openssl
# exported_data = self.fapi.export_key(path=sign_key)
# sign_key_public_pem = json.loads(exported_data)["pem_ext_public"].encode()
# public_key = serialization.load_pem_public_key(sign_key_public_pem)
# message = b"TODO"
# public_key.verify(signature, message, ec.ECDSA(hashes.SHA256()))
# signature via fapi
self.fapi.verify_quote(path=sign_key, signature=signature, quote_info=info)
def test_export_key(self, sign_key):
exported_data = self.fapi.export_key(path=sign_key)
assert type(exported_data) is str
json.loads(exported_data)
def test_delete_key(self):
profile_name = self.fapi.config.profile_name
key_path = f"/{profile_name}/HS/key_{random_uid()}"
self.fapi.create_key(path=key_path)
assert key_path in self.fapi.list()
self.fapi.delete(path=key_path)
assert key_path not in self.fapi.list()
def test_set_get_description(self, sign_key):
description = "Nobody expects the Spanish Inquisition!"
self.fapi.set_description(path=sign_key, description=description)
returned_description = self.fapi.get_description(path=sign_key)
assert description == returned_description
def test_get_empty_description(self, sign_key):
description = self.fapi.get_description(path=sign_key)
assert description == ""
def test_set_get_app_data(self, sign_key):
app_data = b"\x00\xDE\xCA\xFB\xAD\x00"
self.fapi.set_app_data(path=sign_key, app_data=app_data)
returned_app_data = self.fapi.get_app_data(path=sign_key)
assert app_data == returned_app_data
def test_get_no_app_data(self, sign_key):
app_data = self.fapi.get_app_data(path=sign_key)
assert app_data is None
def test_set_get_certificate(self, sign_key):
certificate = "<PEM-encoded certificate (but FAPI does not really check)>"
self.fapi.set_certificate(path=sign_key, certificate=certificate)
returned_certificate = self.fapi.get_certificate(path=sign_key)
assert certificate == returned_certificate
def test_get_empty_certificate(self, sign_key):
certificate = self.fapi.get_certificate(path=sign_key)
assert certificate == ""
def test_get_empty_platform_certificates_ok(self):
certificates = self.fapi.get_platform_certificates(no_cert_ok=True)
assert certificates == b""
def test_get_empty_platform_certificates_fail(self):
with pytest.raises(TSS2_Exception):
self.fapi.get_platform_certificates()
def test_pcr_read(self):
value, log = self.fapi.pcr_read(7)
assert value == b"\0" * 32
assert log == "[\n]"
def test_pcr_extend_read(self):
index = 16
value_old, _ = self.fapi.pcr_read(index)
data = b"\x11" * 100
log = '{"test":"myfile"}'
self.fapi.pcr_extend(index, data, log)
returned_value, returned_log = self.fapi.pcr_read(index)
assert returned_value == sha256(value_old + sha256(data))
assert '"test":"myfile"' in returned_log
def test_nv_write_read(self, nv_ordinary):
data = b"ABCDEFGHIJ" # 10 bytes as defined in fixture
self.fapi.nv_write(nv_ordinary, data)
returned_data, log = self.fapi.nv_read(nv_ordinary)
assert returned_data == data
assert log == ""
def test_nv_increment(self, nv_increment):
# TODO initial increment should not be necessary, check in with upstream
self.fapi.nv_increment(nv_increment)
data_before, log = self.fapi.nv_read(nv_increment)
assert len(data_before) == 8
assert log == ""
self.fapi.nv_increment(nv_increment)
data_after, log = self.fapi.nv_read(nv_increment)
assert len(data_after) == 8
assert log == ""
assert int.from_bytes(data_before, byteorder="big") + 1 == int.from_bytes(
data_after, byteorder="big"
)
def test_nv_pcr(self, nv_pcr):
value_old = b"\x00" * 32
data = b"\x11" * 100
log = '{"test":"myfile"}'
self.fapi.nv_extend(nv_pcr, data, log)
returned_value, returned_log = self.fapi.nv_read(nv_pcr)
assert returned_value == sha256(value_old + data)
assert '"test":"myfile"' in returned_log
def test_nv_set_bits(self, nv_bitfield):
bitfield = 0x0000DECAFBAD0000
self.fapi.nv_set_bits(nv_bitfield, bitfield)
returned_value, returned_log = self.fapi.nv_read(nv_bitfield)
assert returned_value == bitfield.to_bytes(8, byteorder="big")
assert returned_log == ""
def test_set_auth_callback(self, sign_key):
def callback(path, descr, user_data):
print(f"Callback: path={path}, descr={descr}, user_data={user_data}")
return user_data
profile_name = self.fapi.config.profile_name
key_path = f"/{profile_name}/HS/SRK/key_{random_uid()}"
self.fapi.create_key(path=key_path, auth_value=b"123456")
self.fapi.set_auth_callback(callback, user_data=b"123456")
self.fapi.sign(key_path, b"\x11" * 32)
self.fapi.change_auth(path=key_path, auth_value=b"ABCDEF")
self.fapi.set_auth_callback(callback, user_data=b"ABCDEF")
self.fapi.sign(key_path, b"\x22" * 32)
def test_unset_auth_callback(self, sign_key):
def callback(path, descr, user_data):
print(f"Callback: path={path}, descr={descr}, user_data={user_data}")
return user_data
profile_name = self.fapi.config.profile_name
key_path = f"/{profile_name}/HS/SRK/key_{random_uid()}"
self.fapi.create_key(path=key_path, auth_value=b"123456")
self.fapi.set_auth_callback(callback, user_data=b"123456")
self.fapi.sign(key_path, b"\x11" * 32)
self.fapi.change_auth(path=key_path, auth_value=None)
self.fapi.set_auth_callback(callback=None)
self.fapi.sign(key_path, b"\x22" * 32)
@pytest.mark.skipif(
not is_bug_fixed(fixed_in="3.2", backports=["2.4.7", "3.0.5", "3.1.1"]),
reason="tpm2-tss bug, see #2084",
)
def test_write_authorize_nv(self, esys):
# write CommandCode policy for sign key into nv index
nv_path = f"/nv/Owner/nv_policy_{random_uid()}"
policy = """
{
"description": "",
"policy": [
{
"type": "CommandCode",
"code": "sign"
}
]
}"""
policy_auth_nv_path = f"/policy/policy_{random_uid()}"
self.fapi.import_object(path=policy_auth_nv_path, import_data=policy)
self.fapi.create_nv(path=nv_path, size=34)
self.fapi.write_authorize_nv(nv_path, policy_auth_nv_path)
# create key with AuthorizeNV policy (which ties the above policy, stored in the nv index, to the key)
policy_auth_nv = f"""
{{
"description":"Description pol_authorize_nv",
"policy":[
{{
"type": "AuthorizeNV",
"nvPath": "{nv_path}",
}}
]
}}
"""
policy_path = f"/policy/policy_{random_uid()}"
self.fapi.import_object(path=policy_path, import_data=policy_auth_nv)
profile_name = self.fapi.config.profile_name
key_path = f"/{profile_name}/HS/SRK/key_{random_uid()}"
self.fapi.create_key(path=key_path, type_="sign", policy_path=policy_path)
# use key for signing: success
self.fapi.sign(path=key_path, digest=b"\x11" * 32)
# use key for quoting: fail
with pytest.raises(TSS2_Exception):
self.fapi.quote(path=key_path, pcrs=[7, 9])
@pytest.mark.skipif(
not is_bug_fixed(fixed_in="3.2", backports=["2.4.7", "3.0.5", "3.1.1"]),
reason="tpm2-tss bug, see #2084",
)
def test_authorize_policy(self, sign_key):
# create policy Authorize, which is satisfied via a signature by sign_key
policy_authorize_path = f"/policy/policy_{random_uid()}"
policy_authorize = f"""
{{
"description": "Description pol_authorize",
"policy": [
{{
"type": "Authorize",
"policyRef": [1, 2, 3, 4, 5],
"keyPath": "{sign_key}",
}}
]
}}
"""
self.fapi.import_object(
path=policy_authorize_path, import_data=policy_authorize
)
# create policy CommandCode
policy = """
{
"description": "",
"policy": [
{
"type": "CommandCode",
"code": "sign"
}
]
}"""
policy_path = f"/policy/policy_{random_uid()}"
self.fapi.import_object(path=policy_path, import_data=policy)
# create key which can only be used if policy Authorize is satisfied
profile_name = self.fapi.config.profile_name
key_path = f"/{profile_name}/HS/SRK/key_{random_uid()}"
self.fapi.create_key(
path=key_path, type_="sign", policy_path=policy_authorize_path
)
# try to use key without satisfying policy Authorize: fail
with pytest.raises(TSS2_Exception):
self.fapi.sign(path=key_path, digest=b"\x11" * 32)
# specify underlying policy CommandCode and use key: success
self.fapi.authorize_policy(
policy_path=policy_path,
key_path=sign_key,
policy_ref=b"\x01\x02\x03\x04\x05",
)
self.fapi.sign(path=key_path, digest=b"\x11" * 32)
# specify underlying policy CommandCode and use key: fail because policy CommandCode is not satisfied
self.fapi.authorize_policy(
policy_path=policy_path,
key_path=sign_key,
policy_ref=b"\x01\x02\x03\x04\x05",
)
with pytest.raises(TSS2_Exception):
self.fapi.quote(path=key_path, pcrs=[7, 9])
@pytest.mark.skipif(
not is_bug_fixed(fixed_in="3.2"), reason="tpm2-tss bug, see #2080"
)
def test_policy_signed(self, cryptography_key):
# create external signing key used by the signing authority external to the TPM
sign_key, sign_key_public_pem = cryptography_key
# create policy Signed, which is satisfied via a signature by sign_key
policy = f"""
{{
"description": "Description pol_signed",
"policy": [
{{
"type": "PolicySigned",
"publicKeyHint": "Test key hint",
"keyPEM": "{sign_key_public_pem}",
}}
]
}}
"""
policy_path = f"/policy/policy_{random_uid()}"
self.fapi.import_object(path=policy_path, import_data=policy)
# create key which can only be used if policy Signed is satisfied
profile_name = self.fapi.config.profile_name
key_path = f"/{profile_name}/HS/SRK/key_{random_uid()}"
self.fapi.create_key(path=key_path, type_="sign", policy_path=policy_path)
# try to use key without satisfying policy Signed: fail
with pytest.raises(TSS2_Exception):
self.fapi.sign(path=key_path, digest=b"\x11" * 32)
def sign_callback(
path,
description,
public_key,
public_key_hint,
hash_alg,
data_to_sign,
user_data,
):
assert key_path.endswith(path)
assert description == "PolicySigned"
assert public_key == sign_key_public_pem
assert public_key_hint == "Test key hint"
assert hash_alg == lib.TPM2_ALG_SHA256
assert user_data == b"123456"
# signing authority signs external to TPM (via openssl) to authorize usage of key (policy Signed)
return sign_key.sign(data_to_sign, ec.ECDSA(hashes.SHA256()))
# set signing callback, will be called if policy Signed is to be satisfied
self.fapi.set_sign_callback(callback=sign_callback, user_data=b"123456")
# use key for signing: success
self.fapi.sign(path=key_path, digest=b"\x11" * 32)
def test_policy_branched(self):
pcr_index = 15
pcr_data = b"ABCDEF"
pcr_digest, _ = self.fapi.pcr_read(index=pcr_index)
pcr_digest = sha256(pcr_digest + sha256(pcr_data))
# create policy Signed, which is satisfied via a signature by sign_key
policy = f"""
{{
"description": "Read, Password for write",
"policy": [
{{
"type": "PolicyOR",
"branches": [
{{
"name": "Read",
"description": "des",
"policy": [
{{
"type": "CommandCode",
"code": "NV_READ"
}}
]
}},
{{
"name": "Write",
"description": "dgf",
"policy": [
{{
"type": "CommandCode",
"code": "NV_WRITE"
}},
{{
"type": "PolicyPCR",
"pcrs":[
{{
"pcr": {pcr_index},
"hashAlg": "TPM2_ALG_SHA256",
"digest": "{binascii.hexlify(pcr_digest).decode()}"
}}
]
}}
]
}}
]
}}
]
}}
"""
policy_path = f"/policy/policy_{random_uid()}"
self.fapi.import_object(path=policy_path, import_data=policy)
# create key which can only be used if policy Signed is satisfied
nv_path = f"/nv/Owner/nv_{random_uid()}"
self.fapi.create_nv(path=nv_path, size=11, policy_path=policy_path)
def branch_callback(path, description, branch_names, user_data):
assert path == nv_path
assert description == "PolicyOR"
assert branch_names == ["Read", "Write"]
assert user_data == b"123456"
            return policy_choice(branch_names)
# set branch callback, will be called if the nv index is accessed
self.fapi.set_branch_callback(callback=branch_callback, user_data=b"123456")
# at first, we will choose the 'Write' branch
        policy_choice = lambda options: options.index("Write")
# write to nv index: fail
with pytest.raises(TSS2_Exception):
self.fapi.nv_write(path=nv_path, data="Hello World")
# satisfy policy PCR (and thus policy OR)
self.fapi.pcr_extend(index=pcr_index, data=pcr_data)
# write to nv index: success
self.fapi.nv_write(path=nv_path, data="Hello World")
# extend PCR so policy PCR cannot be satisfied anymore
self.fapi.pcr_extend(
index=pcr_index, data="nobody expects the spanish inquisition!"
)
# secondly, we will choose the 'Read' branch
        policy_choice = lambda options: options.index("Read")
# use the 'Read' branch (satisfied via policy CommandCode)
nv_data, _ = self.fapi.nv_read(nv_path)
assert nv_data == b"Hello World"
        policy_choice = None
# thirdly, we set different branch callback function (here lambda) and read again
self.fapi.set_branch_callback(
callback=lambda _path, _description, branch_names, _user_data: branch_names.index(
"Read"
)
)
nv_data, _ = self.fapi.nv_read(nv_path)
assert nv_data == b"Hello World"
# clean up
self.fapi.delete(path=nv_path)
@pytest.mark.skipif(
not is_bug_fixed(fixed_in="3.2", backports=["2.4.7", "3.0.5", "3.1.1"]),
reason="tpm2-tss bug, see #2089",
)
def test_policy_action(self):
# create policy Action, which is satisfied via the callback
policy = f"""
{{
"description":"The description",
"policy":[
{{
"type": "POLICYACTION",
"action": "myaction"
}}
]
}}
"""
policy_path = f"/policy/policy_{random_uid()}"
self.fapi.import_object(path=policy_path, import_data=policy)
# create key which can only be used if policy Action is satisfied
profile_name = self.fapi.config.profile_name
key_path = f"/{profile_name}/HS/SRK/key_{random_uid()}"
self.fapi.create_key(path=key_path, type_="sign", policy_path=policy_path)
# try to use key without satisfying policy Action: fail
with pytest.raises(TSS2_Exception):
self.fapi.sign(path=key_path, digest=b"\x11" * 32)
def policy_action_callback_error(path, action, user_data) -> None:
assert f"/{path}" == key_path
assert action == "myaction"
assert user_data == b"123456"
raise ValueError("Policy Action: Invalid action.")
# set policy Action callback, will be called if policy Action is to be satisfied
self.fapi.set_policy_action_callback(
callback=policy_action_callback_error, user_data=b"123456"
)
# try to use key with policy Action that raises an exception: fail
with pytest.raises(TSS2_Exception):
self.fapi.sign(path=key_path, digest=b"\x11" * 32)
# set policy Action callback to lambda, returning success
self.fapi.set_policy_action_callback(callback=lambda *_: None)
# use key for signing: success
self.fapi.sign(path=key_path, digest=b"\x11" * 32)
@pytest.mark.usefixtures("init_fapi_ecc")
class TestFapiECC(Common):
def test_sign(self, sign_key):
# create signature
message = b"Hello World"
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(message)
digest = digest.finalize()
signature, key_public_pem, cert_pem = self.fapi.sign(
path=sign_key, digest=digest
)
assert type(signature) == bytes
assert type(key_public_pem) == bytes
assert type(cert_pem) == bytes
# verify via fapi
self.fapi.verify_signature(sign_key, digest, signature)
# verify via openssl
public_key = serialization.load_pem_public_key(
key_public_pem, backend=default_backend()
)
public_key.verify(signature, message, ec.ECDSA(hashes.SHA256()))
def test_get_tpm_blobs(self, sign_key):
tpm_2b_public, tpm_2b_private, policy = self.fapi.get_tpm_blobs(path=sign_key)
assert tpm_2b_public.size == 0x56
assert tpm_2b_public.publicArea.type == lib.TPM2_ALG_ECC
assert tpm_2b_public.publicArea.nameAlg == lib.TPM2_ALG_SHA256
assert (
tpm_2b_public.publicArea.objectAttributes
== lib.TPMA_OBJECT_SIGN_ENCRYPT
| lib.TPMA_OBJECT_USERWITHAUTH
| lib.TPMA_OBJECT_SENSITIVEDATAORIGIN
)
assert tpm_2b_public.publicArea.authPolicy.size == 0
assert (
tpm_2b_public.publicArea.parameters.eccDetail.symmetric.algorithm
== lib.TPM2_ALG_NULL
)
assert (
tpm_2b_public.publicArea.parameters.eccDetail.scheme.scheme
== lib.TPM2_ALG_NULL
)
assert (
tpm_2b_public.publicArea.parameters.eccDetail.curveID
== lib.TPM2_ECC_NIST_P256
)
assert (
tpm_2b_public.publicArea.parameters.eccDetail.kdf.scheme
== lib.TPM2_ALG_NULL
)
assert tpm_2b_private.size == 0x7E
assert policy == ""
@pytest.mark.usefixtures("init_fapi_rsa")
class TestFapiRSA(Common):
def test_sign(self, sign_key):
# create signature
message = b"Hello World"
digest = hashes.Hash(hashes.SHA256(), backend=default_backend())
digest.update(message)
digest = digest.finalize()
signature, key_public_pem, cert_pem = self.fapi.sign(
path=sign_key, digest=digest
)
assert type(signature) == bytes
assert type(key_public_pem) == bytes
assert type(cert_pem) == bytes
# verify via fapi
self.fapi.verify_signature(sign_key, digest, signature)
# verify via openssl
public_key = serialization.load_pem_public_key(
key_public_pem, backend=default_backend()
)
public_key.verify(
signature,
message,
PSS(mgf=padding.MGF1(hashes.SHA256()), salt_length=32),
hashes.SHA256(),
)
def test_get_tpm_blobs(self, sign_key):
tpm_2b_public, tpm_2b_private, policy = self.fapi.get_tpm_blobs(path=sign_key)
assert tpm_2b_public.size == 0x116
assert tpm_2b_public.publicArea.type == lib.TPM2_ALG_RSA
assert tpm_2b_public.publicArea.nameAlg == lib.TPM2_ALG_SHA256
assert (
tpm_2b_public.publicArea.objectAttributes
== lib.TPMA_OBJECT_SIGN_ENCRYPT
| lib.TPMA_OBJECT_USERWITHAUTH
| lib.TPMA_OBJECT_SENSITIVEDATAORIGIN
)
assert tpm_2b_public.publicArea.authPolicy.size == 0
assert (
tpm_2b_public.publicArea.parameters.rsaDetail.symmetric.algorithm
== lib.TPM2_ALG_NULL
)
assert (
tpm_2b_public.publicArea.parameters.rsaDetail.scheme.scheme
== lib.TPM2_ALG_NULL
)
assert tpm_2b_public.publicArea.parameters.rsaDetail.keyBits == 2048
assert tpm_2b_public.publicArea.parameters.rsaDetail.exponent == 0
assert tpm_2b_private.size == 0xDE
assert policy == ""
@pytest.mark.skipif(
not is_bug_fixed(fixed_in="3.2", backports=["2.4.7", "3.0.5", "3.1.1"]),
reason="tpm2-tss bug, see #2092",
)
def test_encrypt_decrypt(self, decrypt_key):
plaintext = b"Hello World!"
ciphertext = self.fapi.encrypt(decrypt_key, plaintext)
assert isinstance(ciphertext, bytes)
decrypted = self.fapi.decrypt(decrypt_key, ciphertext)
assert decrypted == plaintext
if __name__ == "__main__":
sys.exit(pytest.main(sys.argv))
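# Hypothetical invocation sketch (the path and -k expression are assumptions,
# not part of this file):
#
#   python3 -m pytest test/test_fapi.py -k "TestFapiECC and test_sign"
#
# would run only the ECC signing test against the configured TPM simulator.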
| 35.982059
| 110
| 0.624747
|
53e6839d0e121b7f9b6f0927874f3de5ca965984
| 2,554
|
py
|
Python
|
tests/test_cli.py
|
91nunocosta/cookiecutterizer
|
fb450b3878a4d0af3400258e2b736dae2e4b65b0
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
91nunocosta/cookiecutterizer
|
fb450b3878a4d0af3400258e2b736dae2e4b65b0
|
[
"MIT"
] | null | null | null |
tests/test_cli.py
|
91nunocosta/cookiecutterizer
|
fb450b3878a4d0af3400258e2b736dae2e4b65b0
|
[
"MIT"
] | null | null | null |
"""Test Command Line Interface."""
import json
import textwrap
from pathlib import Path
from click.testing import CliRunner
from cookiecutterizer.cli import cookiecutterize
from tests.helpers import create_text_file, load_text_file
def test_cookiecutterize(tmp_path):
"""Test cookiecutterize command."""
runner = CliRunner()
with runner.isolated_filesystem(temp_dir=tmp_path) as test_directory_path:
test_directory = Path(test_directory_path)
project = test_directory / "project"
destination = test_directory / "destination"
project.mkdir()
destination.mkdir()
substitutions = create_text_file(
test_directory,
"substitutions.json",
json.dumps(
{
"cookiecutterizer": "cookiecutterizer",
}
),
)
create_text_file(
project,
"pyproject.toml",
"""
[tool.poetry]
name = "cookiecutterizer"
""",
)
create_text_file(
project / "cookiecutterizer",
"__init__.py",
"""
__version__ = 0.1.0
""",
)
create_text_file(
project / "tests",
"test_cookiecutterizer.py",
"""
from cookiecutterizer import __version__
def test_version():
assert __version__ == "0.1.0"
""",
)
result = runner.invoke(
cookiecutterize,
[
"--substitutions",
str(substitutions),
"--destination",
str(destination),
str(project),
],
)
assert result.exit_code == 0
assert result.output == ""
target_project = destination / "project"
assert target_project.exists()
assert load_text_file(target_project, "pyproject.toml") == textwrap.dedent(
"""
[tool.poetry]
name = "cookiecutterizer"
"""
)
assert load_text_file(
target_project / "cookiecutterizer", "__init__.py"
) == textwrap.dedent(
"""
__version__ = 0.1.0
"""
)
assert load_text_file(
target_project / "tests", "test_cookiecutterizer.py"
) == textwrap.dedent(
"""
from cookiecutterizer import __version__
def test_version():
assert __version__ == "0.1.0"
"""
)
| 24.09434
| 79
| 0.523884
|
8f3d509c21b86f44334315efaaeff8a048c71398
| 3,508
|
py
|
Python
|
Projects/3_Adversarial Search/agents/40_ab_negascout.py
|
rpaech/udacity-aind
|
58bafa6758465c03cc0723f27e781e9d8336c871
|
[
"MIT"
] | null | null | null |
Projects/3_Adversarial Search/agents/40_ab_negascout.py
|
rpaech/udacity-aind
|
58bafa6758465c03cc0723f27e781e9d8336c871
|
[
"MIT"
] | null | null | null |
Projects/3_Adversarial Search/agents/40_ab_negascout.py
|
rpaech/udacity-aind
|
58bafa6758465c03cc0723f27e781e9d8336c871
|
[
"MIT"
] | null | null | null |
import random
from sample_players import DataPlayer
from isolation.isolation import Action, Isolation
from typing import Optional, Tuple, Dict
def liberty_difference(state: Isolation) -> int:
def liberty_count(player_id: int) -> int:
return len(state.liberties(state.locs[player_id]))
this_player = state.player()
next_player = 1 - this_player
return liberty_count(this_player) - liberty_count(next_player)
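# Hypothetical worked example (the numbers are illustrative, not from this
# file): if the player to move has 6 liberties and the opponent has 4, the
# heuristic scores the state +2, so larger values favour the side to move.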
def log_stat(state: Isolation, search_depth: int, max_search_depth: int,
best_score: int, action: Action) -> Dict:
return {'round': state.ply_count,
'depth': search_depth,
'max depth': max_search_depth,
'score': best_score,
'action': action}
class CustomPlayer(DataPlayer):
def get_action(self, state: Isolation) -> None:
if state.ply_count < 2:
self.queue.put(random.choice(state.actions()))
else:
max_search_depth = len(state.liberties(None)) // 2
for search_depth in range(max_search_depth):
alpha = float("-inf")
beta = float("inf")
best_score = float("-inf")
best_action = None
for action in state.actions():
score = self.scout(state.result(action), search_depth,
-beta, -max(alpha, best_score))
if best_score < score or best_action is None:
best_score = score
best_action = action
self.queue.put(best_action)
self.context = log_stat(state, search_depth, max_search_depth,
best_score, best_action)
def scout(self, state: Isolation, search_depth: int,
alpha: float, beta: float) -> float:
if state.terminal_test():
result = state.utility(state.player())
elif search_depth == 0:
result = liberty_difference(state)
else:
            best_score = float('-inf')
            # Negascout: the first child is searched with the full window; later
            # children are probed with a null window (adaptive_beta) and re-searched
            # with the full window on a fail-high (skipped at shallow depths).
            adaptive_beta = beta
for action in state.actions():
new_state = state.result(action)
score = self.search(new_state, search_depth - 1,
-adaptive_beta, -max(alpha, best_score))
if best_score < score:
if adaptive_beta == beta or search_depth <= 2:
best_score = score
else:
best_score = self.scout(new_state, search_depth,
-beta, -score)
if best_score >= beta:
break
adaptive_beta = max(alpha, best_score) + 1
result = best_score
return -result
def search(self, state: Isolation, search_depth: int,
alpha: float, beta: float) -> float:
if state.terminal_test():
result = state.utility(state.player())
elif search_depth == 0:
result = liberty_difference(state)
else:
score = float('-inf')
for action in state.actions():
score = max(score,
self.search(state.result(action), search_depth - 1,
-beta, -max(alpha, score)))
if score >= beta:
break
result = score
return -result
| 31.321429
| 79
| 0.525371
|
2e4cf36b60b1f37901596e7e0b9a2842671afd7a
| 379
|
py
|
Python
|
Array/Medium_JumpGame_55_WYH.py
|
LinkWoong/LC-Solutions
|
98b2ce55f05f6acb672f20519f79ca9f4248961d
|
[
"MIT"
] | 4
|
2019-05-15T10:40:34.000Z
|
2020-07-27T03:05:39.000Z
|
Array/Medium_JumpGame_55_WYH.py
|
LinkWoong/LC-Solutions
|
98b2ce55f05f6acb672f20519f79ca9f4248961d
|
[
"MIT"
] | 2
|
2019-08-20T15:34:33.000Z
|
2019-09-20T19:41:27.000Z
|
Array/Medium_JumpGame_55_WYH.py
|
LinkWoong/LC-Solutions
|
98b2ce55f05f6acb672f20519f79ca9f4248961d
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# Algorithm idea:
# Traverse the whole array from left to right.
# Track the furthest reachable index so far (max_reach) and check that it keeps
# up with the current index i.
# If max_reach falls behind i, return False; once it reaches or passes the last
# index, return True.
class Solution:
def canJump(self, nums):
max_reach, n = 0, len(nums)
for i, x in enumerate(nums):
if max_reach < i: return False
if max_reach >= n - 1: return True
max_reach = max(max_reach, i + x)
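# Hypothetical sanity checks (example inputs are illustrative, not from the
# original file):
if __name__ == "__main__":
    assert Solution().canJump([2, 3, 1, 1, 4])       # 0 -> 1 -> 4
    assert not Solution().canJump([3, 2, 1, 0, 4])   # stuck at index 3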
| 19.947368
| 46
| 0.620053
|
76f662228dcbe2be3bbbf288f5401e2235157f9a
| 689
|
py
|
Python
|
modules/ckanext-ytp_request/ckanext/ytp_request/logic/auth/create.py
|
eetumans/opendata
|
061f58550bcb820016a764cca4763ed0a5f627fe
|
[
"MIT"
] | 16
|
2018-07-12T14:26:02.000Z
|
2022-02-24T12:10:00.000Z
|
modules/ckanext-ytp_request/ckanext/ytp_request/logic/auth/create.py
|
eetumans/opendata
|
061f58550bcb820016a764cca4763ed0a5f627fe
|
[
"MIT"
] | 751
|
2017-09-28T07:47:50.000Z
|
2022-03-31T12:08:25.000Z
|
modules/ckanext-ytp_request/ckanext/ytp_request/logic/auth/create.py
|
vrk-kpa/opendata-ckan
|
8936e2d9e700b9e5534fe2a51eedc2d1ede8c10b
|
[
"MIT"
] | 6
|
2017-10-31T07:47:07.000Z
|
2021-10-06T07:09:07.000Z
|
from ckanext.ytp_request.helper import get_user_member
from ckan import authz
import logging
from ckan.common import _
log = logging.getLogger(__name__)
def member_request_create(context, data_dict):
    """ Only allow logged-in users. """
if not authz.auth_is_loggedin_user():
return {'success': False, 'msg': _('User is not logged in')}
organization_id = None if not data_dict else data_dict.get(
'organization_id', None)
if organization_id:
member = get_user_member(organization_id)
if member:
return {'success': False, 'msg': _('The user has already a pending request or an active membership')}
return {'success': True}
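# Hypothetical illustration (the inputs are assumptions, not from this module):
# an anonymous caller gets {'success': False, ...}; a logged-in user who already
# has a pending request or membership for data_dict['organization_id'] also gets
# {'success': False, ...}; otherwise the check returns {'success': True}.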
| 34.45
| 113
| 0.701016
|
5fb054455e669692a7c42e892a7c29942b31ede3
| 7,036
|
py
|
Python
|
Lib/site-packages/qutebrowser/commands/runners.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | null | null | null |
Lib/site-packages/qutebrowser/commands/runners.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
Lib/site-packages/qutebrowser/commands/runners.py
|
fochoao/cpython
|
3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9
|
[
"bzip2-1.0.6",
"0BSD"
] | null | null | null |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Module containing command managers (SearchRunner and CommandRunner)."""
import traceback
import re
import contextlib
from typing import TYPE_CHECKING, Callable, Dict, Iterator, Mapping, MutableMapping
from PyQt5.QtCore import pyqtSlot, QUrl, QObject
from qutebrowser.api import cmdutils
from qutebrowser.commands import cmdexc, parser
from qutebrowser.utils import message, objreg, qtutils, usertypes, utils
from qutebrowser.keyinput import macros, modeman
if TYPE_CHECKING:
from qutebrowser.mainwindow import tabbedbrowser
_ReplacementFunction = Callable[['tabbedbrowser.TabbedBrowser'], str]
last_command = {}
def _url(tabbed_browser):
"""Convenience method to get the current url."""
try:
return tabbed_browser.current_url()
except qtutils.QtValueError as e:
msg = "Current URL is invalid"
if e.reason:
msg += " ({})".format(e.reason)
msg += "!"
raise cmdutils.CommandError(msg)
def _init_variable_replacements() -> Mapping[str, _ReplacementFunction]:
"""Return a dict from variable replacements to fns processing them."""
replacements: Dict[str, _ReplacementFunction] = {
'url': lambda tb: _url(tb).toString(
QUrl.FullyEncoded | QUrl.RemovePassword),
'url:pretty': lambda tb: _url(tb).toString(
QUrl.DecodeReserved | QUrl.RemovePassword),
'url:domain': lambda tb: "{}://{}{}".format(
_url(tb).scheme(), _url(tb).host(),
":" + str(_url(tb).port()) if _url(tb).port() != -1 else ""),
'url:auth': lambda tb: "{}:{}@".format(
_url(tb).userName(),
_url(tb).password()) if _url(tb).userName() else "",
'url:scheme': lambda tb: _url(tb).scheme(),
'url:username': lambda tb: _url(tb).userName(),
'url:password': lambda tb: _url(tb).password(),
'url:host': lambda tb: _url(tb).host(),
'url:port': lambda tb: str(
_url(tb).port()) if _url(tb).port() != -1 else "",
'url:path': lambda tb: _url(tb).path(),
'url:query': lambda tb: _url(tb).query(),
'title': lambda tb: tb.widget.page_title(tb.widget.currentIndex()),
'clipboard': lambda _: utils.get_clipboard(),
'primary': lambda _: utils.get_clipboard(selection=True),
}
for key in list(replacements):
modified_key = '{' + key + '}'
# x = modified_key is to avoid binding x as a closure
replacements[modified_key] = (
lambda _, x=modified_key: x) # type: ignore[misc]
return replacements
VARIABLE_REPLACEMENTS = _init_variable_replacements()
# A regex matching all variable replacements
VARIABLE_REPLACEMENT_PATTERN = re.compile(
"{(?P<var>" + "|".join(VARIABLE_REPLACEMENTS.keys()) + ")}")
def replace_variables(win_id, arglist):
"""Utility function to replace variables like {url} in a list of args."""
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
values: MutableMapping[str, str] = {}
args = []
def repl_cb(matchobj):
"""Return replacement for given match."""
var = matchobj.group("var")
if var not in values:
values[var] = VARIABLE_REPLACEMENTS[var](tabbed_browser)
return values[var]
try:
for arg in arglist:
# using re.sub with callback function replaces all variables in a
# single pass and avoids expansion of nested variables (e.g.
# "{url}" from clipboard is not expanded)
args.append(VARIABLE_REPLACEMENT_PATTERN.sub(repl_cb, arg))
except utils.ClipboardError as e:
raise cmdutils.CommandError(e)
return args
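# Hypothetical illustration (the window id and URL are assumptions, not from
# this module): for a tab currently showing https://example.org/index.html,
#
#   replace_variables(0, ['open', '-t', '{url:domain}/docs'])
#
# would expand to ['open', '-t', 'https://example.org/docs'].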
class AbstractCommandRunner(QObject):
"""Abstract base class for CommandRunner."""
def run(self, text, count=None, *, safely=False):
raise NotImplementedError
@pyqtSlot(str, int)
@pyqtSlot(str)
def run_safely(self, text, count=None):
"""Run a command and display exceptions in the statusbar."""
self.run(text, count, safely=True)
class CommandRunner(AbstractCommandRunner):
"""Parse and run qutebrowser commandline commands.
Attributes:
_win_id: The window this CommandRunner is associated with.
"""
def __init__(self, win_id, partial_match=False, parent=None):
super().__init__(parent)
self._parser = parser.CommandParser(partial_match=partial_match)
self._win_id = win_id
@contextlib.contextmanager
def _handle_error(self, safely: bool) -> Iterator[None]:
"""Show exceptions as errors if safely=True is given."""
try:
yield
except cmdexc.Error as e:
if safely:
message.error(str(e), stack=traceback.format_exc())
else:
raise
def run(self, text, count=None, *, safely=False):
"""Parse a command from a line of text and run it.
Args:
text: The text to parse.
count: The count to pass to the command.
safely: Show CmdError exceptions as messages.
"""
record_last_command = True
record_macro = True
mode_manager = modeman.instance(self._win_id)
cur_mode = mode_manager.mode
parsed = None
with self._handle_error(safely):
parsed = self._parser.parse_all(text)
if parsed is None:
return # type: ignore[unreachable]
for result in parsed:
with self._handle_error(safely):
if result.cmd.no_replace_variables:
args = result.args
else:
args = replace_variables(self._win_id, result.args)
result.cmd.run(self._win_id, args, count=count)
if result.cmdline[0] == 'repeat-command':
record_last_command = False
if result.cmdline[0] in ['macro-record', 'macro-run', 'set-cmd-text']:
record_macro = False
if record_last_command:
last_command[cur_mode] = (text, count)
if record_macro and cur_mode == usertypes.KeyMode.normal:
macros.macro_recorder.record_command(text, count)
| 35.535354
| 83
| 0.639568
|
84a0fbe31d9f60b314683e4f6aa6df933e0b1631
| 1,618
|
py
|
Python
|
rltools/domains/randomwalk.py
|
omtinez/rltools
|
ce60b906fbe109e96f7866ae98144c9c9988650b
|
[
"MIT"
] | null | null | null |
rltools/domains/randomwalk.py
|
omtinez/rltools
|
ce60b906fbe109e96f7866ae98144c9c9988650b
|
[
"MIT"
] | null | null | null |
rltools/domains/randomwalk.py
|
omtinez/rltools
|
ce60b906fbe109e96f7866ae98144c9c9988650b
|
[
"MIT"
] | null | null | null |
import random
class RandomWalk(object):
ACTIONS = [0]
def __init__(self):
self.num_states = 7
self.current_state = 3
def take_action(self, action):
if action > 0:
raise ValueError('Only action "0" supported in this domain')
        # States 0 and num_states - 1 are the two terminal ends of the walk.
        if self.current_state == 0 or self.current_state == self.num_states - 1:
raise RuntimeError('Terminal state reached')
roll = random.random()
if roll > 0.5:
self.current_state += 1
else:
self.current_state -= 1
reward = 1 if self.current_state == self.num_states - 1 else 0
return action, reward, self.current_state
def play(strategy, iterations=100, converge=False):
strategy.valid_actions = RandomWalk.ACTIONS
mydomain = RandomWalk()
strategy.fit((0, 0, 0))
count = 0
while count < iterations:
action = strategy.policy(mydomain.current_state)
try:
a, r, s = mydomain.take_action(action)
strategy.fit((s, a, r))
except RuntimeError:
count += 1
mydomain.current_state = 3
strategy.init_episode()
strategy.fit((0, 0, 0))
if converge:
strategy.converge(max_time=60)
true_prob = [1./6, 1./3, 1./2, 2./3, 5./6]
print('Estimated probabilities:', ['%.5f' % strategy.learner.val(i, 0) for i in range(1,6)])
print('Expected probabilities:', ['%.5f' % p for p in true_prob])
rmse = sum([(strategy.learner.val(i, 0) - true_prob[i-1]) ** 2 for i in range(1,6)]) / len(true_prob)
print('RMSE:', rmse)
return rmse
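# Hypothetical usage sketch (the strategy object is an assumption; any object
# exposing valid_actions, fit, policy, init_episode, converge and learner.val
# as used above would work):
#
#   rmse = play(my_td_strategy, iterations=200, converge=True)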
| 29.418182
| 105
| 0.590235
|
b45b37e4f82d040620a41ac216478942b28d2be9
| 3,377
|
py
|
Python
|
kornia/losses/psnr.py
|
ChristophReich1996/kornia
|
35f955b46e8015da1cb9faa28c6943ec2b09cc2a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kornia/losses/psnr.py
|
ChristophReich1996/kornia
|
35f955b46e8015da1cb9faa28c6943ec2b09cc2a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kornia/losses/psnr.py
|
ChristophReich1996/kornia
|
35f955b46e8015da1cb9faa28c6943ec2b09cc2a
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
import torch
import torch.nn as nn
from torch.nn.functional import mse_loss as mse
def psnr(input: torch.Tensor, target: torch.Tensor, max_val: float) -> torch.Tensor:
r"""Creates a function that calculates the PSNR between 2 images.
    PSNR is Peak Signal-to-Noise Ratio; it is computed from the mean squared error as shown below.
Given an m x n image, the PSNR is:
.. math::
\text{PSNR} = 10 \log_{10} \bigg(\frac{\text{MAX}_I^2}{MSE(I,T)}\bigg)
where
.. math::
\text{MSE}(I,T) = \frac{1}{mn}\sum_{i=0}^{m-1}\sum_{j=0}^{n-1} [I(i,j) - T(i,j)]^2
and :math:`\text{MAX}_I` is the maximum possible input value
(e.g for floating point images :math:`\text{MAX}_I=1`).
Args:
input (torch.Tensor): the input image with arbitrary shape :math:`(*)`.
        target (torch.Tensor): the target image with arbitrary shape :math:`(*)`.
max_val (float): The maximum value in the input tensor.
Return:
torch.Tensor: the computed loss as a scalar.
Examples:
>>> ones = torch.ones(1)
>>> psnr(ones, 1.2 * ones, 2.) # 10 * log(4/((1.2-1)**2)) / log(10)
tensor(20.0000)
Reference:
https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio#Definition
"""
    if not isinstance(input, torch.Tensor):
        raise TypeError(f"Expected torch.Tensor but got {type(input)}.")
    if not isinstance(target, torch.Tensor):
        raise TypeError(f"Expected torch.Tensor but got {type(target)}.")
if input.shape != target.shape:
raise TypeError(f"Expected tensors of equal shapes, but got {input.shape} and {target.shape}")
return 10. * torch.log10(max_val**2 / mse(input, target, reduction='mean'))
def psnr_loss(input: torch.Tensor, target: torch.Tensor, max_val: float) -> torch.Tensor:
r"""Function that computes the PSNR loss.
The loss is computed as follows:
.. math::
\text{loss} = -\text{psnr(x, y)}
    See :meth:`~kornia.losses.psnr` for details about PSNR.
Args:
input (torch.Tensor): the input image with shape :math:`(*)`.
        target (torch.Tensor): the target image with shape :math:`(*)`.
max_val (float): The maximum value in the input tensor.
Return:
torch.Tensor: the computed loss as a scalar.
Examples:
>>> ones = torch.ones(1)
>>> psnr_loss(ones, 1.2 * ones, 2.) # 10 * log(4/((1.2-1)**2)) / log(10)
tensor(-20.0000)
"""
return -1. * psnr(input, target, max_val)
class PSNRLoss(nn.Module):
r"""Creates a criterion that calculates the PSNR loss.
The loss is computed as follows:
.. math::
\text{loss} = -\text{psnr(x, y)}
    See :meth:`~kornia.losses.psnr` for details about PSNR.
Shape:
- Input: arbitrary dimensional tensor :math:`(*)`.
- Target: arbitrary dimensional tensor :math:`(*)` same shape as input.
- Output: a scalar.
Examples:
>>> ones = torch.ones(1)
>>> criterion = PSNRLoss(2.)
>>> criterion(ones, 1.2 * ones) # 10 * log(4/((1.2-1)**2)) / log(10)
tensor(-20.0000)
"""
def __init__(self, max_val: float) -> None:
super(PSNRLoss, self).__init__()
self.max_val: float = max_val
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
return psnr_loss(input, target, self.max_val)
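# Hypothetical sanity check (the tensor shapes and values are illustrative
# assumptions, not part of this module):
if __name__ == "__main__":
    pred = torch.full((1, 3, 8, 8), 0.5)
    target = torch.full((1, 3, 8, 8), 0.6)
    # MSE is 0.01, so the PSNR is 10 * log10(1 / 0.01) = 20 dB and the loss is -20.
    assert torch.isclose(psnr(pred, target, 1.), torch.tensor(20.), atol=1e-4)
    assert torch.isclose(PSNRLoss(1.)(pred, target), torch.tensor(-20.), atol=1e-4)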
| 30.7
| 102
| 0.609417
|
cdc725c48f36c163104863e98d9da3e82b5cfd83
| 41
|
py
|
Python
|
flowbee/activities/__init__.py
|
blitzagency/flowbee
|
35e8977827eda34474aa5edc95bbfec86c61b33a
|
[
"MIT"
] | null | null | null |
flowbee/activities/__init__.py
|
blitzagency/flowbee
|
35e8977827eda34474aa5edc95bbfec86c61b33a
|
[
"MIT"
] | null | null | null |
flowbee/activities/__init__.py
|
blitzagency/flowbee
|
35e8977827eda34474aa5edc95bbfec86c61b33a
|
[
"MIT"
] | null | null | null |
from .base import (Activities, Workflow)
| 20.5
| 40
| 0.780488
|
d8af71eb07aaaafa426f2c57304d5c7a4f4be85f
| 7,414
|
py
|
Python
|
tests/unit/modules/test_rpm.py
|
johnskopis/salt
|
86adb6b0fe40230b8be4c74229e897a7a08f81a6
|
[
"Apache-2.0"
] | 1
|
2016-08-21T21:19:12.000Z
|
2016-08-21T21:19:12.000Z
|
tests/unit/modules/test_rpm.py
|
johnskopis/salt
|
86adb6b0fe40230b8be4c74229e897a7a08f81a6
|
[
"Apache-2.0"
] | 2
|
2019-03-06T20:43:44.000Z
|
2019-04-10T23:56:02.000Z
|
tests/unit/modules/test_rpm.py
|
johnskopis/salt
|
86adb6b0fe40230b8be4c74229e897a7a08f81a6
|
[
"Apache-2.0"
] | 1
|
2020-04-10T20:18:40.000Z
|
2020-04-10T20:18:40.000Z
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.rpm as rpm
def _called_with_root(mock):
cmd = ' '.join(mock.call_args[0][0])
return cmd.startswith('rpm --root /')
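# Hypothetical illustration (the argument lists are assumptions, not from these
# tests): a mock last called as mock(['rpm', '--root', '/', '-qa']) makes
# _called_with_root return True, while mock(['rpm', '-qa']) makes it return False.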
@skipIf(NO_MOCK, NO_MOCK_REASON)
class RpmTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.rpm
'''
def setup_loader_modules(self):
return {rpm: {'rpm': MagicMock(return_value=MagicMock)}}
# 'list_pkgs' function tests: 2
def test_list_pkgs(self):
'''
        Test if it lists the packages currently installed in a dict
'''
mock = MagicMock(return_value='')
with patch.dict(rpm.__salt__, {'cmd.run': mock}):
self.assertDictEqual(rpm.list_pkgs(), {})
self.assertFalse(_called_with_root(mock))
def test_list_pkgs_root(self):
'''
        Test if it lists the packages currently installed in a dict,
called with root parameter
'''
mock = MagicMock(return_value='')
with patch.dict(rpm.__salt__, {'cmd.run': mock}):
rpm.list_pkgs(root='/')
self.assertTrue(_called_with_root(mock))
# 'verify' function tests: 2
def test_verify(self):
'''
Test if it runs an rpm -Va on a system, and returns the
results in a dict
'''
mock = MagicMock(return_value={'stdout': '',
'stderr': '',
'retcode': 0,
'pid': 12345})
with patch.dict(rpm.__salt__, {'cmd.run_all': mock}):
self.assertDictEqual(rpm.verify('httpd'), {})
self.assertFalse(_called_with_root(mock))
def test_verify_root(self):
'''
Test if it runs an rpm -Va on a system, and returns the
results in a dict, called with root parameter
'''
mock = MagicMock(return_value={'stdout': '',
'stderr': '',
'retcode': 0,
'pid': 12345})
with patch.dict(rpm.__salt__, {'cmd.run_all': mock}):
rpm.verify('httpd', root='/')
self.assertTrue(_called_with_root(mock))
# 'file_list' function tests: 2
def test_file_list(self):
'''
        Test if it lists the files that belong to a package.
'''
mock = MagicMock(return_value='')
with patch.dict(rpm.__salt__, {'cmd.run': mock}):
self.assertDictEqual(rpm.file_list('httpd'),
{'errors': [], 'files': []})
self.assertFalse(_called_with_root(mock))
def test_file_list_root(self):
'''
        Test if it lists the files that belong to a package, using the
root parameter.
'''
mock = MagicMock(return_value='')
with patch.dict(rpm.__salt__, {'cmd.run': mock}):
rpm.file_list('httpd', root='/')
self.assertTrue(_called_with_root(mock))
# 'file_dict' function tests: 2
def test_file_dict(self):
'''
        Test if it lists the files that belong to a package
'''
mock = MagicMock(return_value='')
with patch.dict(rpm.__salt__, {'cmd.run': mock}):
self.assertDictEqual(rpm.file_dict('httpd'),
{'errors': [], 'packages': {}})
self.assertFalse(_called_with_root(mock))
def test_file_dict_root(self):
'''
        Test if it lists the files that belong to a package
'''
mock = MagicMock(return_value='')
with patch.dict(rpm.__salt__, {'cmd.run': mock}):
rpm.file_dict('httpd', root='/')
self.assertTrue(_called_with_root(mock))
# 'owner' function tests: 1
def test_owner(self):
'''
        Test if it returns the name of the package that owns the file.
'''
self.assertEqual(rpm.owner(), '')
ret = 'file /usr/bin/salt-jenkins-build is not owned by any package'
mock = MagicMock(return_value=ret)
with patch.dict(rpm.__salt__, {'cmd.run_stdout': mock}):
self.assertEqual(rpm.owner('/usr/bin/salt-jenkins-build'), '')
self.assertFalse(_called_with_root(mock))
ret = {'/usr/bin/vim': 'vim-enhanced-7.4.160-1.e17.x86_64',
'/usr/bin/python': 'python-2.7.5-16.e17.x86_64'}
mock = MagicMock(side_effect=['python-2.7.5-16.e17.x86_64',
'vim-enhanced-7.4.160-1.e17.x86_64'])
with patch.dict(rpm.__salt__, {'cmd.run_stdout': mock}):
self.assertDictEqual(rpm.owner('/usr/bin/python', '/usr/bin/vim'),
ret)
self.assertFalse(_called_with_root(mock))
def test_owner_root(self):
'''
        Test if it returns the name of the package that owns the file,
using the parameter root.
'''
self.assertEqual(rpm.owner(), '')
ret = 'file /usr/bin/salt-jenkins-build is not owned by any package'
mock = MagicMock(return_value=ret)
with patch.dict(rpm.__salt__, {'cmd.run_stdout': mock}):
rpm.owner('/usr/bin/salt-jenkins-build', root='/')
self.assertTrue(_called_with_root(mock))
# 'checksum' function tests: 2
def test_checksum(self):
'''
        Test if checksum validates as expected
'''
ret = {
"file1.rpm": True,
"file2.rpm": False,
"file3.rpm": False,
}
mock = MagicMock(side_effect=[True, 0, True, 1, False, 0])
with patch.dict(rpm.__salt__, {'file.file_exists': mock, 'cmd.retcode': mock}):
self.assertDictEqual(rpm.checksum("file1.rpm", "file2.rpm", "file3.rpm"), ret)
self.assertFalse(_called_with_root(mock))
def test_checksum_root(self):
'''
        Test if checksum validates as expected, using the parameter
root
'''
mock = MagicMock(side_effect=[True, 0])
with patch.dict(rpm.__salt__, {'file.file_exists': mock, 'cmd.retcode': mock}):
rpm.checksum("file1.rpm", root='/')
self.assertTrue(_called_with_root(mock))
def test_version_cmp_rpm(self):
'''
Test package version is called RPM version if RPM-Python is installed
:return:
'''
with patch('salt.modules.rpm.rpm.labelCompare', MagicMock(return_value=0)), \
patch('salt.modules.rpm.HAS_RPM', True):
self.assertEqual(0, rpm.version_cmp('1', '2')) # mock returns 0, which means RPM was called
def test_version_cmp_fallback(self):
'''
        Test that package version comparison falls back to the pure-Python implementation if RPM-Python is not installed
:return:
'''
with patch('salt.modules.rpm.rpm.labelCompare', MagicMock(return_value=0)), \
patch('salt.modules.rpm.HAS_RPM', False):
            self.assertEqual(-1, rpm.version_cmp('1', '2'))  # the Python fallback was used, not the mocked labelCompare
| 35.137441
| 114
| 0.570003
|
3bebd6fc5065620173f75c951d462600ad72abfd
| 9,793
|
py
|
Python
|
addition_module/face_lightning/KDF/train.py
|
xueyedamo521/FaceX-Zoo
|
aa3ecc8eb5471457d566889836cec5f138b92752
|
[
"Apache-2.0"
] | 1
|
2021-03-31T04:43:49.000Z
|
2021-03-31T04:43:49.000Z
|
addition_module/face_lightning/KDF/train.py
|
xueyedamo521/FaceX-Zoo
|
aa3ecc8eb5471457d566889836cec5f138b92752
|
[
"Apache-2.0"
] | null | null | null |
addition_module/face_lightning/KDF/train.py
|
xueyedamo521/FaceX-Zoo
|
aa3ecc8eb5471457d566889836cec5f138b92752
|
[
"Apache-2.0"
] | null | null | null |
"""
@author: Jun Wang
@date: 20201019
@contact: jun21wangustc@gmail.com
"""
import os
import sys
import shutil
import argparse
import logging as logger
import torch
from torch import optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from backbone.backbone_def import BackboneFactory
from loss.loss_def import KDLossFactory
sys.path.append('../../../')
from utils.AverageMeter import AverageMeter
from data_processor.train_dataset import ImageDataset
from head.head_def import HeadFactory
logger.basicConfig(level=logger.INFO,
format='%(levelname)s %(asctime)s %(filename)s: %(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
class FaceModel(torch.nn.Module):
"""Define a traditional face model which contains a backbone and a head.
Attributes:
backbone(object): the backbone of face model.
head(object): the head of face model.
"""
def __init__(self, backbone_factory, backbone_type, head_factory):
"""Init face model by backbone factorcy and head factory.
Args:
backbone_factory(object): produce a backbone according to config files.
head_factory(object): produce a head according to config files.
"""
super(FaceModel, self).__init__()
self.backbone = backbone_factory.get_backbone(backbone_type)
self.head = head_factory.get_head()
def forward(self, data, label):
feat = self.backbone.forward(data)
pred = self.head.forward(feat, label)
return feat, pred
def get_lr(optimizer):
"""Get the current learning rate from optimizer.
"""
for param_group in optimizer.param_groups:
return param_group['lr']
def train_one_epoch(data_loader, teacher_model, student_model, optimizer,
criterion, criterion_kd, cur_epoch, loss_cls_meter, loss_kd_meter, conf):
"""Tain one epoch by traditional training.
"""
for batch_idx, (images, labels) in enumerate(data_loader):
images = images.to(conf.device)
labels = labels.to(conf.device)
labels = labels.squeeze()
feats_s, preds_s = student_model.forward(images, labels)
loss_cls = criterion(preds_s, labels)
with torch.no_grad():
feats_t, preds_t = teacher_model.forward(images, labels)
if conf.loss_type == 'PKT':
loss_kd = criterion_kd(feats_s, feats_t.detach()) * args.lambda_kd
else:
loss_kd = criterion_kd(preds_s, preds_t.detach()) * args.lambda_kd
loss = loss_cls + loss_kd
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_cls_meter.update(loss_cls.item(), images.shape[0])
loss_kd_meter.update(loss_kd.item(), images.shape[0])
if batch_idx % conf.print_freq == 0:
loss_cls_avg = loss_cls_meter.avg
loss_kd_avg = loss_kd_meter.avg
lr = get_lr(optimizer)
logger.info('Epoch %d, iter %d/%d, lr %f, loss_cls %f, loss_kd %f.' %
(cur_epoch, batch_idx, len(data_loader), lr, loss_cls_avg, loss_kd_avg))
global_batch_idx = cur_epoch * len(data_loader) + batch_idx
conf.writer.add_scalar('Cls_loss', loss_cls_avg, global_batch_idx)
conf.writer.add_scalar('KD_loss', loss_kd_avg, global_batch_idx)
conf.writer.add_scalar('Train_lr', lr, global_batch_idx)
loss_cls_meter.reset()
loss_kd_meter.reset()
if (batch_idx + 1) % conf.save_freq == 0:
saved_name = 'Epoch_%d_batch_%d.pt' % (cur_epoch, batch_idx)
state = {
'state_dict': student_model.module.state_dict(),
'epoch': cur_epoch,
'batch_id': batch_idx
}
torch.save(state, os.path.join(conf.out_dir, saved_name))
logger.info('Save checkpoint %s to disk.' % saved_name)
saved_name = 'Epoch_%d.pt' % cur_epoch
state = {'state_dict': student_model.module.state_dict(),
'epoch': cur_epoch, 'batch_id': batch_idx}
torch.save(state, os.path.join(conf.out_dir, saved_name))
logger.info('Save checkpoint %s to disk...' % saved_name)
def train(conf):
"""Total training procedure.
"""
data_loader = DataLoader(ImageDataset(conf.data_root, conf.train_file),
conf.batch_size, True, num_workers = 4)
conf.device = torch.device('cuda:0')
criterion = torch.nn.CrossEntropyLoss().cuda(conf.device)
backbone_factory = BackboneFactory(conf.backbone_conf_file)
head_factory = HeadFactory(conf.head_type, conf.head_conf_file)
kd_loss_factory = KDLossFactory(conf.loss_type, conf.loss_conf_file)
criterion_kd = kd_loss_factory.get_kd_loss().cuda(conf.device)
backbone_factory = BackboneFactory(conf.backbone_conf_file)
teacher_model = FaceModel(backbone_factory, args.teacher_backbone_type, head_factory)
state_dict = torch.load(args.pretrained_teacher)['state_dict']
teacher_model.load_state_dict(state_dict)
teacher_model = torch.nn.DataParallel(teacher_model).cuda()
student_model = FaceModel(backbone_factory, args.student_backbone_type, head_factory)
ori_epoch = 0
if conf.resume:
ori_epoch = torch.load(args.pretrain_model)['epoch'] + 1
state_dict = torch.load(args.pretrain_model)['state_dict']
student_model.load_state_dict(state_dict)
student_model = torch.nn.DataParallel(student_model).cuda()
parameters = [p for p in student_model.parameters() if p.requires_grad]
optimizer = optim.SGD(parameters, lr = conf.lr,
momentum = conf.momentum, weight_decay = 1e-4)
lr_schedule = optim.lr_scheduler.MultiStepLR(
optimizer, milestones = conf.milestones, gamma = 0.1)
loss_cls_meter = AverageMeter()
loss_kd_meter = AverageMeter()
student_model.train()
for epoch in range(ori_epoch, conf.epoches):
train_one_epoch(data_loader, teacher_model, student_model, optimizer,
criterion, criterion_kd, epoch, loss_cls_meter, loss_kd_meter, conf)
lr_schedule.step()
if __name__ == '__main__':
conf = argparse.ArgumentParser(description='traditional_training for face recognition.')
conf.add_argument("--data_root", type = str,
help = "The root folder of training set.")
conf.add_argument("--train_file", type = str,
help = "The training file path.")
conf.add_argument("--student_backbone_type", type = str,
help = "Mobilefacenets, Resnet.")
conf.add_argument("--teacher_backbone_type", type = str,
help = "Mobilefacenets, Resnet.")
conf.add_argument("--backbone_conf_file", type = str,
help = "the path of backbone_conf.yaml.")
conf.add_argument("--head_type", type = str,
help = "mv-softmax, arcface, npc-face.")
conf.add_argument("--head_conf_file", type = str,
help = "the path of head_conf.yaml.")
conf.add_argument("--loss_type", type = str,
help = "Logits, PKT...")
conf.add_argument("--loss_conf_file", type = str,
help = "the path of loss_conf.yaml.")
conf.add_argument('--lr', type = float, default = 0.1,
help='The initial learning rate.')
conf.add_argument('--lambda_kd', type = float, default = 1.0,
help='The weight of kd loss.')
conf.add_argument("--out_dir", type = str,
help = "The folder to save models.")
conf.add_argument('--epoches', type = int, default = 9,
                      help = 'The number of training epochs.')
conf.add_argument('--step', type = str, default = '2,5,7',
help = 'Step for lr.')
conf.add_argument('--print_freq', type = int, default = 10,
help = 'The print frequency for training state.')
conf.add_argument('--save_freq', type = int, default = 10,
help = 'The save frequency for training state.')
conf.add_argument('--batch_size', type = int, default = 128,
help='The training batch size over all gpus.')
conf.add_argument('--momentum', type = float, default = 0.9,
help = 'The momentum for sgd.')
conf.add_argument('--log_dir', type = str, default = 'log',
help = 'The directory to save log.log')
conf.add_argument('--tensorboardx_logdir', type = str,
help = 'The directory to save tensorboardx logs')
conf.add_argument('--pretrained_teacher', type = str, default = 'mv_epoch_8.pt',
                      help = 'The path of pretrained teacher model')
    conf.add_argument('--pretrain_model', type = str, default = 'mv_epoch_8.pt',
help = 'The path of pretrained model')
conf.add_argument('--resume', '-r', action = 'store_true', default = False,
help = 'Whether to resume from a checkpoint.')
args = conf.parse_args()
args.milestones = [int(num) for num in args.step.split(',')]
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir)
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
tensorboardx_logdir = os.path.join(args.log_dir, args.tensorboardx_logdir)
if os.path.exists(tensorboardx_logdir):
shutil.rmtree(tensorboardx_logdir)
writer = SummaryWriter(log_dir=tensorboardx_logdir)
args.writer = writer
logger.info('Start optimization.')
logger.info(args)
train(args)
logger.info('Optimization done!')
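# The 'Logits' and 'PKT' options above come from KDLossFactory (loss/loss_def.py,
# which is not shown in this excerpt). As a rough sketch of what a logits-style
# distillation criterion typically looks like (an assumption, not the repository's
# definition): compare temperature-softened student and teacher distributions
# with KL divergence, scaled by T^2 to keep gradient magnitudes comparable.
import torch.nn.functional as F

class LogitsKDSketch(torch.nn.Module):
    def __init__(self, temperature=4.0):
        super(LogitsKDSketch, self).__init__()
        self.t = temperature

    def forward(self, preds_s, preds_t):
        log_p_s = F.log_softmax(preds_s / self.t, dim=1)
        p_t = F.softmax(preds_t / self.t, dim=1)
        return F.kl_div(log_p_s, p_t, reduction='batchmean') * (self.t ** 2)

def _kd_sketch_selfcheck():
    # Shape check with random logits: 8 samples, 10 classes.
    return LogitsKDSketch()(torch.randn(8, 10), torch.randn(8, 10))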
| 47.081731
| 97
| 0.635045
|
35cd33ce20be49dde5ce55248f2ad047fc962fa8
| 13,305
|
py
|
Python
|
src/logistic/main_mimic.py
|
rathp/time_series_prediction
|
c776f988c633868c7106041ac91ab56ca9fd7968
|
[
"MIT"
] | null | null | null |
src/logistic/main_mimic.py
|
rathp/time_series_prediction
|
c776f988c633868c7106041ac91ab56ca9fd7968
|
[
"MIT"
] | null | null | null |
src/logistic/main_mimic.py
|
rathp/time_series_prediction
|
c776f988c633868c7106041ac91ab56ca9fd7968
|
[
"MIT"
] | null | null | null |
import sys, os
import argparse
import numpy as np
import pandas as pd
import json
import time
import torch
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import (roc_curve, accuracy_score, log_loss,
balanced_accuracy_score, confusion_matrix,
roc_auc_score, make_scorer)
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from yattag import Doc
import matplotlib.pyplot as plt
def main():
parser = argparse.ArgumentParser(description='sklearn LogisticRegression')
parser.add_argument('--train_vitals_csv', type=str,
help='Location of vitals data for training')
parser.add_argument('--test_vitals_csv', type=str,
help='Location of vitals data for testing')
parser.add_argument('--metadata_csv', type=str,
help='Location of metadata for testing and training')
parser.add_argument('--data_dict', type=str)
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--save', type=str, default='LRmodel.pt',
help='path to save the final model')
parser.add_argument('--report_dir', type=str, default='results',
help='dir in which to save results report')
parser.add_argument('--is_data_simulated', type=bool, default=False,
help='boolean to check if data is simulated or from mimic')
parser.add_argument('--output_filename_prefix', type=str, default='current_config', help='file to save the loss and validation over epochs')
args = parser.parse_args()
if not(args.is_data_simulated):
# extract data
train_vitals = pd.read_csv(args.train_vitals_csv)
test_vitals = pd.read_csv(args.test_vitals_csv)
metadata = pd.read_csv(args.metadata_csv)
X_train, y_train = extract_labels(train_vitals, metadata, args.data_dict)
X_test, y_test = extract_labels(test_vitals, metadata, args.data_dict)
# remove subject_id and episode_id from the train and test features
X_train = X_train.iloc[:,2:]
X_test = X_test.iloc[:,2:]
else:
simulated_data_dir = 'simulated_data/'
X_train = torch.load(simulated_data_dir + 'X_train.pt').numpy()
X_test = torch.load(simulated_data_dir + 'X_test.pt').numpy()
y_train = torch.load(simulated_data_dir + 'y_train.pt').long().numpy()
y_test = torch.load(simulated_data_dir + 'y_test.pt').long().numpy()
# build pipeline
step_list = list()
scaler_x = StandardScaler()
step_list.append(('standardize', scaler_x))
#------------------------------------------- TRAIN ----------------------------#
# initialize regressor
logistic = LogisticRegression(solver='saga', max_iter=10000,\
class_weight = 'balanced',\
random_state = 42,\
tol = 1e-2)
step_list.append(('regressor',logistic))
# hyperparameter space
penalty = ['l1','l2']
C = [1e-5, 1e-4, \
1e-3, 1e-2, 1e-1, 1e0, 1e2, 1e3, 1e4]
hyperparameters = dict(C=C, penalty=penalty)
param_grid = dict(regressor__C=hyperparameters['C'], regressor__penalty=hyperparameters['penalty'])
# define a auc scorer function
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_proba=True)
prediction_pipeline = Pipeline(step_list)
# classifier = GridSearchCV(logistic, hyperparameters, cv=5, verbose=10, scoring = roc_auc_scorer)
classifier = GridSearchCV(prediction_pipeline, param_grid, cv=5, verbose=10, scoring = roc_auc_scorer)
t1=time.time()
best_logistic = classifier.fit(X_train, y_train)
#------------------------------------------- REPORT ----------------------------#
# View best hyperparameters
best_penalty = best_logistic.best_estimator_.get_params()['regressor__penalty']
best_C = best_logistic.best_estimator_.get_params()['regressor__C']
y_pred = best_logistic.predict(X_test)
y_pred_proba = best_logistic.predict_proba(X_test)
# check performance on training data to check for overfitting
y_train_pred = best_logistic.predict(X_train)
y_train_pred_proba = best_logistic.predict_proba(X_train)
# Brief Summary
print('Best Penalty:', best_penalty)
print('Best C:', best_C)
print('Accuracy:', accuracy_score(y_test, y_pred))
print('Balanced Accuracy:', balanced_accuracy_score(y_test, y_pred))
print('Log Loss:', log_loss(y_test, y_pred_proba))
conf_matrix = confusion_matrix(y_test, y_pred)
conf_matrix_train = confusion_matrix(y_train, y_train_pred)# to check for overfitting
true_neg = conf_matrix[0][0]
true_pos = conf_matrix[1][1]
false_neg = conf_matrix[1][0]
false_pos = conf_matrix[0][1]
true_neg_train = conf_matrix_train[0][0]
true_pos_train = conf_matrix_train[1][1]
false_neg_train = conf_matrix_train[1][0]
false_pos_train = conf_matrix_train[0][1]
print('True Positive Rate:', float(true_pos) / (true_pos + false_neg))
print('True Negative Rate:', float(true_neg) / (true_neg + false_pos))
print('Positive Predictive Value:', float(true_pos) / (true_pos + false_pos))
    print('Negative Predictive Value:', float(true_neg) / (true_neg + false_neg))
print('True Positive Rate on training data:', float(true_pos_train) / (true_pos_train + false_neg_train))
print('True Negative Rate on training data:', float(true_neg_train) / (true_neg_train + false_pos_train))
print('Positive Predictive Value on training data:', float(true_pos_train) / (true_pos_train + false_pos_train))
    print('Negative Predictive Value on training data:', float(true_neg_train) / (true_neg_train + false_neg_train))
t2 = time.time()
print('time taken to run classifier : {} seconds'.format(t2-t1))
create_html_report(args.report_dir, y_test, y_pred, y_pred_proba,
y_train, y_train_pred, y_train_pred_proba, hyperparameters, best_penalty, best_C)
def create_html_report(report_dir, y_test, y_pred, y_pred_proba,
y_train, y_train_pred, y_train_pred_proba, hyperparameters, best_penalty, best_C):
try:
os.mkdir(report_dir)
except OSError:
pass
# Set up HTML report
doc, tag, text = Doc().tagtext()
# Metadata
with tag('h2'):
text('Logistic Classifier Results')
with tag('h3'):
text('Hyperparameters searched:')
with tag('p'):
text('Penalty: ', str(hyperparameters['penalty']))
with tag('p'):
text('C: ', str(hyperparameters['C']))
# Model
with tag('h3'):
text('Parameters of best model:')
with tag('p'):
text('Penalty: ', best_penalty)
with tag('p'):
text('C: ', best_C)
# Performance
with tag('h3'):
text('Performance Metrics:')
with tag('p'):
text('Accuracy: ', accuracy_score(y_test, y_pred))
with tag('p'):
text('Accuracy on training data: ', accuracy_score(y_train, y_train_pred))
with tag('p'):
text('Balanced Accuracy: ', balanced_accuracy_score(y_test, y_pred))
with tag('p'):
text('Log Loss: ', log_loss(y_test, y_pred_proba))
conf_matrix = confusion_matrix(y_test, y_pred)
conf_matrix_norm = conf_matrix.astype('float') / conf_matrix.sum(axis=1)[:, np.newaxis]
conf_matrix_train = confusion_matrix(y_train, y_train_pred)# to check for overfitting
conf_matrix_norm_train = conf_matrix_train.astype('float') / conf_matrix_train.sum(axis=1)[:, np.newaxis]
true_neg = conf_matrix[0][0]
true_pos = conf_matrix[1][1]
false_neg = conf_matrix[1][0]
false_pos = conf_matrix[0][1]
true_neg_train = conf_matrix_train[0][0]
true_pos_train = conf_matrix_train[1][1]
false_neg_train = conf_matrix_train[1][0]
false_pos_train = conf_matrix_train[0][1]
with tag('p'):
text('True Positive Rate: ', float(true_pos) / (true_pos + false_neg))
with tag('p'):
text('True Positive Rate on training data: ', float(true_pos_train) / (true_pos_train + false_neg_train))
with tag('p'):
text('True Negative Rate: ', float(true_neg) / (true_neg + false_pos))
with tag('p'):
text('True Negative Rate on training data: ', float(true_neg_train) / (true_neg_train + false_pos_train))
with tag('p'):
text('Positive Predictive Value: ', float(true_pos) / (true_pos + false_pos))
with tag('p'):
text('Positive Predictive Value on training data: ', float(true_pos_train) / (true_pos_train + false_pos_train))
with tag('p'):
        text('Negative Predictive Value: ', float(true_neg) / (true_neg + false_neg))
with tag('p'):
        text('Negative Predictive Value on training data: ', float(true_neg_train) / (true_neg_train + false_neg_train))
# Confusion Matrix
columns = ['Predicted 0', 'Predicted 1']
rows = ['Actual 0', 'Actual 1']
cell_text = []
for cm_row, cm_norm_row in zip(conf_matrix, conf_matrix_norm):
row_text = []
for i, i_norm in zip(cm_row, cm_norm_row):
row_text.append('{} ({})'.format(i, i_norm))
cell_text.append(row_text)
ax = plt.subplot(111, frame_on=False)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
confusion_table = ax.table(cellText=cell_text,
rowLabels=rows,
colLabels=columns,
loc='center')
plt.savefig(report_dir + '/confusion_matrix_test.png')
plt.close()
# save confusion matrix for training data
cell_text_train = []
for cm_row, cm_norm_row in zip(conf_matrix_train, conf_matrix_norm_train):
row_text = []
for i, i_norm in zip(cm_row, cm_norm_row):
row_text.append('{} ({})'.format(i, i_norm))
cell_text_train.append(row_text)
ax = plt.subplot(111, frame_on=False)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
confusion_table = ax.table(cellText=cell_text_train,
rowLabels=rows,
colLabels=columns,
loc='center')
plt.savefig(report_dir + '/confusion_matrix_train.png')
plt.close()
with tag('p'):
text('Confusion Matrix on test data:')
doc.stag('img', src=('confusion_matrix_test.png'))
with tag('p'):
text('Confusion Matrix on training data:')
doc.stag('img', src=('confusion_matrix_train.png'))
# ROC curve/area
y_pred_proba_neg, y_pred_proba_pos = zip(*y_pred_proba)
y_train_pred_proba_neg, y_train_pred_proba_pos = zip(*y_train_pred_proba)
fpr, tpr, thresholds = roc_curve(y_test, y_pred_proba_pos)
fpr_train, tpr_train, thresholds_train = roc_curve(y_train, y_train_pred_proba_pos)
roc_area = roc_auc_score(y_test, y_pred_proba_pos)
roc_area_train = roc_auc_score(y_train, y_train_pred_proba_pos)
print('train ROC : {}'.format(roc_area_train))
print('test ROC : {}'.format(roc_area))
plt.plot(fpr, tpr)
plt.xlabel('FPR Test')
plt.ylabel('TPR Test')
plt.title('AUC : {}'.format(roc_area))
plt.savefig(report_dir + '/roc_curve_test.png')
plt.close()
# plot training ROC
plt.plot(fpr_train, tpr_train)
plt.xlabel('FPR Train')
plt.ylabel('TPR Train')
plt.title('AUC : {}'.format(roc_area_train))
plt.savefig(report_dir + '/roc_curve_train.png')
plt.close()
with tag('p'):
text('ROC Curve for test data:')
doc.stag('img', src=('roc_curve_test.png'))
with tag('p'):
text('ROC Area for test data: ', roc_area)
with tag('p'):
text('ROC Curve for training data:')
doc.stag('img', src=('roc_curve_train.png'))
with tag('p'):
text('ROC Area for training data: ', roc_area_train)
with open(report_dir + '/report.html', 'w') as f:
f.write(doc.getvalue())
def extract_labels(vitals, metadata, data_dict):
id_cols = parse_id_cols(data_dict)
outcome = parse_outcome_col(data_dict)
print(id_cols)
df = pd.merge(vitals, metadata, on=id_cols, how='left')
y = list(df[outcome])
if len(vitals) != len(y):
raise Exception('Number of sequences did not match number of labels.')
return vitals, y
def parse_id_cols(data_dict_file):
cols = []
with open(data_dict_file, 'r') as f:
data_dict = json.load(f)
f.close()
for col in data_dict['fields']:
if 'role' in col and col['role'] == 'id':
cols.append(col['name'])
return cols
def parse_outcome_col(data_dict_file):
cols = []
with open(data_dict_file, 'r') as f:
data_dict = json.load(f)
f.close()
for col in data_dict['fields']:
if 'role' in col and col['role'] == 'outcome':
return col['name']
return ''
if __name__ == '__main__':
main()
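# For reference, the four rates printed and reported above follow the standard
# confusion-matrix definitions; in particular the negative predictive value
# divides true negatives by true negatives plus false negatives. A small
# self-contained check with made-up labels, using the (tn, fp, fn, tp) ordering
# returned by sklearn's confusion_matrix(...).ravel():
def _rates_demo():
    y_true = [0, 0, 1, 1, 1, 0]
    y_hat = [0, 1, 1, 0, 1, 0]
    tn, fp, fn, tp = confusion_matrix(y_true, y_hat).ravel()
    tpr = float(tp) / (tp + fn)   # sensitivity / recall
    tnr = float(tn) / (tn + fp)   # specificity
    ppv = float(tp) / (tp + fp)   # precision
    npv = float(tn) / (tn + fn)   # negative predictive value
    return tpr, tnr, ppv, npv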
| 39.247788
| 148
| 0.637956
|
6fd3b348d55ff4399b86550119d50aa866c0ae97
| 678
|
py
|
Python
|
office365api/mail/drafts.py
|
swimlane/python-office365
|
42c0c0cad0d92e4cd7f18fcf3e75153045a9ea0f
|
[
"MIT"
] | 21
|
2016-10-27T10:39:25.000Z
|
2021-06-15T01:03:06.000Z
|
office365api/mail/drafts.py
|
swimlane/python-office365
|
42c0c0cad0d92e4cd7f18fcf3e75153045a9ea0f
|
[
"MIT"
] | 6
|
2017-03-08T06:39:59.000Z
|
2021-07-12T01:35:05.000Z
|
office365api/mail/drafts.py
|
swimlane/python-office365
|
42c0c0cad0d92e4cd7f18fcf3e75153045a9ea0f
|
[
"MIT"
] | 15
|
2016-12-11T22:33:56.000Z
|
2021-09-13T17:44:11.000Z
|
from office365api.mail.folders import Folder
class Drafts(Folder):
@property
def folder_name(self):
return 'Drafts'
def reply(self, message, comment=None, to_all=False):
"""
Sends reply to sender and other recipients.
:param message: Message to reply to, only Id is important.
:param comment: Optional comment.
:param to_all: If true reply to other recipients as well.
:return: None
"""
url = (self.REPLY_ALL_URL if to_all else self.REPLY_URL).format(id=message.Id)
self.connection.post(url=url, data={'Comment': comment or ''})
def create_message(self, message):
pass
| 29.478261
| 86
| 0.643068
|
edf54ff64a79958d136e730469924c4025d22a08
| 796
|
py
|
Python
|
Problems/Depth-First Search/easy/MaxDepthNaryTree/max_depth_nary_tree.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | 1
|
2021-08-16T14:52:05.000Z
|
2021-08-16T14:52:05.000Z
|
Problems/Depth-First Search/easy/MaxDepthNaryTree/max_depth_nary_tree.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
Problems/Depth-First Search/easy/MaxDepthNaryTree/max_depth_nary_tree.py
|
dolong2110/Algorithm-By-Problems-Python
|
31ecc7367aaabdd2b0ac0af7f63ca5796d70c730
|
[
"MIT"
] | null | null | null |
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
# Recursive
def maxDepth(root: 'Node') -> int:
if not root:
return 0
def search_max(cur_root: 'Node', depth: int):
if not cur_root.children:
return depth
return max([search_max(child, depth + 1) for child in cur_root.children])
return search_max(root, 1)
# Iterative
# def maxDepth(root: 'Node') -> int:
# if not root:
# return 0
#
# max_depth = 1
# stack = [(root, max_depth)]
# while stack:
# cur_node, cur_depth = stack.pop()
# max_depth = max(max_depth, cur_depth)
# for child in cur_node.children:
# stack.append((child, cur_depth + 1))
#
# return max_depth
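# Quick usage check for the recursive version above (the commented-out iterative
# version returns the same values): a three-level tree where node 1 has children
# 3, 2 and 4, and node 3 has children 5 and 6.
if __name__ == '__main__':
    tree = Node(1, [Node(3, [Node(5), Node(6)]), Node(2), Node(4)])
    assert maxDepth(tree) == 3
    assert maxDepth(None) == 0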
| 24.875
| 81
| 0.589196
|
d49cbe8d7289e8f4355f59f8f794d5dcaa2b303b
| 2,366
|
bzl
|
Python
|
bazel/generate_cc.bzl
|
LuminateWireless/grpc
|
b21e8bfde50f6c895165b984f1d5dff58a4cc7b4
|
[
"BSD-3-Clause"
] | 1
|
2021-04-27T20:09:23.000Z
|
2021-04-27T20:09:23.000Z
|
bazel/generate_cc.bzl
|
LuminateWireless/grpc
|
b21e8bfde50f6c895165b984f1d5dff58a4cc7b4
|
[
"BSD-3-Clause"
] | null | null | null |
bazel/generate_cc.bzl
|
LuminateWireless/grpc
|
b21e8bfde50f6c895165b984f1d5dff58a4cc7b4
|
[
"BSD-3-Clause"
] | 1
|
2019-06-03T16:02:06.000Z
|
2019-06-03T16:02:06.000Z
|
"""Generates C++ grpc stubs from proto_library rules.
This is an internal rule used by cc_grpc_library, and shouldn't be used
directly.
"""
def generate_cc_impl(ctx):
"""Implementation of the generate_cc rule."""
protos = [f for src in ctx.attr.srcs for f in src.proto.direct_sources]
includes = [f for src in ctx.attr.srcs for f in src.proto.transitive_imports]
outs = []
if ctx.executable.plugin:
outs += [proto.basename[:-len(".proto")] + ".grpc.pb.h" for proto in protos]
outs += [proto.basename[:-len(".proto")] + ".grpc.pb.cc" for proto in protos]
else:
outs += [proto.basename[:-len(".proto")] + ".pb.h" for proto in protos]
outs += [proto.basename[:-len(".proto")] + ".pb.cc" for proto in protos]
out_files = [ctx.new_file(out) for out in outs]
# The following should be replaced with ctx.configuration.buildout
# whenever this is added to Skylark.
dir_out = out_files[0].dirname[:-len(protos[0].dirname)]
arguments = []
if ctx.executable.plugin:
arguments += ["--plugin=protoc-gen-PLUGIN=" + ctx.executable.plugin.path]
arguments += ["--PLUGIN_out=" + ",".join(ctx.attr.flags) + ":" + dir_out]
additional_input = [ctx.executable.plugin]
else:
arguments += ["--cpp_out=" + ",".join(ctx.attr.flags) + ":" + dir_out]
additional_input = []
arguments += ["-I{0}={0}".format(include.path) for include in includes]
arguments += [proto.path for proto in protos]
ctx.action(
inputs = protos + includes + additional_input,
outputs = out_files,
executable = ctx.executable._protoc,
arguments = arguments,
)
return struct(files=set(out_files))
generate_cc = rule(
attrs = {
"srcs": attr.label_list(
mandatory = True,
non_empty = True,
providers = ["proto"],
),
"plugin": attr.label(
executable = True,
providers = ["files_to_run"],
cfg = "host",
),
"flags": attr.string_list(
mandatory = False,
allow_empty = True,
),
"_protoc": attr.label(
default = Label("//external:protocol_compiler"),
executable = True,
cfg = "host",
),
},
# We generate .h files, so we need to output to genfiles.
output_to_genfiles = True,
implementation = generate_cc_impl,
)
| 34.289855
| 81
| 0.610313
|
4918ed56c140e3f0ce56defcfbed09d85cea59bc
| 958
|
py
|
Python
|
ops/convert_model_togroupmodel.py
|
wang3702/barlowtwins
|
6d1dc9d31f8f3c87fa4148b7dada0fe9e34805d1
|
[
"MIT"
] | null | null | null |
ops/convert_model_togroupmodel.py
|
wang3702/barlowtwins
|
6d1dc9d31f8f3c87fa4148b7dada0fe9e34805d1
|
[
"MIT"
] | null | null | null |
ops/convert_model_togroupmodel.py
|
wang3702/barlowtwins
|
6d1dc9d31f8f3c87fa4148b7dada0fe9e34805d1
|
[
"MIT"
] | null | null | null |
def convert_model_to_group(world_size ,group_norm_size ,model):
total_world_size = world_size
print("total world size %d, group num %d" % (total_world_size, group_norm_size))
if total_world_size >= group_norm_size:
cur_divide_group = 1
gpu_per_group = total_world_size // group_norm_size
else:
gpu_per_group = 1
cur_divide_group = group_norm_size // total_world_size
print("groupBN %d gpu per group" % gpu_per_group)
print("per gpu divided into %d groups" % cur_divide_group)
import apex
if cur_divide_group > 1:
from ops.convert_syncbn_model import convert_groupbn_model
model = convert_groupbn_model(model, cur_divide_group)
else:
process_group = apex.parallel.create_syncbn_process_group(gpu_per_group)
print("current process group:", process_group)
model = apex.parallel.convert_syncbn_model(model, process_group=process_group)
return model
| 43.545455
| 86
| 0.727557
|
65f347dcfe331ca0d560958359f2aac4a912c99e
| 2,669
|
py
|
Python
|
scrapy/logformatter.py
|
subhipandey/scrapy
|
b5c552d17ff9e9629434712c3d0595c02853bcfc
|
[
"BSD-3-Clause"
] | 9,953
|
2019-04-03T23:41:04.000Z
|
2022-03-31T11:54:44.000Z
|
stackoverflow/venv/lib/python3.6/site-packages/scrapy/logformatter.py
|
W4LKURE/learn_python3_spider
|
98dd354a41598b31302641f9a0ea49d1ecfa0fb1
|
[
"MIT"
] | 44
|
2019-05-27T10:59:29.000Z
|
2022-03-31T14:14:29.000Z
|
stackoverflow/venv/lib/python3.6/site-packages/scrapy/logformatter.py
|
W4LKURE/learn_python3_spider
|
98dd354a41598b31302641f9a0ea49d1ecfa0fb1
|
[
"MIT"
] | 2,803
|
2019-04-06T13:15:33.000Z
|
2022-03-31T07:42:01.000Z
|
import os
import logging
from twisted.python.failure import Failure
from scrapy.utils.request import referer_str
SCRAPEDMSG = u"Scraped from %(src)s" + os.linesep + "%(item)s"
DROPPEDMSG = u"Dropped: %(exception)s" + os.linesep + "%(item)s"
CRAWLEDMSG = u"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s"
class LogFormatter(object):
"""Class for generating log messages for different actions.
All methods must return a dictionary listing the parameters ``level``,
``msg`` and ``args`` which are going to be used for constructing the log
message when calling logging.log.
Dictionary keys for the method outputs:
* ``level`` should be the log level for that action, you can use those
from the python logging library: logging.DEBUG, logging.INFO,
logging.WARNING, logging.ERROR and logging.CRITICAL.
* ``msg`` should be a string that can contain different formatting
placeholders. This string, formatted with the provided ``args``, is
going to be the log message for that action.
* ``args`` should be a tuple or dict with the formatting placeholders
for ``msg``. The final log message is computed as output['msg'] %
output['args'].
"""
def crawled(self, request, response, spider):
request_flags = ' %s' % str(request.flags) if request.flags else ''
response_flags = ' %s' % str(response.flags) if response.flags else ''
return {
'level': logging.DEBUG,
'msg': CRAWLEDMSG,
'args': {
'status': response.status,
'request': request,
'request_flags' : request_flags,
'referer': referer_str(request),
'response_flags': response_flags,
# backward compatibility with Scrapy logformatter below 1.4 version
'flags': response_flags
}
}
def scraped(self, item, response, spider):
if isinstance(response, Failure):
src = response.getErrorMessage()
else:
src = response
return {
'level': logging.DEBUG,
'msg': SCRAPEDMSG,
'args': {
'src': src,
'item': item,
}
}
def dropped(self, item, exception, response, spider):
return {
'level': logging.WARNING,
'msg': DROPPEDMSG,
'args': {
'exception': exception,
'item': item,
}
}
@classmethod
def from_crawler(cls, crawler):
return cls()
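# The contract described in the class docstring (each method returns a dict of
# level/msg/args) is what makes project-specific formatters possible. A minimal
# sketch of the usual customisation, assuming it is wired up through Scrapy's
# LOG_FORMATTER setting: demote dropped-item messages from WARNING to DEBUG.
class QuietDropFormatter(LogFormatter):
    def dropped(self, item, exception, response, spider):
        return {
            'level': logging.DEBUG,
            'msg': DROPPEDMSG,
            'args': {
                'exception': exception,
                'item': item,
            }
        }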
| 34.217949
| 106
| 0.58299
|
358d5ed4e40a7840fe0f2861abc52f73022aaad0
| 3,833
|
py
|
Python
|
etc/tidy.py
|
olsonjeffery/schemers
|
74a8a6cfbf7158e489fd82e0d0e678df81948b87
|
[
"BSD-3-Clause"
] | 2
|
2015-03-10T02:30:25.000Z
|
2021-09-07T11:05:47.000Z
|
etc/tidy.py
|
olsonjeffery/schemers
|
74a8a6cfbf7158e489fd82e0d0e678df81948b87
|
[
"BSD-3-Clause"
] | null | null | null |
etc/tidy.py
|
olsonjeffery/schemers
|
74a8a6cfbf7158e489fd82e0d0e678df81948b87
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2010-2014 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import sys, fileinput, subprocess, re, os
from licenseck import *
err=0
cols=100
cr_flag="ignore-tidy-cr"
tab_flag="ignore-tidy-tab"
linelength_flag="ignore-tidy-linelength"
# Be careful to support Python 2.4, 2.6, and 3.x here!
config_proc=subprocess.Popen([ "git", "config", "core.autocrlf" ],
stdout=subprocess.PIPE)
result=config_proc.communicate()[0]
true="true".encode('utf8')
autocrlf=result.strip() == true if result is not None else False
def scrub(b):
if sys.version_info >= (3,) and type(b) == bytes:
return b.decode('ascii')
else:
return b
licenseck_disabled = scrub(os.getenv("TIDY_NO_LICENSECK")) == "1"
def report_error_name_no(name, no, s):
global err
print("%s:%d: %s" % (name, no, s))
err=1
def report_err(s):
report_error_name_no(fileinput.filename(), fileinput.filelineno(), s)
def report_warn(s):
print("%s:%d: %s" % (fileinput.filename(),
fileinput.filelineno(),
s))
def do_license_check(name, contents):
if not licenseck_disabled and not check_license(name, contents):
report_error_name_no(name, 1, "incorrect license")
file_names = [s for s in sys.argv[1:] if (not s.endswith("_gen.rs"))
and (not ".#" in s)]
current_name = ""
current_contents = ""
check_tab = True
check_cr = True
check_linelength = True
try:
for line in fileinput.input(file_names,
openhook=fileinput.hook_encoded("utf-8")):
if fileinput.filename().find("tidy.py") == -1:
if line.find(cr_flag) != -1:
check_cr = False
if line.find(tab_flag) != -1:
check_tab = False
if line.find(linelength_flag) != -1:
check_linelength = False
if line.find("// XXX") != -1:
report_err("XXX is no longer necessary, use FIXME")
if line.find("TODO") != -1:
report_err("TODO is deprecated; use FIXME")
match = re.match(r'^.*//\s*(NOTE.*)$', line)
if match:
m = match.group(1)
if "snap" in m.lower():
report_warn(match.group(1))
match = re.match(r'^.*//\s*SNAP\s+(\w+)', line)
if check_tab and (line.find('\t') != -1 and
fileinput.filename().find("Makefile") == -1):
report_err("tab character")
if check_cr and not autocrlf and line.find('\r') != -1:
report_err("CR character")
if line.endswith(" \n") or line.endswith("\t\n"):
report_err("trailing whitespace")
line_len = len(line)-2 if autocrlf else len(line)-1
if check_linelength and line_len > cols:
report_err("line longer than %d chars" % cols)
if fileinput.isfirstline() and current_name != "":
do_license_check(current_name, current_contents)
if fileinput.isfirstline():
current_name = fileinput.filename()
current_contents = ""
check_cr = True
check_tab = True
check_linelength = True
current_contents += line
if current_name != "":
do_license_check(current_name, current_contents)
except UnicodeDecodeError, e:
report_err("UTF-8 decoding error " + str(e))
sys.exit(err)
| 32.760684
| 74
| 0.600313
|
c04e591f7c07799ba41d5beee76a127faa7fc2ec
| 2,751
|
py
|
Python
|
tests/parsers/text_parser.py
|
cugu-stars/plaso
|
a205f8e52dfe4c239aeae5558d572806b7b00e81
|
[
"Apache-2.0"
] | null | null | null |
tests/parsers/text_parser.py
|
cugu-stars/plaso
|
a205f8e52dfe4c239aeae5558d572806b7b00e81
|
[
"Apache-2.0"
] | null | null | null |
tests/parsers/text_parser.py
|
cugu-stars/plaso
|
a205f8e52dfe4c239aeae5558d572806b7b00e81
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""This file contains the tests for the generic text parser."""
import unittest
import pyparsing
from plaso.parsers import text_parser
from tests.parsers import test_lib
class PyparsingConstantsTest(test_lib.ParserTestCase):
"""Tests the PyparsingConstants text parser."""
def testConstants(self):
"""Tests parsing with constants."""
with self.assertRaises(pyparsing.ParseException):
text_parser.PyparsingConstants.MONTH.parseString('MMo')
with self.assertRaises(pyparsing.ParseException):
text_parser.PyparsingConstants.MONTH.parseString('M')
with self.assertRaises(pyparsing.ParseException):
text_parser.PyparsingConstants.MONTH.parseString('March', parseAll=True)
self.assertTrue(text_parser.PyparsingConstants.MONTH.parseString('Jan'))
line = '# This is a comment.'
parsed_line = text_parser.PyparsingConstants.COMMENT_LINE_HASH.parseString(
line)
self.assertEqual(parsed_line[-1], 'This is a comment.')
self.assertEqual(len(parsed_line), 2)
def testConstantIPv4(self):
"""Tests parsing with the IPV4_ADDRESS constant."""
self.assertTrue(
text_parser.PyparsingConstants.IPV4_ADDRESS.parseString(
'123.51.234.52'))
self.assertTrue(
text_parser.PyparsingConstants.IPV4_ADDRESS.parseString(
'255.254.23.1'))
self.assertTrue(
text_parser.PyparsingConstants.IPV4_ADDRESS.parseString('1.1.34.2'))
with self.assertRaises(pyparsing.ParseException):
text_parser.PyparsingConstants.IPV4_ADDRESS.parseString('a.1.34.258')
with self.assertRaises(pyparsing.ParseException):
text_parser.PyparsingConstants.IPV4_ADDRESS.parseString('.34.258')
with self.assertRaises(pyparsing.ParseException):
text_parser.PyparsingConstants.IPV4_ADDRESS.parseString('34.258')
class PyparsingSingleLineTextParserTest(unittest.TestCase):
"""Tests for the single line PyParsing-based text parser."""
# pylint: disable=protected-access
def testIsText(self):
"""Tests the _IsText function."""
parser = text_parser.PyparsingSingleLineTextParser()
bytes_in = b'this is My Weird ASCII and non whatever string.'
self.assertTrue(parser._IsText(bytes_in))
    bytes_in = 'Plaso Síar Og Raðar Þessu'
self.assertTrue(parser._IsText(bytes_in))
bytes_in = b'\x01\\62LSO\xFF'
self.assertFalse(parser._IsText(bytes_in))
bytes_in = b'T\x00h\x00i\x00s\x00\x20\x00'
self.assertTrue(parser._IsText(bytes_in))
bytes_in = b'Ascii\x00'
self.assertTrue(parser._IsText(bytes_in))
bytes_in = b'Ascii Open then...\x00\x99\x23'
self.assertFalse(parser._IsText(bytes_in))
if __name__ == '__main__':
unittest.main()
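# The COMMENT_LINE_HASH behaviour exercised above can be illustrated with a tiny
# stand-alone pyparsing grammar (a generic sketch, not plaso's actual constant
# definition): a literal '#' followed by the rest of the line, stripped.
def _comment_line_sketch(line):
  grammar = pyparsing.Literal('#') + pyparsing.restOfLine.copy().setParseAction(
      lambda tokens: tokens[0].strip())
  # e.g. _comment_line_sketch('# This is a comment.')[-1] == 'This is a comment.'
  return grammar.parseString(line)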
| 32.364706
| 79
| 0.733915
|
caa044a8c084467ddb400e9d7325af160045293b
| 1,664
|
py
|
Python
|
test/test_get_transaction_details_by_transaction_id_response_data.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
test/test_get_transaction_details_by_transaction_id_response_data.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | null | null | null |
test/test_get_transaction_details_by_transaction_id_response_data.py
|
xan187/Crypto_APIs_2.0_SDK_Python
|
a56c75df54ef037b39be1315ed6e54de35bed55b
|
[
"MIT"
] | 1
|
2021-07-21T03:35:18.000Z
|
2021-07-21T03:35:18.000Z
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.get_transaction_details_by_transaction_id_response_item import GetTransactionDetailsByTransactionIDResponseItem
globals()['GetTransactionDetailsByTransactionIDResponseItem'] = GetTransactionDetailsByTransactionIDResponseItem
from cryptoapis.model.get_transaction_details_by_transaction_id_response_data import GetTransactionDetailsByTransactionIDResponseData
class TestGetTransactionDetailsByTransactionIDResponseData(unittest.TestCase):
"""GetTransactionDetailsByTransactionIDResponseData unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetTransactionDetailsByTransactionIDResponseData(self):
"""Test GetTransactionDetailsByTransactionIDResponseData"""
# FIXME: construct object with mandatory attributes with example values
# model = GetTransactionDetailsByTransactionIDResponseData() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 42.666667
| 484
| 0.805288
|
d4c4d80736767a7fa72222f0139764b8abfcfdef
| 257
|
py
|
Python
|
python-package/exp_pb2_test.py
|
openmit/openmit
|
01e3262d69d47fbe38bad1ba95c7d1ade110d01e
|
[
"Apache-2.0"
] | 15
|
2017-06-28T08:39:51.000Z
|
2019-03-27T14:08:45.000Z
|
python-package/exp_pb2_test.py
|
openmit/openmit
|
01e3262d69d47fbe38bad1ba95c7d1ade110d01e
|
[
"Apache-2.0"
] | null | null | null |
python-package/exp_pb2_test.py
|
openmit/openmit
|
01e3262d69d47fbe38bad1ba95c7d1ade110d01e
|
[
"Apache-2.0"
] | 3
|
2017-07-30T08:50:45.000Z
|
2017-10-24T14:41:30.000Z
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
import exp_pb2 as exp
person = exp.Person()
person.name = "zhouyong";
person.id = 29;
person.xxxx = "xxxxx-yyyyy";
person.strs.append('strs0');
person.strs.append('strs1');
print('person:\n{}'.format(person))
| 18.357143
| 35
| 0.669261
|
0f595f9858043d8a23ee4fce130b33541ea622da
| 1,444
|
py
|
Python
|
Hello_Spirograph_1.py
|
TechnoTanuki/Python_BMP
|
d6f7e7a4b74f7d6e8761d618c156d37c97726038
|
[
"MIT"
] | 3
|
2022-02-24T15:46:43.000Z
|
2022-03-30T13:17:03.000Z
|
Hello_Spirograph_1.py
|
TechnoTanuki/Python_BMP
|
d6f7e7a4b74f7d6e8761d618c156d37c97726038
|
[
"MIT"
] | null | null | null |
Hello_Spirograph_1.py
|
TechnoTanuki/Python_BMP
|
d6f7e7a4b74f7d6e8761d618c156d37c97726038
|
[
"MIT"
] | null | null | null |
notice = """
Hello Spirograph Demo
-----------------------------------
| Copyright 2022 by Joel C. Alcarez |
| [joelalcarez1975@gmail.com] |
|-----------------------------------|
| We make absolutely no warranty |
| of any kind, expressed or implied |
|-----------------------------------|
| This graphics library outputs |
| to a bitmap file. |
-----------------------------------
"""
from Python_BMP.BITMAPlib import(
newBMP,
pi,
centercoord,
plotlines,
spirographvert,
saveBMP
)
import subprocess as proc
from os import path
def main():
print(notice)
imgedt = 'mspaint' # replace with another editor if Unix
rootdir = path.dirname(__file__) # get path of this script
mx = my = 500 # bitmap size
bmp = newBMP(mx, my, 4) # 16 color
(x, y) = centercoord(bmp) # How to get center of the bitmap
file = 'HelloSpirograph1.bmp' # file name
d = 1/120
lim = pi * 10 + d
color = 10
plotlines(bmp,
spirographvert(x, y, 200, d , lim, 1, .3),
color) # connect the dots with lines
saveBMP(file, bmp) # save file
print('Saved to %s in %s\nAll done close %s to finish' % \
(file, rootdir, imgedt)) # tell user we are done
ret = proc.call(f'{imgedt} {file}')
if __name__=="__main__":
main()
| 29.469388
| 67
| 0.5
|
ffae89a1834d09b18c5a2a14295250989e1b402b
| 1,047
|
py
|
Python
|
gale/balance/request.py
|
lujinda/gale
|
d86ce7f691ac4a89ab491543e37476efdb1d7e59
|
[
"MIT"
] | 4
|
2016-02-01T02:26:01.000Z
|
2021-06-01T08:43:05.000Z
|
gale/balance/request.py
|
lujinda/gale
|
d86ce7f691ac4a89ab491543e37476efdb1d7e59
|
[
"MIT"
] | null | null | null |
gale/balance/request.py
|
lujinda/gale
|
d86ce7f691ac4a89ab491543e37476efdb1d7e59
|
[
"MIT"
] | 1
|
2016-01-08T11:00:11.000Z
|
2016-01-08T11:00:11.000Z
|
#!/usr/bin/env python
#coding:utf-8
# Author : tuxpy
# Email : q8886888@qq.com.com
# Last modified : 2015-11-27 15:01:54
# Filename : request.py
# Description :
from __future__ import print_function, unicode_literals
import socket
try:
import httplib # py2
except ImportError:
from http import client as httplib
import gale
import gevent
import gevent.monkey
__all__ = ['proxy_request']
gevent.monkey.patch_socket()
def proxy_request(request, upstream):
host, port = (upstream.split(':', 1) + [80])[:2]
_conn = httplib.HTTPConnection(host = host,
port = int(port))
request.headers['X-Forwarded-For'] = request.real_ip
request.headers['X-Real-IP'] = request.real_ip
request.headers['Host'] = upstream
request.headers['Connection'] = 'close'
try:
_conn.request(request.method, request.uri, body = request.body,
headers = request.headers)
except socket.error:
raise gale.e.WorkerOffline
response = _conn.getresponse()
return response
| 26.846154
| 71
| 0.674308
|
e1e662ece8f58983c9ce163402e660de08feafe3
| 2,105
|
py
|
Python
|
nifti_gridview/ngv_io/writer.py
|
alabamagan/NIfTI-gridview
|
79d6501f78374555b85d52248b380241db53d3ab
|
[
"MIT"
] | 2
|
2020-11-26T06:49:13.000Z
|
2020-11-26T15:40:20.000Z
|
nifti_gridview/ngv_io/writer.py
|
alabamagan/NIfTI-gridview
|
79d6501f78374555b85d52248b380241db53d3ab
|
[
"MIT"
] | null | null | null |
nifti_gridview/ngv_io/writer.py
|
alabamagan/NIfTI-gridview
|
79d6501f78374555b85d52248b380241db53d3ab
|
[
"MIT"
] | null | null | null |
import numpy as np
import os
import cv2
import gc
from ngv_model import draw_grid_wrapper, NGV_Logger
class writer(object):
def __init__(self, data_loader, seg_loaders, draw_worker, outdir, **kwargs):
assert isinstance(draw_worker, draw_grid_wrapper), "Incorrect type."
self._data_loader = data_loader
self._seg_loaders = seg_loaders
self._outdir = outdir
self._draw_worker = draw_worker
self._high_res = kwargs['high_res'] if 'high_res' in kwargs else False
self._keys_to_write = kwargs['keys_to_write'] if 'keys_to_write' in kwargs else None
def write(self):
        # check that the output dir is available
if not os.path.isdir(self._outdir):
os.makedirs(self._outdir, 0o755, exist_ok=True)
extension = '.png' if self._high_res else '.jpg'
for key, img in self._data_loader:
            # Skip the file if keys_to_write is set and this key is not in it
if not self._keys_to_write is None:
if not key in self._keys_to_write:
continue
tmp_config = {
'target_im': img,
'segment': []
}
for s_loader in self._seg_loaders:
tmp_config['segment'].append(s_loader[key])
self._draw_worker.update_config(tmp_config)
self._draw_worker.run()
tmp_img = self._draw_worker.get_result()
# if tmp_img.dtype == np.dtype('double') or tmp_img.dtype == np.dtype('float'):
# tmp_img = writer._float_im_to_RGB(tmp_img)
            out_fname = os.path.join(self._outdir, key.replace('.nii', '').replace('.gz','') + extension)
            cv2.imwrite(out_fname, cv2.cvtColor(tmp_img, cv2.COLOR_RGB2BGR))
del tmp_config['segment'], tmp_config['target_im'], tmp_config, tmp_img, img
gc.collect()
pass
@staticmethod
def _float_im_to_RGB(image):
if not isinstance(image, np.ndarray):
image = np.array(image)
image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
return image
| 33.951613
| 105
| 0.615202
|
fa25dd765e4e73154bacd109c7ff8183ec31e67e
| 3,150
|
py
|
Python
|
paraphrasing/word2vec_train_and_save.py
|
Keracorn/geulstagram
|
d7964abaece0e46c2bd4796fee2ba6d836140022
|
[
"MIT"
] | 5
|
2020-04-08T01:38:31.000Z
|
2022-03-20T06:09:27.000Z
|
paraphrasing/word2vec_train_and_save.py
|
woodongk/geulstagram
|
ae375c136191774f03ac9f0f6bb8de86be9c44d3
|
[
"MIT"
] | null | null | null |
paraphrasing/word2vec_train_and_save.py
|
woodongk/geulstagram
|
ae375c136191774f03ac9f0f6bb8de86be9c44d3
|
[
"MIT"
] | 12
|
2019-09-28T05:05:30.000Z
|
2020-03-16T13:42:33.000Z
|
import pandas as pd
import os
from konlpy.tag import Okt
## word2vec
import warnings
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
from gensim.models import word2vec
# load data
def load_data(PATH):
data = pd.read_csv(PATH, index_col=1)
data.columns = ['CONTENT_ID', 'USER_ID', 'Image_Content_txt',
'Image_Content_txt_result', 'Content_txt', 'Hashtags',
'Hashtags_result', 'Spell_Checked_Content']
data = data.dropna(axis=0)
data = data.reset_index(drop=True)
return data
def tokenize_text(text):
okt = Okt()
malist = okt.pos(text,norm = True, stem = True)
# [('์นซ์', 'Noun'), ('์', 'Josa'), ('์๋ค', 'Verb'), ('์์ง', 'Noun')]
r = []
tag = ["Noun","Verb" ]
try:
        for word in malist: # morphological analysis results
            if word[1] in tag: # keep only morphemes whose POS tag is listed in `tag`
                if not word[0] in r: # skip duplicates
r.append(word[0])
return r
except Exception as e:
print (e)
r.append(" ")
def tokenize_all(list2d_data):
data_tokens = []
for i, txt in enumerate(list2d_data):
print("'\r{0}%".format(i/len(list2d_data)*100), end='')
data_tokens.append(tokenize_text(txt))
print(data_tokens[:3], end='')
return data_tokens
# [['์นซ์', '์๋ค', '์์ง', '์๋ชธ', 'ํผ', '๋๋ค', '์ ', '๋บ๋ปฃ', '๊ฒ', '๋ฒ์ด์ง๋ค', '์', '๋ฌธ์ง๋ฅด๋ค', '์กฐ๋ง๊ฐ', '๊ผด๋ค', 'ํ๋ค', '์๋ค'],
# ['์๋ฌด', '์๋ค', '์์ฆ', '๋ฏธ์ฐ๋ค', '๊ฒ', '๋ฒ', '๋', '์ฌ์ด'],
# ['์ด์ ', '์ด์น', '๊ฒ', '์ค๋', '๋ณด๋ฆ', '์ด๊ฒ ๋ค', '๋ง', '๋', '๋ฐคํ๋', '๋น', '๋ฌ', '๋ฒ์จ', '๊ฐ๋', '๋ณด๋ค', '๊ฐ์ฅ', 'ํฌ๋ค']]
def comment_count(token_data):
unique_comment_tokenized = [list(i) for i in set(tuple(i) for i in token_data)]
word_dic = {}
# word count
for words in unique_comment_tokenized:
for word in words:
if not (word in word_dic):
word_dic[word] = 0
word_dic[word] += 1
keys = sorted(word_dic.items(), key=lambda x: x[1], reverse=True)
    for word, count in keys[:100]: # take the top 100 words
print("{0}({1}) ".format(word, count), end="")
    # flatten the nested token lists
from itertools import chain
words = set(chain(*unique_comment_tokenized))
n_vocab = len(words)
print("\n")
print("Total Vocab: ", n_vocab)
return keys, n_vocab
if __name__ == '__main__':
print("๋ฐ์ดํฐ ๋ถ๋ฌ์ค๊ธฐ")
data = load_data('Final_Data_v2_Cleansing_v2_spell_checked.csv')
print("๋ฐ์ดํฐ ๋ถ๋ฌ์ค๊ธฐ ์๋ฃ")
# tokenize total data
print("๋ฐ์ดํฐ ํ ํฐํ ํ๊ธฐ")
data_tokens = tokenize_all(data['Spell_Checked_Content'])
print("๋ฐ์ดํฐ ํ ํฐํ ์๋ฃ")
# count unique token
#K = comment_count(data_tokens)
# train and save model
config = {
        'min_count': 2, # ignore words that appear fewer than this many times
        'size': 300, # dimensionality of the embedding vectors
        'sg': 1, # 0 uses CBOW, 1 uses skip-gram
        'batch_words': 10000, # number of words read per batch while building the vocabulary
        'iter': 1000, # number of training iterations, similar to epochs in deep learning
        'workers': 4, # CPU threads
}
    print('Building and training the word2vec model')
model = word2vec.Word2Vec(data_tokens, **config) # skip-gram model
model.save("paraphrasing/gamsung_txt.model")
    print('word2vec model saved')
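    # Usage sketch: reload the saved model and query neighbours of a frequent
    # token. Uses the gensim 3.x API (matching the `size`/`iter` arguments
    # above) and assumes the save step has already produced the file.
    reloaded = word2vec.Word2Vec.load("paraphrasing/gamsung_txt.model")
    probe_word = reloaded.wv.index2word[0]   # most frequent token in the vocabulary
    print(probe_word, reloaded.wv.most_similar(probe_word, topn=5))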
| 30
| 105
| 0.576825
|
71ab996580bd6c0266b7cedcba3e5442cc189578
| 346
|
py
|
Python
|
Getting_Started_With_Raspberry_Pi_Pico/built_in_led_on_off/code.py
|
claycooper/Adafruit_Learning_System_Guides
|
890431bd4b9df929bc601e5886c2a735d89814f9
|
[
"MIT"
] | null | null | null |
Getting_Started_With_Raspberry_Pi_Pico/built_in_led_on_off/code.py
|
claycooper/Adafruit_Learning_System_Guides
|
890431bd4b9df929bc601e5886c2a735d89814f9
|
[
"MIT"
] | null | null | null |
Getting_Started_With_Raspberry_Pi_Pico/built_in_led_on_off/code.py
|
claycooper/Adafruit_Learning_System_Guides
|
890431bd4b9df929bc601e5886c2a735d89814f9
|
[
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: 2021 Kattni Rembor for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""Example for Pico. Turns the built-in LED on and off with no delay."""
import board
import digitalio
led = digitalio.DigitalInOut(board.LED)
led.direction = digitalio.Direction.OUTPUT
while True:
led.value = True
led.value = False
| 23.066667
| 72
| 0.754335
|
03cc049b4d3ef761b71dac17669c843a2e07a619
| 937
|
py
|
Python
|
config/parameters.py
|
disruptive-technologies/cold-storage-an
|
b6d740e4f90165ed0c771b83a2869e4c8793c09a
|
[
"MIT"
] | null | null | null |
config/parameters.py
|
disruptive-technologies/cold-storage-an
|
b6d740e4f90165ed0c771b83a2869e4c8793c09a
|
[
"MIT"
] | null | null | null |
config/parameters.py
|
disruptive-technologies/cold-storage-an
|
b6d740e4f90165ed0c771b83a2869e4c8793c09a
|
[
"MIT"
] | null | null | null |
# delay
H_DELAY = 3 # hours of historic data used in thresholds (rolling median halfwidth)
S_DELAY = 60*60*H_DELAY # seconds of historic data used in thresholds
# robust sampling
S_ROBUST_CYCLE = 60*60*16 # period length for robust statistics calculation in seconds
S_ROBUST_WIDTH = 60*60*24 # window width for robust statistics calculation in seconds
N_ROBUST_DAYS = 5 # number of days back in time used for robust statistics calculation
# bounds / threshold
N_ROBUST_IN_BOUNDS = int(((60*60*24) / S_ROBUST_CYCLE) * N_ROBUST_DAYS) # number of robust windows when calculating bounds
MMAD = 1 # mad modifier
BOUND_MINVAL = 0 # minimum value allowed in bounds
STORAGE_MAXTEMP = 4 # critical temperature
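# Worked check of the derived window count above (the int() truncation is easy
# to miss): a 16-hour cycle gives 86400 / 57600 = 1.5 robust windows per day,
# so 5 days yield int(1.5 * 5) = int(7.5) = 7 windows.
assert N_ROBUST_IN_BOUNDS == 7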
| 55.117647
| 126
| 0.589114
|