Dataset schema (one row per source file):

| column | type | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
hexsha: 9639b6291e613c38380c6ef12ff591e7e7f06f5d | size: 2,419 | ext: py | lang: Python
max_stars: challengerTest/api_init/api_user.py | andrequeiroz2/challengerAPI @ df906cdd900874bb243e5d33cf745332f1fe556e | licenses: ["MIT"] | count: null | events: null / null
max_issues: challengerTest/api_init/api_user.py | andrequeiroz2/challengerAPI @ df906cdd900874bb243e5d33cf745332f1fe556e | licenses: ["MIT"] | count: null | events: null / null
max_forks: challengerTest/api_init/api_user.py | andrequeiroz2/challengerAPI @ df906cdd900874bb243e5d33cf745332f1fe556e | licenses: ["MIT"] | count: 1 | events: 2021-03-04T18:25:51.000Z / 2021-03-04T18:25:51.000Z
content:
from flask import request, Response
from flask_restful import Resource
from model.model_user import User
from controller.controller_user import (
one_user,
all_user,
create_user,
update_user,
delete_user,
)
from message.msg import ADD_USER_SUCCESS, UPDATE_USER_SUCCESS, DELETE_USER_SUCCESS
from mongoengine.errors import (
FieldDoesNotExist,
NotUniqueError,
DoesNotExist,
ValidationError,
InvalidQueryError,
)
from error.errors import (
SchemaValidationError,
UserAlreadyExistsError,
InternalServerError,
UpdatingUserError,
UserNotExistsError,
UserNotRegistered,
)
class UserApi(Resource):
def get(self):
users = all_user()
        if (len(users) == 2) or (users is None):  # "[]" (an empty JSON array) has length 2
raise UserNotRegistered
else:
return Response(users, mimetype="application/json", status=200)
def post(self):
try:
body = request.get_json()
create_user(**body)
return {"msg": ADD_USER_SUCCESS, "user_name": body["user_name"]}, 200
except FieldDoesNotExist:
raise SchemaValidationError
except ValidationError:
raise SchemaValidationError
except NotUniqueError:
raise UserAlreadyExistsError
except Exception:
raise InternalServerError
class UsersApi(Resource):
def get(self, name):
try:
user = one_user(name)
return Response(user, mimetype="application/json", status=200)
except DoesNotExist:
raise UserNotExistsError
except Exception:
raise InternalServerError
def put(self, name):
try:
body = request.get_json()
update_user(name, body)
return {"msg": UPDATE_USER_SUCCESS}, 200
except InvalidQueryError:
raise SchemaValidationError
except DoesNotExist:
raise UpdatingUserError
except NotUniqueError:
raise UserAlreadyExistsError
except Exception:
raise InternalServerError
def delete(self, name):
try:
delete_user(name)
return {"msg": DELETE_USER_SUCCESS}, 200
except InvalidQueryError:
raise SchemaValidationError
except DoesNotExist:
raise UpdatingUserError
except Exception:
raise InternalServerError
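The record above ends with the resource classes themselves; how they are mounted on URLs is not part of this file. As a minimal, hypothetical sketch (the app setup, import path, and URL rules are assumptions, not taken from the repository), the two resources could be registered with Flask-RESTful like this:

```python
# Hypothetical wiring sketch; module path and URL rules are assumptions.
from flask import Flask
from flask_restful import Api

from api_init.api_user import UserApi, UsersApi  # assumed import path

app = Flask(__name__)
api = Api(app)

# UserApi.get/post handle the collection, UsersApi.get/put/delete a single user.
api.add_resource(UserApi, "/user")
api.add_resource(UsersApi, "/user/<string:name>")

if __name__ == "__main__":
    app.run(debug=True)
```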
avg_line_length: 28.127907 | max_line_length: 82 | alphanum_fraction: 0.639934

hexsha: 5cb1620854f6ca05f56a9f509d9df205a2b1f674 | size: 16,597 | ext: py | lang: Python
max_stars: google/cloud/aiplatform_v1/services/model_service/pagers.py | dizcology/python-aiplatform @ 1a135775966c8a2303ded529eba514dcf9db7205 | licenses: ["Apache-2.0"] | count: 2 | events: 2021-10-02T02:25:44.000Z / 2021-11-17T10:35:01.000Z
max_issues: google/cloud/aiplatform_v1/services/model_service/pagers.py | pompipo/python-aiplatform @ 3612b05c62dfb46822cd2c1798fd47349dba33bc | licenses: ["Apache-2.0"] | count: 1 | events: 2021-03-02T18:25:00.000Z / 2021-03-02T18:25:00.000Z
max_forks: google/cloud/aiplatform_v1/services/model_service/pagers.py | pompipo/python-aiplatform @ 3612b05c62dfb46822cd2c1798fd47349dba33bc | licenses: ["Apache-2.0"] | count: null | events: null / null
content:
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.aiplatform_v1.types import model
from google.cloud.aiplatform_v1.types import model_evaluation
from google.cloud.aiplatform_v1.types import model_evaluation_slice
from google.cloud.aiplatform_v1.types import model_service
class ListModelsPager:
"""A pager for iterating through ``list_models`` requests.
This class thinly wraps an initial
:class:`google.cloud.aiplatform_v1.types.ListModelsResponse` object, and
provides an ``__iter__`` method to iterate through its
``models`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListModels`` requests and continue to iterate
through the ``models`` field on the
corresponding responses.
All the usual :class:`google.cloud.aiplatform_v1.types.ListModelsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., model_service.ListModelsResponse],
request: model_service.ListModelsRequest,
response: model_service.ListModelsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.aiplatform_v1.types.ListModelsRequest):
The initial request object.
response (google.cloud.aiplatform_v1.types.ListModelsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = model_service.ListModelsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[model_service.ListModelsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[model.Model]:
for page in self.pages:
yield from page.models
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListModelsAsyncPager:
"""A pager for iterating through ``list_models`` requests.
This class thinly wraps an initial
:class:`google.cloud.aiplatform_v1.types.ListModelsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``models`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListModels`` requests and continue to iterate
through the ``models`` field on the
corresponding responses.
All the usual :class:`google.cloud.aiplatform_v1.types.ListModelsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[model_service.ListModelsResponse]],
request: model_service.ListModelsRequest,
response: model_service.ListModelsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.aiplatform_v1.types.ListModelsRequest):
The initial request object.
response (google.cloud.aiplatform_v1.types.ListModelsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = model_service.ListModelsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[model_service.ListModelsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[model.Model]:
async def async_generator():
async for page in self.pages:
for response in page.models:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListModelEvaluationsPager:
"""A pager for iterating through ``list_model_evaluations`` requests.
This class thinly wraps an initial
:class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` object, and
provides an ``__iter__`` method to iterate through its
``model_evaluations`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListModelEvaluations`` requests and continue to iterate
through the ``model_evaluations`` field on the
corresponding responses.
All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., model_service.ListModelEvaluationsResponse],
request: model_service.ListModelEvaluationsRequest,
response: model_service.ListModelEvaluationsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest):
The initial request object.
response (google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = model_service.ListModelEvaluationsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[model_service.ListModelEvaluationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[model_evaluation.ModelEvaluation]:
for page in self.pages:
yield from page.model_evaluations
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListModelEvaluationsAsyncPager:
"""A pager for iterating through ``list_model_evaluations`` requests.
This class thinly wraps an initial
:class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``model_evaluations`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListModelEvaluations`` requests and continue to iterate
through the ``model_evaluations`` field on the
corresponding responses.
All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[model_service.ListModelEvaluationsResponse]],
request: model_service.ListModelEvaluationsRequest,
response: model_service.ListModelEvaluationsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.aiplatform_v1.types.ListModelEvaluationsRequest):
The initial request object.
response (google.cloud.aiplatform_v1.types.ListModelEvaluationsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = model_service.ListModelEvaluationsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[model_service.ListModelEvaluationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[model_evaluation.ModelEvaluation]:
async def async_generator():
async for page in self.pages:
for response in page.model_evaluations:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListModelEvaluationSlicesPager:
"""A pager for iterating through ``list_model_evaluation_slices`` requests.
This class thinly wraps an initial
:class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` object, and
provides an ``__iter__`` method to iterate through its
``model_evaluation_slices`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListModelEvaluationSlices`` requests and continue to iterate
through the ``model_evaluation_slices`` field on the
corresponding responses.
All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., model_service.ListModelEvaluationSlicesResponse],
request: model_service.ListModelEvaluationSlicesRequest,
response: model_service.ListModelEvaluationSlicesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest):
The initial request object.
response (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = model_service.ListModelEvaluationSlicesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[model_service.ListModelEvaluationSlicesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[model_evaluation_slice.ModelEvaluationSlice]:
for page in self.pages:
yield from page.model_evaluation_slices
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListModelEvaluationSlicesAsyncPager:
"""A pager for iterating through ``list_model_evaluation_slices`` requests.
This class thinly wraps an initial
:class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``model_evaluation_slices`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListModelEvaluationSlices`` requests and continue to iterate
through the ``model_evaluation_slices`` field on the
corresponding responses.
All the usual :class:`google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[
..., Awaitable[model_service.ListModelEvaluationSlicesResponse]
],
request: model_service.ListModelEvaluationSlicesRequest,
response: model_service.ListModelEvaluationSlicesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesRequest):
The initial request object.
response (google.cloud.aiplatform_v1.types.ListModelEvaluationSlicesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = model_service.ListModelEvaluationSlicesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(
self,
) -> AsyncIterable[model_service.ListModelEvaluationSlicesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[model_evaluation_slice.ModelEvaluationSlice]:
async def async_generator():
async for page in self.pages:
for response in page.model_evaluation_slices:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
avg_line_length: 39.610979 | max_line_length: 93 | alphanum_fraction: 0.682412

hexsha: 3a6f8e589ff6cc6d7f402421acc3296fa52f91bc | size: 14,842 | ext: py | lang: Python
max_stars: fibonacci21decomp.py | gavin4d/Fibonacci-Magic @ 3e5c57e6ac6a190e5e9e6d62e34d2d8621ef47cc | licenses: ["CC0-1.0"] | count: 1 | events: 2021-12-28T19:10:58.000Z / 2021-12-28T19:10:58.000Z
max_issues: fibonacci21decomp.py | gavin4d/Fibonacci-Magic @ 3e5c57e6ac6a190e5e9e6d62e34d2d8621ef47cc | licenses: ["CC0-1.0"] | count: null | events: null / null
max_forks: fibonacci21decomp.py | gavin4d/Fibonacci-Magic @ 3e5c57e6ac6a190e5e9e6d62e34d2d8621ef47cc | licenses: ["CC0-1.0"] | count: null | events: null / null
content:
from PIL.Image import FASTOCTREE
from manim import *
from functions import *
import color
def moveEquation(equations,loop,baseText,t1,t2,t3,t4,self):  # module-level helper; the calling Scene passes itself in as 'self'
equations[loop].add(baseText.copy(), t1[loop].copy(), t2[7-loop].copy(), t3[loop].copy(), t4[7-loop].copy())
self.play(equations[loop].animate.shift(LEFT * 6 + UP * (4.75 - loop * 0.5)))
return loop + 1
class DecompDot(Scene):
def construct(self):
loop = 0
fibo = [0,1,1,2,3,5,8,13,21]
self.camera.background_color = color.BACKGROUND
dots = [Dot().set_color(color.RED).move_to(UP * 0.25 * (10-i) + RIGHT * 3) for i in range(0,21)]
baseText = Text('× + ×').scale(0.5).set_color(BLACK).move_to(DOWN * 3 + RIGHT * 3)
name = Text('Fibonacci Decomposition').set_color(BLACK).move_to(UP * 3)
t1 = VGroup()
t2 = VGroup()
t3 = VGroup()
t4 = VGroup()
for n in range(0,8):
t1.add(Text(str(fibo[8-n])).set_color(color.RED))
t2.add(Text(str(fibo[8-n])).set_color(color.RED))
t3.add(Text(str(fibo[8-n-1])).set_color(color.BLUE))
t4.add(Text(str(fibo[8-n-1])).set_color(color.BLUE))
t1.scale(0.5).arrange(DOWN).move_to(LEFT * (1.15 - 3) + DOWN * (1.75 + 3))
t2.scale(0.5).arrange(DOWN).move_to(LEFT * (0.4 - 3) + UP * (1.75 - 3))
t3.scale(0.5).arrange(DOWN).move_to(RIGHT * (0.4 + 3) + DOWN * (1.75 + 3))
t4.scale(0.5).arrange(DOWN).move_to(RIGHT * (1.15 + 3) + UP * (1.75 - 3))
self.add(t1, t2, t3, t4)
numberhidebox1 = Square().scale(2).move_to(UP * (2.25 - 3) + RIGHT * 3.5)
numberhidebox1.set_fill(color.BACKGROUND, opacity=1).set_color(color.BACKGROUND)
numberhidebox2 = Square().scale(2).move_to(DOWN * 5.25 + RIGHT * 3.5)
numberhidebox2.set_fill(color.BACKGROUND, opacity=1).set_color(color.BACKGROUND)
self.add(numberhidebox1, numberhidebox2)
decompView = Rectangle(color=color.YELLOW, width=3.5, height=4.5).move_to(LEFT * 3)
equations = [VGroup() for i in range(0,8)]
self.play(FadeIn(decompView), FadeIn(baseText), FadeIn(t1), FadeIn(t2), FadeIn(t3), FadeIn(t4), *[GrowFromCenter(dots[i]) for i in range(0,21)])
self.wait(1)
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
group1 = VGroup()
group1.add(*[dots[i] for i in range(0,8)])
group2 = VGroup()
group2.add(*[dots[i] for i in range(8,21)])
self.play(group1.animate.set_color(color.BLUE).shift(RIGHT * 0.125), group2.animate.shift(LEFT * 0.125), t1.animate.shift(UP * 0.5), t2.animate.shift(DOWN * 0.5), t3.animate.shift(UP * 0.5), t4.animate.shift(DOWN * 0.5))
self.play(group1.animate.shift(DOWN * 0.25 * 13))
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
group1.remove(*[dots[i] for i in range(0,8)])
group2.add(*[dots[i] for i in range(0,8)])
group2.remove(*[dots[i] for i in range(8,13)])
group1.add(*[dots[i] for i in range(8,13)])
self.play(group1.animate.set_color(color.BLUE).shift(RIGHT * 0.125 * 3), group2.animate.set_color(color.RED).shift(LEFT * 0.125), t1.animate.shift(UP * 0.5), t2.animate.shift(DOWN * 0.5), t3.animate.shift(UP * 0.5), t4.animate.shift(DOWN * 0.5))
self.play(group1.animate.shift(DOWN * 0.25 * 8))
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
group1.remove(*[dots[i] for i in range(8,13)])
group2.add(*[dots[i] for i in range(8,13)])
group2.remove(*[dots[i] for i in [0,1,2,13,14,15]])
group1.add(*[dots[i] for i in [0,1,2,13,14,15]])
self.play(group1.animate.set_color(color.BLUE).shift(RIGHT * 0.125 * 4), group2.animate.set_color(color.RED).shift(LEFT * 0.25), t1.animate.shift(UP * 0.5), t2.animate.shift(DOWN * 0.5), t3.animate.shift(UP * 0.5), t4.animate.shift(DOWN * 0.5))
self.play(group1.animate.shift(DOWN * 0.25 * 5))
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
group1.remove(*[dots[i] for i in [0,1,2,13,14,15]])
group2.add(*[dots[i] for i in [0,1,2,13,14,15]])
group2.remove(*[dots[i] for i in [16,17,3,4,8,9]])
group1.add(*[dots[i] for i in [16,17,3,4,8,9]])
self.play(group1.animate.set_color(color.BLUE).shift(RIGHT * 0.125 * 7), group2.animate.set_color(color.RED).shift(LEFT * 0.125 * 3), t1.animate.shift(UP * 0.5), t2.animate.shift(DOWN * 0.5), t3.animate.shift(UP * 0.5), t4.animate.shift(DOWN * 0.5))
self.play(group1.animate.shift(DOWN * 0.25 * 3))
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
group1.remove(*[dots[i] for i in [16,17,3,4,8,9]])
group2.add(*[dots[i] for i in [16,17,3,4,8,9]])
group2.remove(*[dots[i] for i in [18,5,10,0,13]])
group1.add(*[dots[i] for i in [18,5,10,0,13]])
self.play(group1.animate.set_color(color.BLUE).shift(RIGHT * 0.125 * 11), group2.animate.set_color(color.RED).shift(LEFT * 0.125 * 5), t1.animate.shift(UP * 0.5), t2.animate.shift(DOWN * 0.5), t3.animate.shift(UP * 0.5), t4.animate.shift(DOWN * 0.5))
self.play(group1.animate.shift(DOWN * 0.25 * 2))
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
group1.remove(*[dots[i] for i in [18,5,10,0,13]])
group2.add(*[dots[i] for i in [18,5,10,0,13]])
group2.remove(*[dots[i] for i in [19,6,11,1,14,16,3,8]])
group1.add(*[dots[i] for i in [19,6,11,1,14,16,3,8]])
self.play(group1.animate.set_color(color.BLUE).shift(RIGHT * 0.125 * 18), group2.animate.set_color(color.RED).shift(LEFT * 0.125 * 8), t1.animate.shift(UP * 0.5), t2.animate.shift(DOWN * 0.5), t3.animate.shift(UP * 0.5), t4.animate.shift(DOWN * 0.5))
self.play(group1.animate.shift(DOWN * 0.25))
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
group1.remove(*[dots[i] for i in [19,6,11,1,14,16,3,8]])
group2.add(*[dots[i] for i in [19,6,11,1,14,16,3,8]])
self.play(group2.animate.set_color(color.RED), t1.animate.shift(UP * 0.5), t2.animate.shift(DOWN * 0.5), t3.animate.shift(UP * 0.5), t4.animate.shift(DOWN * 0.5))
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
self.play(FadeIn(name))
self.wait(3)
self.play(FadeOut(baseText, t1[7], t2[0], t3[7], t4[0], *dots, name))
self.play(*[equations[i].animate.shift(RIGHT * 3) for i in range(0,8)], decompView.animate.shift(RIGHT * 3))
self.play(FadeOut(*[equations[i][0] for i in range(0,8)]))
class DecompDotLongEnd (Scene):
def construct(self):
loop = 0
fibo = [0,1,1,2,3,5,8,13,21]
self.camera.background_color = color.BACKGROUND
dots = [Dot().set_color(color.RED).move_to(UP * 0.25 * (10-i) + RIGHT * 3) for i in range(0,21)]
baseText = Text('× + ×').scale(0.5).set_color(BLACK).move_to(DOWN * 3 + RIGHT * 3)
name = Text('Fibonacci Decomposition').set_color(BLACK).move_to(UP * 3)
t1 = VGroup()
t2 = VGroup()
t3 = VGroup()
t4 = VGroup()
for n in range(0,8):
t1.add(Text(str(fibo[8-n])).set_color(color.RED))
t2.add(Text(str(fibo[8-n])).set_color(color.RED))
t3.add(Text(str(fibo[8-n-1])).set_color(color.BLUE))
t4.add(Text(str(fibo[8-n-1])).set_color(color.BLUE))
t1.scale(0.5).arrange(DOWN).move_to(LEFT * (1.15 - 3) + DOWN * (1.75 + 3))
t2.scale(0.5).arrange(DOWN).move_to(LEFT * (0.4 - 3) + UP * (1.75 - 3))
t3.scale(0.5).arrange(DOWN).move_to(RIGHT * (0.4 + 3) + DOWN * (1.75 + 3))
t4.scale(0.5).arrange(DOWN).move_to(RIGHT * (1.15 + 3) + UP * (1.75 - 3))
self.add(t1, t2, t3, t4)
numberhidebox1 = Square().scale(2).move_to(UP * (2.25 - 3) + RIGHT * 3.5)
numberhidebox1.set_fill(color.BACKGROUND, opacity=1).set_color(color.BACKGROUND)
numberhidebox2 = Square().scale(2).move_to(DOWN * 5.25 + RIGHT * 3.5)
numberhidebox2.set_fill(color.BACKGROUND, opacity=1).set_color(color.BACKGROUND)
self.add(numberhidebox1, numberhidebox2)
decompView = Rectangle(color=color.YELLOW, width=3.5, height=4.5).move_to(LEFT * 3)
equations = [VGroup() for i in range(0,8)]
self.play(FadeIn(decompView), FadeIn(baseText), FadeIn(t1), FadeIn(t2), FadeIn(t3), FadeIn(t4), *[GrowFromCenter(dots[i]) for i in range(0,21)])
self.wait(1)
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
group1 = VGroup()
group1.add(*[dots[i] for i in range(0,8)])
group2 = VGroup()
group2.add(*[dots[i] for i in range(8,21)])
self.play(group1.animate.set_color(color.BLUE).shift(RIGHT * 0.125), group2.animate.shift(LEFT * 0.125), t1.animate.shift(UP * 0.5), t2.animate.shift(DOWN * 0.5), t3.animate.shift(UP * 0.5), t4.animate.shift(DOWN * 0.5))
self.play(group1.animate.shift(DOWN * 0.25 * 13))
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
group1.remove(*[dots[i] for i in range(0,8)])
group2.add(*[dots[i] for i in range(0,8)])
group2.remove(*[dots[i] for i in range(8,13)])
group1.add(*[dots[i] for i in range(8,13)])
self.play(group1.animate.set_color(color.BLUE).shift(RIGHT * 0.125 * 3), group2.animate.set_color(color.RED).shift(LEFT * 0.125), t1.animate.shift(UP * 0.5), t2.animate.shift(DOWN * 0.5), t3.animate.shift(UP * 0.5), t4.animate.shift(DOWN * 0.5))
self.play(group1.animate.shift(DOWN * 0.25 * 8))
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
group1.remove(*[dots[i] for i in range(8,13)])
group2.add(*[dots[i] for i in range(8,13)])
group2.remove(*[dots[i] for i in [0,1,2,13,14,15]])
group1.add(*[dots[i] for i in [0,1,2,13,14,15]])
self.play(group1.animate.set_color(color.BLUE).shift(RIGHT * 0.125 * 4), group2.animate.set_color(color.RED).shift(LEFT * 0.25), t1.animate.shift(UP * 0.5), t2.animate.shift(DOWN * 0.5), t3.animate.shift(UP * 0.5), t4.animate.shift(DOWN * 0.5))
self.play(group1.animate.shift(DOWN * 0.25 * 5))
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
group1.remove(*[dots[i] for i in [0,1,2,13,14,15]])
group2.add(*[dots[i] for i in [0,1,2,13,14,15]])
group2.remove(*[dots[i] for i in [16,17,3,4,8,9]])
group1.add(*[dots[i] for i in [16,17,3,4,8,9]])
self.play(group1.animate.set_color(color.BLUE).shift(RIGHT * 0.125 * 7), group2.animate.set_color(color.RED).shift(LEFT * 0.125 * 3), t1.animate.shift(UP * 0.5), t2.animate.shift(DOWN * 0.5), t3.animate.shift(UP * 0.5), t4.animate.shift(DOWN * 0.5))
self.play(group1.animate.shift(DOWN * 0.25 * 3))
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
group1.remove(*[dots[i] for i in [16,17,3,4,8,9]])
group2.add(*[dots[i] for i in [16,17,3,4,8,9]])
group2.remove(*[dots[i] for i in [18,5,10,0,13]])
group1.add(*[dots[i] for i in [18,5,10,0,13]])
self.play(group1.animate.set_color(color.BLUE).shift(RIGHT * 0.125 * 11), group2.animate.set_color(color.RED).shift(LEFT * 0.125 * 5), t1.animate.shift(UP * 0.5), t2.animate.shift(DOWN * 0.5), t3.animate.shift(UP * 0.5), t4.animate.shift(DOWN * 0.5))
self.play(group1.animate.shift(DOWN * 0.25 * 2))
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
group1.remove(*[dots[i] for i in [18,5,10,0,13]])
group2.add(*[dots[i] for i in [18,5,10,0,13]])
group2.remove(*[dots[i] for i in [19,6,11,1,14,16,3,8]])
group1.add(*[dots[i] for i in [19,6,11,1,14,16,3,8]])
self.play(group1.animate.set_color(color.BLUE).shift(RIGHT * 0.125 * 18), group2.animate.set_color(color.RED).shift(LEFT * 0.125 * 8), t1.animate.shift(UP * 0.5), t2.animate.shift(DOWN * 0.5), t3.animate.shift(UP * 0.5), t4.animate.shift(DOWN * 0.5))
self.play(group1.animate.shift(DOWN * 0.25))
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
group1.remove(*[dots[i] for i in [19,6,11,1,14,16,3,8]])
group2.add(*[dots[i] for i in [19,6,11,1,14,16,3,8]])
self.play(group2.animate.set_color(color.RED), t1.animate.shift(UP * 0.5), t2.animate.shift(DOWN * 0.5), t3.animate.shift(UP * 0.5), t4.animate.shift(DOWN * 0.5))
loop = moveEquation(equations,loop,baseText,t1,t2,t3,t4,self)
self.play(FadeIn(name))
self.wait(11)
self.play(FadeIn(Square().scale(10).set_fill(color.BACKGROUND).set_opacity(1)))
class Decomp(Scene):
def construct(self):
fibo = fiboarray_extended(-18, 18)
self.camera.background_color = color.BACKGROUND
fibonacci = VGroup(*[Text(str(fibo[i])).set_color(BLACK) for i in range(0, 35)]).arrange(RIGHT * 4).move_to(UP * 2.5 + LEFT * 10)
baseText = Text('× + × = 21').scale(0.5).set_color(BLACK).move_to(RIGHT * 1.075)
decompView = Rectangle(color=color.YELLOW, width=3.5, height=4.5).move_to(ORIGIN)
t1 = VGroup()
t2 = VGroup()
t3 = VGroup()
t4 = VGroup()
for n in range(0,35):
t1.add(Text(str(fibo[-n + 7 + 1])).set_color(color.RED))
t2.add(Text(str(fibo[n+1])).set_color(color.RED))
t3.add(Text(str(fibo[-n + 7])).set_color(color.BLUE))
t4.add(Text(str(fibo[n])).set_color(color.BLUE))
t1.scale(0.5).arrange(DOWN).move_to(LEFT * (1.15))
t2.scale(0.5).arrange(DOWN).move_to(LEFT * (0.4))
t3.scale(0.5).arrange(DOWN).move_to(RIGHT * (0.4))
t4.scale(0.5).arrange(DOWN).move_to(RIGHT * (1.15))
numbers = VGroup(t1, t2, t3, t4)
numbers.shift(UP * 2.25)
numberhideboxes = VGroup(Square().scale(2).move_to(UP * (4)).set_fill(color.BACKGROUND, opacity=1).set_color(color.BACKGROUND), Square().scale(2).move_to(DOWN * 4).set_fill(color.BACKGROUND, opacity=1).set_color(color.BACKGROUND))
self.add(numbers, numberhideboxes, decompView)
self.wait(1.8)
self.play(numbers.animate.shift(DOWN), decompView.animate.shift(DOWN), numberhideboxes.animate.shift(DOWN), FadeIn(fibonacci))
self.wait(1)
self.play(fibonacci.animate.shift(RIGHT * 14), run_time=5)
self.wait(3)
self.play(FadeOut(fibonacci), numbers.animate.shift(UP * 0.75), decompView.animate.shift(UP).stretch_to_fit_height(6), numberhideboxes[0].animate.shift(UP * 2), Write(baseText))
self.wait(1)
for i in range(0,7):
self.play(numbers.animate.shift(UP * 0.5), run_time=0.75)
self.wait(2)
self.play(FadeIn(Square().scale(10).set_fill(color.BACKGROUND).set_opacity(1)))
avg_line_length: 50.482993 | max_line_length: 258 | alphanum_fraction: 0.607667

hexsha: 00c9b8d879ebeab599baadb47a20cadf2a76340b | size: 2,174 | ext: py | lang: Python
max_stars: tests/test_checker.py | migzpogi/PokerCalculator @ 3005e24552e465729f2aab7efea8bbbe831e736b | licenses: ["MIT"] | count: 4 | events: 2020-07-27T02:37:56.000Z / 2021-05-27T08:33:01.000Z
max_issues: tests/test_checker.py | migzpogi/PokerCalculator @ 3005e24552e465729f2aab7efea8bbbe831e736b | licenses: ["MIT"] | count: 1 | events: 2018-09-26T03:04:25.000Z / 2018-09-26T03:30:43.000Z
max_forks: tests/test_checker.py | migzpogi/PokerCalculator @ 3005e24552e465729f2aab7efea8bbbe831e736b | licenses: ["MIT"] | count: 2 | events: 2020-10-03T07:28:52.000Z / 2021-11-16T14:36:16.000Z
content:
import unittest
from lib.checkers import input_type_checker, card_checker, convert_case, is_list_unique
class TestCheckers(unittest.TestCase):
"""
Unit tests for checker methods
"""
def test_valid_input_true(self):
"""
Checks if the input is a list of len > 2 for board and len == 2 for hand
:return:
"""
board = ['As', 'Ac', 'Ad']
hand = ['Ah', 'Kd']
self.assertTrue(input_type_checker(board, hand))
def test_valid_input_false_board_wrong(self):
"""
Checks if the method can detect wrong input
:return:
"""
board = ['As', 'Ac']
hand = ['Ah', 'Kd']
self.assertFalse(input_type_checker(board, hand))
def test_valid_input_false_hand_wrong(self):
"""
Checks if the method can detect wrong input
:return:
"""
board = ['As', 'Ac', 'Ad']
hand = ['Ah']
self.assertFalse(input_type_checker(board, hand))
def test_convert_case(self):
"""
        Checks if casing is properly converted
:return:
"""
board = ['aS', 'ad', 'Ac']
self.assertEqual(['As', 'Ad', 'Ac'],
convert_case(board))
def test_valid_card_true(self):
"""
Checks if valid cards are passed
:return:
"""
list_of_cards = ['aS', 'Ac']
self.assertTrue(card_checker(list_of_cards))
def test_valid_card_false(self):
"""
Checks if invalid cards are caught
:return:
"""
list_of_cards = ['Z3', 'Ax']
self.assertFalse(card_checker(list_of_cards))
def test_is_list_unique(self):
"""
Checks if the list is unique
:return:
"""
list_1 = [1,2,3,4,5]
list_2 = [1,2,2,3,3,4,5]
list_3 = ['Ac', 'Ah', 'Ad', 'As']
list_4 = ['Ac', 'Ac', 'Ah']
self.assertTrue(is_list_unique(list_1))
self.assertTrue(is_list_unique(list_3))
self.assertFalse(is_list_unique(list_2))
self.assertFalse(is_list_unique(list_4))
if __name__ == '__main__':
unittest.main()
avg_line_length: 23.376344 | max_line_length: 87 | alphanum_fraction: 0.551518

hexsha: 319af097a45bd7932ffa38c9ab9991ba6a757dda | size: 4,025 | ext: py | lang: Python
max_stars: tests/test_generic.py | vsaase/dicom2nifti @ 6722420a7673d36437e4358ce3cb2a7c77c91820 | licenses: ["MIT"] | count: null | events: null / null
max_issues: tests/test_generic.py | vsaase/dicom2nifti @ 6722420a7673d36437e4358ce3cb2a7c77c91820 | licenses: ["MIT"] | count: null | events: null / null
max_forks: tests/test_generic.py | vsaase/dicom2nifti @ 6722420a7673d36437e4358ce3cb2a7c77c91820 | licenses: ["MIT"] | count: null | events: null / null
content:
# -*- coding: utf-8 -*-
"""
dicom2nifti
@author: abrys
"""
import os
import random
import shutil
import string
import tempfile
import unittest
import nibabel
import tests.test_data as test_data
import dicom2nifti.convert_generic as convert_generic
from dicom2nifti.common import read_dicom_directory
from dicom2nifti.compressed_dicom import is_dicom_file
import dicom2nifti.settings as settings
from dicom2nifti.exceptions import ConversionError
from tests.test_tools import assert_compare_nifti, ground_thruth_filenames
class TestConversionGeneric(unittest.TestCase):
def test_anatomical(self):
tmp_output_dir = tempfile.mkdtemp()
try:
results = convert_generic.dicom_to_nifti(read_dicom_directory(test_data.GE_ANATOMICAL),
None)
self.assertTrue(results.get('NII_FILE') is None)
self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
results = convert_generic.dicom_to_nifti(read_dicom_directory(test_data.GE_ANATOMICAL),
os.path.join(tmp_output_dir, 'test.nii.gz'))
assert_compare_nifti(results['NII_FILE'],
ground_thruth_filenames(test_data.GE_ANATOMICAL)[0])
self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
finally:
shutil.rmtree(tmp_output_dir)
@unittest.skip("Skip untill we figure out why it fails on circleci")
def test_inconsistent_slice_increment_resampling(self):
tmp_output_dir = tempfile.mkdtemp()
try:
settings.disable_validate_orthogonal()
settings.disable_validate_slice_increment()
settings.enable_resampling()
settings.set_resample_padding(0)
settings.set_resample_spline_interpolation_order(1)
results = convert_generic.dicom_to_nifti(read_dicom_directory(test_data.FAILING_SLICEINCREMENT_2),
os.path.join(tmp_output_dir, 'test.nii.gz'))
assert_compare_nifti(results['NII_FILE'],
ground_thruth_filenames(test_data.FAILING_SLICEINCREMENT_2)[0])
self.assertTrue(isinstance(results['NII'], nibabel.nifti1.Nifti1Image))
finally:
settings.disable_resampling()
settings.enable_validate_slice_increment()
settings.enable_validate_orientation()
shutil.rmtree(tmp_output_dir)
def test_not_a_volume(self):
tmp_output_dir = tempfile.mkdtemp()
try:
settings.disable_validate_orthogonal()
with self.assertRaises(ConversionError) as exception:
convert_generic.dicom_to_nifti(read_dicom_directory(test_data.FAILING_NOTAVOLUME),
os.path.join(tmp_output_dir, 'test.nii.gz'))
self.assertEqual(str(exception.exception),
'NOT_A_VOLUME')
finally:
settings.enable_validate_orthogonal()
shutil.rmtree(tmp_output_dir)
def test_is_dicom_file(self):
input_file = os.path.join(test_data.GENERIC_COMPRESSED, 'IM-0001-0001-0001.dcm')
assert is_dicom_file(input_file)
temporary_directory = tempfile.mkdtemp()
try:
# test for empty file
non_dicom1 = os.path.join(temporary_directory, 'non_dicom.dcm')
open(non_dicom1, 'a').close()
assert not is_dicom_file(non_dicom1)
            # test for a non-empty file
non_dicom2 = os.path.join(temporary_directory, 'non_dicom2.dcm')
with open(non_dicom2, 'w') as file_2:
file_2.write(''.join(random.SystemRandom().choice(string.digits) for _ in range(300)))
assert not is_dicom_file(non_dicom2)
finally:
shutil.rmtree(temporary_directory)
if __name__ == '__main__':
unittest.main()
avg_line_length: 40.25 | max_line_length: 110 | alphanum_fraction: 0.649938

hexsha: 1a68eccb312a1ae8df487ecfc214c379035f4a91 | size: 5,618 | ext: py | lang: Python
max_stars: homeassistant/components/iperf3/__init__.py | alemuro/home-assistant @ 9b1315d8e55f0ca906c4c8a1b2ae8c2ea511dc90 | licenses: ["Apache-2.0"] | count: 2 | events: 2019-10-19T15:07:32.000Z / 2022-01-29T10:33:20.000Z
max_issues: homeassistant/components/iperf3/__init__.py | alemuro/home-assistant @ 9b1315d8e55f0ca906c4c8a1b2ae8c2ea511dc90 | licenses: ["Apache-2.0"] | count: 4 | events: 2021-02-08T21:05:14.000Z / 2021-09-08T02:57:03.000Z
max_forks: homeassistant/components/iperf3/__init__.py | alemuro/home-assistant @ 9b1315d8e55f0ca906c4c8a1b2ae8c2ea511dc90 | licenses: ["Apache-2.0"] | count: 2 | events: 2019-01-21T05:49:23.000Z / 2019-02-19T16:30:48.000Z
content:
"""Support for Iperf3 network measurement tool."""
import logging
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
CONF_PORT,
CONF_HOST,
CONF_PROTOCOL,
CONF_HOSTS,
CONF_SCAN_INTERVAL,
)
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
DOMAIN = "iperf3"
DATA_UPDATED = "{}_data_updated".format(DOMAIN)
_LOGGER = logging.getLogger(__name__)
CONF_DURATION = "duration"
CONF_PARALLEL = "parallel"
CONF_MANUAL = "manual"
DEFAULT_DURATION = 10
DEFAULT_PORT = 5201
DEFAULT_PARALLEL = 1
DEFAULT_PROTOCOL = "tcp"
DEFAULT_INTERVAL = timedelta(minutes=60)
ATTR_DOWNLOAD = "download"
ATTR_UPLOAD = "upload"
ATTR_VERSION = "Version"
ATTR_HOST = "host"
UNIT_OF_MEASUREMENT = "Mbit/s"
SENSOR_TYPES = {
ATTR_DOWNLOAD: [ATTR_DOWNLOAD.capitalize(), UNIT_OF_MEASUREMENT],
ATTR_UPLOAD: [ATTR_UPLOAD.capitalize(), UNIT_OF_MEASUREMENT],
}
PROTOCOLS = ["tcp", "udp"]
HOST_CONFIG_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_DURATION, default=DEFAULT_DURATION): vol.Range(5, 10),
vol.Optional(CONF_PARALLEL, default=DEFAULT_PARALLEL): vol.Range(1, 20),
vol.Optional(CONF_PROTOCOL, default=DEFAULT_PROTOCOL): vol.In(PROTOCOLS),
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOSTS): vol.All(cv.ensure_list, [HOST_CONFIG_SCHEMA]),
vol.Optional(
CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)
): vol.All(cv.ensure_list, [vol.In(list(SENSOR_TYPES))]),
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_MANUAL, default=False): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_HOST, default=None): cv.string})
async def async_setup(hass, config):
"""Set up the iperf3 component."""
import iperf3
hass.data[DOMAIN] = {}
conf = config[DOMAIN]
for host in conf[CONF_HOSTS]:
host_name = host[CONF_HOST]
client = iperf3.Client()
client.duration = host[CONF_DURATION]
client.server_hostname = host_name
client.port = host[CONF_PORT]
client.num_streams = host[CONF_PARALLEL]
client.protocol = host[CONF_PROTOCOL]
client.verbose = False
data = hass.data[DOMAIN][host_name] = Iperf3Data(hass, client)
if not conf[CONF_MANUAL]:
async_track_time_interval(hass, data.update, conf[CONF_SCAN_INTERVAL])
def update(call):
"""Service call to manually update the data."""
called_host = call.data[ATTR_HOST]
if called_host in hass.data[DOMAIN]:
hass.data[DOMAIN][called_host].update()
else:
for iperf3_host in hass.data[DOMAIN].values():
iperf3_host.update()
hass.services.async_register(DOMAIN, "speedtest", update, schema=SERVICE_SCHEMA)
hass.async_create_task(
async_load_platform(
hass, SENSOR_DOMAIN, DOMAIN, conf[CONF_MONITORED_CONDITIONS], config
)
)
return True
class Iperf3Data:
"""Get the latest data from iperf3."""
def __init__(self, hass, client):
"""Initialize the data object."""
self._hass = hass
self._client = client
self.data = {ATTR_DOWNLOAD: None, ATTR_UPLOAD: None, ATTR_VERSION: None}
@property
def protocol(self):
"""Return the protocol used for this connection."""
return self._client.protocol
@property
def host(self):
"""Return the host connected to."""
return self._client.server_hostname
@property
def port(self):
"""Return the port on the host connected to."""
return self._client.port
def update(self, now=None):
"""Get the latest data from iperf3."""
if self.protocol == "udp":
            # UDP results only report one direction, so reuse it for both download and upload
result = self._run_test(ATTR_DOWNLOAD)
self.data[ATTR_DOWNLOAD] = self.data[ATTR_UPLOAD] = getattr(
result, "Mbps", None
)
self.data[ATTR_VERSION] = getattr(result, "version", None)
else:
result = self._run_test(ATTR_DOWNLOAD)
self.data[ATTR_DOWNLOAD] = getattr(result, "received_Mbps", None)
self.data[ATTR_VERSION] = getattr(result, "version", None)
self.data[ATTR_UPLOAD] = getattr(
self._run_test(ATTR_UPLOAD), "sent_Mbps", None
)
dispatcher_send(self._hass, DATA_UPDATED, self.host)
def _run_test(self, test_type):
"""Run and return the iperf3 data."""
self._client.reverse = test_type == ATTR_DOWNLOAD
try:
result = self._client.run()
except (AttributeError, OSError, ValueError) as error:
_LOGGER.error("Iperf3 error: %s", error)
return None
if result is not None and hasattr(result, "error") and result.error is not None:
_LOGGER.error("Iperf3 error: %s", result.error)
return None
return result
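Because CONFIG_SCHEMA above is a plain voluptuous schema, its defaulting behaviour can be shown directly. The snippet below is only a sketch: the host name is invented, and it assumes the constants defined in this module are in scope.

```python
# Sketch: validate a minimal configuration and inspect the defaults that
# voluptuous fills in from the schema defined above.
raw_config = {"iperf3": {"hosts": [{"host": "iperf.example.org"}]}}
validated = CONFIG_SCHEMA(raw_config)

host_conf = validated[DOMAIN][CONF_HOSTS][0]
print(host_conf[CONF_PORT])                    # 5201 (DEFAULT_PORT)
print(host_conf[CONF_DURATION])                # 10 (DEFAULT_DURATION)
print(validated[DOMAIN][CONF_SCAN_INTERVAL])   # timedelta(minutes=60)
print(validated[DOMAIN][CONF_MANUAL])          # False
```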
avg_line_length: 31.038674 | max_line_length: 88 | alphanum_fraction: 0.649875

hexsha: 914e91e19ed4c4f7e5a9ccdf8b02b4d46aca28c7 | size: 1,089 | ext: py | lang: Python
max_stars: certificates/certificates/urls.py | iamsajjad/certificates @ 4c639e8da3a6f193ab6705b522d4d89b48b7c7d5 | licenses: ["MIT"] | count: null | events: null / null
max_issues: certificates/certificates/urls.py | iamsajjad/certificates @ 4c639e8da3a6f193ab6705b522d4d89b48b7c7d5 | licenses: ["MIT"] | count: null | events: null / null
max_forks: certificates/certificates/urls.py | iamsajjad/certificates @ 4c639e8da3a6f193ab6705b522d4d89b48b7c7d5 | licenses: ["MIT"] | count: null | events: null / null
content:
"""certificates URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.conf.urls.static import static
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
#Author URLs
path('account/', include('author.urls')),
#Dashboard URLs "Home Page"
path('', include('dashboard.urls')),
#Students URLs "Home Page"
path('', include('graduates.urls')),
path('logs', include('logger.urls')),
]
avg_line_length: 35.129032 | max_line_length: 77 | alphanum_fraction: 0.69697

hexsha: 22b405f3191ad6552fe07d5953c01d6408342dd7 | size: 8,361 | ext: py | lang: Python
max_stars: tests/integration/callbacks/test_layout_paths_with_callbacks.py | jackwiy/dash @ 5e406868be2ac17f129e61eb951a52b0bf290aca | licenses: ["MIT"] | count: 1 | events: 2020-03-20T21:44:44.000Z / 2020-03-20T21:44:44.000Z
max_issues: tests/integration/callbacks/test_layout_paths_with_callbacks.py | gwu1/dash @ 6d27808698b4be4d8c778291431f085ad4a19482 | licenses: ["MIT"] | count: 1 | events: 2022-02-28T03:20:59.000Z / 2022-02-28T03:20:59.000Z
max_forks: tests/integration/callbacks/test_layout_paths_with_callbacks.py | gwu1/dash @ 6d27808698b4be4d8c778291431f085ad4a19482 | licenses: ["MIT"] | count: null | events: null / null
content:
import os
import json
from multiprocessing import Value
import dash_core_components as dcc
import dash_html_components as html
from dash import Dash
from dash.dependencies import Input, Output
import dash.testing.wait as wait
def test_cblp001_radio_buttons_callbacks_generating_children(dash_duo):
TIMEOUT = 2
with open(os.path.join(os.path.dirname(__file__), "state_path.json")) as fp:
EXPECTED_PATHS = json.load(fp)
app = Dash(__name__)
app.layout = html.Div(
[
dcc.RadioItems(
options=[
{"label": "Chapter 1", "value": "chapter1"},
{"label": "Chapter 2", "value": "chapter2"},
{"label": "Chapter 3", "value": "chapter3"},
{"label": "Chapter 4", "value": "chapter4"},
{"label": "Chapter 5", "value": "chapter5"},
],
value="chapter1",
id="toc",
),
html.Div(id="body"),
]
)
for script in dcc._js_dist:
app.scripts.append_script(script)
chapters = {
"chapter1": html.Div(
[
html.H1("Chapter 1", id="chapter1-header"),
dcc.Dropdown(
options=[{"label": i, "value": i} for i in ["NYC", "MTL", "SF"]],
value="NYC",
id="chapter1-controls",
),
html.Label(id="chapter1-label"),
dcc.Graph(id="chapter1-graph"),
]
),
        # Chapter 2 has some of the same components in the same order
        # as Chapter 1. This means that they won't get remounted
        # unless their keys are set differently.
# Switching back and forth between 1 and 2 implicitly
# tests how components update when they aren't remounted.
"chapter2": html.Div(
[
html.H1("Chapter 2", id="chapter2-header"),
dcc.RadioItems(
options=[{"label": i, "value": i} for i in ["USA", "Canada"]],
value="USA",
id="chapter2-controls",
),
html.Label(id="chapter2-label"),
dcc.Graph(id="chapter2-graph"),
]
),
# Chapter 3 has a different layout and so the components
# should get rewritten
"chapter3": [
html.Div(
html.Div(
[
html.H3("Chapter 3", id="chapter3-header"),
html.Label(id="chapter3-label"),
dcc.Graph(id="chapter3-graph"),
dcc.RadioItems(
options=[
{"label": i, "value": i} for i in ["Summer", "Winter"]
],
value="Winter",
id="chapter3-controls",
),
]
)
)
],
# Chapter 4 doesn't have an object to recursively traverse
"chapter4": "Just a string",
}
call_counts = {
"body": Value("i", 0),
"chapter1-graph": Value("i", 0),
"chapter1-label": Value("i", 0),
"chapter2-graph": Value("i", 0),
"chapter2-label": Value("i", 0),
"chapter3-graph": Value("i", 0),
"chapter3-label": Value("i", 0),
}
@app.callback(Output("body", "children"), [Input("toc", "value")])
def display_chapter(toc_value):
call_counts["body"].value += 1
return chapters[toc_value]
app.config.suppress_callback_exceptions = True
def generate_graph_callback(counterId):
def callback(value):
call_counts[counterId].value += 1
return {
"data": [
{
"x": ["Call Counter"],
"y": [call_counts[counterId].value],
"type": "bar",
}
],
"layout": {"title": value},
}
return callback
def generate_label_callback(id_):
def update_label(value):
call_counts[id_].value += 1
return value
return update_label
for chapter in ["chapter1", "chapter2", "chapter3"]:
app.callback(
Output("{}-graph".format(chapter), "figure"),
[Input("{}-controls".format(chapter), "value")],
)(generate_graph_callback("{}-graph".format(chapter)))
app.callback(
Output("{}-label".format(chapter), "children"),
[Input("{}-controls".format(chapter), "value")],
)(generate_label_callback("{}-label".format(chapter)))
dash_duo.start_server(app)
def check_chapter(chapter):
dash_duo.wait_for_element("#{}-graph:not(.dash-graph--pending)".format(chapter))
for key in dash_duo.redux_state_paths:
assert dash_duo.find_elements(
"#{}".format(key)
), "each element should exist in the dom"
value = (
chapters[chapter][0]["{}-controls".format(chapter)].value
if chapter == "chapter3"
else chapters[chapter]["{}-controls".format(chapter)].value
)
# check the actual values
dash_duo.wait_for_text_to_equal("#{}-label".format(chapter), value)
wait.until(
lambda: (
dash_duo.driver.execute_script(
"return document."
'querySelector("#{}-graph:not(.dash-graph--pending) .js-plotly-plot").'.format(
chapter
)
+ "layout.title.text"
)
== value
),
TIMEOUT,
)
rqs = dash_duo.redux_state_rqs
assert rqs, "request queue is not empty"
assert all((rq["status"] == 200 and not rq["rejected"] for rq in rqs))
def check_call_counts(chapters, count):
for chapter in chapters:
assert call_counts[chapter + "-graph"].value == count
assert call_counts[chapter + "-label"].value == count
wait.until(lambda: call_counts["body"].value == 1, TIMEOUT)
wait.until(lambda: call_counts["chapter1-graph"].value == 1, TIMEOUT)
wait.until(lambda: call_counts["chapter1-label"].value == 1, TIMEOUT)
check_call_counts(("chapter2", "chapter3"), 0)
assert dash_duo.redux_state_paths == EXPECTED_PATHS["chapter1"]
check_chapter("chapter1")
dash_duo.percy_snapshot(name="chapter-1")
dash_duo.find_elements('input[type="radio"]')[1].click() # switch chapters
wait.until(lambda: call_counts["body"].value == 2, TIMEOUT)
wait.until(lambda: call_counts["chapter2-graph"].value == 1, TIMEOUT)
wait.until(lambda: call_counts["chapter2-label"].value == 1, TIMEOUT)
check_call_counts(("chapter1",), 1)
assert dash_duo.redux_state_paths == EXPECTED_PATHS["chapter2"]
check_chapter("chapter2")
dash_duo.percy_snapshot(name="chapter-2")
# switch to 3
dash_duo.find_elements('input[type="radio"]')[2].click()
wait.until(lambda: call_counts["body"].value == 3, TIMEOUT)
wait.until(lambda: call_counts["chapter3-graph"].value == 1, TIMEOUT)
wait.until(lambda: call_counts["chapter3-label"].value == 1, TIMEOUT)
check_call_counts(("chapter2", "chapter1"), 1)
assert dash_duo.redux_state_paths == EXPECTED_PATHS["chapter3"]
check_chapter("chapter3")
dash_duo.percy_snapshot(name="chapter-3")
dash_duo.find_elements('input[type="radio"]')[3].click() # switch to 4
dash_duo.wait_for_text_to_equal("#body", "Just a string")
dash_duo.percy_snapshot(name="chapter-4")
for key in dash_duo.redux_state_paths:
assert dash_duo.find_elements(
"#{}".format(key)
), "each element should exist in the dom"
assert dash_duo.redux_state_paths == {
"toc": ["props", "children", 0],
"body": ["props", "children", 1],
}
dash_duo.find_elements('input[type="radio"]')[0].click()
wait.until(
lambda: dash_duo.redux_state_paths == EXPECTED_PATHS["chapter1"], TIMEOUT,
)
check_chapter("chapter1")
dash_duo.percy_snapshot(name="chapter-1-again")
avg_line_length: 35.578723 | max_line_length: 99 | alphanum_fraction: 0.537256

hexsha: b23007b7d9dcd57d447fd479ffb2e409e227d168 | size: 5,926 | ext: py | lang: Python
max_stars: yara/yarascan/yarascan.py | malvidin/stoq-plugins-public @ 8aaf3b97dc3972ca852d2a73a7899afa7394f9bb | licenses: ["Apache-2.0"] | count: null | events: null / null
max_issues: yara/yarascan/yarascan.py | malvidin/stoq-plugins-public @ 8aaf3b97dc3972ca852d2a73a7899afa7394f9bb | licenses: ["Apache-2.0"] | count: null | events: null / null
max_forks: yara/yarascan/yarascan.py | malvidin/stoq-plugins-public @ 8aaf3b97dc3972ca852d2a73a7899afa7394f9bb | licenses: ["Apache-2.0"] | count: null | events: null / null
content:
#!/usr/bin/env python3
# Copyright 2014-present PUNCH Cyber Analytics Group
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Overview
========
Process a payload using yara
"""
import os
import yara
from pathlib import Path
from inspect import currentframe, getframeinfo
from typing import Dict, Generator, List, Optional
from stoq.helpers import StoqConfigParser
from stoq.exceptions import StoqPluginException
from stoq.plugins import WorkerPlugin, DispatcherPlugin
from stoq import Payload, Request, WorkerResponse, DispatcherResponse
class YaraPlugin(WorkerPlugin, DispatcherPlugin):
def __init__(self, config: StoqConfigParser) -> None:
super().__init__(config)
self.dispatch_rules = None
self.worker_rules = None
filename = getframeinfo(currentframe()).filename # type: ignore
parent = Path(filename).resolve().parent
self.timeout = config.getint('options', 'timeout', fallback=60)
self.strings_limit = config.getint('options', 'strings_limit', fallback=None)
self.xor_first_match = config.getboolean('options', 'xor_first_match', fallback=True)
dispatch_ruleset = config.get(
'options', 'dispatch_rules', fallback='rules/dispatcher.yar'
)
if dispatch_ruleset:
if not os.path.isabs(dispatch_ruleset):
dispatch_ruleset = os.path.join(parent, dispatch_ruleset)
self.dispatch_rules = self._compile_rules(dispatch_ruleset)
worker_ruleset = config.get(
'options', 'worker_rules', fallback='rules/stoq.yar'
)
if worker_ruleset:
if not os.path.isabs(worker_ruleset):
worker_ruleset = os.path.join(parent, worker_ruleset)
self.worker_rules = self._compile_rules(worker_ruleset)
async def scan(self, payload: Payload, request: Request) -> WorkerResponse:
results = {
'matches': [
m for m in self._yara_matches(payload.content, self.worker_rules)
]
}
return WorkerResponse(results=results)
async def get_dispatches(
self, payload: Payload, request: Request
) -> DispatcherResponse:
dr = DispatcherResponse()
for match in self._yara_matches(payload.content, self.dispatch_rules):
if match['meta'].get('save', '').lower().strip() == 'false':
payload.results.payload_meta.should_archive = False
plugin_names = self._extract_plugin_names(match)
if 'xor' in plugin_names:
self._plugin_xor_extract_key(match)
for name in plugin_names:
dr.plugin_names.append(name)
dr.meta[name] = match
return dr
    def _compile_rules(self, filepath: str) -> yara.Rules:
filepath = os.path.realpath(filepath)
if not os.path.isfile(filepath):
raise StoqPluginException(
f'Nonexistent yara rules file provided: {filepath}'
)
else:
return yara.compile(filepath=filepath)
    def _yara_matches(self, content: bytes, rules: yara.Rules) -> Generator[Dict, None, None]:
matches = rules.match(data=content, timeout=self.timeout)
for match in matches:
yield {
'tags': match.tags,
'namespace': match.namespace,
'rule': match.rule,
'meta': match.meta,
'strings': match.strings[: self.strings_limit],
}
def _extract_plugin_names(self, match: dict) -> set:
plugin_names = set()
if 'meta' in match:
plugin_str = match['meta'].get('plugin', '').lower().strip()
plugin_names.update({p.strip() for p in plugin_str.split(',') if p.strip()})
return plugin_names
def _plugin_xor_extract_key(self, match: dict) -> None:
# Extract XOR key using plaintext in metadata against strings, see YARA issue #1242 for known issues
if 'strings' not in match or 'meta' not in match:
return
xor_pt_prefix = 'xor_plaintext_for_string_'
xor_info = []
xor_pt = {'$' + k[len(xor_pt_prefix):]: v for k, v in match['meta'].items() if k.startswith(xor_pt_prefix) and v}
if xor_pt:
for offset, label, match_bytes in match['strings']:
if label not in xor_pt:
continue
xor_pt_bytes = bytes(xor_pt[label], 'utf8')
if len(xor_pt_bytes) != len(match_bytes):
continue
key = self._xor_extract_key(match_bytes, xor_pt_bytes)
if key and self.xor_first_match:
xorkey = key[0] if len(key) == 1 else bytes(key)
match['meta']['xorkey'] = repr(xorkey)
return
elif key:
xor_info.append((offset, label, key))
if xor_info:
match['meta']['xor_info'] = repr(xor_info)
def _xor_extract_key(self, ct_bytes, pt_bytes) -> bytes:
key_list = bytearray(a ^ b for (a, b) in zip(pt_bytes, ct_bytes))
keys_len = len(key_list)
for i in range(1, keys_len):
sub_key = key_list[:i]
overlap_key = sub_key * (1 + keys_len // i)
if overlap_key[:keys_len] == key_list:
key = bytes(sub_key)
return key
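# --- Illustrative sketch (not part of the original plugin) ---
# A minimal, self-contained demonstration of the repeating-XOR key recovery
# idea used by _xor_extract_key() above: XOR a known plaintext with the
# ciphertext to obtain the key stream, then take the shortest prefix whose
# repetition reproduces that stream. All names and sample values below are
# hypothetical and exist only for this sketch.
def _demo_recover_repeating_xor_key(ciphertext: bytes, known_plaintext: bytes) -> bytes:
    key_stream = bytes(a ^ b for a, b in zip(known_plaintext, ciphertext))
    for size in range(1, len(key_stream) + 1):
        candidate = key_stream[:size]
        if (candidate * (1 + len(key_stream) // size))[: len(key_stream)] == key_stream:
            return candidate
    return key_stream
if __name__ == '__main__':
    _key = b'key'
    _pt = b'attack at dawn'
    _ct = bytes(c ^ _key[i % len(_key)] for i, c in enumerate(_pt))
    assert _demo_recover_repeating_xor_key(_ct, _pt) == _key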
| 40.312925
| 121
| 0.616774
|
9f0a19d4a94e151885b91026b57fc94e6f53b499
| 893
|
py
|
Python
|
jelly_display_2/display_collected_values_search.py
|
chalbersma/manowar
|
023a696f7ea0458e1c2ae9a18e40a9d09e824cc4
|
[
"BSD-2-Clause"
] | 3
|
2019-02-16T03:14:11.000Z
|
2020-05-28T23:14:23.000Z
|
jelly_display_2/display_collected_values_search.py
|
chalbersma/manowar
|
023a696f7ea0458e1c2ae9a18e40a9d09e824cc4
|
[
"BSD-2-Clause"
] | 4
|
2018-08-09T22:39:59.000Z
|
2020-02-12T00:36:47.000Z
|
jelly_display_2/display_collected_values_search.py
|
chalbersma/manowar
|
023a696f7ea0458e1c2ae9a18e40a9d09e824cc4
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
'''
Copyright 2018, VDMS
Licensed under the terms of the BSD 2-clause license. See LICENSE file for terms.
'''
import json
import ast
import requests
from flask import current_app, Blueprint, g, request, jsonify, render_template
display_collected_values_search = Blueprint(
'display2_collected_values_search', __name__)
@display_collected_values_search.route("/collected/values_search")
@display_collected_values_search.route("/collected/values_search/")
def display2_collected_values_search(ctype="Insert type", csubtype="Insert Subtype"):
'''
Return the Collected Values Search Form
'''
if "ctype" in request.args:
ctype = request.args["ctype"]
if "csubtype" in request.args:
csubtype = request.args["csubtype"]
return render_template('display_V2/collected_values_search.html', ctype=ctype, csubtype=csubtype)
| 27.060606
| 101
| 0.75252
|
b81c1ea16d8da7c1f1f1d99a5520a9b382d1708c
| 5,664
|
py
|
Python
|
evidently/widgets/prob_class_prod_quality_metrics_widget.py
|
jayeshmalu/evidently
|
789f9a04827d166369d965eacbd11306eac6b961
|
[
"Apache-2.0"
] | 1
|
2021-05-08T01:58:08.000Z
|
2021-05-08T01:58:08.000Z
|
evidently/widgets/prob_class_prod_quality_metrics_widget.py
|
felipeescallon/evidently
|
f6243973998a74e3bdbfe891b02bccc35888e349
|
[
"Apache-2.0"
] | null | null | null |
evidently/widgets/prob_class_prod_quality_metrics_widget.py
|
felipeescallon/evidently
|
f6243973998a74e3bdbfe891b02bccc35888e349
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
import json
import pandas as pd
from pandas.api.types import is_numeric_dtype
import numpy as np
import math
from scipy.stats import ks_2samp
from sklearn import metrics, preprocessing
from evidently.model.widget import BaseWidgetInfo, AlertStats, AdditionalGraphInfo
from evidently.widgets.widget import Widget
red = "#ed0400"
grey = "#4d4d4d"
class ProbClassProdQualityMetricsWidget(Widget):
def __init__(self, title: str):
super().__init__()
self.title = title
def get_info(self) -> BaseWidgetInfo:
#if self.wi:
return self.wi
#raise ValueError("No reference data with target and prediction provided")
def calculate(self, reference_data: pd.DataFrame, production_data: pd.DataFrame, column_mapping):
if column_mapping:
date_column = column_mapping.get('datetime')
id_column = column_mapping.get('id')
target_column = column_mapping.get('target')
prediction_column = column_mapping.get('prediction')
num_feature_names = column_mapping.get('numerical_features')
if num_feature_names is None:
num_feature_names = []
else:
num_feature_names = [name for name in num_feature_names if is_numeric_dtype(reference_data[name])]
cat_feature_names = column_mapping.get('categorical_features')
if cat_feature_names is None:
cat_feature_names = []
else:
cat_feature_names = [name for name in cat_feature_names if is_numeric_dtype(reference_data[name])]
else:
date_column = 'datetime' if 'datetime' in reference_data.columns else None
id_column = None
target_column = 'target' if 'target' in reference_data.columns else None
prediction_column = 'prediction' if 'prediction' in reference_data.columns else None
utility_columns = [date_column, id_column, target_column, prediction_column]
num_feature_names = list(set(reference_data.select_dtypes([np.number]).columns) - set(utility_columns))
cat_feature_names = list(set(reference_data.select_dtypes([np.object]).columns) - set(utility_columns))
if production_data is not None:
if target_column is not None and prediction_column is not None:
production_data.replace([np.inf, -np.inf], np.nan, inplace=True)
production_data.dropna(axis=0, how='any', inplace=True)
binaraizer = preprocessing.LabelBinarizer()
binaraizer.fit(reference_data[target_column])
binaraized_target = binaraizer.transform(production_data[target_column])
array_prediction = production_data[prediction_column].to_numpy()
prediction_ids = np.argmax(array_prediction, axis=-1)
prediction_labels = [prediction_column[x] for x in prediction_ids]
#calculate quality metrics
if len(prediction_column) > 2:
roc_auc = metrics.roc_auc_score(binaraized_target, array_prediction, average='macro')
log_loss = metrics.log_loss(binaraized_target, array_prediction)
else:
roc_auc = metrics.roc_auc_score(binaraized_target, production_data[prediction_column[0]]) #problem!!!
log_loss = metrics.log_loss(binaraized_target, production_data[prediction_column[0]]) #problem!!!
accuracy_score = metrics.accuracy_score(production_data[target_column], prediction_labels)
avg_precision = metrics.precision_score(production_data[target_column], prediction_labels,
average='macro')
avg_recall = metrics.recall_score(production_data[target_column], prediction_labels,
average='macro')
avg_f1 = metrics.f1_score(production_data[target_column], prediction_labels,
average='macro')
self.wi = BaseWidgetInfo(
title=self.title,
type="counter",
details="",
alertStats=AlertStats(),
alerts=[],
alertsPosition="row",
insights=[],
size=2,
params={
"counters": [
{
"value": str(round(accuracy_score, 3)),
"label": "Accuracy"
},
{
"value": str(round(avg_precision, 3)),
"label": "Precision"
},
{
"value": str(round(avg_recall, 3)),
"label": "Recall"
},
{
"value": str(round(avg_f1, 3)),
"label": "F1"
},
{
"value": str(round(roc_auc, 3)),
"label": "ROC AUC"
},
{
"value": str(round(log_loss, 3)),
"label": "LogLoss"
}
]
},
additionalGraphs=[],
)
else:
self.wi = None
else:
self.wi = None
| 42.586466
| 121
| 0.543256
|
9a4a8be3da312ad182fc81ca0a9002ca69a75d31
| 189
|
py
|
Python
|
03/03_P7.py
|
endowp/Python101
|
9c29387f4ed53d10579613ecf5153b71abf7ccd7
|
[
"MIT"
] | null | null | null |
03/03_P7.py
|
endowp/Python101
|
9c29387f4ed53d10579613ecf5153b71abf7ccd7
|
[
"MIT"
] | null | null | null |
03/03_P7.py
|
endowp/Python101
|
9c29387f4ed53d10579613ecf5153b71abf7ccd7
|
[
"MIT"
] | null | null | null |
import math
x=float(input())
cosxn,cosx=1,0.0
k=0
while (cosxn>=10**-8)or(-1*cosxn>=10**-8):
cosxn=(((-1)**k)*(x**(2*k)))/(math.factorial(2*k))
cosx+=cosxn
k+=1
print(cosx,k-2)
| 18.9
| 54
| 0.560847
|
cf4c0e900802a3576843f29c51ce61c009a8e4c9
| 589
|
py
|
Python
|
examples/python.py
|
charles-l/pyinfra
|
1992d98ff31d41404427dbb3cc6095a7bebd4052
|
[
"MIT"
] | 1
|
2020-12-24T08:24:13.000Z
|
2020-12-24T08:24:13.000Z
|
examples/python.py
|
charles-l/pyinfra
|
1992d98ff31d41404427dbb3cc6095a7bebd4052
|
[
"MIT"
] | null | null | null |
examples/python.py
|
charles-l/pyinfra
|
1992d98ff31d41404427dbb3cc6095a7bebd4052
|
[
"MIT"
] | 1
|
2021-11-12T18:36:01.000Z
|
2021-11-12T18:36:01.000Z
|
from pyinfra.operations import python
# Tip: you can try it out using: 'pyinfra @docker/python python.py'
SUDO = True
def my_callback(state, host, hello=None):
command = 'echo hello'
if hello:
command = command + ' ' + hello
status, stdout, stderr = host.run_shell_command(state, command=command, sudo=SUDO)
assert status is True # ensure the command executed OK
if 'hello ' not in str(stdout):
raise Exception('`{}` problem with callback stdout:{} stderr:{}'.format(
command, stdout, stderr))
python.call(my_callback, hello='world')
| 29.45
| 86
| 0.672326
|
6ad70a7abb52c32e5347837aa91d1cbc208f4692
| 310
|
py
|
Python
|
Problem5_10/summation_of_primes.py
|
Vaibhavi1707/Project-Euler
|
3f625c69e4289fb9f09a4d4ef3b2618ebf4c2777
|
[
"MIT"
] | 2
|
2021-05-29T16:59:30.000Z
|
2021-11-26T17:30:33.000Z
|
Problem5_10/summation_of_primes.py
|
Vaibhavi1707/Project-Euler
|
3f625c69e4289fb9f09a4d4ef3b2618ebf4c2777
|
[
"MIT"
] | null | null | null |
Problem5_10/summation_of_primes.py
|
Vaibhavi1707/Project-Euler
|
3f625c69e4289fb9f09a4d4ef3b2618ebf4c2777
|
[
"MIT"
] | 5
|
2021-05-19T13:16:21.000Z
|
2021-05-21T11:48:20.000Z
|
#Project euler problem 10
#Problem link https://projecteuler.net/problem=10
def sumPrimes(n):
sum, sieve = 0, [True] * n
for p in range(2, n):
if sieve[p]:
sum += p
for i in range(p * p, n, p):
sieve[i] = False
return sum
print(sumPrimes(2000000))
| 22.142857
| 49
| 0.545161
|
f0bfe91d4d5cb8eecf5059e0a7620678d8f21d9c
| 1,767
|
py
|
Python
|
tests/unit/test_client.py
|
zakiharis/sam-simple-chat
|
729f1d3935d480efcad52b60e3b4ff4bed3f8c12
|
[
"Unlicense"
] | 1
|
2021-07-28T06:53:12.000Z
|
2021-07-28T06:53:12.000Z
|
tests/unit/test_client.py
|
zakiharis/sam-simple-chat
|
729f1d3935d480efcad52b60e3b4ff4bed3f8c12
|
[
"Unlicense"
] | null | null | null |
tests/unit/test_client.py
|
zakiharis/sam-simple-chat
|
729f1d3935d480efcad52b60e3b4ff4bed3f8c12
|
[
"Unlicense"
] | null | null | null |
import client
import termcolor
import websocket
from click.testing import CliRunner
def test_on_message(mocker, monkeypatch):
def mock_colored(msg, color):
assert msg == 'hello world...'
assert color == 'green'
return None
monkeypatch.setattr(termcolor, 'colored', mock_colored)
message = 'hello world...'
client.on_message(mocker, message)
def test_on_error(mocker, monkeypatch):
def mock_colored(msg, color):
assert msg == 'error found'
assert color == 'red'
return None
monkeypatch.setattr(termcolor, 'colored', mock_colored)
message = 'error found'
client.on_error(mocker, message)
def test_on_close(monkeypatch):
def mock_colored(msg, color):
assert msg == 'Bye! See you again.'
assert color == 'blue'
return None
def mock_close():
return None
class Struct(object):
pass
mocker = Struct()
mocker.close = Struct()
monkeypatch.setattr(termcolor, 'colored', mock_colored)
monkeypatch.setattr(mocker, 'close', mock_close)
client.on_close(mocker)
def test_main(monkeypatch):
runner = CliRunner()
def mock_web_socket_app(ws_url, on_message, on_error, on_close):
assert ws_url == 'wss://testdomain/test?username=Test User'
class Struct(object):
def run_forever(self):
pass
mocker = Struct()
mocker.on_open = None
return mocker
monkeypatch.setattr(websocket, 'WebSocketApp', mock_web_socket_app)
mock_ws_server_url = 'wss://testdomain/test'
mock_username = 'Test User'
result = runner.invoke(client.main, ['--server-url', mock_ws_server_url, '--username', mock_username])
assert result.exit_code == 0
| 24.887324
| 106
| 0.657612
|
9cf2371d09d9f07a15abd2f8c2b022752242bf7f
| 21,660
|
py
|
Python
|
vnpy/gateway/binance/binance_gateway.py
|
whypro/vnpy
|
2403975311a0f0665e82c5f0ecb9fb1c455877a9
|
[
"MIT"
] | 4
|
2020-04-01T18:46:56.000Z
|
2021-08-29T09:45:47.000Z
|
vnpy/gateway/binance/binance_gateway.py
|
whypro/vnpy
|
2403975311a0f0665e82c5f0ecb9fb1c455877a9
|
[
"MIT"
] | 2
|
2019-08-04T01:45:37.000Z
|
2019-08-04T01:48:18.000Z
|
vnpy/gateway/binance/binance_gateway.py
|
whypro/vnpy
|
2403975311a0f0665e82c5f0ecb9fb1c455877a9
|
[
"MIT"
] | 1
|
2020-04-12T08:56:56.000Z
|
2020-04-12T08:56:56.000Z
|
"""
Gateway for Binance Crypto Exchange.
"""
import urllib
import hashlib
import hmac
import time
from copy import copy
from datetime import datetime, timedelta
from enum import Enum
from threading import Lock
from vnpy.api.rest import RestClient, Request
from vnpy.api.websocket import WebsocketClient
from vnpy.trader.constant import (
Direction,
Exchange,
Product,
Status,
OrderType,
Interval
)
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.object import (
TickData,
OrderData,
TradeData,
AccountData,
ContractData,
BarData,
OrderRequest,
CancelRequest,
SubscribeRequest,
HistoryRequest
)
from vnpy.trader.event import EVENT_TIMER
from vnpy.event import Event
REST_HOST = "https://www.binance.com"
WEBSOCKET_TRADE_HOST = "wss://stream.binance.com:9443/ws/"
WEBSOCKET_DATA_HOST = "wss://stream.binance.com:9443/stream?streams="
STATUS_BINANCE2VT = {
"NEW": Status.NOTTRADED,
"PARTIALLY_FILLED": Status.PARTTRADED,
"FILLED": Status.ALLTRADED,
"CANCELED": Status.CANCELLED,
"REJECTED": Status.REJECTED
}
ORDERTYPE_VT2BINANCE = {
OrderType.LIMIT: "LIMIT",
OrderType.MARKET: "MARKET"
}
ORDERTYPE_BINANCE2VT = {v: k for k, v in ORDERTYPE_VT2BINANCE.items()}
DIRECTION_VT2BINANCE = {
Direction.LONG: "BUY",
Direction.SHORT: "SELL"
}
DIRECTION_BINANCE2VT = {v: k for k, v in DIRECTION_VT2BINANCE.items()}
INTERVAL_VT2BINANCE = {
Interval.MINUTE: "1m",
Interval.HOUR: "1h",
Interval.DAILY: "1d",
}
TIMEDELTA_MAP = {
Interval.MINUTE: timedelta(minutes=1),
Interval.HOUR: timedelta(hours=1),
Interval.DAILY: timedelta(days=1),
}
class Security(Enum):
NONE = 0
SIGNED = 1
API_KEY = 2
symbol_name_map = {}
class BinanceGateway(BaseGateway):
"""
VN Trader Gateway for Binance connection.
"""
default_setting = {
"key": "",
"secret": "",
"session_number": 3,
"proxy_host": "",
"proxy_port": 0,
}
exchanges = [Exchange.BINANCE]
def __init__(self, event_engine):
"""Constructor"""
super().__init__(event_engine, "BINANCE")
self.trade_ws_api = BinanceTradeWebsocketApi(self)
self.market_ws_api = BinanceDataWebsocketApi(self)
self.rest_api = BinanceRestApi(self)
def connect(self, setting: dict):
""""""
key = setting["key"]
secret = setting["secret"]
session_number = setting["session_number"]
proxy_host = setting["proxy_host"]
proxy_port = setting["proxy_port"]
self.rest_api.connect(key, secret, session_number,
proxy_host, proxy_port)
self.market_ws_api.connect(proxy_host, proxy_port)
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
def subscribe(self, req: SubscribeRequest):
""""""
self.market_ws_api.subscribe(req)
def send_order(self, req: OrderRequest):
""""""
return self.rest_api.send_order(req)
def cancel_order(self, req: CancelRequest):
""""""
self.rest_api.cancel_order(req)
def query_account(self):
""""""
pass
def query_position(self):
""""""
pass
def query_history(self, req: HistoryRequest):
""""""
return self.rest_api.query_history(req)
def close(self):
""""""
self.rest_api.stop()
self.trade_ws_api.stop()
self.market_ws_api.stop()
def process_timer_event(self, event: Event):
""""""
self.rest_api.keep_user_stream()
class BinanceRestApi(RestClient):
"""
BINANCE REST API
"""
def __init__(self, gateway: BinanceGateway):
""""""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.trade_ws_api = self.gateway.trade_ws_api
self.key = ""
self.secret = ""
self.user_stream_key = ""
self.keep_alive_count = 0
self.recv_window = 5000
self.time_offset = 0
self.order_count = 1_000_000
self.order_count_lock = Lock()
self.connect_time = 0
def sign(self, request):
"""
Generate BINANCE signature.
"""
security = request.data["security"]
if security == Security.NONE:
request.data = None
return request
if request.params:
path = request.path + "?" + urllib.parse.urlencode(request.params)
else:
request.params = dict()
path = request.path
if security == Security.SIGNED:
timestamp = int(time.time() * 1000)
if self.time_offset > 0:
timestamp -= abs(self.time_offset)
elif self.time_offset < 0:
timestamp += abs(self.time_offset)
request.params["timestamp"] = timestamp
query = urllib.parse.urlencode(sorted(request.params.items()))
signature = hmac.new(self.secret, query.encode(
"utf-8"), hashlib.sha256).hexdigest()
query += "&signature={}".format(signature)
path = request.path + "?" + query
request.path = path
request.params = {}
request.data = {}
# Add headers
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "application/json",
"X-MBX-APIKEY": self.key
}
if security in [Security.SIGNED, Security.API_KEY]:
request.headers = headers
return request
def connect(
self,
key: str,
secret: str,
session_number: int,
proxy_host: str,
proxy_port: int
):
"""
Initialize connection to REST server.
"""
self.key = key
self.secret = secret.encode()
self.proxy_port = proxy_port
self.proxy_host = proxy_host
self.connect_time = (
int(datetime.now().strftime("%y%m%d%H%M%S")) * self.order_count
)
self.init(REST_HOST, proxy_host, proxy_port)
self.start(session_number)
self.gateway.write_log("REST API启动成功")
self.query_time()
self.query_account()
self.query_order()
self.query_contract()
self.start_user_stream()
def query_time(self):
""""""
data = {
"security": Security.NONE
}
path = "/api/v1/time"
return self.add_request(
"GET",
path,
callback=self.on_query_time,
data=data
)
def query_account(self):
""""""
data = {"security": Security.SIGNED}
self.add_request(
method="GET",
path="/api/v3/account",
callback=self.on_query_account,
data=data
)
def query_order(self):
""""""
data = {"security": Security.SIGNED}
self.add_request(
method="GET",
path="/api/v3/openOrders",
callback=self.on_query_order,
data=data
)
def query_contract(self):
""""""
data = {
"security": Security.NONE
}
self.add_request(
method="GET",
path="/api/v1/exchangeInfo",
callback=self.on_query_contract,
data=data
)
def _new_order_id(self):
""""""
with self.order_count_lock:
self.order_count += 1
return self.order_count
def send_order(self, req: OrderRequest):
""""""
orderid = str(self.connect_time + self._new_order_id())
order = req.create_order_data(
orderid,
self.gateway_name
)
self.gateway.on_order(order)
data = {
"security": Security.SIGNED
}
params = {
"symbol": req.symbol,
"timeInForce": "GTC",
"side": DIRECTION_VT2BINANCE[req.direction],
"type": ORDERTYPE_VT2BINANCE[req.type],
"price": str(req.price),
"quantity": str(req.volume),
"newClientOrderId": orderid,
"newOrderRespType": "ACK"
}
self.add_request(
method="POST",
path="/api/v3/order",
callback=self.on_send_order,
data=data,
params=params,
extra=order,
on_error=self.on_send_order_error,
on_failed=self.on_send_order_failed
)
return order.vt_orderid
def cancel_order(self, req: CancelRequest):
""""""
data = {
"security": Security.SIGNED
}
params = {
"symbol": req.symbol,
"origClientOrderId": req.orderid
}
self.add_request(
method="DELETE",
path="/api/v3/order",
callback=self.on_cancel_order,
params=params,
data=data,
extra=req
)
def start_user_stream(self):
""""""
data = {
"security": Security.API_KEY
}
self.add_request(
method="POST",
path="/api/v1/userDataStream",
callback=self.on_start_user_stream,
data=data
)
def keep_user_stream(self):
""""""
self.keep_alive_count += 1
if self.keep_alive_count < 600:
return
self.keep_alive_count = 0
data = {
"security": Security.API_KEY
}
params = {
"listenKey": self.user_stream_key
}
self.add_request(
method="PUT",
path="/api/v1/userDataStream",
callback=self.on_keep_user_stream,
params=params,
data=data
)
def on_query_time(self, data, request):
""""""
local_time = int(time.time() * 1000)
server_time = int(data["serverTime"])
self.time_offset = local_time - server_time
def on_query_account(self, data, request):
""""""
for account_data in data["balances"]:
account = AccountData(
accountid=account_data["asset"],
balance=float(account_data["free"]) + float(account_data["locked"]),
frozen=float(account_data["locked"]),
gateway_name=self.gateway_name
)
if account.balance:
self.gateway.on_account(account)
self.gateway.write_log("账户资金查询成功")
def on_query_order(self, data, request):
""""""
for d in data:
dt = datetime.fromtimestamp(d["time"] / 1000)
time = dt.strftime("%Y-%m-%d %H:%M:%S")
order = OrderData(
orderid=d["clientOrderId"],
symbol=d["symbol"],
exchange=Exchange.BINANCE,
price=float(d["price"]),
volume=float(d["origQty"]),
type=ORDERTYPE_BINANCE2VT[d["type"]],
direction=DIRECTION_BINANCE2VT[d["side"]],
traded=float(d["executedQty"]),
status=STATUS_BINANCE2VT.get(d["status"], None),
time=time,
gateway_name=self.gateway_name,
)
self.gateway.on_order(order)
self.gateway.write_log("委托信息查询成功")
def on_query_contract(self, data, request):
""""""
for d in data["symbols"]:
base_currency = d["baseAsset"]
quote_currency = d["quoteAsset"]
name = f"{base_currency.upper()}/{quote_currency.upper()}"
pricetick = 1
min_volume = 1
for f in d["filters"]:
if f["filterType"] == "PRICE_FILTER":
pricetick = float(f["tickSize"])
elif f["filterType"] == "LOT_SIZE":
min_volume = float(f["stepSize"])
contract = ContractData(
symbol=d["symbol"],
exchange=Exchange.BINANCE,
name=name,
pricetick=pricetick,
size=1,
min_volume=min_volume,
product=Product.SPOT,
history_data=True,
gateway_name=self.gateway_name,
)
self.gateway.on_contract(contract)
symbol_name_map[contract.symbol] = contract.name
self.gateway.write_log("合约信息查询成功")
def on_send_order(self, data, request):
""""""
pass
def on_send_order_failed(self, status_code: str, request: Request):
"""
Callback when sending order failed on server.
"""
order = request.extra
order.status = Status.REJECTED
self.gateway.on_order(order)
msg = f"委托失败,状态码:{status_code},信息:{request.response.text}"
self.gateway.write_log(msg)
def on_send_order_error(
self, exception_type: type, exception_value: Exception, tb, request: Request
):
"""
Callback when sending order caused exception.
"""
order = request.extra
order.status = Status.REJECTED
self.gateway.on_order(order)
# Record exception if not ConnectionError
if not issubclass(exception_type, ConnectionError):
self.on_error(exception_type, exception_value, tb, request)
def on_cancel_order(self, data, request):
""""""
pass
def on_start_user_stream(self, data, request):
""""""
self.user_stream_key = data["listenKey"]
self.keep_alive_count = 0
url = WEBSOCKET_TRADE_HOST + self.user_stream_key
self.trade_ws_api.connect(url, self.proxy_host, self.proxy_port)
def on_keep_user_stream(self, data, request):
""""""
pass
def query_history(self, req: HistoryRequest):
""""""
history = []
limit = 1000
start_time = int(datetime.timestamp(req.start))
while True:
# Create query params
params = {
"symbol": req.symbol,
"interval": INTERVAL_VT2BINANCE[req.interval],
"limit": limit,
"startTime": start_time * 1000, # convert to millisecond
}
# Add end time if specified
if req.end:
end_time = int(datetime.timestamp(req.end))
params["endTime"] = end_time * 1000 # convert to millisecond
# Get response from server
resp = self.request(
"GET",
"/api/v1/klines",
data={"security": Security.NONE},
params=params
)
# Break if request failed with other status code
if resp.status_code // 100 != 2:
msg = f"获取历史数据失败,状态码:{resp.status_code},信息:{resp.text}"
self.gateway.write_log(msg)
break
else:
data = resp.json()
if not data:
msg = f"获取历史数据为空,开始时间:{start_time}"
self.gateway.write_log(msg)
break
buf = []
for l in data:
dt = datetime.fromtimestamp(l[0] / 1000) # convert to second
bar = BarData(
symbol=req.symbol,
exchange=req.exchange,
datetime=dt,
interval=req.interval,
volume=float(l[5]),
open_price=float(l[1]),
high_price=float(l[2]),
low_price=float(l[3]),
close_price=float(l[4]),
gateway_name=self.gateway_name
)
buf.append(bar)
history.extend(buf)
begin = buf[0].datetime
end = buf[-1].datetime
msg = f"获取历史数据成功,{req.symbol} - {req.interval.value},{begin} - {end}"
self.gateway.write_log(msg)
# Break if total data count less than limit (latest date collected)
if len(data) < limit:
break
# Update start time
start_dt = bar.datetime + TIMEDELTA_MAP[req.interval]
start_time = int(datetime.timestamp(start_dt))
return history
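# --- Illustrative sketch (not part of the original gateway) ---
# BinanceRestApi.sign() above authenticates requests with an HMAC-SHA256
# digest over the URL-encoded, sorted query string, keyed with the API
# secret. The standalone helper below repeats that arithmetic with made-up
# inputs so the construction can be checked in isolation; the function name
# and the sample values are hypothetical.
def _demo_signed_query(secret: bytes, params: dict) -> str:
    import urllib.parse
    query = urllib.parse.urlencode(sorted(params.items()))
    signature = hmac.new(secret, query.encode("utf-8"), hashlib.sha256).hexdigest()
    return query + "&signature=" + signature
# e.g. _demo_signed_query(b"my-secret", {"symbol": "BTCUSDT", "timestamp": 1000})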
class BinanceTradeWebsocketApi(WebsocketClient):
""""""
def __init__(self, gateway):
""""""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
def connect(self, url, proxy_host, proxy_port):
""""""
self.init(url, proxy_host, proxy_port)
self.start()
def on_connected(self):
""""""
self.gateway.write_log("交易Websocket API连接成功")
def on_packet(self, packet: dict): # type: (dict)->None
""""""
if packet["e"] == "outboundAccountInfo":
self.on_account(packet)
elif packet["e"] == "executionReport":
self.on_order(packet)
def on_account(self, packet):
""""""
for d in packet["B"]:
account = AccountData(
accountid=d["a"],
balance=float(d["f"]) + float(d["l"]),
frozen=float(d["l"]),
gateway_name=self.gateway_name
)
if account.balance:
self.gateway.on_account(account)
def on_order(self, packet: dict):
""""""
dt = datetime.fromtimestamp(packet["O"] / 1000)
time = dt.strftime("%Y-%m-%d %H:%M:%S")
if packet["C"] == "null":
orderid = packet["c"]
else:
orderid = packet["C"]
order = OrderData(
symbol=packet["s"],
exchange=Exchange.BINANCE,
orderid=orderid,
type=ORDERTYPE_BINANCE2VT[packet["o"]],
direction=DIRECTION_BINANCE2VT[packet["S"]],
price=float(packet["p"]),
volume=float(packet["q"]),
traded=float(packet["z"]),
status=STATUS_BINANCE2VT[packet["X"]],
time=time,
gateway_name=self.gateway_name
)
self.gateway.on_order(order)
# Push trade event
trade_volume = float(packet["l"])
if not trade_volume:
return
trade_dt = datetime.fromtimestamp(packet["T"] / 1000)
trade_time = trade_dt.strftime("%Y-%m-%d %H:%M:%S")
trade = TradeData(
symbol=order.symbol,
exchange=order.exchange,
orderid=order.orderid,
tradeid=packet["t"],
direction=order.direction,
price=float(packet["L"]),
volume=trade_volume,
time=trade_time,
gateway_name=self.gateway_name,
)
self.gateway.on_trade(trade)
class BinanceDataWebsocketApi(WebsocketClient):
""""""
def __init__(self, gateway):
""""""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.ticks = {}
def connect(self, proxy_host: str, proxy_port: int):
""""""
self.proxy_host = proxy_host
self.proxy_port = proxy_port
def on_connected(self):
""""""
self.gateway.write_log("行情Websocket API连接刷新")
def subscribe(self, req: SubscribeRequest):
""""""
if req.symbol not in symbol_name_map:
self.gateway.write_log(f"找不到该合约代码{req.symbol}")
return
# Create tick buf data
tick = TickData(
symbol=req.symbol,
name=symbol_name_map.get(req.symbol, ""),
exchange=Exchange.BINANCE,
datetime=datetime.now(),
gateway_name=self.gateway_name,
)
self.ticks[req.symbol.lower()] = tick
# Close previous connection
if self._active:
self.stop()
self.join()
# Create new connection
channels = []
for ws_symbol in self.ticks.keys():
channels.append(ws_symbol + "@ticker")
channels.append(ws_symbol + "@depth5")
url = WEBSOCKET_DATA_HOST + "/".join(channels)
self.init(url, self.proxy_host, self.proxy_port)
self.start()
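        # --- Illustrative note (not part of the original gateway) ---
        # For a single BTCUSDT subscription, the combined-stream URL built
        # above would be, for example:
        #     wss://stream.binance.com:9443/stream?streams=btcusdt@ticker/btcusdt@depth5
        # i.e. one "<symbol>@ticker" and one "<symbol>@depth5" channel per
        # subscribed symbol, joined with "/".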
def on_packet(self, packet):
""""""
stream = packet["stream"]
data = packet["data"]
symbol, channel = stream.split("@")
tick = self.ticks[symbol]
if channel == "ticker":
tick.volume = float(data['v'])
tick.open_price = float(data['o'])
tick.high_price = float(data['h'])
tick.low_price = float(data['l'])
tick.last_price = float(data['c'])
tick.datetime = datetime.fromtimestamp(float(data['E']) / 1000)
else:
bids = data["bids"]
for n in range(5):
price, volume = bids[n]
tick.__setattr__("bid_price_" + str(n + 1), float(price))
tick.__setattr__("bid_volume_" + str(n + 1), float(volume))
asks = data["asks"]
for n in range(5):
price, volume = asks[n]
tick.__setattr__("ask_price_" + str(n + 1), float(price))
tick.__setattr__("ask_volume_" + str(n + 1), float(volume))
if tick.last_price:
self.gateway.on_tick(copy(tick))
| 27.804878
| 85
| 0.536611
|
eee5f7549e7f3a5a6f39df7b815aca93bc536f48
| 1,328
|
py
|
Python
|
hackerrank/artificial-intelligence/stack-exchange-question-classifier/stack-exchange-question-classifier.py
|
yasserglez/programming-problems
|
08cef1186b182430b231ed9772d8f92ec1d2365b
|
[
"MIT"
] | 2
|
2017-02-17T01:40:27.000Z
|
2018-04-22T12:47:28.000Z
|
hackerrank/artificial-intelligence/stack-exchange-question-classifier/stack-exchange-question-classifier.py
|
yasserglez/programming-problems
|
08cef1186b182430b231ed9772d8f92ec1d2365b
|
[
"MIT"
] | null | null | null |
hackerrank/artificial-intelligence/stack-exchange-question-classifier/stack-exchange-question-classifier.py
|
yasserglez/programming-problems
|
08cef1186b182430b231ed9772d8f92ec1d2365b
|
[
"MIT"
] | 1
|
2016-10-14T06:00:42.000Z
|
2016-10-14T06:00:42.000Z
|
# https://www.hackerrank.com/challenges/stack-exchange-question-classifier
import sys
import json
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_val_score
def load_corpus(f):
n = int(f.readline())
corpus = ([], [])
for i in range(n):
doc = json.loads(f.readline())
corpus[0].append('{} {}'.format(doc['question'], doc['excerpt']))
if 'topic' in doc:
corpus[1].append(doc['topic'])
return corpus
def build_model(corpus):
model = Pipeline([
('tfidf', TfidfVectorizer(stop_words='english')),
('classifier', SGDClassifier(loss='log', penalty='none', max_iter=100)),
])
# scores = cross_val_score(model, corpus[0], corpus[1], cv=10, n_jobs=-1)
# print('CV score:', np.mean(scores))
model.fit(corpus[0], corpus[1])
return model
if __name__ == '__main__':
np.random.seed(sum(map(ord, 'stack-exchange-question-classifier')))
with open('training.json') as f:
training_data = load_corpus(f)
model = build_model(training_data)
test_data = load_corpus(sys.stdin)
topics = model.predict(test_data[0])
print('\n'.join(str(topic) for topic in topics))
| 30.883721
| 80
| 0.670934
|
5882ad569166c2bba4227e91833ff722bc1115c9
| 307
|
py
|
Python
|
dandy/config/docs.py
|
erpcloudsystems/dandy
|
6721c297644627b85e58419a36570095681df72c
|
[
"MIT"
] | null | null | null |
dandy/config/docs.py
|
erpcloudsystems/dandy
|
6721c297644627b85e58419a36570095681df72c
|
[
"MIT"
] | null | null | null |
dandy/config/docs.py
|
erpcloudsystems/dandy
|
6721c297644627b85e58419a36570095681df72c
|
[
"MIT"
] | null | null | null |
"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/dandy"
# docs_base_url = "https://[org_name].github.io/dandy"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "Dandy"
| 25.583333
| 68
| 0.716612
|
c818f53ae191012f825cf475328e092ed7c78dfc
| 7,665
|
py
|
Python
|
preprocessing/ned.py
|
flathers/soilCarbonFramework
|
5d1d4fcd45eb1699b13d683cbaced75d3a341f60
|
[
"MIT"
] | 1
|
2019-08-18T06:04:09.000Z
|
2019-08-18T06:04:09.000Z
|
preprocessing/ned.py
|
flathers/soilCarbonFramework
|
5d1d4fcd45eb1699b13d683cbaced75d3a341f60
|
[
"MIT"
] | null | null | null |
preprocessing/ned.py
|
flathers/soilCarbonFramework
|
5d1d4fcd45eb1699b13d683cbaced75d3a341f60
|
[
"MIT"
] | 1
|
2021-02-23T00:18:36.000Z
|
2021-02-23T00:18:36.000Z
|
"""
Get an extracted mosaic of the elevation data
"""
import arcpy
from arcpy import env
from arcpy.sa import *
import glob
import math
class LicenseError(Exception):
pass
def mosaic(workspace, out_location, out_raster):
# after http://desktop.arcgis.com/en/arcmap/10.4/tools/data-management-toolbox/mosaic-to-new-raster.htm
# Description: Mosaics rasters together
# Set environment settings
env.workspace = workspace
# Set local variables
in_rasters = ';'.join(glob.glob(workspace + '*.tif'))
coordinate_system = arcpy.SpatialReference("NAD 1983 Contiguous USA Albers")
data_type = '32_BIT_SIGNED'
cell_size = '30'
bands = '1'
# Execute MosaicToNewRaster
arcpy.MosaicToNewRaster_management(in_rasters, out_location, out_raster,
coordinate_system, data_type, cell_size, bands)
def extract(workspace, in_raster, mask, out_raster):
# after http://desktop.arcgis.com/en/arcmap/10.4/tools/spatial-analyst-toolbox/extract-by-mask.htm
# Set environment settings
env.workspace = workspace
# Execute ExtractByMask
outExtractByMask = ExtractByMask(in_raster, mask)
# Save the output
outExtractByMask.save(out_raster)
arcpy.BuildPyramids_management(out_raster)
def fill_sinks(workspace, in_raster, out_raster):
# after http://desktop.arcgis.com/en/arcmap/10.4/tools/spatial-analyst-toolbox/fill.htm
# Set environment settings
env.workspace = workspace
# Execute ExtractByMask
outFill = Fill(in_raster)
# Save the output
outFill.save(out_raster)
arcpy.BuildPyramids_management(out_raster)
def flow_direction(workspace, in_raster, out_raster):
# after http://desktop.arcgis.com/en/arcmap/10.4/tools/spatial-analyst-toolbox/flow-direction.htm
# Description: Creates a raster of flow direction from each cell to its
# steepest downslope neighbor.
# Requirements: Spatial Analyst Extension
# Set environment settings
env.workspace = workspace
# Set local variables
inSurfaceRaster = in_raster
# Execute FlowDirection
outFlowDirection = FlowDirection(inSurfaceRaster, "NORMAL")
# Save the output
outFlowDirection.save(out_raster)
arcpy.BuildPyramids_management(out_raster)
def flow_accumulation(workspace, in_raster, out_raster):
# after http://desktop.arcgis.com/en/arcmap/10.4/tools/spatial-analyst-toolbox/flow-accumulation.htm
# Description: Creates a raster of accumulated flow to each cell.
# Requirements: Spatial Analyst Extension
# Set environment settings
env.workspace = workspace
# Set local variables
inFlowDirRaster = in_raster
inWeightRaster = ''
dataType = 'INTEGER'
# Execute FlowDirection
outFlowAccumulation = FlowAccumulation(inFlowDirRaster, inWeightRaster, dataType)
# Save the output
outFlowAccumulation.save(out_raster)
arcpy.BuildPyramids_management(out_raster)
def slope(workspace, in_raster, out_raster):
# after http://desktop.arcgis.com/en/arcmap/10.4/tools/spatial-analyst-toolbox/slope.htm
# Description: Identifies the rate of maximum change
# in z-value from each cell.
# Requirements: Spatial Analyst Extension
# Set environment settings
env.workspace = workspace
# Set local variables
inRaster = in_raster
outMeasurement = 'DEGREE'
# Execute Slope
slopeDeg = Slope(inRaster, outMeasurement)
slopeRad = Times(slopeDeg, math.pi / 180)
# Save the output
slopeRad.save(out_raster)
arcpy.BuildPyramids_management(out_raster)
def topographic_wetness_index(workspace, flow_accumulation_raster, slope_raster, out_raster):
# Description: Computes topographic wetness index using flow accumulation
# and slope after Quin et al. 1991 (note that we assume 30m
# cell size, so cell_size_squared = 30^2 = 900)
#
# Quinn, P. F. B. J., et al.
# "The prediction of hillslope flow paths for distributed hydrological
# modelling using digital terrain models."
# Hydrological processes 5.1 (1991): 59-79.
# DOI: 10.1002/hyp.3360050106
#
# Requirements: Spatial Analyst Extension
# Set environment settings
env.workspace = workspace
# Execute math processors
# Note that each one of these creates a raster file in the workspace,
# but we only save the last one.
cell_size_squared = 900
tan_slope_raster = Tan(slope_raster)
squared_flow_accumulation_raster = Times(flow_accumulation_raster, cell_size_squared)
quotient = Divide(squared_flow_accumulation_raster, tan_slope_raster)
twi = Ln(quotient)
# We need to normalize the twi values: (twi - twi_min) / (twi_max - twi_min)
twi_min_result = arcpy.GetRasterProperties_management(twi, "MINIMUM")
twi_max_result = arcpy.GetRasterProperties_management(twi, "MAXIMUM")
twi_min = float(twi_min_result.getOutput(0))
twi_max = float(twi_max_result.getOutput(0))
twi_top = Minus(twi, twi_min)
twi_bottom = twi_max - twi_min
twi_norm = Divide(twi_top, twi_bottom)
# Save the output
twi_norm.save(out_raster)
arcpy.BuildPyramids_management(out_raster)
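# --- Illustrative sketch (not part of the original script) ---
# The raster algebra in topographic_wetness_index() above implements the
# usual definition TWI = ln(a / tan(beta)), where a is the specific
# catchment area (flow accumulation times the squared 30 m cell size, hence
# 900) and beta is the slope in radians; the result is then normalized to
# [0, 1]. The helper below repeats the core arithmetic for a single cell
# without arcpy; its name and sample values are made up.
def _demo_twi_single_cell(flow_accumulation_cells, slope_radians, cell_size_squared=900.0):
    import math
    return math.log((flow_accumulation_cells * cell_size_squared) / math.tan(slope_radians))
# e.g. _demo_twi_single_cell(120, 0.05) is roughly 14.6 before normalization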
if __name__ == "__main__":
try:
# Grab a license for spatial analyst--we'll need it for just about
# everything we do here
print 'Checking out ArcGIS Spatial Analyst extension license'
if arcpy.CheckExtension("Spatial") == "Available":
arcpy.CheckOutExtension("Spatial")
print 'CheckOutExtension complete'
else:
raise LicenseError
# Initialize path and file names
base_input_path = 'F:/soilCarbon/extractedData/elevation/'
base_output_path = 'F:/soilCarbon/inputData/elevation/'
mask = 'F:/soilCarbon/extractedData/boundaries/envelope/envelope.shp'
mosaic_raster = 'nedMosaic.tif'
ned_raster = 'ned.tif'
filled_raster = 'nedf'
slope_raster = 'slope'
flow_direction_raster = 'flowdir'
flow_accumulation_raster = 'flowacc'
topographic_wetness_index_raster = 'twi'
# Build mosaic
print 'Building NED mosaic'
mosaic(base_input_path, base_output_path, mosaic_raster)
# Extract
print 'Extracting by mask'
extract(base_output_path, mosaic_raster, mask, ned_raster)
# Extract
print 'Filling sinks'
fill_sinks(base_output_path, ned_raster, filled_raster)
# Compute derived rasters: slope, flow direction and accumulation, twi
print 'Computing slope raster'
slope(base_output_path,
filled_raster,
base_output_path + slope_raster)
print 'Computing flow direction raster'
flow_direction(base_output_path,
filled_raster,
base_output_path + flow_direction_raster)
print 'Computing flow accumulation raster'
flow_accumulation(base_output_path,
flow_direction_raster,
base_output_path + flow_accumulation_raster)
print 'Computing topographic wetness index raster'
topographic_wetness_index(base_output_path,
flow_accumulation_raster,
slope_raster,
base_output_path + topographic_wetness_index_raster)
except LicenseError:
print 'ArcGIS extension license unavailable'
except Exception as e:
print e
finally:
# Check the Spatial Analyst Extension license in
print 'Checking in ArcGIS Spatial Analyst extension license'
status = arcpy.CheckInExtension("Spatial")
print 'CheckInExtension complete: ' + status
| 32.896996
| 107
| 0.705545
|
a2461b6b37d5145bcee8c41758569bde75cc554c
| 719
|
py
|
Python
|
01_Introductions/06_Multiple_Linear_Regression.py
|
ivanbgd/Udacity-Deep-Learning-ND101
|
05f8fe15654f51e4d770af39ee0195f22a84e65c
|
[
"MIT"
] | 1
|
2017-12-06T23:23:26.000Z
|
2017-12-06T23:23:26.000Z
|
01_Introductions/06_Multiple_Linear_Regression.py
|
ivanbgd/Udacity-Deep-Learning-ND101
|
05f8fe15654f51e4d770af39ee0195f22a84e65c
|
[
"MIT"
] | null | null | null |
01_Introductions/06_Multiple_Linear_Regression.py
|
ivanbgd/Udacity-Deep-Learning-ND101
|
05f8fe15654f51e4d770af39ee0195f22a84e65c
|
[
"MIT"
] | 2
|
2019-09-02T05:27:35.000Z
|
2020-03-28T18:27:07.000Z
|
from sklearn.linear_model import LinearRegression
from sklearn.datasets import load_boston
# Load the data from the boston house-prices dataset
boston_data = load_boston()
X = boston_data['data']
y = boston_data['target']
# Make and fit the linear regression model
model = LinearRegression()
model.fit(X, y)
# Make a prediction using the model
sample_house = [[2.29690000e-01, 0.00000000e+00, 1.05900000e+01, 0.00000000e+00, 4.89000000e-01,
6.32600000e+00, 5.25000000e+01, 4.35490000e+00, 4.00000000e+00, 2.77000000e+02,
1.86000000e+01, 3.94870000e+02, 1.09700000e+01]]
prediction = model.predict(sample_house) # 23.68420569227329 is the correct prediction!
print(prediction)
| 37.842105
| 96
| 0.737135
|
7a593c3e7e9cb9af148a2a6a39698511ab078d20
| 646
|
py
|
Python
|
print_all_links.py
|
jjtoledo/Treinamento-Data-Science
|
5117975109695b1de06ae43b416972e66a4b7773
|
[
"MIT"
] | null | null | null |
print_all_links.py
|
jjtoledo/Treinamento-Data-Science
|
5117975109695b1de06ae43b416972e66a4b7773
|
[
"MIT"
] | null | null | null |
print_all_links.py
|
jjtoledo/Treinamento-Data-Science
|
5117975109695b1de06ae43b416972e66a4b7773
|
[
"MIT"
] | null | null | null |
def get_next_target(page):
start_link = page.find('<a href=')
# Insert your code below here
if (start_link == -1):
return None, 0
else:
start_quote = page.find('"', start_link)
end_quote = page.find('"', start_quote + 1)
url = page[start_quote + 1:end_quote]
return url, end_quote
def print_all_links(page):
while True:
url, endpos = get_next_target(page)
if url:
print url
page = page[endpos:]
else:
break
page = '<a href="www.testes.com" fiopajidoa jiopafdopafho <a href="www.jfioafp.com" fdsaf'
print_all_links(page)
| 23.925926
| 90
| 0.589783
|
9e8d68ab952bafe73e60bce00e854daa9d594505
| 136,135
|
py
|
Python
|
src/sage/categories/pushout.py
|
bopopescu/Sage-8
|
71be00ad5f25ca95381fae7cce96421ffdd43425
|
[
"BSL-1.0"
] | null | null | null |
src/sage/categories/pushout.py
|
bopopescu/Sage-8
|
71be00ad5f25ca95381fae7cce96421ffdd43425
|
[
"BSL-1.0"
] | null | null | null |
src/sage/categories/pushout.py
|
bopopescu/Sage-8
|
71be00ad5f25ca95381fae7cce96421ffdd43425
|
[
"BSL-1.0"
] | null | null | null |
"""
Coercion via Construction Functors
"""
import six
from functor import Functor
from basic import *
from sage.structure.parent import CoercionException
# TODO, think through the rankings, and override pushout where necessary.
class ConstructionFunctor(Functor):
"""
Base class for construction functors.
A construction functor is a functorial algebraic construction,
such as the construction of a matrix ring over a given ring
or the fraction field of a given ring.
In addition to the class :class:`~sage.categories.functor.Functor`,
construction functors provide rules for combining and merging
constructions. This is an important part of Sage's coercion model,
namely the pushout of two constructions: When a polynomial ``p`` in
a variable ``x`` with integer coefficients is added to a rational
number ``q``, then Sage finds that the parents ``ZZ['x']`` and
``QQ`` are obtained from ``ZZ`` by applying a polynomial ring
construction respectively the fraction field construction. Each
construction functor has an attribute ``rank``, and the rank of
the polynomial ring construction is higher than the rank of the
fraction field construction. This means that the pushout of ``QQ``
and ``ZZ['x']``, and thus a common parent in which ``p`` and ``q``
can be added, is ``QQ['x']``, since the construction functor with
a lower rank is applied first.
::
sage: F1, R = QQ.construction()
sage: F1
FractionField
sage: R
Integer Ring
sage: F2, R = (ZZ['x']).construction()
sage: F2
Poly[x]
sage: R
Integer Ring
sage: F3 = F2.pushout(F1)
sage: F3
Poly[x](FractionField(...))
sage: F3(R)
Univariate Polynomial Ring in x over Rational Field
sage: from sage.categories.pushout import pushout
sage: P.<x> = ZZ[]
sage: pushout(QQ,P)
Univariate Polynomial Ring in x over Rational Field
sage: ((x+1) + 1/2).parent()
Univariate Polynomial Ring in x over Rational Field
When composing two construction functors, they are sometimes
merged into one, as is the case in the Quotient construction::
sage: Q15, R = (ZZ.quo(15*ZZ)).construction()
sage: Q15
QuotientFunctor
sage: Q35, R = (ZZ.quo(35*ZZ)).construction()
sage: Q35
QuotientFunctor
sage: Q15.merge(Q35)
QuotientFunctor
sage: Q15.merge(Q35)(ZZ)
Ring of integers modulo 5
Functors can not only be applied to objects, but also to morphisms in the
respective categories. For example::
sage: P.<x,y> = ZZ[]
sage: F = P.construction()[0]; F
MPoly[x,y]
sage: A.<a,b> = GF(5)[]
sage: f = A.hom([a+b,a-b],A)
sage: F(A)
Multivariate Polynomial Ring in x, y over Multivariate Polynomial Ring in a, b over Finite Field of size 5
sage: F(f)
Ring endomorphism of Multivariate Polynomial Ring in x, y over Multivariate Polynomial Ring in a, b over Finite Field of size 5
Defn: Induced from base ring by
Ring endomorphism of Multivariate Polynomial Ring in a, b over Finite Field of size 5
Defn: a |--> a + b
b |--> a - b
sage: F(f)(F(A)(x)*a)
(a + b)*x
"""
def __mul__(self, other):
"""
Compose ``self`` and ``other`` to a composite construction
functor, unless one of them is the identity.
NOTE:
The product is in functorial notation, i.e., when applying the
product to an object, the second factor is applied first.
TESTS::
sage: from sage.categories.pushout import IdentityConstructionFunctor
sage: I = IdentityConstructionFunctor()
sage: F = QQ.construction()[0]
sage: P = ZZ['t'].construction()[0]
sage: F*P
FractionField(Poly[t](...))
sage: P*F
Poly[t](FractionField(...))
sage: (F*P)(ZZ)
Fraction Field of Univariate Polynomial Ring in t over Integer Ring
sage: I*P is P
True
sage: F*I is F
True
"""
if not isinstance(self, ConstructionFunctor) and not isinstance(other, ConstructionFunctor):
raise CoercionException("Non-constructive product")
if isinstance(other,IdentityConstructionFunctor):
return self
if isinstance(self,IdentityConstructionFunctor):
return other
return CompositeConstructionFunctor(other, self)
def pushout(self, other):
"""
Composition of two construction functors, ordered by their ranks.
NOTE:
- This method seems not to be used in the coercion model.
- By default, the functor with smaller rank is applied first.
TESTS::
sage: F = QQ.construction()[0]
sage: P = ZZ['t'].construction()[0]
sage: F.pushout(P)
Poly[t](FractionField(...))
sage: P.pushout(F)
Poly[t](FractionField(...))
"""
if self.rank > other.rank:
return self * other
else:
return other * self
def __cmp__(self, other):
"""
Equality here means that they are mathematically equivalent, though they may have
specific implementation data. This method will usually be overloaded in subclasses.
        By default, only the types of the functors are compared. Also see the \code{merge}
function.
TESTS::
sage: from sage.categories.pushout import IdentityConstructionFunctor
sage: I = IdentityConstructionFunctor()
sage: F = QQ.construction()[0]
sage: P = ZZ['t'].construction()[0]
sage: I == F # indirect doctest
False
sage: I == I # indirect doctest
True
"""
return cmp(type(self), type(other))
def __str__(self):
"""
NOTE:
By default, it returns the name of the construction functor's class.
Usually, this method will be overloaded.
TEST::
sage: F = QQ.construction()[0]
sage: F # indirect doctest
FractionField
sage: Q = ZZ.quo(2).construction()[0]
sage: Q # indirect doctest
QuotientFunctor
"""
s = str(type(self))
import re
return re.sub("<.*'.*\.([^.]*)'>", "\\1", s)
def _repr_(self):
"""
NOTE:
By default, it returns the name of the construction functor's class.
Usually, this method will be overloaded.
TEST::
sage: F = QQ.construction()[0]
sage: F # indirect doctest
FractionField
sage: Q = ZZ.quo(2).construction()[0]
sage: Q # indirect doctest
QuotientFunctor
"""
return str(self)
def merge(self, other):
"""
Merge ``self`` with another construction functor, or return None.
NOTE:
The default is to merge only if the two functors coincide. But this
may be overloaded for subclasses, such as the quotient functor.
EXAMPLES::
sage: F = QQ.construction()[0]
sage: P = ZZ['t'].construction()[0]
sage: F.merge(F)
FractionField
sage: F.merge(P)
sage: P.merge(F)
sage: P.merge(P)
Poly[t]
"""
if self == other:
return self
else:
return None
def commutes(self, other):
"""
Determine whether ``self`` commutes with another construction functor.
NOTE:
By default, ``False`` is returned in all cases (even if the two
functors are the same, since in this case :meth:`merge` will apply
anyway). So far there is no construction functor that overloads
this method. Anyway, this method only becomes relevant if two
construction functors have the same rank.
EXAMPLES::
sage: F = QQ.construction()[0]
sage: P = ZZ['t'].construction()[0]
sage: F.commutes(P)
False
sage: P.commutes(F)
False
sage: F.commutes(F)
False
"""
return False
def expand(self):
"""
Decompose ``self`` into a list of construction functors.
NOTE:
The default is to return the list only containing ``self``.
EXAMPLE::
sage: F = QQ.construction()[0]
sage: F.expand()
[FractionField]
sage: Q = ZZ.quo(2).construction()[0]
sage: Q.expand()
[QuotientFunctor]
sage: P = ZZ['t'].construction()[0]
sage: FP = F*P
sage: FP.expand()
[FractionField, Poly[t]]
"""
return [self]
# See the pushout() function below for explanation.
coercion_reversed = False
class CompositeConstructionFunctor(ConstructionFunctor):
"""
A Construction Functor composed by other Construction Functors.
INPUT:
``F1, F2,...``: A list of Construction Functors. The result is the
composition ``F1`` followed by ``F2`` followed by ...
EXAMPLES::
sage: from sage.categories.pushout import CompositeConstructionFunctor
sage: F = CompositeConstructionFunctor(QQ.construction()[0],ZZ['x'].construction()[0],QQ.construction()[0],ZZ['y'].construction()[0])
sage: F
Poly[y](FractionField(Poly[x](FractionField(...))))
sage: F == loads(dumps(F))
True
sage: F == CompositeConstructionFunctor(*F.all)
True
sage: F(GF(2)['t'])
Univariate Polynomial Ring in y over Fraction Field of Univariate Polynomial Ring in x over Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 2 (using NTL)
"""
def __init__(self, *args):
"""
TESTS::
sage: from sage.categories.pushout import CompositeConstructionFunctor
sage: F = CompositeConstructionFunctor(QQ.construction()[0],ZZ['x'].construction()[0],QQ.construction()[0],ZZ['y'].construction()[0])
sage: F
Poly[y](FractionField(Poly[x](FractionField(...))))
sage: F == CompositeConstructionFunctor(*F.all)
True
"""
self.all = []
for c in args:
if isinstance(c, list):
self.all += c
elif isinstance(c, CompositeConstructionFunctor):
self.all += c.all
else:
self.all.append(c)
Functor.__init__(self, self.all[0].domain(), self.all[-1].codomain())
def _apply_functor_to_morphism(self, f):
"""
Apply the functor to an object of ``self``'s domain.
TESTS::
sage: from sage.categories.pushout import CompositeConstructionFunctor
sage: F = CompositeConstructionFunctor(QQ.construction()[0],ZZ['x'].construction()[0],QQ.construction()[0],ZZ['y'].construction()[0])
sage: R.<a,b> = QQ[]
sage: f = R.hom([a+b, a-b])
sage: F(f) # indirect doctest
Ring endomorphism of Univariate Polynomial Ring in y over Fraction Field of Univariate Polynomial Ring in x over Fraction Field of Multivariate Polynomial Ring in a, b over Rational Field
Defn: Induced from base ring by
Ring endomorphism of Fraction Field of Univariate Polynomial Ring in x over Fraction Field of Multivariate Polynomial Ring in a, b over Rational Field
Defn: Induced from base ring by
Ring endomorphism of Univariate Polynomial Ring in x over Fraction Field of Multivariate Polynomial Ring in a, b over Rational Field
Defn: Induced from base ring by
Ring endomorphism of Fraction Field of Multivariate Polynomial Ring in a, b over Rational Field
Defn: a |--> a + b
b |--> a - b
"""
for c in self.all:
f = c(f)
return f
def _apply_functor(self, R):
"""
Apply the functor to an object of ``self``'s domain.
TESTS::
sage: from sage.categories.pushout import CompositeConstructionFunctor
sage: F = CompositeConstructionFunctor(QQ.construction()[0],ZZ['x'].construction()[0],QQ.construction()[0],ZZ['y'].construction()[0])
sage: R.<a,b> = QQ[]
sage: F(R) # indirect doctest
Univariate Polynomial Ring in y over Fraction Field of Univariate Polynomial Ring in x over Fraction Field of Multivariate Polynomial Ring in a, b over Rational Field
"""
for c in self.all:
R = c(R)
return R
def __cmp__(self, other):
"""
TESTS::
sage: from sage.categories.pushout import CompositeConstructionFunctor
sage: F = CompositeConstructionFunctor(QQ.construction()[0],ZZ['x'].construction()[0],QQ.construction()[0],ZZ['y'].construction()[0])
sage: F == loads(dumps(F)) # indirect doctest
True
"""
if isinstance(other, CompositeConstructionFunctor):
return cmp(self.all, other.all)
else:
return cmp(type(self), type(other))
def __mul__(self, other):
"""
        Compose construction functors to a composite construction functor, unless one of them is the identity.
NOTE:
The product is in functorial notation, i.e., when applying the product to an object
then the second factor is applied first.
EXAMPLES::
sage: from sage.categories.pushout import CompositeConstructionFunctor
sage: F1 = CompositeConstructionFunctor(QQ.construction()[0],ZZ['x'].construction()[0])
sage: F2 = CompositeConstructionFunctor(QQ.construction()[0],ZZ['y'].construction()[0])
sage: F1*F2
Poly[x](FractionField(Poly[y](FractionField(...))))
"""
if isinstance(self, CompositeConstructionFunctor):
all = [other] + self.all
elif isinstance(other,IdentityConstructionFunctor):
return self
else:
all = other.all + [self]
return CompositeConstructionFunctor(*all)
def __str__(self):
"""
TESTS::
sage: from sage.categories.pushout import CompositeConstructionFunctor
sage: F = CompositeConstructionFunctor(QQ.construction()[0],ZZ['x'].construction()[0],QQ.construction()[0],ZZ['y'].construction()[0])
sage: F # indirect doctest
Poly[y](FractionField(Poly[x](FractionField(...))))
"""
s = "..."
for c in self.all:
s = "%s(%s)" % (c,s)
return s
def expand(self):
"""
Return expansion of a CompositeConstructionFunctor.
NOTE:
The product over the list of components, as returned by
the ``expand()`` method, is equal to ``self``.
EXAMPLES::
sage: from sage.categories.pushout import CompositeConstructionFunctor
sage: F = CompositeConstructionFunctor(QQ.construction()[0],ZZ['x'].construction()[0],QQ.construction()[0],ZZ['y'].construction()[0])
sage: F
Poly[y](FractionField(Poly[x](FractionField(...))))
sage: prod(F.expand()) == F
True
"""
return list(reversed(self.all))
class IdentityConstructionFunctor(ConstructionFunctor):
"""
A construction functor that is the identity functor.
TESTS::
sage: from sage.categories.pushout import IdentityConstructionFunctor
sage: I = IdentityConstructionFunctor()
sage: I(RR) is RR
True
sage: I == loads(dumps(I))
True
"""
rank = -100
def __init__(self):
"""
TESTS::
sage: from sage.categories.pushout import IdentityConstructionFunctor
sage: I = IdentityConstructionFunctor()
sage: IdentityFunctor(Sets()) == I
True
sage: I(RR) is RR
True
"""
ConstructionFunctor.__init__(self, Sets(), Sets())
def _apply_functor(self, x):
"""
Return the argument unaltered.
TESTS::
sage: from sage.categories.pushout import IdentityConstructionFunctor
sage: I = IdentityConstructionFunctor()
sage: I(RR) is RR # indirect doctest
True
"""
return x
def _apply_functor_to_morphism(self, f):
"""
Return the argument unaltered.
TESTS::
sage: from sage.categories.pushout import IdentityConstructionFunctor
sage: I = IdentityConstructionFunctor()
sage: f = ZZ['t'].hom(['x'],QQ['x'])
sage: I(f) is f # indirect doctest
True
"""
return f
def __cmp__(self, other):
"""
TESTS::
sage: from sage.categories.pushout import IdentityConstructionFunctor
sage: I = IdentityConstructionFunctor()
sage: I == IdentityFunctor(Sets()) # indirect doctest
True
sage: I == QQ.construction()[0]
False
"""
c = cmp(type(self),type(other))
if c:
from sage.categories.functor import IdentityFunctor_generic
if isinstance(other,IdentityFunctor_generic):
return 0
return c
def __mul__(self, other):
"""
        Compose construction functors to a composite construction functor, unless one of them is the identity.
NOTE:
The product is in functorial notation, i.e., when applying the product to an object
then the second factor is applied first.
TESTS::
sage: from sage.categories.pushout import IdentityConstructionFunctor
sage: I = IdentityConstructionFunctor()
sage: F = QQ.construction()[0]
sage: P = ZZ['t'].construction()[0]
sage: I*F is F # indirect doctest
True
sage: F*I is F
True
sage: I*P is P
True
sage: P*I is P
True
"""
if isinstance(self, IdentityConstructionFunctor):
return other
else:
return self
class PolynomialFunctor(ConstructionFunctor):
"""
Construction functor for univariate polynomial rings.
EXAMPLE::
sage: P = ZZ['t'].construction()[0]
sage: P(GF(3))
Univariate Polynomial Ring in t over Finite Field of size 3
sage: P == loads(dumps(P))
True
sage: R.<x,y> = GF(5)[]
sage: f = R.hom([x+2*y,3*x-y],R)
sage: P(f)((x+y)*P(R).0)
(-x + y)*t
By trac ticket #9944, the construction functor distinguishes sparse and
dense polynomial rings. Before, the following example failed::
sage: R.<x> = PolynomialRing(GF(5), sparse=True)
sage: F,B = R.construction()
sage: F(B) is R
True
sage: S.<x> = PolynomialRing(ZZ)
sage: R.has_coerce_map_from(S)
False
sage: S.has_coerce_map_from(R)
False
sage: S.0 + R.0
2*x
sage: (S.0 + R.0).parent()
Univariate Polynomial Ring in x over Finite Field of size 5
sage: (S.0 + R.0).parent().is_sparse()
False
"""
rank = 9
def __init__(self, var, multi_variate=False, sparse=False):
"""
TESTS::
sage: from sage.categories.pushout import PolynomialFunctor
sage: P = PolynomialFunctor('x')
sage: P(GF(3))
Univariate Polynomial Ring in x over Finite Field of size 3
There is an optional parameter ``multi_variate``, but
apparently it is not used::
sage: Q = PolynomialFunctor('x',multi_variate=True)
sage: Q(ZZ)
Univariate Polynomial Ring in x over Integer Ring
sage: Q == P
True
"""
from rings import Rings
Functor.__init__(self, Rings(), Rings())
self.var = var
self.multi_variate = multi_variate
self.sparse = sparse
def _apply_functor(self, R):
"""
Apply the functor to an object of ``self``'s domain.
TEST::
sage: P = ZZ['x'].construction()[0]
sage: P(GF(3)) # indirect doctest
Univariate Polynomial Ring in x over Finite Field of size 3
"""
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
return PolynomialRing(R, self.var, sparse=self.sparse)
def _apply_functor_to_morphism(self, f):
"""
Apply the functor ``self`` to the morphism `f`.
TEST::
sage: P = ZZ['x'].construction()[0]
sage: P(ZZ.hom(GF(3)))
Ring morphism:
From: Univariate Polynomial Ring in x over Integer Ring
To: Univariate Polynomial Ring in x over Finite Field of size 3
Defn: Induced from base ring by
Ring Coercion morphism:
From: Integer Ring
To: Finite Field of size 3
"""
from sage.rings.polynomial.polynomial_ring_homomorphism import PolynomialRingHomomorphism_from_base
R = self._apply_functor(f.domain())
S = self._apply_functor(f.codomain())
return PolynomialRingHomomorphism_from_base(R.Hom(S), f)
def __cmp__(self, other):
"""
TESTS::
sage: from sage.categories.pushout import MultiPolynomialFunctor
sage: Q = MultiPolynomialFunctor(('x',),'lex')
sage: P = ZZ['x'].construction()[0]
sage: P
Poly[x]
sage: Q
MPoly[x]
sage: P == Q
True
sage: P == loads(dumps(P))
True
sage: P == QQ.construction()[0]
False
"""
c = cmp(type(self), type(other))
if c == 0:
c = cmp(self.var, other.var)
elif isinstance(other, MultiPolynomialFunctor):
return -cmp(other, self)
return c
def merge(self, other):
"""
Merge ``self`` with another construction functor, or return None.
NOTE:
Internally, the merging is delegated to the merging of
multipolynomial construction functors. But in effect,
this does the same as the default implementation, that
returns ``None`` unless the to-be-merged functors coincide.
EXAMPLE::
sage: P = ZZ['x'].construction()[0]
sage: Q = ZZ['y','x'].construction()[0]
sage: P.merge(Q)
sage: P.merge(P) is P
True
"""
if isinstance(other, MultiPolynomialFunctor):
return other.merge(self)
elif self == other:
# i.e., they only differ in sparsity
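            # Prefer the dense functor: merging a dense with a sparse
            # polynomial functor yields the dense one, so that the pushout of
            # a dense and a sparse polynomial ring is dense (see the class
            # docstring).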
if not self.sparse:
return self
return other
else:
return None
def __str__(self):
"""
TEST::
sage: P = ZZ['x'].construction()[0]
sage: P # indirect doctest
Poly[x]
"""
return "Poly[%s]" % self.var
class MultiPolynomialFunctor(ConstructionFunctor):
"""
A constructor for multivariate polynomial rings.
EXAMPLES::
sage: P.<x,y> = ZZ[]
sage: F = P.construction()[0]; F
MPoly[x,y]
sage: A.<a,b> = GF(5)[]
sage: F(A)
Multivariate Polynomial Ring in x, y over Multivariate Polynomial Ring in a, b over Finite Field of size 5
sage: f = A.hom([a+b,a-b],A)
sage: F(f)
Ring endomorphism of Multivariate Polynomial Ring in x, y over Multivariate Polynomial Ring in a, b over Finite Field of size 5
Defn: Induced from base ring by
Ring endomorphism of Multivariate Polynomial Ring in a, b over Finite Field of size 5
Defn: a |--> a + b
b |--> a - b
sage: F(f)(F(A)(x)*a)
(a + b)*x
"""
rank = 9
def __init__(self, vars, term_order):
"""
EXAMPLES::
sage: F = sage.categories.pushout.MultiPolynomialFunctor(['x','y'], None)
sage: F
MPoly[x,y]
sage: F(ZZ)
Multivariate Polynomial Ring in x, y over Integer Ring
sage: F(CC)
Multivariate Polynomial Ring in x, y over Complex Field with 53 bits of precision
"""
Functor.__init__(self, Rings(), Rings())
self.vars = vars
self.term_order = term_order
def _apply_functor(self, R):
"""
Apply the functor to an object of ``self``'s domain.
EXAMPLES::
sage: R.<x,y,z> = QQ[]
sage: F = R.construction()[0]; F
MPoly[x,y,z]
sage: type(F)
<class 'sage.categories.pushout.MultiPolynomialFunctor'>
sage: F(ZZ) # indirect doctest
Multivariate Polynomial Ring in x, y, z over Integer Ring
sage: F(RR) # indirect doctest
Multivariate Polynomial Ring in x, y, z over Real Field with 53 bits of precision
"""
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
return PolynomialRing(R, self.vars)
def __cmp__(self, other):
"""
EXAMPLES::
sage: F = ZZ['x,y,z'].construction()[0]
sage: G = QQ['x,y,z'].construction()[0]
sage: F == G
True
sage: G == loads(dumps(G))
True
sage: G = ZZ['x,y'].construction()[0]
sage: F == G
False
"""
c = cmp(type(self), type(other))
if c == 0:
c = cmp(self.vars, other.vars) or cmp(self.term_order, other.term_order)
elif isinstance(other, PolynomialFunctor):
c = cmp(self.vars, (other.var,))
return c
def __mul__(self, other):
"""
If two MPoly functors are given in a row, form a single MPoly functor
with all of the variables.
EXAMPLES::
sage: F = sage.categories.pushout.MultiPolynomialFunctor(['x','y'], None)
sage: G = sage.categories.pushout.MultiPolynomialFunctor(['t'], None)
sage: G*F
MPoly[x,y,t]
"""
if isinstance(other,IdentityConstructionFunctor):
return self
if isinstance(other, MultiPolynomialFunctor):
if self.term_order != other.term_order:
raise CoercionException("Incompatible term orders (%s,%s)." % (self.term_order, other.term_order))
if set(self.vars).intersection(other.vars):
raise CoercionException("Overlapping variables (%s,%s)" % (self.vars, other.vars))
return MultiPolynomialFunctor(other.vars + self.vars, self.term_order)
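        # If ``other`` is a composite construction functor whose last factor
        # is again a (multivariate) polynomial functor, absorb that factor
        # into ``self`` and keep the remaining factors of the composition.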
elif isinstance(other, CompositeConstructionFunctor) \
and isinstance(other.all[-1], MultiPolynomialFunctor):
return CompositeConstructionFunctor(other.all[:-1], self * other.all[-1])
else:
return CompositeConstructionFunctor(other, self)
def merge(self, other):
"""
Merge ``self`` with another construction functor, or return None.
EXAMPLES::
sage: F = sage.categories.pushout.MultiPolynomialFunctor(['x','y'], None)
sage: G = sage.categories.pushout.MultiPolynomialFunctor(['t'], None)
sage: F.merge(G) is None
True
sage: F.merge(F)
MPoly[x,y]
"""
if self == other:
return self
else:
return None
def expand(self):
"""
Decompose ``self`` into a list of construction functors.
EXAMPLES::
sage: F = QQ['x,y,z,t'].construction()[0]; F
MPoly[x,y,z,t]
sage: F.expand()
[MPoly[t], MPoly[z], MPoly[y], MPoly[x]]
Now an actual use case::
sage: R.<x,y,z> = ZZ[]
sage: S.<z,t> = QQ[]
sage: x+t
x + t
sage: parent(x+t)
Multivariate Polynomial Ring in x, y, z, t over Rational Field
sage: T.<y,s> = QQ[]
sage: x + s
Traceback (most recent call last):
...
TypeError: unsupported operand parent(s) for '+': 'Multivariate Polynomial Ring in x, y, z over Integer Ring' and 'Multivariate Polynomial Ring in y, s over Rational Field'
sage: R = PolynomialRing(ZZ, 'x', 500)
sage: S = PolynomialRing(GF(5), 'x', 200)
sage: R.gen(0) + S.gen(0)
2*x0
"""
if len(self.vars) <= 1:
return [self]
else:
return [MultiPolynomialFunctor((x,), self.term_order) for x in reversed(self.vars)]
def __str__(self):
"""
TEST::
sage: QQ['x,y,z,t'].construction()[0]
MPoly[x,y,z,t]
"""
return "MPoly[%s]" % ','.join(self.vars)
class InfinitePolynomialFunctor(ConstructionFunctor):
"""
A Construction Functor for Infinite Polynomial Rings (see :mod:`~sage.rings.polynomial.infinite_polynomial_ring`).
AUTHOR:
-- Simon King
This construction functor is used to provide uniqueness of infinite polynomial rings as parent structures.
As usual, the construction functor allows for constructing pushouts.
Another purpose is to avoid name conflicts of variables of the to-be-constructed infinite polynomial ring with
variables of the base ring, and moreover to keep the internal structure of an Infinite Polynomial Ring as simple
as possible: If variables `v_1,...,v_n` of the given base ring generate an *ordered* sub-monoid of the monomials
of the ambient Infinite Polynomial Ring, then they are removed from the base ring and merged with the generators
    of the ambient ring. However, if the orders do not match, an error is raised, since the name conflict
    cannot be resolved by merging.
EXAMPLES::
sage: A.<a,b> = InfinitePolynomialRing(ZZ['t'])
sage: A.construction()
[InfPoly{[a,b], "lex", "dense"},
Univariate Polynomial Ring in t over Integer Ring]
sage: type(_[0])
<class 'sage.categories.pushout.InfinitePolynomialFunctor'>
sage: B.<x,y,a_3,a_1> = PolynomialRing(QQ, order='lex')
sage: B.construction()
(MPoly[x,y,a_3,a_1], Rational Field)
sage: A.construction()[0]*B.construction()[0]
InfPoly{[a,b], "lex", "dense"}(MPoly[x,y](...))
Apparently the variables `a_1,a_3` of the polynomial ring are merged with the variables
`a_0, a_1, a_2, ...` of the infinite polynomial ring; indeed, they form an ordered sub-structure.
However, if the polynomial ring was given a different ordering, merging would not be allowed,
resulting in a name conflict::
sage: A.construction()[0]*PolynomialRing(QQ,names=['x','y','a_3','a_1']).construction()[0]
Traceback (most recent call last):
...
CoercionException: Incompatible term orders lex, degrevlex
In an infinite polynomial ring with generator `a_\\ast`, the variable `a_3` will always be greater
than the variable `a_1`. Hence, the orders are incompatible in the next example as well::
sage: A.construction()[0]*PolynomialRing(QQ,names=['x','y','a_1','a_3'], order='lex').construction()[0]
Traceback (most recent call last):
...
CoercionException: Overlapping variables (('a', 'b'),['a_1', 'a_3']) are incompatible
Another requirement is that after merging the order of the remaining variables must be unique.
This is not the case in the following example, since it is not clear whether the variables `x,y`
should be greater or smaller than the variables `b_\\ast`::
sage: A.construction()[0]*PolynomialRing(QQ,names=['a_3','a_1','x','y'], order='lex').construction()[0]
Traceback (most recent call last):
...
CoercionException: Overlapping variables (('a', 'b'),['a_3', 'a_1']) are incompatible
Since the construction functors are actually used to construct infinite polynomial rings, the following
result is no surprise::
sage: C.<a,b> = InfinitePolynomialRing(B); C
Infinite polynomial ring in a, b over Multivariate Polynomial Ring in x, y over Rational Field
There is also an overlap in the next example::
sage: X.<w,x,y> = InfinitePolynomialRing(ZZ)
sage: Y.<x,y,z> = InfinitePolynomialRing(QQ)
    `X` and `Y` have overlapping generators `x_\\ast, y_\\ast`. Since the default lexicographic order is
used in both rings, it gives rise to isomorphic sub-monoids in both `X` and `Y`. They are merged in the
pushout, which also yields a common parent for doing arithmetic::
sage: P = sage.categories.pushout.pushout(Y,X); P
Infinite polynomial ring in w, x, y, z over Rational Field
sage: w[2]+z[3]
w_2 + z_3
sage: _.parent() is P
True
"""
# We do provide merging with polynomial rings. However, it seems that it is better
# to have a greater rank, since we want to apply InfinitePolynomialFunctor *after*
# [Multi]PolynomialFunctor, which have rank 9. But there is the MatrixFunctor, which
# has rank 10. So, do fine tuning...
rank = 9.5
def __init__(self, gens, order, implementation):
"""
TEST::
sage: F = sage.categories.pushout.InfinitePolynomialFunctor(['a','b','x'],'degrevlex','sparse'); F # indirect doctest
InfPoly{[a,b,x], "degrevlex", "sparse"}
sage: F == loads(dumps(F))
True
"""
if len(gens)<1:
raise ValueError("Infinite Polynomial Rings have at least one generator")
ConstructionFunctor.__init__(self, Rings(), Rings())
self._gens = tuple(gens)
self._order = order
self._imple = implementation
def _apply_functor_to_morphism(self, f):
"""
        Morphisms for infinite polynomial rings are not implemented yet.
TEST::
sage: P.<x,y> = QQ[]
sage: R.<alpha> = InfinitePolynomialRing(P)
sage: f = P.hom([x+y,x-y],P)
sage: R.construction()[0](f) # indirect doctest
Traceback (most recent call last):
...
            NotImplementedError: Morphisms for infinite polynomial rings are not implemented yet.
"""
        raise NotImplementedError("Morphisms for infinite polynomial rings are not implemented yet.")
def _apply_functor(self, R):
"""
Apply the functor to an object of ``self``'s domain.
TEST::
sage: F = sage.categories.pushout.InfinitePolynomialFunctor(['a','b','x'],'degrevlex','sparse'); F
InfPoly{[a,b,x], "degrevlex", "sparse"}
sage: F(QQ['t']) # indirect doctest
Infinite polynomial ring in a, b, x over Univariate Polynomial Ring in t over Rational Field
"""
from sage.rings.polynomial.infinite_polynomial_ring import InfinitePolynomialRing
return InfinitePolynomialRing(R, self._gens, order=self._order, implementation=self._imple)
def __str__(self):
"""
TEST::
sage: F = sage.categories.pushout.InfinitePolynomialFunctor(['a','b','x'],'degrevlex','sparse'); F # indirect doctest
InfPoly{[a,b,x], "degrevlex", "sparse"}
"""
return 'InfPoly{[%s], "%s", "%s"}'%(','.join(self._gens), self._order, self._imple)
def __cmp__(self, other):
"""
TEST::
sage: F = sage.categories.pushout.InfinitePolynomialFunctor(['a','b','x'],'degrevlex','sparse'); F # indirect doctest
InfPoly{[a,b,x], "degrevlex", "sparse"}
sage: F == loads(dumps(F)) # indirect doctest
True
sage: F == sage.categories.pushout.InfinitePolynomialFunctor(['a','b','x'],'deglex','sparse')
False
"""
c = cmp(type(self), type(other))
if c == 0:
c = cmp(self._gens, other._gens) or cmp(self._order, other._order) or cmp(self._imple, other._imple)
return c
def __mul__(self, other):
"""
        Compose construction functors to a composite construction functor, unless one of them is the identity.
NOTE:
The product is in functorial notation, i.e., when applying the product to an object
then the second factor is applied first.
TESTS::
sage: F1 = QQ['a','x_2','x_1','y_3','y_2'].construction()[0]; F1
MPoly[a,x_2,x_1,y_3,y_2]
sage: F2 = InfinitePolynomialRing(QQ, ['x','y'],order='degrevlex').construction()[0]; F2
InfPoly{[x,y], "degrevlex", "dense"}
sage: F3 = InfinitePolynomialRing(QQ, ['x','y'],order='degrevlex',implementation='sparse').construction()[0]; F3
InfPoly{[x,y], "degrevlex", "sparse"}
sage: F2*F1
InfPoly{[x,y], "degrevlex", "dense"}(Poly[a](...))
sage: F3*F1
InfPoly{[x,y], "degrevlex", "sparse"}(Poly[a](...))
sage: F4 = sage.categories.pushout.FractionField()
sage: F2*F4
InfPoly{[x,y], "degrevlex", "dense"}(FractionField(...))
"""
if isinstance(other,IdentityConstructionFunctor):
return self
        if isinstance(other, self.__class__):
INT = set(self._gens).intersection(other._gens)
if INT:
# if there is overlap of generators, it must only be at the ends, so that
# the resulting order after the merging is unique
if other._gens[-len(INT):] != self._gens[:len(INT)]:
raise CoercionException("Overlapping variables (%s,%s) are incompatible" % (self._gens, other._gens))
OUTGENS = list(other._gens) + list(self._gens[len(INT):])
else:
OUTGENS = list(other._gens) + list(self._gens)
# the orders must coincide
if self._order != other._order:
return CompositeConstructionFunctor(other, self)
# the implementations must coincide
if self._imple != other._imple:
return CompositeConstructionFunctor(other, self)
return InfinitePolynomialFunctor(OUTGENS, self._order, self._imple)
# Polynomial Constructor
# Idea: We merge into self, if the polynomial functor really provides a substructure,
# even respecting the order. Note that, if the pushout is computed, only *one* variable
# will occur in the polynomial constructor. Hence, any order is fine, which is exactly
# what we need in order to have coercion maps for different orderings.
if isinstance(other, MultiPolynomialFunctor) or isinstance(other, PolynomialFunctor):
if isinstance(other, MultiPolynomialFunctor):
othervars = other.vars
else:
othervars = [other.var]
OverlappingGens = [] ## Generator names of variable names of the MultiPolynomialFunctor
## that can be interpreted as variables in self
OverlappingVars = [] ## The variable names of the MultiPolynomialFunctor
## that can be interpreted as variables in self
RemainingVars = [x for x in othervars]
IsOverlap = False
BadOverlap = False
            for x in othervars:
                if x.count('_') == 1:
                    g,n = x.split('_')
                    if n.isdigit():
                        if g.isalnum(): # we can interpret x in any InfinitePolynomialRing
                            if g in self._gens: # we can interpret x in self, hence, we will not use it as a variable anymore.
                                RemainingVars.pop(RemainingVars.index(x))
                                IsOverlap = True # some variables of other can be interpreted in self.
                                if OverlappingVars:
                                    # Is OverlappingVars in the right order?
                                    g0,n0 = OverlappingVars[-1].split('_')
                                    i = self._gens.index(g)
                                    i0 = self._gens.index(g0)
                                    if i<i0: # wrong order
                                        BadOverlap = True
                                    if i==i0 and int(n)>int(n0): # wrong order
                                        BadOverlap = True
                                OverlappingVars.append(x)
                            else:
                                if IsOverlap: # The overlap must be on the right end of the variable list
                                    BadOverlap = True
                        else:
                            if IsOverlap: # The overlap must be on the right end of the variable list
                                BadOverlap = True
                    else:
                        if IsOverlap: # The overlap must be on the right end of the variable list
                            BadOverlap = True
                else:
                    if IsOverlap: # The overlap must be on the right end of the variable list
                        BadOverlap = True
if BadOverlap: # the overlapping variables appear in the wrong order
raise CoercionException("Overlapping variables (%s,%s) are incompatible" % (self._gens, OverlappingVars))
if len(OverlappingVars)>1: # multivariate, hence, the term order matters
if other.term_order.name()!=self._order:
raise CoercionException("Incompatible term orders %s, %s" % (self._order, other.term_order.name()))
# ok, the overlap is fine, we will return something.
if RemainingVars: # we can only partially merge other into self
if len(RemainingVars)>1:
return CompositeConstructionFunctor(MultiPolynomialFunctor(RemainingVars,term_order=other.term_order), self)
return CompositeConstructionFunctor(PolynomialFunctor(RemainingVars[0]), self)
return self
return CompositeConstructionFunctor(other, self)
def merge(self,other):
"""
Merge two construction functors of infinite polynomial rings, regardless of monomial order and implementation.
The purpose is to have a pushout (and thus, arithmetic) even in cases when the parents are isomorphic as
rings, but not as ordered rings.
EXAMPLES::
sage: X.<x,y> = InfinitePolynomialRing(QQ,implementation='sparse')
sage: Y.<x,y> = InfinitePolynomialRing(QQ,order='degrevlex')
sage: X.construction()
[InfPoly{[x,y], "lex", "sparse"}, Rational Field]
sage: Y.construction()
[InfPoly{[x,y], "degrevlex", "dense"}, Rational Field]
sage: Y.construction()[0].merge(Y.construction()[0])
InfPoly{[x,y], "degrevlex", "dense"}
sage: y[3] + X(x[2])
x_2 + y_3
sage: _.parent().construction()
[InfPoly{[x,y], "degrevlex", "dense"}, Rational Field]
"""
# Merging is only done if the ranks of self and other are the same.
# It may happen that other is a substructure of self up to the monomial order
# and the implementation. And this is when we want to merge, in order to
# provide multiplication for rings with different term orderings.
if not isinstance(other, InfinitePolynomialFunctor):
return None
if set(other._gens).issubset(self._gens):
return self
return None
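        # NOTE: The code below is unreachable because of the ``return None``
        # above; it sketches a more permissive merging strategy and is kept
        # for reference only.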
try:
OUT = self*other
# The following happens if "other" has the same order type etc.
if not isinstance(OUT, CompositeConstructionFunctor):
return OUT
except CoercionException:
pass
if isinstance(other,InfinitePolynomialFunctor):
# We don't require that the orders coincide. This is a difference to self*other
# We only merge if other's generators are an ordered subset of self's generators
for g in other._gens:
if g not in self._gens:
return None
# The sequence of variables is part of the ordering. It must coincide in both rings
Ind = [self._gens.index(g) for g in other._gens]
if sorted(Ind)!=Ind:
return None
            # OK, other merges into self. Now, choose the default dense implementation,
# unless both functors refer to the sparse implementation
if self._imple != other._imple:
return InfinitePolynomialFunctor(self._gens, self._order, 'dense')
return self
return None
def expand(self):
"""
Decompose the functor `F` into sub-functors, whose product returns `F`.
EXAMPLES::
sage: F = InfinitePolynomialRing(QQ, ['x','y'],order='degrevlex').construction()[0]; F
InfPoly{[x,y], "degrevlex", "dense"}
sage: F.expand()
[InfPoly{[y], "degrevlex", "dense"}, InfPoly{[x], "degrevlex", "dense"}]
sage: F = InfinitePolynomialRing(QQ, ['x','y','z'],order='degrevlex').construction()[0]; F
InfPoly{[x,y,z], "degrevlex", "dense"}
sage: F.expand()
[InfPoly{[z], "degrevlex", "dense"},
InfPoly{[y], "degrevlex", "dense"},
InfPoly{[x], "degrevlex", "dense"}]
sage: prod(F.expand())==F
True
"""
if len(self._gens)==1:
return [self]
return [InfinitePolynomialFunctor((x,), self._order, self._imple) for x in reversed(self._gens)]
class MatrixFunctor(ConstructionFunctor):
"""
A construction functor for matrices over rings.
EXAMPLES::
sage: MS = MatrixSpace(ZZ,2, 3)
sage: F = MS.construction()[0]; F
MatrixFunctor
sage: MS = MatrixSpace(ZZ,2)
sage: F = MS.construction()[0]; F
MatrixFunctor
sage: P.<x,y> = QQ[]
sage: R = F(P); R
Full MatrixSpace of 2 by 2 dense matrices over Multivariate Polynomial Ring in x, y over Rational Field
sage: f = P.hom([x+y,x-y],P); F(f)
Ring endomorphism of Full MatrixSpace of 2 by 2 dense matrices over Multivariate Polynomial Ring in x, y over Rational Field
Defn: Induced from base ring by
Ring endomorphism of Multivariate Polynomial Ring in x, y over Rational Field
Defn: x |--> x + y
y |--> x - y
sage: M = R([x,y,x*y,x+y])
sage: F(f)(M)
[ x + y x - y]
[x^2 - y^2 2*x]
"""
rank = 10
def __init__(self, nrows, ncols, is_sparse=False):
"""
TEST::
sage: from sage.categories.pushout import MatrixFunctor
sage: F = MatrixFunctor(2,3)
sage: F == MatrixSpace(ZZ,2,3).construction()[0]
True
sage: F.codomain()
Category of commutative additive groups
sage: R = MatrixSpace(ZZ,2,2).construction()[0]
sage: R.codomain()
Category of rings
sage: F(ZZ)
Full MatrixSpace of 2 by 3 dense matrices over Integer Ring
sage: F(ZZ) in F.codomain()
True
sage: R(GF(2))
Full MatrixSpace of 2 by 2 dense matrices over Finite Field of size 2
sage: R(GF(2)) in R.codomain()
True
"""
if nrows == ncols:
Functor.__init__(self, Rings(), Rings()) # Algebras() takes a base ring
else:
# Functor.__init__(self, Rings(), MatrixAlgebras()) # takes a base ring
Functor.__init__(self, Rings(), CommutativeAdditiveGroups()) # not a nice solution, but the best we can do.
self.nrows = nrows
self.ncols = ncols
self.is_sparse = is_sparse
def _apply_functor(self, R):
"""
Apply the functor to an object of ``self``'s domain.
TEST:
        The following is a test against a bug discussed at ticket #8800::
sage: F = MatrixSpace(ZZ,2,3).construction()[0]
sage: F(RR) # indirect doctest
Full MatrixSpace of 2 by 3 dense matrices over Real Field with 53 bits of precision
sage: F(RR) in F.codomain()
True
"""
from sage.matrix.matrix_space import MatrixSpace
return MatrixSpace(R, self.nrows, self.ncols, sparse=self.is_sparse)
def __cmp__(self, other):
"""
TEST::
sage: F = MatrixSpace(ZZ,2,3).construction()[0]
sage: F == loads(dumps(F))
True
sage: F == MatrixSpace(ZZ,2,2).construction()[0]
False
"""
c = cmp(type(self), type(other))
if c == 0:
c = cmp((self.nrows, self.ncols), (other.nrows, other.ncols))
return c
def merge(self, other):
"""
        Merging only happens if both functors are matrix functors of the same dimension.
The result is sparse if and only if both given functors are sparse.
EXAMPLE::
sage: F1 = MatrixSpace(ZZ,2,2).construction()[0]
sage: F2 = MatrixSpace(ZZ,2,3).construction()[0]
sage: F3 = MatrixSpace(ZZ,2,2,sparse=True).construction()[0]
sage: F1.merge(F2)
sage: F1.merge(F3)
MatrixFunctor
sage: F13 = F1.merge(F3)
sage: F13.is_sparse
False
sage: F1.is_sparse
False
sage: F3.is_sparse
True
sage: F3.merge(F3).is_sparse
True
"""
if self != other:
return None
else:
return MatrixFunctor(self.nrows, self.ncols, self.is_sparse and other.is_sparse)
class LaurentPolynomialFunctor(ConstructionFunctor):
"""
Construction functor for Laurent polynomial rings.
EXAMPLES::
sage: L.<t> = LaurentPolynomialRing(ZZ)
sage: F = L.construction()[0]
sage: F
LaurentPolynomialFunctor
sage: F(QQ)
Univariate Laurent Polynomial Ring in t over Rational Field
sage: K.<x> = LaurentPolynomialRing(ZZ)
sage: F(K)
Univariate Laurent Polynomial Ring in t over Univariate Laurent Polynomial Ring in x over Integer Ring
sage: P.<x,y> = ZZ[]
sage: f = P.hom([x+2*y,3*x-y],P)
sage: F(f)
Ring endomorphism of Univariate Laurent Polynomial Ring in t over Multivariate Polynomial Ring in x, y over Integer Ring
Defn: Induced from base ring by
Ring endomorphism of Multivariate Polynomial Ring in x, y over Integer Ring
Defn: x |--> x + 2*y
y |--> 3*x - y
sage: F(f)(x*F(P).gen()^-2+y*F(P).gen()^3)
(x + 2*y)*t^-2 + (3*x - y)*t^3
"""
rank = 9
def __init__(self, var, multi_variate=False):
"""
INPUT:
- ``var``, a string or a list of strings
- ``multi_variate``, optional bool, default ``False`` if ``var`` is a string
and ``True`` otherwise: If ``True``, application to a Laurent polynomial
ring yields a multivariate Laurent polynomial ring.
TESTS::
sage: from sage.categories.pushout import LaurentPolynomialFunctor
sage: F1 = LaurentPolynomialFunctor('t')
sage: F2 = LaurentPolynomialFunctor('s', multi_variate=True)
sage: F3 = LaurentPolynomialFunctor(['s','t'])
sage: F1(F2(QQ))
Univariate Laurent Polynomial Ring in t over Univariate Laurent Polynomial Ring in s over Rational Field
sage: F2(F1(QQ))
Multivariate Laurent Polynomial Ring in t, s over Rational Field
sage: F3(QQ)
Multivariate Laurent Polynomial Ring in s, t over Rational Field
"""
Functor.__init__(self, Rings(), Rings())
if not isinstance(var, (six.string_types,tuple,list)):
raise TypeError("variable name or list of variable names expected")
self.var = var
self.multi_variate = multi_variate or not isinstance(var, six.string_types)
def _apply_functor(self, R):
"""
Apply the functor to an object of ``self``'s domain.
TESTS::
sage: from sage.categories.pushout import LaurentPolynomialFunctor
sage: F1 = LaurentPolynomialFunctor('t')
sage: F2 = LaurentPolynomialFunctor('s', multi_variate=True)
sage: F3 = LaurentPolynomialFunctor(['s','t'])
sage: F1(F2(QQ)) # indirect doctest
Univariate Laurent Polynomial Ring in t over Univariate Laurent Polynomial Ring in s over Rational Field
sage: F2(F1(QQ))
Multivariate Laurent Polynomial Ring in t, s over Rational Field
sage: F3(QQ)
Multivariate Laurent Polynomial Ring in s, t over Rational Field
"""
from sage.rings.polynomial.laurent_polynomial_ring import LaurentPolynomialRing, is_LaurentPolynomialRing
if self.multi_variate and is_LaurentPolynomialRing(R):
return LaurentPolynomialRing(R.base_ring(), (list(R.variable_names()) + [self.var]))
else:
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
return LaurentPolynomialRing(R, self.var)
def __cmp__(self, other):
"""
TESTS::
sage: from sage.categories.pushout import LaurentPolynomialFunctor
sage: F1 = LaurentPolynomialFunctor('t')
sage: F2 = LaurentPolynomialFunctor('t', multi_variate=True)
sage: F3 = LaurentPolynomialFunctor(['s','t'])
sage: F1 == F2
True
sage: F1 == loads(dumps(F1))
True
sage: F1 == F3
False
sage: F1 == QQ.construction()[0]
False
"""
c = cmp(type(self), type(other))
if c == 0:
c = cmp(self.var, other.var)
return c
def merge(self, other):
"""
Two Laurent polynomial construction functors merge if the variable names coincide.
The result is multivariate if one of the arguments is multivariate.
EXAMPLE::
sage: from sage.categories.pushout import LaurentPolynomialFunctor
sage: F1 = LaurentPolynomialFunctor('t')
sage: F2 = LaurentPolynomialFunctor('t', multi_variate=True)
sage: F1.merge(F2)
LaurentPolynomialFunctor
sage: F1.merge(F2)(LaurentPolynomialRing(GF(2),'a'))
Multivariate Laurent Polynomial Ring in a, t over Finite Field of size 2
sage: F1.merge(F1)(LaurentPolynomialRing(GF(2),'a'))
Univariate Laurent Polynomial Ring in t over Univariate Laurent Polynomial Ring in a over Finite Field of size 2
"""
if self == other or isinstance(other, PolynomialFunctor) and self.var == other.var:
return LaurentPolynomialFunctor(self.var, (self.multi_variate or other.multi_variate))
else:
return None
class VectorFunctor(ConstructionFunctor):
"""
A construction functor for free modules over commutative rings.
EXAMPLE::
sage: F = (ZZ^3).construction()[0]
sage: F
VectorFunctor
sage: F(GF(2)['t'])
Ambient free module of rank 3 over the principal ideal domain Univariate Polynomial Ring in t over Finite Field of size 2 (using NTL)
"""
rank = 10 # ranking of functor, not rank of module.
# This coincides with the rank of the matrix construction functor, but this is OK since they can not both be applied in any order
def __init__(self, n, is_sparse=False, inner_product_matrix=None):
"""
INPUT:
- ``n``, the rank of the to-be-created modules (non-negative integer)
- ``is_sparse`` (optional bool, default ``False``), create sparse implementation of modules
- ``inner_product_matrix``: ``n`` by ``n`` matrix, used to compute inner products in the
to-be-created modules
TEST::
sage: from sage.categories.pushout import VectorFunctor
sage: F1 = VectorFunctor(3, inner_product_matrix = Matrix(3,3,range(9)))
sage: F1.domain()
Category of commutative rings
sage: F1.codomain()
Category of commutative additive groups
sage: M1 = F1(ZZ)
sage: M1.is_sparse()
False
sage: v = M1([3, 2, 1])
sage: v*Matrix(3,3,range(9))*v.column()
(96)
sage: v.inner_product(v)
96
sage: F2 = VectorFunctor(3, is_sparse=True)
sage: M2 = F2(QQ); M2; M2.is_sparse()
Sparse vector space of dimension 3 over Rational Field
True
"""
# Functor.__init__(self, Rings(), FreeModules()) # FreeModules() takes a base ring
        # Functor.__init__(self, Objects(), Objects()) # Objects() makes no sense, since FreeModule raises an error, e.g., on Set(['a',1]).
## FreeModule requires a commutative ring. Thus, we have
Functor.__init__(self, CommutativeRings(), CommutativeAdditiveGroups())
self.n = n
self.is_sparse = is_sparse
self.inner_product_matrix = inner_product_matrix
def _apply_functor(self, R):
"""
Apply the functor to an object of ``self``'s domain.
TESTS::
sage: from sage.categories.pushout import VectorFunctor
sage: F1 = VectorFunctor(3, inner_product_matrix = Matrix(3,3,range(9)))
sage: M1 = F1(ZZ) # indirect doctest
sage: M1.is_sparse()
False
sage: v = M1([3, 2, 1])
sage: v*Matrix(3,3,range(9))*v.column()
(96)
sage: v.inner_product(v)
96
sage: F2 = VectorFunctor(3, is_sparse=True)
sage: M2 = F2(QQ); M2; M2.is_sparse()
Sparse vector space of dimension 3 over Rational Field
True
sage: v = M2([3, 2, 1])
sage: v.inner_product(v)
14
"""
from sage.modules.free_module import FreeModule
return FreeModule(R, self.n, sparse=self.is_sparse, inner_product_matrix=self.inner_product_matrix)
def _apply_functor_to_morphism(self, f):
"""
This is not implemented yet.
TEST::
sage: F = (ZZ^3).construction()[0]
sage: P.<x,y> = ZZ[]
sage: f = P.hom([x+2*y,3*x-y],P)
sage: F(f) # indirect doctest
Traceback (most recent call last):
...
NotImplementedError: Can not create induced morphisms of free modules yet
"""
## TODO: Implement this!
raise NotImplementedError("Can not create induced morphisms of free modules yet")
def __cmp__(self, other):
"""
Only the rank of the to-be-created modules is compared, *not* the inner product matrix.
TESTS::
sage: from sage.categories.pushout import VectorFunctor
sage: F1 = VectorFunctor(3, inner_product_matrix = Matrix(3,3,range(9)))
sage: F2 = (ZZ^3).construction()[0]
sage: F1 == F2
True
sage: F1(QQ) == F2(QQ)
True
sage: F1(QQ).inner_product_matrix() == F2(QQ).inner_product_matrix()
False
sage: F1 == loads(dumps(F1))
True
"""
c = cmp(type(self), type(other))
if c == 0:
c = cmp(self.n, other.n)
return c
def merge(self, other):
"""
        Two constructors of free modules merge if the module ranks coincide. If both
have explicitly given inner product matrices, they must coincide as well.
EXAMPLE:
Two modules without explicitly given inner product allow coercion::
sage: M1 = QQ^3
sage: P.<t> = ZZ[]
sage: M2 = FreeModule(P,3)
sage: M1([1,1/2,1/3]) + M2([t,t^2+t,3]) # indirect doctest
(t + 1, t^2 + t + 1/2, 10/3)
If only one summand has an explicit inner product, the result will be provided
with it::
sage: M3 = FreeModule(P,3, inner_product_matrix = Matrix(3,3,range(9)))
sage: M1([1,1/2,1/3]) + M3([t,t^2+t,3])
(t + 1, t^2 + t + 1/2, 10/3)
sage: (M1([1,1/2,1/3]) + M3([t,t^2+t,3])).parent().inner_product_matrix()
[0 1 2]
[3 4 5]
[6 7 8]
If both summands have an explicit inner product (even if it is the standard
inner product), then the products must coincide. The only difference between
``M1`` and ``M4`` in the following example is the fact that the default
inner product was *explicitly* requested for ``M4``. It is therefore not
possible to coerce with a different inner product::
sage: M4 = FreeModule(QQ,3, inner_product_matrix = Matrix(3,3,1))
sage: M4 == M1
True
sage: M4.inner_product_matrix() == M1.inner_product_matrix()
True
sage: M4([1,1/2,1/3]) + M3([t,t^2+t,3]) # indirect doctest
Traceback (most recent call last):
...
TypeError: unsupported operand parent(s) for '+': 'Ambient quadratic space of dimension 3 over Rational Field
Inner product matrix:
[1 0 0]
[0 1 0]
[0 0 1]' and 'Ambient free quadratic module of rank 3 over the integral domain Univariate Polynomial Ring in t over Integer Ring
Inner product matrix:
[0 1 2]
[3 4 5]
[6 7 8]'
"""
if self != other:
return None
if self.inner_product_matrix is None:
return VectorFunctor(self.n, self.is_sparse and other.is_sparse, other.inner_product_matrix)
if other.inner_product_matrix is None:
return VectorFunctor(self.n, self.is_sparse and other.is_sparse, self.inner_product_matrix)
# At this point, we know that the user wants to take care of the inner product.
# So, we only merge if both coincide:
if self.inner_product_matrix != other.inner_product_matrix:
return None
else:
return VectorFunctor(self.n, self.is_sparse and other.is_sparse, self.inner_product_matrix)
class SubspaceFunctor(ConstructionFunctor):
"""
Constructing a subspace of an ambient free module, given by a basis.
NOTE:
This construction functor keeps track of the basis. It can only be applied
to free modules into which this basis coerces.
EXAMPLES::
sage: M = ZZ^3
sage: S = M.submodule([(1,2,3),(4,5,6)]); S
Free module of degree 3 and rank 2 over Integer Ring
Echelon basis matrix:
[1 2 3]
[0 3 6]
sage: F = S.construction()[0]
sage: F(GF(2)^3)
Vector space of degree 3 and dimension 2 over Finite Field of size 2
User basis matrix:
[1 0 1]
[0 1 0]
"""
rank = 11 # ranking of functor, not rank of module
# The subspace construction returns an object admitting a coercion
# map into the original, not vice versa.
coercion_reversed = True
def __init__(self, basis):
"""
INPUT:
``basis``: a list of elements of a free module.
TEST::
sage: from sage.categories.pushout import SubspaceFunctor
sage: M = ZZ^3
sage: F = SubspaceFunctor([M([1,2,3]),M([4,5,6])])
sage: F(GF(5)^3)
Vector space of degree 3 and dimension 2 over Finite Field of size 5
User basis matrix:
[1 2 3]
[4 0 1]
"""
## Functor.__init__(self, FreeModules(), FreeModules()) # takes a base ring
## Functor.__init__(self, Objects(), Objects()) # is too general
## It seems that the category of commutative additive groups
## currently is the smallest base ring free category that
## contains in- and output
Functor.__init__(self, CommutativeAdditiveGroups(), CommutativeAdditiveGroups())
self.basis = basis
def _apply_functor(self, ambient):
"""
Apply the functor to an object of ``self``'s domain.
TESTS::
sage: M = ZZ^3
sage: S = M.submodule([(1,2,3),(4,5,6)]); S
Free module of degree 3 and rank 2 over Integer Ring
Echelon basis matrix:
[1 2 3]
[0 3 6]
sage: F = S.construction()[0]
sage: F(GF(2)^3) # indirect doctest
Vector space of degree 3 and dimension 2 over Finite Field of size 2
User basis matrix:
[1 0 1]
[0 1 0]
"""
return ambient.span_of_basis(self.basis)
def _apply_functor_to_morphism(self, f):
"""
This is not implemented yet.
TEST::
sage: F = (ZZ^3).span([(1,2,3),(4,5,6)]).construction()[0]
sage: P.<x,y> = ZZ[]
sage: f = P.hom([x+2*y,3*x-y],P)
sage: F(f) # indirect doctest
Traceback (most recent call last):
...
NotImplementedError: Can not create morphisms of free sub-modules yet
"""
raise NotImplementedError("Can not create morphisms of free sub-modules yet")
def __cmp__(self, other):
"""
TEST::
sage: F1 = (GF(5)^3).span([(1,2,3),(4,5,6)]).construction()[0]
sage: F2 = (ZZ^3).span([(1,2,3),(4,5,6)]).construction()[0]
sage: F3 = (QQ^3).span([(1,2,3),(4,5,6)]).construction()[0]
sage: F4 = (ZZ^3).span([(1,0,-1),(0,1,2)]).construction()[0]
sage: F1 == loads(dumps(F1))
True
The ``span`` method automatically transforms the given basis into
        echelon form. The bases look as follows::
sage: F1.basis
[
(1, 0, 4),
(0, 1, 2)
]
sage: F2.basis
[
(1, 2, 3),
(0, 3, 6)
]
sage: F3.basis
[
(1, 0, -1),
(0, 1, 2)
]
sage: F4.basis
[
(1, 0, -1),
(0, 1, 2)
]
The basis of ``F2`` is modulo 5 different from the other bases.
So, we have::
sage: F1 != F2 != F3
True
The bases of ``F1``, ``F3`` and ``F4`` are the same modulo 5; however,
there is no coercion from ``QQ^3`` to ``GF(5)^3``. Therefore, we have::
sage: F1 == F3
False
But there are coercions from ``ZZ^3`` to ``QQ^3`` and ``GF(5)^3``, thus::
sage: F1 == F4 == F3
True
"""
c = cmp(type(self), type(other))
if c == 0:
# since comparing the basis involves constructing the pushout
# of the ambient module, we can not do:
#c = cmp(self.basis, other.basis)
# Instead, we only test whether there are coercions.
L = self.basis.universe()
R = other.basis.universe()
c = cmp(L,R)
if L.has_coerce_map_from(R):
c = cmp(tuple(self.basis),tuple(L(x) for x in other.basis))
elif R.has_coerce_map_from(L):
c = cmp(tuple(other.basis),tuple(R(x) for x in self.basis))
return c
def merge(self, other):
"""
        Two Subspace Functors are merged into the construction functor of the sum of the two subspaces.
EXAMPLE::
sage: M = GF(5)^3
sage: S1 = M.submodule([(1,2,3),(4,5,6)])
sage: S2 = M.submodule([(2,2,3)])
sage: F1 = S1.construction()[0]
sage: F2 = S2.construction()[0]
sage: F1.merge(F2)
SubspaceFunctor
sage: F1.merge(F2)(GF(5)^3) == S1+S2
True
sage: F1.merge(F2)(GF(5)['t']^3)
Free module of degree 3 and rank 3 over Univariate Polynomial Ring in t over Finite Field of size 5
User basis matrix:
[1 0 0]
[0 1 0]
[0 0 1]
TEST::
sage: P.<t> = ZZ[]
sage: S1 = (ZZ^3).submodule([(1,2,3),(4,5,6)])
sage: S2 = (Frac(P)^3).submodule([(t,t^2,t^3+1),(4*t,0,1)])
sage: v = S1([0,3,6]) + S2([2,0,1/(2*t)]); v # indirect doctest
(2, 3, (-12*t - 1)/(-2*t))
sage: v.parent()
Vector space of degree 3 and dimension 3 over Fraction Field of Univariate Polynomial Ring in t over Integer Ring
User basis matrix:
[1 0 0]
[0 1 0]
[0 0 1]
"""
if isinstance(other, SubspaceFunctor):
# in order to remove linear dependencies, and in
# order to test compatibility of the base rings,
# we try to construct a sample submodule
if not other.basis:
return self
if not self.basis:
return other
try:
P = pushout(self.basis[0].parent().ambient_module(),other.basis[0].parent().ambient_module())
except CoercionException:
return None
try:
# Use span instead of submodule because we want to
# allow denominators.
submodule = P.span
except AttributeError:
return None
S = submodule(self.basis+other.basis).echelonized_basis()
return SubspaceFunctor(S)
else:
return None
class FractionField(ConstructionFunctor):
"""
Construction functor for fraction fields.
EXAMPLE::
sage: F = QQ.construction()[0]
sage: F
FractionField
sage: F.domain()
Category of integral domains
sage: F.codomain()
Category of fields
sage: F(GF(5)) is GF(5)
True
sage: F(ZZ['t'])
Fraction Field of Univariate Polynomial Ring in t over Integer Ring
sage: P.<x,y> = QQ[]
sage: f = P.hom([x+2*y,3*x-y],P)
sage: F(f)
Ring endomorphism of Fraction Field of Multivariate Polynomial Ring in x, y over Rational Field
Defn: x |--> x + 2*y
y |--> 3*x - y
sage: F(f)(1/x)
1/(x + 2*y)
sage: F == loads(dumps(F))
True
"""
rank = 5
def __init__(self):
"""
TEST::
sage: from sage.categories.pushout import FractionField
sage: F = FractionField()
sage: F
FractionField
sage: F(ZZ['t'])
Fraction Field of Univariate Polynomial Ring in t over Integer Ring
"""
Functor.__init__(self, IntegralDomains(), Fields())
def _apply_functor(self, R):
"""
Apply the functor to an object of ``self``'s domain.
TEST::
sage: F = QQ.construction()[0]
sage: F(GF(5)['t']) # indirect doctest
Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 5
"""
return R.fraction_field()
# This isn't used anywhere in Sage, and so I remove it (Simon King, 2010-05)
#
#class LocalizationFunctor(ConstructionFunctor):
#
# rank = 6
#
# def __init__(self, t):
# Functor.__init__(self, Rings(), Rings())
# self.t = t
# def _apply_functor(self, R):
# return R.localize(t)
# def __cmp__(self, other):
# c = cmp(type(self), type(other))
# if c == 0:
# c = cmp(self.t, other.t)
# return c
class CompletionFunctor(ConstructionFunctor):
"""
Completion of a ring with respect to a given prime (including infinity).
EXAMPLES::
sage: R = Zp(5)
sage: R
5-adic Ring with capped relative precision 20
sage: F1 = R.construction()[0]
sage: F1
Completion[5]
sage: F1(ZZ) is R
True
sage: F1(QQ)
5-adic Field with capped relative precision 20
sage: F2 = RR.construction()[0]
sage: F2
Completion[+Infinity]
sage: F2(QQ) is RR
True
sage: P.<x> = ZZ[]
sage: Px = P.completion(x) # currently the only implemented completion of P
sage: Px
Power Series Ring in x over Integer Ring
sage: F3 = Px.construction()[0]
sage: F3(GF(3)['x'])
Power Series Ring in x over Finite Field of size 3
TEST::
sage: R1.<a> = Zp(5,prec=20)[]
sage: R2 = Qp(5,prec=40)
sage: R2(1) + a
(1 + O(5^20))*a + (1 + O(5^40))
sage: 1/2 + a
(1 + O(5^20))*a + (3 + 2*5 + 2*5^2 + 2*5^3 + 2*5^4 + 2*5^5 + 2*5^6 + 2*5^7 + 2*5^8 + 2*5^9 + 2*5^10 + 2*5^11 + 2*5^12 + 2*5^13 + 2*5^14 + 2*5^15 + 2*5^16 + 2*5^17 + 2*5^18 + 2*5^19 + O(5^20))
"""
rank = 4
def __init__(self, p, prec, extras=None):
"""
INPUT:
- ``p``: A prime number, the generator of a univariate polynomial ring, or ``+Infinity``
        - ``prec``: an integer, the precision of the completion (the number of
          bits for a completion at ``+Infinity``, the number of digits for a
          `p`-adic completion). Note that if ``p`` is prime then ``prec`` is
          the *capped* precision, while it is the *set* precision if ``p`` is
          ``+Infinity``.
- ``extras`` (optional dictionary): Information on how to print elements, etc.
If 'type' is given as a key, the corresponding value should be a string among the following:
- 'RDF', 'Interval', 'RLF', or 'RR' for completions at infinity
- 'capped-rel', 'capped-abs', 'fixed-mod' or 'lazy' for completions at a finite place
or ideal of a DVR.
TESTS::
sage: from sage.categories.pushout import CompletionFunctor
sage: F1 = CompletionFunctor(5,100)
sage: F1(QQ)
5-adic Field with capped relative precision 100
sage: F1(ZZ)
5-adic Ring with capped relative precision 100
sage: F2 = RR.construction()[0]
sage: F2
Completion[+Infinity]
sage: F2.extras
{'rnd': 'RNDN', 'sci_not': False, 'type': 'MPFR'}
"""
Functor.__init__(self, Rings(), Rings())
self.p = p
self.prec = prec
if extras is None:
self.extras = {}
self.type = None
else:
self.extras = dict(extras)
self.type = extras.get('type', None)
from sage.rings.infinity import Infinity
if self.p == Infinity:
if self.type not in self._real_types:
raise ValueError("completion type must be one of %s"%(", ".join(self._real_types)))
else:
if self.type not in self._dvr_types:
raise ValueError("completion type must be one of %s"%(", ".join(self._dvr_types)))
def __str__(self):
"""
TEST::
sage: Zp(7).construction() # indirect doctest
(Completion[7], Integer Ring)
"""
return 'Completion[%s]'%repr(self.p)
def _apply_functor(self, R):
"""
Apply the functor to an object of ``self``'s domain.
TEST::
sage: R = Zp(5)
sage: F1 = R.construction()[0]
sage: F1(ZZ) is R # indirect doctest
True
sage: F1(QQ)
5-adic Field with capped relative precision 20
"""
try:
if len(self.extras) == 0:
if self.type is None:
try:
return R.completion(self.p, self.prec)
except TypeError:
return R.completion(self.p, self.prec, {})
else:
return R.completion(self.p, self.prec, {'type':self.type})
else:
extras = self.extras.copy()
extras['type'] = self.type
return R.completion(self.p, self.prec, extras)
except (NotImplementedError,AttributeError):
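            # ``R`` has no (usable) completion method; try to push the
            # completion through the construction of ``R`` itself, either by
            # merging with or by commuting past its construction functor.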
if R.construction() is None:
raise NotImplementedError("Completion is not implemented for %s"%R.__class__)
F, BR = R.construction()
M = self.merge(F) or F.merge(self)
if M is not None:
return M(BR)
if self.commutes(F) or F.commutes(self):
return F(self(BR))
raise NotImplementedError("Don't know how to apply %s to %s"%(repr(self),repr(R)))
def __cmp__(self, other):
"""
NOTE:
Only the prime used in the completion is relevant to comparison
of Completion functors, although the resulting rings also take
the precision into account.
TEST::
sage: R1 = Zp(5,prec=30)
sage: R2 = Zp(5,prec=40)
sage: F1 = R1.construction()[0]
sage: F2 = R2.construction()[0]
sage: F1 == loads(dumps(F1)) # indirect doctest
True
sage: F1==F2
True
sage: F1(QQ)==F2(QQ)
False
sage: R3 = Zp(7)
sage: F3 = R3.construction()[0]
sage: F1==F3
False
"""
c = cmp(type(self), type(other))
if c == 0:
c = cmp(self.p, other.p)
return c
_real_types = ['Interval','Ball','MPFR','RDF','RLF']
_dvr_types = [None, 'fixed-mod','capped-abs','capped-rel','lazy']
def merge(self, other):
"""
        Two Completion functors are merged if they are equal. If the precisions of
both functors coincide, then a Completion functor is returned that results
from updating the ``extras`` dictionary of ``self`` by ``other.extras``.
Otherwise, if the completion is at infinity then merging does not increase
the set precision, and if the completion is at a finite prime, merging
does not decrease the capped precision.
EXAMPLE::
sage: R1.<a> = Zp(5,prec=20)[]
sage: R2 = Qp(5,prec=40)
sage: R2(1)+a # indirect doctest
(1 + O(5^20))*a + (1 + O(5^40))
sage: R3 = RealField(30)
sage: R4 = RealField(50)
sage: R3(1) + R4(1) # indirect doctest
2.0000000
sage: (R3(1) + R4(1)).parent()
Real Field with 30 bits of precision
TESTS:
We check that #12353 has been resolved::
sage: RealIntervalField(53)(-1) > RR(1)
False
sage: RealIntervalField(54)(-1) > RR(1)
False
sage: RealIntervalField(54)(1) > RR(-1)
True
sage: RealIntervalField(53)(1) > RR(-1)
True
We check that various pushouts work::
sage: R0 = RealIntervalField(30)
sage: R1 = RealIntervalField(30, sci_not=True)
sage: R2 = RealIntervalField(53)
sage: R3 = RealIntervalField(53, sci_not = True)
sage: R4 = RealIntervalField(90)
sage: R5 = RealIntervalField(90, sci_not = True)
sage: R6 = RealField(30)
sage: R7 = RealField(30, sci_not=True)
sage: R8 = RealField(53, rnd = 'RNDD')
sage: R9 = RealField(53, sci_not = True, rnd = 'RNDZ')
sage: R10 = RealField(53, sci_not = True)
sage: R11 = RealField(90, sci_not = True, rnd = 'RNDZ')
sage: Rlist = [R0,R1,R2,R3,R4,R5,R6,R7,R8,R9,R10,R11]
sage: from sage.categories.pushout import pushout
sage: pushouts = [R0,R0,R0,R1,R0,R1,R0,R1,R0,R1,R1,R1,R1,R1,R1,R1,R1,R1,R1,R1,R1,R1,R1,R1,R0,R1,R2,R2,R2,R3,R0,R1,R2,R3,R3,R3,R1,R1,R3,R3,R3,R3,R1,R1,R3,R3,R3,R3,R0,R1,R2,R3,R4,R4,R0,R1,R2,R3,R3,R5,R1,R1,R3,R3,R5,R5,R1,R1,R3,R3,R3,R5,R0,R1,R0,R1,R0,R1,R6,R6,R6,R7,R7,R7,R1,R1,R1,R1,R1,R1,R7,R7,R7,R7,R7,R7,R0,R1,R2,R3,R2,R3,R6,R7,R8,R9,R10,R9,R1,R1,R3,R3,R3,R3,R7,R7,R9,R9,R10,R9,R1,R1,R3,R3,R3,R3,R7,R7,R10,R10,R10,R10,R1,R1,R3,R3,R5,R5,R7,R7,R9,R9,R10,R11]
sage: all([R is S for R, S in zip(pushouts, [pushout(a, b) for a in Rlist for b in Rlist])])
True
::
sage: P0 = ZpFM(5, 10)
sage: P1 = ZpFM(5, 20)
sage: P2 = ZpCR(5, 10)
sage: P3 = ZpCR(5, 20)
sage: P4 = ZpCA(5, 10)
sage: P5 = ZpCA(5, 20)
sage: P6 = Qp(5, 10)
sage: P7 = Qp(5, 20)
sage: Plist = [P2,P3,P4,P5,P6,P7]
sage: from sage.categories.pushout import pushout
sage: pushouts = [P2,P3,P4,P5,P6,P7,P3,P3,P5,P5,P7,P7,P4,P5,P4,P5,P6,P7,P5,P5,P5,P5,P7,P7,P6,P7,P6,P7,P6,P7,P7,P7,P7,P7,P7,P7]
sage: all([P is Q for P, Q in zip(pushouts, [pushout(a, b) for a in Plist for b in Plist])])
True
"""
if self == other: # both are Completion functors with the same p
from sage.all import Infinity
if self.p == Infinity:
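                # At infinity, take the minimum precision and the earliest
                # entry of ``_real_types``; this matches the direction of
                # coercion between the corresponding real fields.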
new_prec = min(self.prec, other.prec)
new_type = self._real_types[min(self._real_types.index(self.type), \
self._real_types.index(other.type))]
new_scinot = max(self.extras.get('sci_not',0), other.extras.get('sci_not',0))
from sage.rings.real_mpfr import _rounding_modes
new_rnd = _rounding_modes[min(_rounding_modes.index(self.extras.get('rnd', 'RNDN')), \
_rounding_modes.index(other.extras.get('rnd', 'RNDN')))]
return CompletionFunctor(self.p, new_prec, {'type': new_type, 'sci_not':new_scinot, 'rnd':new_rnd})
else:
new_type = self._dvr_types[min(self._dvr_types.index(self.type), self._dvr_types.index(other.type))]
if new_type == 'fixed-mod':
if self.type != 'fixed-mod' or other.type != 'fixed-mod':
return None # no coercion into fixed-mod
new_prec = min(self.prec, other.prec)
else:
new_prec = max(self.prec, other.prec) # since elements track their own precision, we don't want to truncate them
extras = self.extras.copy()
extras.update(other.extras)
extras['type'] = new_type
return CompletionFunctor(self.p, new_prec, extras)
## Completion has a lower rank than FractionField
## and is thus applied first. However, fact is that
## both commute. This is used in the call method,
## since some fraction fields have no completion method
## implemented.
def commutes(self,other):
"""
Completion commutes with fraction fields.
EXAMPLE::
sage: F1 = Qp(5).construction()[0]
sage: F2 = QQ.construction()[0]
sage: F1.commutes(F2)
True
TEST:
The fraction field ``R`` in the example below has no completion
method. But completion commutes with the fraction field functor,
and so it is tried internally whether applying the construction
functors in opposite order works. It does::
sage: P.<x> = ZZ[]
sage: C = P.completion(x).construction()[0]
sage: R = FractionField(P)
sage: hasattr(R,'completion')
False
sage: C(R) is Frac(C(P))
True
sage: F = R.construction()[0]
sage: (C*F)(ZZ['x']) is (F*C)(ZZ['x'])
True
The following was fixed in :trac:`15329` (it used to result
in an infinite recursion)::
sage: from sage.categories.pushout import pushout
sage: pushout(Qp(7),RLF)
Traceback (most recent call last):
...
CoercionException: ('Ambiguous Base Extension', 7-adic Field with capped relative precision 20, Real Lazy Field)
"""
return isinstance(other,FractionField)
class QuotientFunctor(ConstructionFunctor):
"""
Construction functor for quotient rings.
NOTE:
The functor keeps track of variable names.
EXAMPLE::
sage: P.<x,y> = ZZ[]
sage: Q = P.quo([x^2+y^2]*P)
sage: F = Q.construction()[0]
sage: F(QQ['x','y'])
Quotient of Multivariate Polynomial Ring in x, y over Rational Field by the ideal (x^2 + y^2)
sage: F(QQ['x','y']) == QQ['x','y'].quo([x^2+y^2]*QQ['x','y'])
True
sage: F(QQ['x','y','z'])
Traceback (most recent call last):
...
CoercionException: Can not apply this quotient functor to Multivariate Polynomial Ring in x, y, z over Rational Field
sage: F(QQ['y','z'])
Traceback (most recent call last):
...
TypeError: Could not find a mapping of the passed element to this ring.
"""
rank = 4.5
def __init__(self, I, names=None, as_field=False):
"""
INPUT:
- ``I``, an ideal (the modulus)
- ``names`` (optional string or list of strings), the names for the quotient ring generators
- ``as_field`` (optional bool, default false), return the quotient ring as field (if available).
TESTS::
sage: from sage.categories.pushout import QuotientFunctor
sage: P.<t> = ZZ[]
sage: F = QuotientFunctor([5+t^2]*P)
sage: F(P)
Univariate Quotient Polynomial Ring in tbar over Integer Ring with modulus t^2 + 5
sage: F(QQ['t'])
Univariate Quotient Polynomial Ring in tbar over Rational Field with modulus t^2 + 5
sage: F = QuotientFunctor([5+t^2]*P,names='s')
sage: F(P)
Univariate Quotient Polynomial Ring in s over Integer Ring with modulus t^2 + 5
sage: F(QQ['t'])
Univariate Quotient Polynomial Ring in s over Rational Field with modulus t^2 + 5
sage: F = QuotientFunctor([5]*ZZ,as_field=True)
sage: F(ZZ)
Finite Field of size 5
sage: F = QuotientFunctor([5]*ZZ)
sage: F(ZZ)
Ring of integers modulo 5
"""
Functor.__init__(self, Rings(), Rings()) # much more general...
self.I = I
if names is None:
self.names = None
elif isinstance(names, six.string_types):
self.names = (names,)
else:
self.names = tuple(names)
self.as_field = as_field
def _apply_functor(self, R):
"""
Apply the functor to an object of ``self``'s domain.
TESTS::
sage: P.<x,y> = ZZ[]
sage: Q = P.quo([2+x^2,3*x+y^2])
sage: F = Q.construction()[0]; F
QuotientFunctor
sage: F(QQ['x','y']) # indirect doctest
Quotient of Multivariate Polynomial Ring in x, y over Rational Field by the ideal (x^2 + 2, y^2 + 3*x)
Note that the ``quo()`` method of a field used to return the
integer zero. That strange behaviour was removed in trac
ticket :trac:`9138`. It now returns a trivial quotient ring
when applied to a field::
sage: F = ZZ.quo([5]*ZZ).construction()[0]
sage: F(QQ)
Ring of integers modulo 1
sage: QQ.quo(5)
Quotient of Rational Field by the ideal (1)
"""
I = self.I
from sage.all import QQ
if not I.is_zero():
from sage.categories.fields import Fields
if R in Fields():
from sage.all import Integers
return Integers(1)
if I.ring() != R:
if I.ring().has_coerce_map_from(R):
R = I.ring()
else:
R = pushout(R,I.ring().base_ring())
I = [R(1)*t for t in I.gens()]*R
try:
Q = R.quo(I,names=self.names)
except IndexError: # That may happen!
raise CoercionException("Can not apply this quotient functor to %s"%R)
if self.as_field:# and hasattr(Q, 'field'):
try:
Q = Q.field()
except AttributeError:
pass
return Q
def __cmp__(self, other):
"""
The types, the names and the moduli are compared.
TESTS::
sage: P.<x> = QQ[]
sage: F = P.quo([(x^2+1)^2*(x^2-3),(x^2+1)^2*(x^5+3)]).construction()[0]
sage: F == loads(dumps(F))
True
sage: P2.<x,y> = QQ[]
sage: F == P2.quo([(x^2+1)^2*(x^2-3),(x^2+1)^2*(x^5+3)]).construction()[0]
False
sage: P3.<x> = ZZ[]
sage: F == P3.quo([(x^2+1)^2*(x^2-3),(x^2+1)^2*(x^5+3)]).construction()[0]
True
"""
c = cmp(type(self), type(other))
if c == 0:
c = cmp(self.names, other.names)
if c == 0:
c = cmp(self.I, other.I)
return c
def merge(self, other):
"""
Two quotient functors with coinciding names are merged by taking the gcd of their moduli.
EXAMPLE::
sage: P.<x> = QQ[]
sage: Q1 = P.quo([(x^2+1)^2*(x^2-3)])
sage: Q2 = P.quo([(x^2+1)^2*(x^5+3)])
sage: from sage.categories.pushout import pushout
sage: pushout(Q1,Q2) # indirect doctest
Univariate Quotient Polynomial Ring in xbar over Rational Field with modulus x^4 + 2*x^2 + 1
The following was fixed in trac ticket #8800::
sage: pushout(GF(5), Integers(5))
Finite Field of size 5
"""
if not isinstance(self, type(other)):
return None
if self.names != other.names:
return None
if self == other:
if self.as_field == other.as_field:
return self
return QuotientFunctor(self.I, names=self.names, as_field=True) # one of them yields a field!
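        # The sum of the two ideals plays the role of the gcd of the moduli
        # (in a PID, (a) + (b) = (gcd(a,b))); if the sum is not available,
        # fall back to an explicit gcd.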
try:
gcd = self.I + other.I
except (TypeError, NotImplementedError):
try:
gcd = self.I.gcd(other.I)
except (TypeError, NotImplementedError):
return None
if gcd.is_trivial() and not gcd.is_zero():
# quotient by gcd would result in the trivial ring/group/...
# Rather than create the zero ring, we claim they can't be merged
# TODO: Perhaps this should be detected at a higher level...
raise TypeError("Trivial quotient intersection.")
# GF(p) has a coercion from Integers(p). Hence, merging should
# yield a field if either self or other yields a field.
return QuotientFunctor(gcd, names=self.names, as_field=self.as_field or other.as_field)
class AlgebraicExtensionFunctor(ConstructionFunctor):
"""
Algebraic extension (univariate polynomial ring modulo principal ideal).
EXAMPLE::
sage: K.<a> = NumberField(x^3+x^2+1)
sage: F = K.construction()[0]
sage: F(ZZ['t'])
Univariate Quotient Polynomial Ring in a over Univariate Polynomial Ring in t over Integer Ring with modulus a^3 + a^2 + 1
Note that, even if a field is algebraically closed, the algebraic
extension will be constructed as the quotient of a univariate
polynomial ring::
sage: F(CC)
Univariate Quotient Polynomial Ring in a over Complex Field with 53 bits of precision with modulus a^3 + a^2 + 1.00000000000000
sage: F(RR)
Univariate Quotient Polynomial Ring in a over Real Field with 53 bits of precision with modulus a^3 + a^2 + 1.00000000000000
Note that the construction functor of a number field applied to
the integers returns an order (not necessarily maximal) of that
field, similar to the behaviour of ``ZZ.extension(...)``::
sage: F(ZZ)
Order in Number Field in a with defining polynomial x^3 + x^2 + 1
This also holds for non-absolute number fields::
sage: K.<a,b> = NumberField([x^3+x^2+1,x^2+x+1])
sage: F = K.construction()[0]
sage: O = F(ZZ); O
Relative Order in Number Field in a with defining polynomial x^3 + x^2 + 1 over its base field
Unfortunately, the relative number field is not a unique parent::
sage: O.ambient() is K
False
sage: O.ambient() == K
True
"""
rank = 3
def __init__(self, polys, names, embeddings, cyclotomic=None, **kwds):
"""
INPUT:
- ``polys``: a list of polynomials (or of integers, for
finite fields and unramified local extensions)
- ``names``: a list of strings of the same length as the
list ``polys``
- ``embeddings``: a list of approximate complex values,
determining an embedding of the generators into the
complex field, or ``None`` for each generator whose
embedding is not prescribed.
- ``cyclotomic``: optional integer. If it is provided,
application of the functor to the rational field yields
a cyclotomic field, rather than just a number field.
- ``**kwds``: further keywords; when the functor is applied to
a ring `R`, these are passed to the ``extension()`` method
of `R`.
REMARK:
Currently, an embedding can only be provided for the last
generator, and only when the construction functor is applied
        to the rational field. No error is raised when constructing the
        functor; an error only occurs when the functor is applied.
TESTS::
sage: from sage.categories.pushout import AlgebraicExtensionFunctor
sage: P.<x> = ZZ[]
sage: F1 = AlgebraicExtensionFunctor([x^3 - x^2 + 1], ['a'], [None])
sage: F2 = AlgebraicExtensionFunctor([x^3 - x^2 + 1], ['a'], [0])
sage: F1==F2
False
sage: F1(QQ)
Number Field in a with defining polynomial x^3 - x^2 + 1
sage: F1(QQ).coerce_embedding()
sage: phi = F2(QQ).coerce_embedding().__copy__(); phi
Generic morphism:
From: Number Field in a with defining polynomial x^3 - x^2 + 1
To: Real Lazy Field
Defn: a -> -0.7548776662466928?
sage: F1(QQ)==F2(QQ)
False
sage: F1(GF(5))
Univariate Quotient Polynomial Ring in a over Finite Field of size 5 with modulus a^3 + 4*a^2 + 1
sage: F2(GF(5))
Traceback (most recent call last):
...
NotImplementedError: ring extension with prescripted embedding is not implemented
When applying a number field constructor to the ring of
integers, an order (not necessarily maximal) of that field is
returned, similar to the behaviour of ``ZZ.extension``::
sage: F1(ZZ)
Order in Number Field in a with defining polynomial x^3 - x^2 + 1
The cyclotomic fields form a special case of number fields
with prescribed embeddings::
sage: C = CyclotomicField(8)
sage: F,R = C.construction()
sage: F
AlgebraicExtensionFunctor
sage: R
Rational Field
sage: F(R)
Cyclotomic Field of order 8 and degree 4
sage: F(ZZ)
Maximal Order in Cyclotomic Field of order 8 and degree 4
"""
Functor.__init__(self, Rings(), Rings())
if not (isinstance(polys,(list,tuple)) and isinstance(names,(list,tuple)) and isinstance(embeddings,(list,tuple))):
raise ValueError("Arguments must be lists or tuples")
if not (len(names)==len(polys)==len(embeddings)):
raise ValueError("The three arguments must be of the same length")
self.polys = list(polys)
self.names = list(names)
self.embeddings = list(embeddings)
self.cyclotomic = int(cyclotomic) if cyclotomic is not None else None
self.kwds = kwds
def _apply_functor(self, R):
"""
Apply the functor to an object of ``self``'s domain.
TESTS::
sage: K.<a>=NumberField(x^3+x^2+1)
sage: F = K.construction()[0]
sage: F(ZZ) # indirect doctest
Order in Number Field in a with defining polynomial x^3 + x^2 + 1
sage: F(ZZ['t']) # indirect doctest
Univariate Quotient Polynomial Ring in a over Univariate Polynomial Ring in t over Integer Ring with modulus a^3 + a^2 + 1
sage: F(RR) # indirect doctest
Univariate Quotient Polynomial Ring in a over Real Field with 53 bits of precision with modulus a^3 + a^2 + 1.00000000000000
Check that :trac:`13538` is fixed::
sage: K = Qp(3,3)
sage: R.<a> = K[]
sage: AEF = sage.categories.pushout.AlgebraicExtensionFunctor([a^2-3], ['a'], [None])
sage: AEF(K)
Eisenstein Extension of 3-adic Field with capped relative precision 3 in a defined by (1 + O(3^3))*a^2 + (O(3^4))*a + (2*3 + 2*3^2 + 2*3^3 + O(3^4))
"""
from sage.all import QQ, ZZ, CyclotomicField
if self.cyclotomic:
if R==QQ:
return CyclotomicField(self.cyclotomic)
if R==ZZ:
return CyclotomicField(self.cyclotomic).maximal_order()
if len(self.polys) == 1:
return R.extension(self.polys[0], names=self.names[0], embedding=self.embeddings[0], **self.kwds)
return R.extension(self.polys, names=self.names, embedding=self.embeddings)
def __cmp__(self, other):
"""
TEST::
sage: K.<a>=NumberField(x^3+x^2+1)
sage: F = K.construction()[0]
sage: F == loads(dumps(F))
True
"""
c = cmp(type(self), type(other))
if c == 0:
c = cmp(self.polys, other.polys)
if c == 0:
c = cmp(self.embeddings, other.embeddings)
return c
def merge(self,other):
"""
Merging with another :class:`AlgebraicExtensionFunctor`.
INPUT:
``other`` -- Construction Functor.
OUTPUT:
- If ``self==other``, ``self`` is returned.
- If ``self`` and ``other`` are simple extensions
and both provide an embedding, then it is tested
whether one of the number fields provided by
the functors coerces into the other; the functor
associated with the target of the coercion is
returned. Otherwise, the construction functor
associated with the pushout of the codomains
of the two embeddings is returned, provided that
it is a number field.
- If the two extensions are defined by Conway polynomials
over finite fields, they are merged into a single extension
whose degree is the lcm of the two degrees.
- Otherwise, None is returned.
REMARK:
Algebraic extension with embeddings currently only
works when applied to the rational field. This is
why we use the admittedly strange rule above for
merging.
EXAMPLES:
The following demonstrate coercions for finite fields using Conway or
pseudo-Conway polynomials::
sage: k = GF(3^2, conway=True, prefix='z'); a = k.gen()
sage: l = GF(3^3, conway=True, prefix='z'); b = l.gen()
sage: a + b # indirect doctest
z6^5 + 2*z6^4 + 2*z6^3 + z6^2 + 2*z6 + 1
Note that embeddings are compatible in lattices of such finite fields::
sage: m = GF(3^5, conway=True, prefix='z'); c = m.gen()
sage: (a+b)+c == a+(b+c) # indirect doctest
True
sage: from sage.categories.pushout import pushout
sage: n = pushout(k, l)
sage: o = pushout(l, m)
sage: q = pushout(n, o)
sage: q(o(b)) == q(n(b)) # indirect doctest
True
Coercion is also available for number fields::
sage: P.<x> = QQ[]
sage: L.<b> = NumberField(x^8-x^4+1, embedding=CDF.0)
sage: M1.<c1> = NumberField(x^2+x+1, embedding=b^4-1)
sage: M2.<c2> = NumberField(x^2+1, embedding=-b^6)
sage: M1.coerce_map_from(M2)
sage: M2.coerce_map_from(M1)
sage: c1+c2; parent(c1+c2) #indirect doctest
-b^6 + b^4 - 1
Number Field in b with defining polynomial x^8 - x^4 + 1
sage: pushout(M1['x'],M2['x'])
Univariate Polynomial Ring in x over Number Field in b with defining polynomial x^8 - x^4 + 1
In the previous example, the number field ``L`` becomes the pushout
of ``M1`` and ``M2`` since both are provided with an embedding into
``L``, *and* since ``L`` is a number field. If two number fields
are embedded into a field that is not a number field, no merging
occurs::
sage: K.<a> = NumberField(x^3-2, embedding=CDF(1/2*I*2^(1/3)*sqrt(3) - 1/2*2^(1/3)))
sage: L.<b> = NumberField(x^6-2, embedding=1.1)
sage: L.coerce_map_from(K)
sage: K.coerce_map_from(L)
sage: pushout(K,L)
Traceback (most recent call last):
...
CoercionException: ('Ambiguous Base Extension', Number Field in a with defining polynomial x^3 - 2, Number Field in b with defining polynomial x^6 - 2)
"""
if isinstance(other, AlgebraicClosureFunctor):
return other
elif not isinstance(other, AlgebraicExtensionFunctor):
return None
if self == other:
return self
# This method is supposed to be used in pushout(),
# *after* expanding the functors. Hence, we can
# assume that both functors have a single variable.
# But for being on the safe side...:
if len(self.names)!=1 or len(other.names)!=1:
return None
## We don't accept a forgetful coercion, since, together
## with bidirectional coercions between two embedded
## number fields, it would yield to contradictions in
## the coercion system.
# if self.polys==other.polys and self.names==other.names:
# # We have a forgetful functor:
# if self.embeddings==[None]:
# return self
# if other.embeddings==[None]:
# return other
# ... or we may use the given embeddings:
if self.embeddings!=[None] and other.embeddings!=[None]:
from sage.all import QQ
KS = self(QQ)
KO = other(QQ)
if KS.has_coerce_map_from(KO):
return self
if KO.has_coerce_map_from(KS):
return other
# nothing else helps, hence, we move to the pushout of the codomains of the embeddings
try:
P = pushout(self.embeddings[0].parent(), other.embeddings[0].parent())
from sage.rings.number_field.number_field import is_NumberField
if is_NumberField(P):
return P.construction()[0]
except CoercionException:
return None
# Finite fields and unramified local extensions may use
# integers to encode degrees of extensions.
from sage.rings.integer import Integer
if (isinstance(self.polys[0], Integer) and isinstance(other.polys[0], Integer)
and self.embeddings == [None] and other.embeddings == [None] and self.kwds == other.kwds):
return AlgebraicExtensionFunctor([self.polys[0].lcm(other.polys[0])], [None], [None], **self.kwds)
def __mul__(self, other):
"""
Compose construction functors into a composite construction functor, unless one of them is the identity.
NOTE:
The product is in functorial notation, i.e., when applying the product to an object
then the second factor is applied first.
TESTS::
sage: P.<x> = QQ[]
sage: K.<a> = NumberField(x^3-5,embedding=0)
sage: L.<b> = K.extension(x^2+a)
sage: F,R = L.construction()
sage: prod(F.expand())(R) == L #indirect doctest
True
"""
if isinstance(other,IdentityConstructionFunctor):
return self
if isinstance(other, AlgebraicExtensionFunctor):
if set(self.names).intersection(other.names):
raise CoercionException("Overlapping names (%s,%s)" % (self.names, other.names))
return AlgebraicExtensionFunctor(self.polys + other.polys, self.names + other.names,
self.embeddings + other.embeddings, **self.kwds)
elif isinstance(other, CompositeConstructionFunctor) \
and isinstance(other.all[-1], AlgebraicExtensionFunctor):
return CompositeConstructionFunctor(other.all[:-1], self * other.all[-1])
else:
return CompositeConstructionFunctor(other, self)
def expand(self):
"""
Decompose the functor `F` into sub-functors, whose product returns `F`.
EXAMPLES::
sage: P.<x> = QQ[]
sage: K.<a> = NumberField(x^3-5,embedding=0)
sage: L.<b> = K.extension(x^2+a)
sage: F,R = L.construction()
sage: prod(F.expand())(R) == L
True
sage: K = NumberField([x^2-2, x^2-3],'a')
sage: F, R = K.construction()
sage: F
AlgebraicExtensionFunctor
sage: L = F.expand(); L
[AlgebraicExtensionFunctor, AlgebraicExtensionFunctor]
sage: L[-1](QQ)
Number Field in a1 with defining polynomial x^2 - 3
"""
if len(self.polys)==1:
return [self]
return [AlgebraicExtensionFunctor([self.polys[i]], [self.names[i]], [self.embeddings[i]], **self.kwds)
for i in xrange(len(self.polys))]
class AlgebraicClosureFunctor(ConstructionFunctor):
"""
Algebraic Closure.
EXAMPLE::
sage: F = CDF.construction()[0]
sage: F(QQ)
Algebraic Field
sage: F(RR)
Complex Field with 53 bits of precision
sage: F(F(QQ)) is F(QQ)
True
"""
rank = 3
def __init__(self):
"""
TEST::
sage: from sage.categories.pushout import AlgebraicClosureFunctor
sage: F = AlgebraicClosureFunctor()
sage: F(QQ)
Algebraic Field
sage: F(RR)
Complex Field with 53 bits of precision
sage: F == loads(dumps(F))
True
"""
Functor.__init__(self, Rings(), Rings())
def _apply_functor(self, R):
"""
Apply the functor to an object of ``self``'s domain.
TEST::
sage: F = CDF.construction()[0]
sage: F(QQ) # indirect doctest
Algebraic Field
"""
try:
c = R.construction()
if c is not None and c[0]==self:
return R
except AttributeError:
pass
return R.algebraic_closure()
def merge(self, other):
"""
Mathematically, Algebraic Closure subsumes Algebraic Extension.
However, it seems that people do want to work with algebraic
extensions of ``RR``. Therefore, we do not merge with algebraic extension.
TEST::
sage: K.<a>=NumberField(x^3+x^2+1)
sage: CDF.construction()[0].merge(K.construction()[0]) is None
True
sage: CDF.construction()[0].merge(CDF.construction()[0])
AlgebraicClosureFunctor
"""
if self==other:
return self
return None
# Mathematically, Algebraic Closure subsumes Algebraic Extension.
# However, it seems that people do want to work with
# algebraic extensions of RR (namely RR/poly*RR). So, we don't do:
# if isinstance(other,AlgebraicExtensionFunctor):
# return self
class PermutationGroupFunctor(ConstructionFunctor):
rank = 10
def __init__(self, gens, domain):
"""
EXAMPLES::
sage: from sage.categories.pushout import PermutationGroupFunctor
sage: PF = PermutationGroupFunctor([PermutationGroupElement([(1,2)])], [1,2]); PF
PermutationGroupFunctor[(1,2)]
"""
Functor.__init__(self, Groups(), Groups())
self._gens = gens
self._domain = domain
def _repr_(self):
"""
EXAMPLES::
sage: P1 = PermutationGroup([[(1,2)]])
sage: PF, P = P1.construction()
sage: PF
PermutationGroupFunctor[(1,2)]
"""
return "PermutationGroupFunctor%s"%self.gens()
def __call__(self, R):
"""
EXAMPLES::
sage: P1 = PermutationGroup([[(1,2)]])
sage: PF, P = P1.construction()
sage: PF(P)
Permutation Group with generators [(1,2)]
"""
from sage.groups.perm_gps.permgroup import PermutationGroup
return PermutationGroup([g for g in (R.gens() + self.gens()) if not g.is_one()],
domain=self._domain)
def gens(self):
"""
EXAMPLES::
sage: P1 = PermutationGroup([[(1,2)]])
sage: PF, P = P1.construction()
sage: PF.gens()
[(1,2)]
"""
return self._gens
def merge(self, other):
"""
Merge ``self`` with another construction functor, or return None.
EXAMPLES::
sage: P1 = PermutationGroup([[(1,2)]])
sage: PF1, P = P1.construction()
sage: P2 = PermutationGroup([[(1,3)]])
sage: PF2, P = P2.construction()
sage: PF1.merge(PF2)
PermutationGroupFunctor[(1,2), (1,3)]
"""
if self.__class__ != other.__class__:
return None
from sage.sets.all import FiniteEnumeratedSet
new_domain = set(self._domain).union(set(other._domain))
new_domain = FiniteEnumeratedSet(sorted(new_domain))
return PermutationGroupFunctor(self.gens() + other.gens(),
new_domain)
class BlackBoxConstructionFunctor(ConstructionFunctor):
"""
Construction functor obtained from any callable object.
EXAMPLES::
sage: from sage.categories.pushout import BlackBoxConstructionFunctor
sage: FG = BlackBoxConstructionFunctor(gap)
sage: FS = BlackBoxConstructionFunctor(singular)
sage: FG
BlackBoxConstructionFunctor
sage: FG(ZZ)
Integers
sage: FG(ZZ).parent()
Gap
sage: FS(QQ['t'])
// characteristic : 0
// number of vars : 1
// block 1 : ordering lp
// : names t
// block 2 : ordering C
sage: FG == FS
False
sage: FG == loads(dumps(FG))
True
"""
rank = 100
def __init__(self, box):
"""
TESTS::
sage: from sage.categories.pushout import BlackBoxConstructionFunctor
sage: FG = BlackBoxConstructionFunctor(gap)
sage: FM = BlackBoxConstructionFunctor(maxima)
sage: FM == FG
False
sage: FM == loads(dumps(FM))
True
"""
ConstructionFunctor.__init__(self,Objects(),Objects())
if not callable(box):
raise TypeError("input must be callable")
self.box = box
def _apply_functor(self, R):
"""
Apply the functor to an object of ``self``'s domain.
TESTS::
sage: from sage.categories.pushout import BlackBoxConstructionFunctor
sage: f = lambda x: x^2
sage: F = BlackBoxConstructionFunctor(f)
sage: F(ZZ) # indirect doctest
Ambient free module of rank 2 over the principal ideal domain Integer Ring
"""
return self.box(R)
def __cmp__(self, other):
"""
TESTS::
sage: from sage.categories.pushout import BlackBoxConstructionFunctor
sage: FG = BlackBoxConstructionFunctor(gap)
sage: FM = BlackBoxConstructionFunctor(maxima)
sage: FM == FG # indirect doctest
False
sage: FM == loads(dumps(FM))
True
"""
c = cmp(type(self), type(other))
if c == 0:
c = cmp(self.box, other.box)
#return self.box == other.box
return c
def pushout(R, S):
r"""
Given a pair of objects `R` and `S`, try to construct a
reasonable object `Y` and return maps such that
canonically `R \leftarrow Y \rightarrow S`.
ALGORITHM:
This incorporates the idea of functors discussed at Sage Days 4.
Every object `R` can be viewed as an initial object and a series
of functors (e.g. polynomial, quotient, extension, completion,
vector/matrix, etc.). Call the series of increasingly simple
objects (with the associated functors) the "tower" of `R`. The
construction method is used to create the tower.
Given two objects `R` and `S`, try to find a common initial object
`Z`. If the towers of `R` and `S` meet, let `Z` be their join.
Otherwise, see if the top of one coerces naturally into the other.
Now we have an initial object and two ordered lists of functors to
apply. We wish to merge these in an unambiguous order, popping
elements off the top of one or the other tower as we apply them to
`Z`.
- If the functors are of distinct types, there is an absolute
ordering given by the rank attribute. Use this.
- Otherwise:
- If the tops are equal, we (try to) merge them.
- If exactly one occurs lower in the other tower, we may
unambiguously apply the other (hoping for a later merge).
- If the tops commute, we can apply either first.
- Otherwise fail due to ambiguity.
The algorithm assumes by default that when a construction `F` is
applied to an object `X`, the object `F(X)` admits a coercion map
from `X`. However, the algorithm can also handle the case where
`F(X)` has a coercion map *to* `X` instead. In this case, the
attribute ``coercion_reversed`` of the class implementing `F`
should be set to ``True``.
EXAMPLES:
Here our "towers" are `R = Complete_7(Frac(\ZZ))` and `Frac(Poly_x(\ZZ))`,
which give us `Frac(Poly_x(Complete_7(Frac(\ZZ))))`::
sage: from sage.categories.pushout import pushout
sage: pushout(Qp(7), Frac(ZZ['x']))
Fraction Field of Univariate Polynomial Ring in x over 7-adic Field with capped relative precision 20
Note we get the same thing with
::
sage: pushout(Zp(7), Frac(QQ['x']))
Fraction Field of Univariate Polynomial Ring in x over 7-adic Field with capped relative precision 20
sage: pushout(Zp(7)['x'], Frac(QQ['x']))
Fraction Field of Univariate Polynomial Ring in x over 7-adic Field with capped relative precision 20
Note that polynomial variable ordering must be unambiguously determined.
::
sage: pushout(ZZ['x,y,z'], QQ['w,z,t'])
Traceback (most recent call last):
...
CoercionException: ('Ambiguous Base Extension', Multivariate Polynomial Ring in x, y, z over Integer Ring, Multivariate Polynomial Ring in w, z, t over Rational Field)
sage: pushout(ZZ['x,y,z'], QQ['w,x,z,t'])
Multivariate Polynomial Ring in w, x, y, z, t over Rational Field
Some other examples::
sage: pushout(Zp(7)['y'], Frac(QQ['t'])['x,y,z'])
Multivariate Polynomial Ring in x, y, z over Fraction Field of Univariate Polynomial Ring in t over 7-adic Field with capped relative precision 20
sage: pushout(ZZ['x,y,z'], Frac(ZZ['x'])['y'])
Multivariate Polynomial Ring in y, z over Fraction Field of Univariate Polynomial Ring in x over Integer Ring
sage: pushout(MatrixSpace(RDF, 2, 2), Frac(ZZ['x']))
Full MatrixSpace of 2 by 2 dense matrices over Fraction Field of Univariate Polynomial Ring in x over Real Double Field
sage: pushout(ZZ, MatrixSpace(ZZ[['x']], 3, 3))
Full MatrixSpace of 3 by 3 dense matrices over Power Series Ring in x over Integer Ring
sage: pushout(QQ['x,y'], ZZ[['x']])
Univariate Polynomial Ring in y over Power Series Ring in x over Rational Field
sage: pushout(Frac(ZZ['x']), QQ[['x']])
Laurent Series Ring in x over Rational Field
A construction with ``coercion_reversed = True`` (currently only
the :class:`SubspaceFunctor` construction) is only applied if it
leads to a valid coercion::
sage: A = ZZ^2
sage: V = span([[1, 2]], QQ)
sage: P = sage.categories.pushout.pushout(A, V)
sage: P
Vector space of dimension 2 over Rational Field
sage: P.has_coerce_map_from(A)
True
sage: V = (QQ^3).span([[1, 2, 3/4]])
sage: A = ZZ^3
sage: pushout(A, V)
Vector space of dimension 3 over Rational Field
sage: B = A.span([[0, 0, 2/3]])
sage: pushout(B, V)
Vector space of degree 3 and dimension 2 over Rational Field
User basis matrix:
[1 2 0]
[0 0 1]
Some more tests with ``coercion_reversed = True``::
sage: from sage.categories.pushout import ConstructionFunctor
sage: class EvenPolynomialRing(type(QQ['x'])):
....: def __init__(self, base, var):
....: super(EvenPolynomialRing, self).__init__(base, var)
....: self.register_embedding(base[var])
....: def __repr__(self):
....: return "Even Power " + super(EvenPolynomialRing, self).__repr__()
....: def construction(self):
....: return EvenPolynomialFunctor(), self.base()[self.variable_name()]
....: def _coerce_map_from_(self, R):
....: return self.base().has_coerce_map_from(R)
....:
sage: class EvenPolynomialFunctor(ConstructionFunctor):
....: rank = 10
....: coercion_reversed = True
....: def __init__(self):
....: ConstructionFunctor.__init__(self, Rings(), Rings())
....: def __call__(self, R):
....: return EvenPolynomialRing(R.base(), R.variable_name())
....:
sage: pushout(EvenPolynomialRing(QQ, 'x'), ZZ)
Even Power Univariate Polynomial Ring in x over Rational Field
sage: pushout(EvenPolynomialRing(QQ, 'x'), QQ)
Even Power Univariate Polynomial Ring in x over Rational Field
sage: pushout(EvenPolynomialRing(QQ, 'x'), RR)
Even Power Univariate Polynomial Ring in x over Real Field with 53 bits of precision
sage: pushout(EvenPolynomialRing(QQ, 'x'), ZZ['x'])
Univariate Polynomial Ring in x over Rational Field
sage: pushout(EvenPolynomialRing(QQ, 'x'), QQ['x'])
Univariate Polynomial Ring in x over Rational Field
sage: pushout(EvenPolynomialRing(QQ, 'x'), RR['x'])
Univariate Polynomial Ring in x over Real Field with 53 bits of precision
sage: pushout(EvenPolynomialRing(QQ, 'x'), EvenPolynomialRing(QQ, 'x'))
Even Power Univariate Polynomial Ring in x over Rational Field
sage: pushout(EvenPolynomialRing(QQ, 'x'), EvenPolynomialRing(RR, 'x'))
Even Power Univariate Polynomial Ring in x over Real Field with 53 bits of precision
sage: pushout(EvenPolynomialRing(QQ, 'x')^2, RR^2)
Ambient free module of rank 2 over the principal ideal domain Even Power Univariate Polynomial Ring in x over Real Field with 53 bits of precision
sage: pushout(EvenPolynomialRing(QQ, 'x')^2, RR['x']^2)
Ambient free module of rank 2 over the principal ideal domain Univariate Polynomial Ring in x over Real Field with 53 bits of precision
AUTHORS:
-- Robert Bradshaw
"""
if R is S or R == S:
return R
if isinstance(R, type):
R = type_to_parent(R)
if isinstance(S, type):
S = type_to_parent(S)
R_tower = construction_tower(R)
S_tower = construction_tower(S)
Rs = [c[1] for c in R_tower]
Ss = [c[1] for c in S_tower]
if R in Ss:
if not any(c[0].coercion_reversed for c in S_tower[1:]):
return S
elif S in Rs:
if not any(c[0].coercion_reversed for c in R_tower[1:]):
return R
if Rs[-1] in Ss:
Rs, Ss = Ss, Rs
R_tower, S_tower = S_tower, R_tower
# look for join
if Ss[-1] in Rs:
if Rs[-1] == Ss[-1]:
while Rs and Ss and Rs[-1] == Ss[-1]:
Rs.pop()
Z = Ss.pop()
else:
Rs = Rs[:Rs.index(Ss[-1])]
Z = Ss.pop()
# look for topmost coercion
elif S.has_coerce_map_from(Rs[-1]):
while not Ss[-1].has_coerce_map_from(Rs[-1]):
Ss.pop()
while len(Rs) > 0 and Ss[-1].has_coerce_map_from(Rs[-1]):
Rs.pop()
Z = Ss.pop()
elif R.has_coerce_map_from(Ss[-1]):
while not Rs[-1].has_coerce_map_from(Ss[-1]):
Rs.pop()
while len(Ss) > 0 and Rs[-1].has_coerce_map_from(Ss[-1]):
Ss.pop()
Z = Rs.pop()
else:
raise CoercionException("No common base")
# Rc is a list of functors from Z to R and Sc is a list of functors from Z to S
R_tower = expand_tower(R_tower[:len(Rs)+1])
S_tower = expand_tower(S_tower[:len(Ss)+1])
Rc = [c[0] for c in R_tower[1:]]
Sc = [c[0] for c in S_tower[1:]]
all = IdentityConstructionFunctor()
def apply_from(Xc):
c = Xc.pop()
if c.coercion_reversed:
Yc = Sc if Xc is Rc else Rc
Y_tower = S_tower if Xc is Rc else R_tower
Y_partial = Y_tower[len(Yc)][1]
if not (c * all)(Z).has_coerce_map_from(Y_partial):
return all
return c * all
try:
while len(Rc) > 0 or len(Sc) > 0:
# print Z
# if we are out of functors in either tower, there is no ambiguity
if len(Sc) == 0:
all = apply_from(Rc)
elif len(Rc) == 0:
all = apply_from(Sc)
# if one of the functors has lower rank, do it first
elif Rc[-1].rank < Sc[-1].rank:
all = apply_from(Rc)
elif Sc[-1].rank < Rc[-1].rank:
all = apply_from(Sc)
else:
# the ranks are the same, so things are a bit subtler
if Rc[-1] == Sc[-1]:
# If they are indeed the same operation, we only do it once.
# The \code{merge} function here takes into account non-mathematical
# distinctions (e.g. single vs. multivariate polynomials).
cR = Rc.pop()
cS = Sc.pop()
c = cR.merge(cS) or cS.merge(cR)
if c:
all = c * all
else:
raise CoercionException("Incompatible Base Extension %r, %r (on %r, %r)" % (R, S, cR, cS))
else:
# Now we look ahead to see if either top functor is
# applied later on in the other tower.
# If this is the case for exactly one of them, we unambiguously
# postpone that operation, but if both then we abort.
if Rc[-1] in Sc:
if Sc[-1] in Rc:
raise CoercionException("Ambiguous Base Extension", R, S)
else:
all = apply_from(Sc)
elif Sc[-1] in Rc:
all = apply_from(Rc)
# If, perchance, the two functors commute, then we may do them in any order.
elif Rc[-1].commutes(Sc[-1]) or Sc[-1].commutes(Rc[-1]):
all = Sc.pop() * Rc.pop() * all
else:
# try and merge (default merge is failure for unequal functors)
cR = Rc.pop()
cS = Sc.pop()
c = cR.merge(cS) or cS.merge(cR)
if c is not None:
all = c * all
else:
# Otherwise, we cannot proceed.
raise CoercionException("Ambiguous Base Extension", R, S)
return all(Z)
except CoercionException:
raise
except (TypeError, ValueError, AttributeError, NotImplementedError) as ex:
# We do this because we may be trying all kinds of things that don't
# make sense, and in this case simply want to return that a pushout
# couldn't be found.
raise CoercionException(ex)
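# A minimal standalone sketch (not part of Sage) of the rank-based tower merging
# described in the ALGORITHM section above.  Towers are given as lists of
# (rank, name) pairs with the topmost functor last; the ranks and names are
# hypothetical and only illustrate the ordering logic.  The real code above
# additionally handles merging, look-ahead and commuting functors.
def _demo_merge_towers_by_rank(Rc, Sc):
    applied = []
    while Rc or Sc:
        if not Sc or (Rc and Rc[-1][0] < Sc[-1][0]):
            applied.append(Rc.pop()[1])
        elif not Rc or Sc[-1][0] < Rc[-1][0]:
            applied.append(Sc.pop()[1])
        elif Rc[-1] == Sc[-1]:
            Sc.pop()                      # identical tops are applied only once
            applied.append(Rc.pop()[1])
        else:
            raise ValueError("ambiguous base extension")
    return applied
# For example, _demo_merge_towers_by_rank([(9, 'Poly[x]')], [(5, 'FractionField')])
# returns ['FractionField', 'Poly[x]']: the lower-rank functor is applied first.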
def pushout_lattice(R, S):
r"""
Given a pair of objects `R` and `S`, try to construct a
reasonable object `Y` and return maps such that
canonically `R \leftarrow Y \rightarrow S`.
ALGORITHM:
This is based on the model that arose from much discussion at
Sage Days 4. Going up the tower of constructions of `R` and `S`
(e.g. the reals come from the rationals come from the integers),
try to find a common parent, and then try to fill in a lattice
with these two towers as its sides, the common ancestor at the
top, and the desired ring at the bottom.
See the code for a specific worked-out example.
EXAMPLES::
sage: from sage.categories.pushout import pushout_lattice
sage: A, B = pushout_lattice(Qp(7), Frac(ZZ['x']))
sage: A.codomain()
Fraction Field of Univariate Polynomial Ring in x over 7-adic Field with capped relative precision 20
sage: A.codomain() is B.codomain()
True
sage: A, B = pushout_lattice(ZZ, MatrixSpace(ZZ[['x']], 3, 3))
sage: B
Identity endomorphism of Full MatrixSpace of 3 by 3 dense matrices over Power Series Ring in x over Integer Ring
AUTHOR:
- Robert Bradshaw
"""
R_tower = construction_tower(R)
S_tower = construction_tower(S)
Rs = [c[1] for c in R_tower]
Ss = [c[1] for c in S_tower]
# look for common ancestor
start = None
for Z in Rs:
if Z in Ss:
start = Z
if start is None:
# Should I test for a map between the tops of the towers?
# Or, if they're both not ZZ, is it hopeless?
return None
# truncate at common ancestor
R_tower = list(reversed(R_tower[:Rs.index(start)+1]))
S_tower = list(reversed(S_tower[:Ss.index(start)+1]))
Rs = [c[1] for c in R_tower] # the list of objects
Ss = [c[1] for c in S_tower]
Rc = [c[0] for c in R_tower] # the list of functors
Sc = [c[0] for c in S_tower]
# Here we try and construct a 2-dimensional lattice as follows.
# Suppose our towers are Z -> Q -> Qp = R and Z -> Z[t] -> Frac(Z[t]) = S
lattice = {}
# First we fill in the sides
#
# Z
# / \
# Q Z[t]
# / \
# Qp Frac(Z[t])
#
for i in range(len(Rs)):
lattice[i,0] = Rs[i]
for j in range(len(Ss)):
lattice[0,j] = Ss[j]
# Now we attempt to fill in the center, one (diagonal) row at a time,
# one commuting square at a time.
#
# Z
# / \
# Q Z[t]
# / \ / \
# Qp Q[t] Frac(Z[t])
# \ /
# Qp[t]
#
# There is always exactly one "correct" path/order in which to apply operations
# from the top to the bottom. In our example, this is down the far left side.
# We keep track of which that is by clearing out Rc and Sc as we go along.
#
# Note that when applying the functors in the correct order, base extension
# is not needed (though it may occur in the resulting morphisms).
#
for i in range(len(Rc)-1):
for j in range(len(Sc)-1):
try:
if lattice[i,j+1] == lattice[i+1,j]:
# In this case we have R <- S -> R
# We don't want to perform the operation twice
# and all subsequent squares will come from objects
# where the operation was already performed (either
# to the left or right)
Rc[i] = Sc[j] = None # IdentityConstructionFunctor()
lattice[i+1,j+1] = lattice[i,j+1]
elif Rc[i] is None and Sc[j] is None:
lattice[i+1,j+1] = lattice[i,j+1]
elif Rc[i] is None:
lattice[i+1,j+1] = Sc[j](lattice[i+1,j])
elif Sc[j] is None:
lattice[i+1,j+1] = Rc[i](lattice[i,j+1])
else:
# For now, we just look at the rank.
# TODO: be more sophisticated and query the functors themselves
if Rc[i].rank < Sc[j].rank:
lattice[i+1,j+1] = Sc[j](lattice[i+1,j])
Rc[i] = None # force us to use pre-applied Rc[i]
else:
lattice[i+1,j+1] = Rc[i](lattice[i,j+1])
Sc[j] = None # force us to use pre-applied Sc[j]
except (AttributeError, NameError):
# print i, j
# pp(lattice)
for i in range(100):
for j in range(100):
try:
R = lattice[i,j]
print i, j, R
except KeyError:
break
raise CoercionException("%s does not support %s" % (lattice[i,j], 'F'))
# If we are successful, we should have something that looks like this.
#
# Z
# / \
# Q Z[t]
# / \ / \
# Qp Q[t] Frac(Z[t])
# \ / \ /
# Qp[t] Frac(Q[t])
# \ /
# Frac(Qp[t])
#
R_loc = len(Rs)-1
S_loc = len(Ss)-1
# Find the composition coercion morphisms along the bottom left...
if S_loc > 0:
R_map = lattice[R_loc,1].coerce_map_from(R)
for i in range(1, S_loc):
map = lattice[R_loc, i+1].coerce_map_from(lattice[R_loc, i]) # The functor used is implicit here, should it be?
R_map = map * R_map
else:
R_map = R.coerce_map_from(R) # id
# ... and bottom right
if R_loc > 0:
S_map = lattice[1, S_loc].coerce_map_from(S)
for i in range(1, R_loc):
map = lattice[i+1, S_loc].coerce_map_from(lattice[i, S_loc])
S_map = map * S_map
else:
S_map = S.coerce_map_from(S) # id
return R_map, S_map
## def pp(lattice):
## """
## Used in debugging to print the current lattice.
## """
## for i in range(100):
## for j in range(100):
## try:
## R = lattice[i,j]
## print i, j, R
## except KeyError:
## break
def construction_tower(R):
"""
An auxiliary function that is used in :func:`pushout` and :func:`pushout_lattice`.
INPUT:
An object
OUTPUT:
A constructive description of the object from scratch, by a list of pairs
of a construction functor and an object to which the construction functor
is to be applied. The first pair is formed by ``None`` and the given object.
EXAMPLE::
sage: from sage.categories.pushout import construction_tower
sage: construction_tower(MatrixSpace(FractionField(QQ['t']),2))
[(None, Full MatrixSpace of 2 by 2 dense matrices over Fraction Field of Univariate Polynomial Ring in t over Rational Field), (MatrixFunctor, Fraction Field of Univariate Polynomial Ring in t over Rational Field), (FractionField, Univariate Polynomial Ring in t over Rational Field), (Poly[t], Rational Field), (FractionField, Integer Ring)]
"""
tower = [(None, R)]
c = R.construction()
while c is not None:
f, R = c
if not isinstance(f, ConstructionFunctor):
f = BlackBoxConstructionFunctor(f)
tower.append((f,R))
c = R.construction()
return tower
def expand_tower(tower):
"""
An auxiliary function that is used in :func:`pushout`.
INPUT:
A construction tower as returned by :func:`construction_tower`.
OUTPUT:
A new construction tower with all the construction functors expanded.
EXAMPLE::
sage: from sage.categories.pushout import construction_tower, expand_tower
sage: construction_tower(QQ['x,y,z'])
[(None, Multivariate Polynomial Ring in x, y, z over Rational Field),
(MPoly[x,y,z], Rational Field),
(FractionField, Integer Ring)]
sage: expand_tower(construction_tower(QQ['x,y,z']))
[(None, Multivariate Polynomial Ring in x, y, z over Rational Field),
(MPoly[z], Univariate Polynomial Ring in y over Univariate Polynomial Ring in x over Rational Field),
(MPoly[y], Univariate Polynomial Ring in x over Rational Field),
(MPoly[x], Rational Field),
(FractionField, Integer Ring)]
"""
new_tower = []
for f, R in reversed(tower):
if f is None:
new_tower.append((f, R))
else:
fs = f.expand()
for ff in reversed(fs[1:]):
new_tower.append((ff, R))
R = ff(R)
new_tower.append((fs[0], R))
return list(reversed(new_tower))
def type_to_parent(P):
"""
An auxiliary function that is used in :func:`pushout`.
INPUT:
A type
OUTPUT:
A Sage parent structure corresponding to the given type
TEST::
sage: from sage.categories.pushout import type_to_parent
sage: type_to_parent(int)
Integer Ring
sage: type_to_parent(float)
Real Double Field
sage: type_to_parent(complex)
Complex Double Field
sage: type_to_parent(list)
Traceback (most recent call last):
...
TypeError: Not a scalar type.
"""
import sage.rings.all
if P in [int, long]:
return sage.rings.all.ZZ
elif P is float:
return sage.rings.all.RDF
elif P is complex:
return sage.rings.all.CDF
else:
raise TypeError("Not a scalar type.")
| 37.205521
| 470
| 0.561259
|
078e262b66c9254b895a611d85a57b9ee60b4632
| 1,614
|
py
|
Python
|
waf-api/python/LetsEncrypt/manual_dns_create_challenges.py
|
scott-treacy/waf-automation
|
5445c3cd62411e367463de95d5456f241f2042b8
|
[
"MIT"
] | 16
|
2017-11-16T22:07:57.000Z
|
2021-12-23T09:47:01.000Z
|
waf-api/python/LetsEncrypt/manual_dns_create_challenges.py
|
scott-treacy/waf-automation
|
5445c3cd62411e367463de95d5456f241f2042b8
|
[
"MIT"
] | 5
|
2018-02-02T17:58:39.000Z
|
2021-05-24T23:45:11.000Z
|
waf-api/python/LetsEncrypt/manual_dns_create_challenges.py
|
scott-treacy/waf-automation
|
5445c3cd62411e367463de95d5456f241f2042b8
|
[
"MIT"
] | 16
|
2018-01-22T22:02:48.000Z
|
2021-08-12T11:57:55.000Z
|
#!/usr/bin/env python
import csv
import sys
import argparse
from utils.acme_client import *
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("-k", "--account-key", required=True, help="Path to your Let's Encrypt account private key")
parser.add_argument("-D", "--domain-file", required=True, help="File to read domain(s) to create challenges for from")
parser.add_argument("--quiet", action="store_const", const=logging.ERROR, help="Suppress output except for errors")
parser.add_argument("--staging", action="store_true", help="Use staging instance of Let's Encrypt")
args = parser.parse_args(argv)
logging.getLogger().setLevel(args.quiet or logging.getLogger().level)
client = ACMEClient(args.account_key, None, logging, STAGING_CA if args.staging else DEFAULT_CA)
domains = []
with open(args.domain_file, 'r') as f:
for domain in f:
domain = domain.strip()
if domain:
domains.append(domain)
print("{} domains to process.".format(len(domains)))
with open('dns-challenges.csv', 'w', newline='') as f:
fields = ('domain', 'txt_record', 'txt_value', 'challenge_token', 'challenge_uri')
writer = csv.DictWriter(f, fields)
writer.writeheader()
for domain in domains:
print("{}...".format(domain))
challenge_dict = client.verify_domain_dns_get_challenge(domain)
if challenge_dict:
writer.writerow(challenge_dict)
f.flush()
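# Example invocation (hypothetical paths), writing DNS challenges for the domains
# listed in domains.txt to dns-challenges.csv against the staging CA:
#
#   python manual_dns_create_challenges.py -k account.key -D domains.txt --staging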
if __name__ == "__main__": # pragma: no cover
main(sys.argv[1:])
| 38.428571
| 122
| 0.654275
|
fcc9652e947e9b75e55cd14de364300fa397418c
| 15,113
|
py
|
Python
|
src/procedure/image.py
|
figai/figocr
|
c358cceff80502647379bcfc94247ab19e626946
|
[
"Apache-2.0"
] | 2
|
2020-04-19T09:34:19.000Z
|
2020-04-19T14:40:02.000Z
|
src/procedure/image.py
|
figai/figocr
|
c358cceff80502647379bcfc94247ab19e626946
|
[
"Apache-2.0"
] | null | null | null |
src/procedure/image.py
|
figai/figocr
|
c358cceff80502647379bcfc94247ab19e626946
|
[
"Apache-2.0"
] | 3
|
2020-04-19T06:45:08.000Z
|
2020-04-22T13:16:51.000Z
|
# -*- coding: utf8 -*-
import cv2
import numpy as np
from skimage.morphology import opening, closing, square
from skimage.filters import threshold_otsu
import logging
def image_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
dim = None
(h, w) = image.shape[:2]
if width is None and height is None:
return image
if width is None:
r = height / float(h)
dim = (int(w * r), height)
else:
r = width / float(w)
dim = (width, int(h * r))
resized = cv2.resize(image, dim, interpolation=inter)
return resized
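# Hedged usage sketch: resize a synthetic 480x640 image to a fixed width while
# preserving its aspect ratio (the array below is made up for illustration):
#
#   example = np.zeros((480, 640, 3), dtype=np.uint8)
#   thumb = image_resize(example, width=320)   # -> shape (240, 320, 3)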
def cv2_imshow(image):
import matplotlib.pyplot as plt
plt.imshow(image, interpolation='bicubic')
plt.show()
# cv2.imshow('debug', image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# cv2.waitKey(1)
def roi_detect(image, region, thresh_mean=None, trace=False):
# (x, y, w, h)
x, y, width, height = region
up_y_offset = int(height / 2)
up_y = y - up_y_offset
down_y_offset = height + int(height / 2)
down_y = y + down_y_offset
roi = image[up_y: down_y, x: x + width]
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
try:
thresh_value = int(1*threshold_otsu(gray))
except:
thresh_value = 255
if thresh_mean is not None:
thresh_value = min(thresh_value, thresh_mean)
thresh = cv2.threshold(gray, thresh_value, 255, cv2.THRESH_BINARY_INV)[1]
thresh = closing(thresh)
# Scan vertically for blank rows (row-wise intensity profile)
yitensity = np.sum(thresh, axis=1)
middle = height
step = 1
while yitensity[middle] == 0:
middle = height + step
if step < 0:
step = abs(step)+1
else:
step = - step
if abs(step) > 20:
# No content within 20 units above or below the row's horizontal midline
return None
# Top margin: stop after 4 blank rows
up_blank_line = 0
for i in reversed(range(middle)):
if yitensity[i] == 0:
up_y_offset = i
up_blank_line += 1
if up_blank_line > 3:
break
# Bottom margin: stop after 4 blank rows
down_blank_line = 0
for i in range(middle, (down_y - up_y)):
if yitensity[i] == 0:
down_y_offset = i
down_blank_line += 1
if down_blank_line > 3:
break
y = up_y + up_y_offset
height = down_y_offset - up_y_offset
# Crop vertically
thresh = thresh[up_y_offset: down_y_offset, 0: width]
thresh = cv2.Canny(thresh, 100, 200, 3, L2gradient=True)
thresh = cv2.dilate(thresh, None)
# Scan horizontally for blank columns
xitensity = np.sum(thresh, axis=0)
x_offset = 0
x_suffix = len(xitensity) - 1
while True:
if (x_offset >= x_suffix) or (xitensity[x_offset] and xitensity[x_suffix]):
break
if xitensity[x_offset] == 0:
x_offset += 1
if xitensity[x_suffix] == 0:
x_suffix -= 1
x_offset = x_offset - 5 if x_offset - 5 > 0 else 0
x_suffix = x_suffix + 5 if x_suffix + \
5 < len(xitensity) else (len(xitensity) - 1)
x = x + x_offset
width = x_suffix - x_offset
if height < 16 or width <= 10:
# Row content is only about 8 units tall (the size of a decimal point)
return None
# # Horizontal crop
# thresh = thresh[0 : height, x_offset : x_suffix]
# cv2.rectangle(image, (x+cnt_x, y+cnt_y), (x+cnt_x + cnt_w-2, y+cnt_y + cnt_h-2), (0, 0, 0), 1)
# cv2.rectangle(image, (x, y), (x + width-2, y + height-2), (0, 255, 0), 1)
return x, y, width, height
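# Hedged usage sketch: refine a nominal text-line region (x, y, w, h) on a
# scanned page.  The file name and coordinates are made up for illustration:
#
#   page = cv2.imread('scan.png')
#   refined = roi_detect(page, (120, 340, 400, 32))
#   if refined is not None:
#       x, y, w, h = refined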
def max_width_poly(image, region, thresh_mean=None):
x, y, width, height = region
roi = image[y: y + height, x: x + width]
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
try:
thresh_value = int(1*threshold_otsu(gray))
except:
thresh_value = np.percentile(gray, 50)
if thresh_mean is not None:
thresh_value = min(thresh_value, thresh_mean)
thresh = cv2.threshold(gray, thresh_value, 255, cv2.THRESH_BINARY_INV)[1]
thresh = cv2.Canny(thresh, 100, 200, 3, L2gradient=True)
thresh = cv2.dilate(thresh, None)
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[0]
boxes = map(lambda cnt: cv2.boundingRect(cnt), cnts)
boxes = sorted(boxes, key=lambda x: x[2])
if boxes:
box = boxes.pop()
return x+box[0], y+box[1], box[2], box[3]
else:
return None
def threshold(image):
return int(1*threshold_otsu(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)))
def consine(pt1, pt2, pt0):
v1 = np.array(pt1) - np.array(pt0)
v2 = np.array(pt2) - np.array(pt0)
return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
def angle_degree(pt1, pt2, pt0):
radian = np.arccos(consine(pt1, pt2, pt0))
return np.degrees(radian)
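# For instance, angle_degree((1, 0), (0, 1), (0, 0)) is 90.0: the angle at the
# origin between the unit vectors along the x and y axes.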
def square_contours_kps(image, min_area=1800, min_density=None):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 0, 0)
# linesP = cv2.HoughLinesP(edges, 1, (np.pi / 180)*10, 50, None, 100, 10)
# if linesP is not None:
# for i in range(0, len(linesP)):
# l = linesP[i][0]
# cv2.line(gray, (l[0], l[1]), (l[2], l[3]), (0,0,255), 1, cv2.LINE_AA)
# cv2_imshow(gray)
edges = closing(edges, square(3))
# cv2_imshow(edges)
cnts, _ = cv2.findContours(
edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
squares = []
for cnt in cnts:
approx = cv2.approxPolyDP(cnt, cv2.arcLength(cnt, True)*0.02, True)
area = cv2.contourArea(approx)
# cv2.drawContours(image, [approx], -1, (0,0,255), 1)
if area < min_area:
continue
# cv2.drawContours(image, [approx], -1, (0,0,255), 1)
# print(f'area: {area}, approx: {approx}')
# for v in approx:
# cv2.circle(image, tuple(v[0]), 3, (0, 255, 255), -1)
vertex_num = len(approx)
if vertex_num >= 4: # 4 or more vertices (convexity check disabled: "and cv2.isContourConvex(approx)")
for offset in range(0, vertex_num): # take 4 consecutive vertices
square_approx = []
for idx in range(0, 4):
sq_index = (offset+idx) % vertex_num
square_approx.append(approx[sq_index])
square_approx = np.array(square_approx)
maxCosine = 0
for j in range(2, 5):
square_pts = np.squeeze(square_approx)
cosine = abs(
consine(square_pts[j % 4], square_pts[j-2], square_pts[j-1]))
# print(cosine)
maxCosine = max(maxCosine, cosine)
if maxCosine < 0.20: # up and down 12 degree
if vertex_num > 4:
area = cv2.contourArea(square_approx)
if area < min_area:
continue
mask = np.zeros(edges.shape, dtype="uint8")
cv2.drawContours(mask, [square_approx], -1, 255, -1)
mask = cv2.bitwise_and(edges, edges, mask=mask)
# cv2_imshow(mask)
mass = cv2.countNonZero(mask)
density = mass / area
logging.info(f'area:{area}, mass:{mass}')
if min_density is not None:
if density < min_density:
continue
squares.append((area, density, square_approx))
break
# cv2.drawContours(image, np.array([sq[1] for sq in squares]), -1, (0,0,255), 1)
# cv2_imshow(image)
if len(squares) < 4:
return None
# sort by area
squares.sort(key=lambda sq: -sq[0])
squares = squares[:4]
result = []
# (area, density, contour)
for _, _, sq in squares:
M = cv2.moments(sq)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
result.append([cx, cy])
return result
def block_contours_kps(image, min_area=1800):
# convert it to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# cv2_imshow(gray)
result = None
delta = 2
while result is None and delta < 5: # O(3)
# blur it slightly
ksize = 2 * delta + 1
blur = cv2.GaussianBlur(gray, (ksize, ksize), delta)
# cv2_imshow(blur)
# threshold the image
thresh_value = threshold_otsu(blur)
thresh_factor = 3
while result is None and thresh_factor > 0: # O(3*3)
thresh = cv2.threshold(
blur, thresh_value/thresh_factor, 255, cv2.THRESH_BINARY_INV)[1]
# perform a series of erosions + dilations to remove any small regions of noise
thresh = cv2.erode(thresh, None, iterations=2)
thresh = cv2.dilate(thresh, None, iterations=2)
# cv2_imshow(thresh)
# find contours in thresholded image
cnts, _ = cv2.findContours(
thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# image_copy = image.copy()
# cv2.drawContours(image_copy, cnts, -1, (0,0,255), 2)
# cv2_imshow(image_copy)
# No connected-component shapes found
if len(cnts) < 4:
thresh_factor -= 1
continue
cnts = [(cnt, cv2.moments(cnt)) for cnt in cnts]
cnts = [cnt for cnt in cnts if cnt[1]['m00'] > min_area/delta]
# image_copy = image.copy()
# cv2.drawContours(image_copy, [cnt[0] for cnt in cnts], -1, (0,255,0), 1)
# cv2_imshow(image_copy)
# Keep only shapes whose area exceeds the configured minimum
if len(cnts) < 4:
thresh_factor -= 1
continue
cnts = sorted(cnts, key=lambda cnt: -cnt[1]['m00'])
# Take the four shapes with the largest dark area
cnts_target = cnts[:4]
result = []
for cnt, M in cnts_target:
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
# M['m00'] is the mass, [cx, cy] is the centroid
result.append([cx, cy])
# image_copy = image.copy()
# for index, (cnt, M) in enumerate(cnts_target):
# print(M['m00'])
# cv2.drawContours(image_copy, [cnt], -1, (0,255,0), 1)
# cv2.circle(image_copy, tuple(result[index]), 5, (255, 255, 255), -1)
# cv2_imshow(image_copy)
delta += 1
logging.info(f'delta: {delta}, thresh factor: {thresh_factor}')
return result
def get_square_vertex(kps):
"""
pt: [x, y]
kps: [ pt1, pt2, pt3, pt4 ]
return: [ top, right, bot, left ]
"""
if not kps or len(kps) != 4:
return None
kps.sort(key=lambda p: p[0])
left_p = kps[:2]
right_p = kps[2:]
left_p.sort(key=lambda p: p[1])
extTop, extLeft = left_p
right_p.sort(key=lambda p: p[1])
extRight, extBot = right_p
# rows,cols,_ = image.shape
# angle_horizon = np.arctan2(extRight[1] - extTop[1], extRight[0] - extTop[0])
# deg = np.rad2deg(angle_horizon)
# M = cv2.getRotationMatrix2D((cols/2,rows/2),deg,1)
# dst = cv2.warpAffine(image,M,(cols,rows))
# cv2_imshow(dst)
degrees = [
angle_degree(extRight, extLeft, extTop),
angle_degree(extTop, extBot, extRight),
angle_degree(extRight, extLeft, extBot),
angle_degree(extBot, extTop, extLeft)
]
degree_max = max(degrees)
degree_min = min(degrees)
# print(degree)
if 89 <= degree_min and degree_max <= 91:
return np.array([extTop, extRight, extBot, extLeft])
else:
return None
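# Hedged usage sketch: for an axis-aligned square the four centroids come back
# ordered as [top, right, bot, left]; the coordinates below are made up:
#
#   pts = [[100, 100], [0, 100], [0, 0], [100, 0]]
#   get_square_vertex(pts)   # -> array([[0, 0], [100, 0], [100, 100], [0, 100]])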
def draw_match_2_side(img1, kp1, img2, kp2, N):
"""Draw matches on 2 sides
Args:
img1 (HxW(xC) array): image 1
kp1 (Nx2 array): keypoint for image 1
img2 (HxW(xC) array): image 2
kp2 (Nx2 array): keypoint for image 2
N (int): number of matches to draw
Returns:
out_img (Hx2W(xC) array): output image with drawn matches
"""
kp_list = np.linspace(
0, min(kp1.shape[0], kp2.shape[0])-1, N, dtype=np.int)
# Convert keypoints to cv2.Keypoint object
cv_kp1 = [cv2.KeyPoint(x=pt[0], y=pt[1], _size=1) for pt in kp1[kp_list]]
cv_kp2 = [cv2.KeyPoint(x=pt[0], y=pt[1], _size=1) for pt in kp2[kp_list]]
out_img = np.array([])
good_matches = [cv2.DMatch(
_imgIdx=0, _queryIdx=idx, _trainIdx=idx, _distance=0) for idx in range(N)]
out_img = cv2.drawMatches(
img1, cv_kp1, img2, cv_kp2, matches1to2=good_matches, outImg=out_img)
return out_img
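# Hedged usage sketch with synthetic images and keypoints (all values are made
# up for illustration); the result is a side-by-side 100x200 canvas:
#
#   img_a = np.zeros((100, 100, 3), dtype=np.uint8)
#   img_b = np.zeros((100, 100, 3), dtype=np.uint8)
#   kp_a = np.array([[10, 10], [50, 50], [90, 90]], dtype=np.float32)
#   kp_b = np.array([[12, 11], [48, 52], [88, 91]], dtype=np.float32)
#   vis = draw_match_2_side(img_a, kp_a, img_b, kp_b, 3)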
class Image(object):
frames = {}
@classmethod
def get_frame(cls, frame_file):
frame_file = str(frame_file)
frame = cls.frames.get(frame_file)
if frame is None:
img = cv2.imread(frame_file)
vertexs = get_square_vertex(block_contours_kps(img))
if vertexs is None:
return None
frame = (img, vertexs)
cls.frames[frame_file] = frame
return frame
@classmethod
def get_image(cls, image_file):
image_file = str(image_file)
img = cv2.imread(image_file)
vertexs = get_square_vertex(block_contours_kps(img))
if vertexs is None:
vertexs = get_square_vertex(square_contours_kps(img))
if vertexs is not None:
return (img, vertexs)
else:
return None
@classmethod
def align_images(cls, image_file, frame_file):
# scanned image
image_info = cls.get_image(image_file)
if not image_info:
return None
img, kps = image_info
# template image
mask_info = cls.get_frame(frame_file)
if not mask_info:
return None
mask_img, mask_kps = mask_info
# Draw top matches
image = draw_match_2_side(img, kps, mask_img, mask_kps, 4)
####### DEBUG #########
# # resize image
# image = cv2.resize(image, None, fx=0.5, fy=0.5, interpolation = cv2.INTER_CUBIC)
# cv2_imshow(image)
####### DEBUG #########
# Find homography
m, mask = cv2.findHomography(kps, mask_kps, cv2.RANSAC, 5.0)
# Use homography to warp image
h, w, _ = mask_img.shape
result = cv2.warpPerspective(img, m, (w, h))
return result
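# Hedged usage sketch: warp a scanned form onto its template frame.  The file
# names are hypothetical.
#
#   aligned = Image.align_images('scan_0001.tif', 'frame_template.png')
#   if aligned is not None:
#       cv2.imwrite('aligned_0001.png', aligned)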
if __name__ == '__main__':
logging.root.setLevel(logging.INFO)
bads = [
'/Users/jon/Documents/cv/data/CM-MBL-E-01/OCR20170828_0020.tif',
'/Users/jon/Documents/cv/data/CM-MBL-E-01/OCR2017091_0010.tif'
]
goods = [
'/Users/jon/Documents/cv/data/CM-MBL-E-01/CCE2017068_0008.tif',
'/Users/jon/Documents/cv/data/CM-MBL-E-01/CCE2017068_0009.tif'
]
for i in bads:
image, vertexs = Image.get_image(i)
extTop, extRight, extBot, extLeft = vertexs
####### DEBUG #########
cv2.circle(image, tuple(extLeft), 2, (0, 0, 255), -1)
cv2.circle(image, tuple(extRight), 2, (0, 255, 0), -1)
cv2.circle(image, tuple(extTop), 2, (255, 0, 0), -1)
cv2.circle(image, tuple(extBot), 2, (255, 255, 0), -1)
cv2_imshow(image)
####### DEBUG #########
| 30.531313
| 100
| 0.562562
|
7b78dce74e365cf91845bac8a5e8e20720f78e13
| 1,105
|
py
|
Python
|
photo/views.py
|
firdausa7/MY-GALLERY
|
5d2fe2727d760929800c14c11b0ff4c6d081584b
|
[
"MIT"
] | null | null | null |
photo/views.py
|
firdausa7/MY-GALLERY
|
5d2fe2727d760929800c14c11b0ff4c6d081584b
|
[
"MIT"
] | 3
|
2020-06-05T23:24:25.000Z
|
2021-06-10T22:02:41.000Z
|
photo/views.py
|
firdausa7/MY-GALLERY
|
5d2fe2727d760929800c14c11b0ff4c6d081584b
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from django.conf.urls import url,include
from .models import Location, tags, Category, Image
# Create your views here.
#HOME PAGE VIEW FUNCTION
#########################
def index(request):
photos = Image.objects.all()
context = {'photos': photos}
return render(request, 'home.html', context)
#LOCATION Page View Function!
def location(request):
return render(request,'location.html',)
#CATEGORY Page View Function!
def category(request):
return render(request,'category.html',)
#SEARCH CAPABILITY Page View Function!
def search_results(request):
if 'category' in request.GET and request.GET["category"]:
search_term = request.GET.get("category")
searched_category = Image.search_by_category(search_term)
message = f"{search_term}"
return render(request, 'search.html',{"message":message,"categorys": searched_category})
else:
message = "You haven't searched for any term"
return render(request,'search.html',{"message":message})
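# The search view above relies on an Image.search_by_category helper on the
# model.  A minimal sketch of such a classmethod (hypothetical, not part of this
# file) could look like:
#
#   @classmethod
#   def search_by_category(cls, search_term):
#       return cls.objects.filter(category__name__icontains=search_term)
#
# The category__name lookup is an assumption about how Category relates to
# Image in models.py.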
| 26.309524
| 96
| 0.695023
|
c671edd2dc03a715d5202e39317ff42d86e4aa98
| 1,126
|
py
|
Python
|
server/src/test/unit/weblab/data/test_experiments.py
|
romainrossi/weblabdeusto
|
494f1cd291d03dcf1d2e8f3e36d3dbe2348b167f
|
[
"BSD-2-Clause"
] | 15
|
2015-03-12T12:15:41.000Z
|
2021-12-20T17:53:24.000Z
|
server/src/test/unit/weblab/data/test_experiments.py
|
romainrossi/weblabdeusto
|
494f1cd291d03dcf1d2e8f3e36d3dbe2348b167f
|
[
"BSD-2-Clause"
] | 44
|
2015-01-07T09:22:05.000Z
|
2017-01-31T22:44:21.000Z
|
server/src/test/unit/weblab/data/test_experiments.py
|
romainrossi/weblabdeusto
|
494f1cd291d03dcf1d2e8f3e36d3dbe2348b167f
|
[
"BSD-2-Clause"
] | 22
|
2015-01-13T13:55:48.000Z
|
2021-12-16T17:07:00.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <pablo@ordunya.com>
#
from __future__ import print_function, unicode_literals
import unittest
from weblab.data.experiments import ExperimentId, ExperimentInstanceId
class ExperimentIdsTestCase(unittest.TestCase):
def setUp(self):
self.experiment_id = ExperimentId('exp', 'cat')
self.experiment_instance_id = ExperimentInstanceId('inst', 'exp', 'cat')
def _check_repr(self, obj):
self.assertEquals(repr(obj), repr(eval(repr(obj))))
def test_experiment_id(self):
self._check_repr(self.experiment_id)
def test_experiment_instance_id(self):
self._check_repr(self.experiment_instance_id)
def suite():
return unittest.makeSuite(ExperimentIdsTestCase)
if __name__ == '__main__':
unittest.main()
| 26.809524
| 80
| 0.72913
|
1dfe3ab6a4375dc484bf5f3b5449c63b9a1d4c35
| 7,169
|
py
|
Python
|
gwlfe/Output/Loading/StreamBankEros_1.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | null | null | null |
gwlfe/Output/Loading/StreamBankEros_1.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | 6
|
2018-07-24T22:46:28.000Z
|
2018-07-29T19:13:09.000Z
|
gwlfe/Output/Loading/StreamBankEros_1.py
|
mudkipmaster/gwlf-e
|
9e058445537dd32d1916f76c4b73ca64261771cd
|
[
"Apache-2.0"
] | 1
|
2018-07-24T18:22:01.000Z
|
2018-07-24T18:22:01.000Z
|
from numpy import maximum
from numpy import zeros
from gwlfe.BMPs.Stream.SEDFEN import SEDFEN
from gwlfe.BMPs.Stream.SEDFEN import SEDFEN_f
from gwlfe.BMPs.Stream.SEDSTAB import SEDSTAB
from gwlfe.BMPs.Stream.SEDSTAB import SEDSTAB_f
from gwlfe.BMPs.Stream.SURBBANK import SURBBANK
from gwlfe.BMPs.Stream.SURBBANK import SURBBANK_f
from gwlfe.Memoization import memoize
from gwlfe.Output.Loading.StreamBankEros import StreamBankEros
from gwlfe.Output.Loading.StreamBankEros import StreamBankEros_f as StreamBankEros_f_actual
@memoize
def StreamBankEros_1(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0, AntMoist_0, Grow_0, CNP_0, Imper,
ISRR, ISRA, CN, UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0, RecessionCoef, SeepCoef,
Qretention, PctAreaInfil, n25b, Landuse, TileDrainDensity, PointFlow, StreamWithdrawal,
GroundWithdrawal, NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF, AvSlope,
SedAAdjust, StreamLength, n42b, n46c, n85d, AgLength, n42, n45, n85, UrbBankStab):
result = zeros((NYrs, 12))
streambankeros = StreamBankEros(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0, AntMoist_0,
Grow_0,
CNP_0, Imper,
ISRR, ISRA, CN, UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef
, Qretention, PctAreaInfil, n25b, Landuse, TileDrainDensity, PointFlow,
StreamWithdrawal, GroundWithdrawal
, NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF, AvSlope,
SedAAdjust, StreamLength)
sedstab = SEDSTAB(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0, AntMoist_0, Grow_0,
CNP_0, Imper,
ISRR, ISRA, CN, UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef
, Qretention, PctAreaInfil, n25b, Landuse, TileDrainDensity, PointFlow,
StreamWithdrawal, GroundWithdrawal
, NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF, AvSlope,
SedAAdjust, StreamLength, n42b, n46c, n85d)
sedfen = SEDFEN(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0, AntMoist_0, Grow_0,
CNP_0, Imper,
ISRR, ISRA, CN, UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef
, Qretention, PctAreaInfil, n25b, Landuse, TileDrainDensity, PointFlow,
StreamWithdrawal, GroundWithdrawal
, NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF, AvSlope,
SedAAdjust, StreamLength, AgLength, n42, n45, n85)
surbbank = SURBBANK(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0, AntMoist_0, Grow_0, CNP_0,
Imper,
ISRR, ISRA, CN, UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0, RecessionCoef, SeepCoef
, Qretention, PctAreaInfil, n25b, Landuse, TileDrainDensity, PointFlow, StreamWithdrawal,
GroundWithdrawal
, NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF, AvSlope, SedAAdjust,
StreamLength
, UrbBankStab, n42b, n85d)
for Y in range(NYrs):
for i in range(12):
result[Y][i] = streambankeros[Y][i] - (sedstab[Y][i] + sedfen[Y][i] + surbbank[Y][i])
if result[Y][i] < 0:
result[Y][i] = 0
return result
@memoize
def StreamBankEros_1_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0, AntMoist_0, Grow_0, CNP_0,
Imper,
ISRR, ISRA, CN, UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0, RecessionCoef, SeepCoef,
Qretention, PctAreaInfil, n25b, Landuse, TileDrainDensity, PointFlow, StreamWithdrawal,
GroundWithdrawal, NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF, AvSlope,
SedAAdjust, StreamLength, n42b, n46c, n85d, AgLength, n42, n45, n85, UrbBankStab):
streambankeros = StreamBankEros_f_actual(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0,
AntMoist_0, Grow_0,
CNP_0, Imper,
ISRR, ISRA, CN, UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef
, Qretention, PctAreaInfil, n25b, Landuse, TileDrainDensity, PointFlow,
StreamWithdrawal, GroundWithdrawal
, NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF, AvSlope,
SedAAdjust, StreamLength)
sedstab = SEDSTAB_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0, AntMoist_0, Grow_0,
CNP_0, Imper,
ISRR, ISRA, CN, UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef
, Qretention, PctAreaInfil, n25b, Landuse, TileDrainDensity, PointFlow,
StreamWithdrawal, GroundWithdrawal
, NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF, AvSlope,
SedAAdjust, StreamLength, n42b, n46c, n85d)
sedfen = SEDFEN_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0, AntMoist_0, Grow_0,
CNP_0, Imper,
ISRR, ISRA, CN, UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0,
RecessionCoef, SeepCoef
, Qretention, PctAreaInfil, n25b, Landuse, TileDrainDensity, PointFlow,
StreamWithdrawal, GroundWithdrawal
, NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0, AvKF, AvSlope,
SedAAdjust, StreamLength, AgLength, n42, n45, n85)
surbbank = SURBBANK_f(NYrs, DaysMonth, Temp, InitSnow_0, Prec, NRur, NUrb, Area, CNI_0, AntMoist_0, Grow_0, CNP_0,
Imper, ISRR, ISRA, CN, UnsatStor_0, KV, PcntET, DayHrs, MaxWaterCap, SatStor_0, RecessionCoef,
SeepCoef, Qretention, PctAreaInfil, n25b, Landuse, TileDrainDensity, PointFlow,
StreamWithdrawal, GroundWithdrawal, NumAnimals, AvgAnimalWt, StreamFlowVolAdj, SedAFactor_0,
AvKF, AvSlope, SedAAdjust, StreamLength, UrbBankStab, n42b, n85d)
return maximum(streambankeros - (sedstab + sedfen + surbbank), 0)
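# Note: the per-month loop in StreamBankEros_1 and the vectorized expression in
# StreamBankEros_1_f compute the same clamped difference.  A minimal numpy sketch
# of that equivalence (synthetic numbers, for illustration only):
#
#   import numpy as np
#   eros = np.array([[5.0, 1.0], [2.0, 4.0]])
#   removed = np.array([[1.0, 3.0], [0.5, 4.5]])
#   np.maximum(eros - removed, 0)   # -> [[4. , 0. ], [1.5, 0. ]]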
| 70.284314
| 120
| 0.590877
|
28de8082213a5504e69994ceeef982856bb732cd
| 4,104
|
py
|
Python
|
ALREC_Method/trajectory_viewer.py
|
proy3/Abnormal_Trajectory_Classifier
|
a6b27c6847262e9703a0f3404c85c135415c1d4c
|
[
"MIT"
] | 6
|
2019-10-29T03:05:14.000Z
|
2022-03-18T05:14:25.000Z
|
ALREC_Method/trajectory_viewer.py
|
proy3/Abnormal_Trajectory_Classifier
|
a6b27c6847262e9703a0f3404c85c135415c1d4c
|
[
"MIT"
] | 1
|
2022-03-11T03:49:34.000Z
|
2022-03-11T03:49:34.000Z
|
ALREC_Method/trajectory_viewer.py
|
proy3/Abnormal_Trajectory_Classifier
|
a6b27c6847262e9703a0f3404c85c135415c1d4c
|
[
"MIT"
] | 1
|
2021-12-15T09:21:26.000Z
|
2021-12-15T09:21:26.000Z
|
"""
This script is used to view all the trajectories, including augmented and abnormal ones.
"""
import matplotlib.pyplot as plt
from scipy import ndimage
from PIL import Image
import os.path
number_of_images = 100
start_frame_number = 400
background_image_path = 'background_image_2.png'
overwrite_background_image = True
# Ref.: https://stackoverflow.com/questions/24731035/python-pil-0-5-opacity-transparency-alpha
opacity_level = 250 # Opaque is 255, input between 0-255
def mouse_move(self,event):
if event.inaxes and event.inaxes.get_navigate():
s = event.inaxes.format_coord(event.xdata, event.ydata)
self.set_message(s)
def make_image_transparent(image):
"""
Set the alpha of pure-black pixels to opacity_level; other pixels keep full opacity.
Ref.: https://stackoverflow.com/questions/24731035/python-pil-0-5-opacity-transparency-alpha
:param image: opened image
:return: transformed image
"""
image2 = image.convert('RGBA')
data_array = image2.getdata()
newData = []
for item in data_array:
if item[0] == 0 and item[1] == 0 and item[2] == 0:
newData.append((0, 0, 0, opacity_level))
else:
newData.append(item)
image2.putdata(newData)
return image2
def generate_background_image(input_raw_image_frame_path,
frame_name='',
frame_starting_number=0,
is_caviar_data=False):
if is_caviar_data:
image_name_1 = input_raw_image_frame_path + frame_name + str(frame_starting_number) + '.jpg'
else:
image_name_1 = input_raw_image_frame_path + str(1).zfill(8) + '.jpg'
im1 = Image.open(image_name_1)
im1 = make_image_transparent(im1)
alpha_value = 1.0 / number_of_images
for i in range(number_of_images):
if is_caviar_data:
image_name_2 = input_raw_image_frame_path + frame_name \
+ str(i+1+frame_starting_number+start_frame_number) + '.jpg'
else:
image_name_2 = input_raw_image_frame_path + str(i+1+start_frame_number).zfill(8) + '.jpg'
im2 = Image.open(image_name_2)
im2 = make_image_transparent(im2)
im1 = Image.blend(im1,im2,alpha_value)
im1 = make_image_transparent(im1)
im1.save(background_image_path)
class ImageViewer:
def __init__(self, input_raw_image_frame_path,
frame_name='',
frame_starting_number=0,
is_caviar_data=False):
self.fig = plt.figure()
self.ax = plt.axes()
plt.rcParams.update({'font.size': 22})
if overwrite_background_image or not os.path.isfile(background_image_path):
generate_background_image(input_raw_image_frame_path, frame_name, frame_starting_number, is_caviar_data)
img_test = plt.imread(background_image_path, format='png')
self.ax.imshow(ndimage.rotate(img_test, 0))
def format_coord(x,y):
return "(x={:.2f}, y={:.2f})".format(x,y)
self.ax.format_coord=format_coord
mouse_move_patch = lambda arg: mouse_move(self.fig.canvas.toolbar, arg)
self.fig.canvas.toolbar._idDrag = self.fig.canvas.mpl_connect('motion_notify_event', mouse_move_patch)
def add_trajectory(self, x_positions, y_positions, line_width=1, line_color='firebrick'):
self.ax.plot(x_positions, y_positions, '-', linewidth=line_width, color=line_color)
self.ax.arrow(x_positions[-2], y_positions[-2],
x_positions[-1] - x_positions[-2], y_positions[-1] - y_positions[-2],
head_width=5*line_width, head_length=2.5*line_width, fc=line_color, ec=line_color)
def show_image(self):
plt.show()
def save_image(self, image_path_name):
plt.xlabel('x')
plt.ylabel('y')
plt.savefig(image_path_name)
# Test
#x = range(100,300)
#trajectory_image.add_trajectory(x,x)
#x = range(300,400)
#trajectory_image.add_trajectory(x,x)
#x = range(50,100)
#trajectory_image.add_trajectory(x,x)
#x = range(20,40)
#trajectory_image.add_trajectory(x,x)
#plt.show()
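# Usage sketch (hypothetical frame directory; not part of the original script): build the
# blended background once, then overlay trajectories on top of it.
#   viewer = ImageViewer('frames/', frame_name='', frame_starting_number=0, is_caviar_data=False)
#   viewer.add_trajectory(range(100, 300), range(100, 300))
#   viewer.show_image()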
| 34.2
| 116
| 0.665205
|
28b3b666936fc41c4051c7821a66cb8922aa6144
| 45
|
py
|
Python
|
droput_authentication/droput_auth/config/__init__.py
|
hosein-yousefii/DROPUT
|
99a714f03a92b14228a3691ca6568ece0f0ea48c
|
[
"Apache-2.0"
] | 2
|
2022-03-17T08:08:07.000Z
|
2022-03-17T21:38:54.000Z
|
droput_authentication/droput_auth/config/__init__.py
|
hosein-yousefii/DROPUT
|
99a714f03a92b14228a3691ca6568ece0f0ea48c
|
[
"Apache-2.0"
] | null | null | null |
droput_authentication/droput_auth/config/__init__.py
|
hosein-yousefii/DROPUT
|
99a714f03a92b14228a3691ca6568ece0f0ea48c
|
[
"Apache-2.0"
] | null | null | null |
from droput_auth.config.config import Config
| 22.5
| 44
| 0.866667
|
bc0a1ebc7c0ca2c3120bc175c609fc6cb113540a
| 757
|
py
|
Python
|
examples/peripherals/pcnt/rotary_encoder/pytest_rotary_encoder.py
|
BU-EC444/esp-idf
|
5963de1caf284b14ddfed11e52730a55e3783a3d
|
[
"Apache-2.0"
] | 4
|
2022-03-15T22:43:28.000Z
|
2022-03-28T01:25:08.000Z
|
examples/peripherals/pcnt/rotary_encoder/pytest_rotary_encoder.py
|
BU-EC444/esp-idf
|
5963de1caf284b14ddfed11e52730a55e3783a3d
|
[
"Apache-2.0"
] | null | null | null |
examples/peripherals/pcnt/rotary_encoder/pytest_rotary_encoder.py
|
BU-EC444/esp-idf
|
5963de1caf284b14ddfed11e52730a55e3783a3d
|
[
"Apache-2.0"
] | 2
|
2022-02-04T21:36:58.000Z
|
2022-02-09T12:22:48.000Z
|
# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: CC0-1.0
import pytest
from pytest_embedded.dut import Dut
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.esp32s3
@pytest.mark.generic
def test_rotary_encoder(dut: Dut) -> None:
dut.expect_exact('install pcnt unit')
dut.expect_exact('set glitch filter')
dut.expect_exact('install pcnt channels')
dut.expect_exact('set edge and level actions for pcnt channels')
dut.expect_exact('add watch points and register callbacks')
dut.expect_exact('clear pcnt unit')
dut.expect_exact('start pcnt unit')
res = dut.expect(r'Pulse count: (\d+)')
count_val = res.group(1).decode('utf8')
assert -100 <= int(count_val) <= 100
| 32.913043
| 71
| 0.729194
|
8551ebdbf92ecd69657fb5e361e9eb22d83bfb51
| 2,406
|
py
|
Python
|
tools/cxx_wrapper.py
|
URUSCG-LLC/fletch
|
35967b56cecce8fd5ae96a0d85ca318272ee69a0
|
[
"BSD-3-Clause"
] | null | null | null |
tools/cxx_wrapper.py
|
URUSCG-LLC/fletch
|
35967b56cecce8fd5ae96a0d85ca318272ee69a0
|
[
"BSD-3-Clause"
] | null | null | null |
tools/cxx_wrapper.py
|
URUSCG-LLC/fletch
|
35967b56cecce8fd5ae96a0d85ca318272ee69a0
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2015, the Fletch project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import os
import sys
import utils
import subprocess
def invoke_clang(args):
fletch_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os_name = utils.GuessOS()
if os_name == "macos":
os_name = "mac"
args.extend([
'-isysroot',
subprocess.check_output(['xcrun', '--show-sdk-path']).strip()])
clang_bin = os.path.join(
fletch_path, "third_party", "clang", os_name, "bin", "clang++")
print clang_bin
args.insert(0, clang_bin)
print "'%s'" % "' '".join(args)
os.execv(clang_bin, args)
def invoke_gcc(args):
args.insert(0, "g++")
os.execv("/usr/bin/g++", args)
def invoke_gcc_arm(args):
args.insert(0, "arm-linux-gnueabihf-g++-4.8")
os.execv("/usr/bin/arm-linux-gnueabihf-g++-4.8", args)
def invoke_gcc_arm64(args):
args.insert(0, "aarch64-linux-gnu-g++-4.8")
os.execv("/usr/bin/aarch64-linux-gnu-g++-4.8", args)
def invoke_gcc_mbed(args):
path = "/usr/local/gcc-arm-none-eabi-4_9-2015q2/bin/arm-none-eabi-g++"
subprocess.check_call([path] + args)
def invoke_gcc_lk(args):
args.insert(0, "arm-none-eabi-g++")
os.execv("/usr/bin/arm-none-eabi-g++", args)
def main():
args = sys.argv[1:]
if "-L/FLETCH_ASAN" in args:
args.remove("-L/FLETCH_ASAN")
args.insert(0, '-fsanitize=address')
if "-L/FLETCH_CLANG" in args:
args.insert(0, '-fsanitize-undefined-trap-on-error')
if "-DFLETCH_CLANG" in args:
invoke_clang(args)
elif "-L/FLETCH_CLANG" in args:
args.remove("-L/FLETCH_CLANG")
invoke_clang(args)
elif "-DFLETCH_ARM" in args:
invoke_gcc_arm(args)
elif "-L/FLETCH_ARM" in args:
args.remove("-L/FLETCH_ARM")
invoke_gcc_arm(args)
elif "-DFLETCH_ARM64" in args:
invoke_gcc_arm64(args)
elif "-L/FLETCH_ARM64" in args:
args.remove("-L/FLETCH_ARM64")
invoke_gcc_arm64(args)
elif "-DFLETCH_MBED" in args:
invoke_gcc_mbed(args)
elif "-L/FLETCH_MBED" in args:
args.remove("-L/FLETCH_MBED")
invoke_gcc_mbed(args)
elif "-DFLETCH_LK" in args:
invoke_gcc_lk(args)
elif "-L/FLETCH_LK" in args:
args.remove("-L/FLETCH_LK")
invoke_gcc_lk(args)
else:
invoke_gcc(args)
if __name__ == '__main__':
main()
| 28.305882
| 78
| 0.672901
|
24970b286220a3536e312b5ea4869acc1d512ffa
| 4,909
|
py
|
Python
|
qcodes/tests/test_interactive_widget.py
|
RosenblumLabUser/Qcodes
|
01fe56a0751a744d978a893f78ee9d6c8230478f
|
[
"MIT"
] | 223
|
2016-10-29T15:00:24.000Z
|
2022-03-20T06:53:34.000Z
|
qcodes/tests/test_interactive_widget.py
|
M1racleShih/Qcodes
|
c03029a6968e16379155aadc8b083a02e01876a6
|
[
"MIT"
] | 3,406
|
2016-10-25T10:44:50.000Z
|
2022-03-31T09:47:35.000Z
|
qcodes/tests/test_interactive_widget.py
|
nikhartman/Qcodes
|
042c5e25ab9e40b20c316b4055c4842844834d1e
|
[
"MIT"
] | 263
|
2016-10-25T11:35:36.000Z
|
2022-03-31T08:53:20.000Z
|
import time
from unittest.mock import patch
import matplotlib
import pytest
from ipywidgets import HTML, Button, GridspecLayout, Tab, Textarea
# set matplotlib backend before importing pyplot
matplotlib.use("Agg")
from qcodes import interactive_widget
# we only need `experiment` here, but pytest does not discover the dependencies
# by itself so we also need to import all the fixtures this one is dependent
# on
# pylint: disable=unused-import
from qcodes.tests.dataset.conftest import (
dataset,
empty_temp_db,
experiment,
standalone_parameters_dataset,
)
@pytest.fixture(name="tab", scope="function")
def _create_tab():
yield interactive_widget.create_tab()
def test_snapshot_browser():
dct = {"a": {"b": "c", "d": {"e": "f"}}}
interactive_widget.nested_dict_browser(dct)
interactive_widget.nested_dict_browser(dct, ["a"])
@pytest.mark.usefixtures("empty_temp_db")
def test_full_widget_on_empty_db():
interactive_widget.experiments_widget()
@pytest.mark.usefixtures("experiment")
def test_full_widget_on_empty_experiment():
interactive_widget.experiments_widget()
@pytest.mark.usefixtures("dataset")
def test_full_widget_on_empty_dataset():
interactive_widget.experiments_widget()
@pytest.mark.usefixtures("standalone_parameters_dataset")
def test_full_widget_on_one_dataset():
interactive_widget.experiments_widget()
def test_button_to_text(
standalone_parameters_dataset,
): # pylint: disable=redefined-outer-name
box = interactive_widget.button_to_text("title", "body")
(button,) = box.children
button.click()
time.sleep(0.5) # after click
text_area, back_button = box.children
assert "body" in text_area.value
back_button.click()
time.sleep(0.5) # after click
assert len(box.children) == 1
def test_snapshot_button(
tab, standalone_parameters_dataset
): # pylint: disable=redefined-outer-name
ds = standalone_parameters_dataset
snapshot_button = interactive_widget._get_snapshot_button(ds, tab)
snapshot_button.click()
time.sleep(0.5) # after click
# maybe use https://github.com/jupyter-widgets/ipywidgets/issues/2417
assert "snapshot" in tab.get_title(1)
@patch("matplotlib.pyplot.show")
def test_plot_button(
tab, standalone_parameters_dataset
): # pylint: disable=redefined-outer-name
ds = standalone_parameters_dataset
plot_button = interactive_widget._get_plot_button(ds, tab)
plot_button.click()
time.sleep(0.5) # after click
@pytest.mark.parametrize(
"get_button_function",
[
interactive_widget._get_experiment_button,
interactive_widget._get_timestamp_button,
interactive_widget._get_run_id_button,
interactive_widget._get_parameters_button,
],
)
def test_get_experiment_button(
get_button_function, standalone_parameters_dataset,
): # pylint: disable=redefined-outer-name
ds = standalone_parameters_dataset
box = get_button_function(ds)
snapshot_button = box.children[0]
snapshot_button.click()
time.sleep(0.5) # after click
assert len(box.children) == 2
def test_get_parameters(standalone_parameters_dataset):
parameters = interactive_widget._get_parameters(
standalone_parameters_dataset
)
assert bool(parameters["dependent"]) # not empty
assert bool(parameters["independent"]) # not empty
def test_editable_metadata(
standalone_parameters_dataset,
): # pylint: disable=redefined-outer-name
ds = standalone_parameters_dataset
box = interactive_widget.editable_metadata(ds)
button = box.children[0]
button.click()
assert len(box.children) == 2
text_area, save_box = box.children
save_button = save_box.children[0]
assert isinstance(text_area, Textarea)
assert isinstance(button, Button)
test_test = "test value"
text_area.value = test_test
save_button.click()
time.sleep(0.5) # after click
# Test if metadata is saved.
assert ds.metadata[interactive_widget._META_DATA_KEY] == test_test
assert box.children[0].description == test_test
def test_experiments_widget(standalone_parameters_dataset):
dss = [standalone_parameters_dataset]
widget = interactive_widget.experiments_widget(data_sets=dss)
assert len(widget.children) == 3
html, tab, grid = widget.children
assert isinstance(html, HTML)
assert isinstance(tab, Tab)
assert isinstance(grid, GridspecLayout)
assert grid.n_rows == 1 + 1
@pytest.mark.parametrize('sort_by', [None, "run_id", "timestamp"])
def test_experiments_widget_sorting(standalone_parameters_dataset, sort_by):
dss = [standalone_parameters_dataset]
widget = interactive_widget.experiments_widget(
data_sets=dss, sort_by=sort_by
)
assert len(widget.children) == 3
grid = widget.children[2]
assert isinstance(grid, GridspecLayout)
assert grid.n_rows == 1 + 1
| 30.490683
| 79
| 0.744755
|
171a12e661d8b0a26e6093543e96edeac9ab7a50
| 18,951
|
py
|
Python
|
pyzoo/zoo/orca/learn/tf2/tf_runner.py
|
shane-huang/analytics-zoo
|
9c29bc7d678b526cd8ff256d731ed9ac2c18cc81
|
[
"Apache-2.0"
] | null | null | null |
pyzoo/zoo/orca/learn/tf2/tf_runner.py
|
shane-huang/analytics-zoo
|
9c29bc7d678b526cd8ff256d731ed9ac2c18cc81
|
[
"Apache-2.0"
] | 1
|
2020-04-17T02:41:28.000Z
|
2020-04-20T02:37:41.000Z
|
pyzoo/zoo/orca/learn/tf2/tf_runner.py
|
shane-huang/analytics-zoo
|
9c29bc7d678b526cd8ff256d731ed9ac2c18cc81
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2017 The Ray Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import os
import numpy as np
import ray
import ray.services
from contextlib import closing
import socket
from zoo.orca.data.utils import ray_partition_get_data_label
logger = logging.getLogger(__name__)
def find_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
def _try_import_strategy():
"""Late import for Tesnorflow"""
import tensorflow as tf
return tf.distribute.experimental.MultiWorkerMirroredStrategy
class DatasetHandler:
def __init__(self, rank, size):
self.rank = rank
self.size = size
def handle_datasets_train(self, data_creator,
validation_data_creator,
config, epochs, steps_per_epoch,
validation_steps):
config, local_batch_size = self._handle_batch_size(config)
train_dataset = data_creator(config)
if isinstance(train_dataset, ray.ObjectID):
assert steps_per_epoch is not None, "steps_per_epoch must be provided for xshard"
train_dataset = self._handle_xshards(train_dataset,
steps=steps_per_epoch * epochs,
local_batch_size=local_batch_size,
shuffle=True)
else:
train_dataset = self._handle_sharding(train_dataset)
if validation_data_creator is not None:
test_dataset = validation_data_creator(config)
if isinstance(test_dataset, ray.ObjectID):
                assert validation_steps is not None, "validation_steps must be provided " \
                                                     "when using xshards for evaluate"
test_dataset = self._handle_xshards(test_dataset,
steps=validation_steps,
local_batch_size=local_batch_size,
shuffle=False)
else:
test_dataset = self._handle_sharding(test_dataset)
else:
test_dataset = None
return train_dataset, test_dataset
def handle_dataset_validation(self, data_creator, config, steps):
config, local_batch_size = self._handle_batch_size(config)
dataset = data_creator(config)
if isinstance(dataset, ray.ObjectID):
assert steps is not None, "steps must be provided for xshard"
dataset = self._handle_xshards(dataset,
steps=steps,
local_batch_size=local_batch_size,
shuffle=False)
else:
dataset = self._handle_sharding(dataset)
return dataset
def _handle_xshards(self, dataset, steps, local_batch_size, shuffle):
raise NotImplementedError
def _handle_sharding(self, dataset):
raise NotImplementedError
def _handle_batch_size(self, config):
raise NotImplementedError
@staticmethod
def get_handler(backend, rank, size):
if backend == "horovod":
            return HorovodDatasetHandler(rank, size)
if backend == "tf-distributed":
return TFDistributedDatasetHandler(rank, size)
if backend == "tf-local":
return LocalDatasetHandler(rank, size)
raise Exception(f"invalid backend: {backend}")
class HorovodDatasetHandler(DatasetHandler):
def _handle_xshards(self, dataset, steps, local_batch_size, shuffle):
import tensorflow as tf
data, label = ray_partition_get_data_label(ray.get(dataset),
allow_tuple=True,
allow_list=False)
dataset = tf.data.Dataset.from_tensor_slices((data, label))
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
dataset = dataset.with_options(options)
dataset = dataset.repeat()
dataset = dataset.take(steps * local_batch_size)
if shuffle:
dataset = dataset.shuffle(local_batch_size * min(steps, 10))
dataset = dataset.batch(local_batch_size)
return dataset
def _handle_sharding(self, dataset):
from tensorflow.python.distribute.input_ops import auto_shard_dataset
dataset = auto_shard_dataset(dataset, self.size, self.rank)
return dataset
def _handle_batch_size(self, config):
assert "batch_size" in config, "batch_size must be set in config"
config["batch_size"] = config["batch_size"] // self.size
return config, config["batch_size"]
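        # Worked example: with a global config["batch_size"] of 32 and self.size == 4
        # workers, config["batch_size"] is rewritten to 32 // 4 == 8, so each Horovod
        # worker trains on a local batch of 8 samples.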
class TFDistributedDatasetHandler(DatasetHandler):
def _handle_xshards(self, dataset, steps, local_batch_size, shuffle):
import tensorflow as tf
data, label = ray_partition_get_data_label(ray.get(dataset),
allow_tuple=True,
allow_list=False)
def dataset_fn(input_context):
dataset = tf.data.Dataset.from_tensor_slices((data, label))
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = \
tf.data.experimental.AutoShardPolicy.OFF
dataset = dataset.with_options(options)
dataset = dataset.repeat()
dataset = dataset.take(steps * local_batch_size)
if shuffle:
dataset = dataset.shuffle(local_batch_size * min(steps, 10))
dataset = dataset.batch(local_batch_size)
return dataset
from tensorflow.python.distribute import distribution_strategy_context as ds_context
strategy = ds_context.get_strategy()
dataset = strategy.experimental_distribute_datasets_from_function(dataset_fn)
return dataset
def _handle_sharding(self, dataset):
return dataset
def _handle_batch_size(self, config):
assert "batch_size" in config, "batch_size must be set in config"
local_batch_size = config["batch_size"] // self.size
return config, local_batch_size
class LocalDatasetHandler(DatasetHandler):
def _handle_xshards(self, dataset, steps, local_batch_size, shuffle):
import tensorflow as tf
data, label = ray_partition_get_data_label(ray.get(dataset),
allow_tuple=True,
allow_list=False)
dataset = tf.data.Dataset.from_tensor_slices((data, label))
dataset = dataset.repeat()
dataset = dataset.take(steps * local_batch_size)
if shuffle:
dataset = dataset.shuffle(local_batch_size * min(steps, 10))
dataset = dataset.batch(local_batch_size)
return dataset
def _handle_sharding(self, dataset):
return dataset
def _handle_batch_size(self, config):
assert "batch_size" in config, "batch_size must be set in config"
return config, config["batch_size"]
class TFRunner:
"""Manages a TensorFlow model for training."""
def __init__(self, model_creator, compile_args_creator,
config=None,
verbose=False):
"""Initializes the runner.
Args:
model_creator (dict -> Model): see tf_trainer.py.
            compile_args_creator (dict -> dict): see tf_trainer.py.
config (dict): see tf_trainer.py.
verbose (bool): Outputs training data if true.
"""
self.model_creator = model_creator
self.compile_args_creator = compile_args_creator
self.config = {} if config is None else config
self.inter_op_parallelism = self.config.get("inter_op_parallelism", 1)
self.intra_op_parallelism = self.config.get("intra_op_parallelism", 1)
import tensorflow as tf
tf.config.threading.set_inter_op_parallelism_threads(self.inter_op_parallelism)
tf.config.threading.set_intra_op_parallelism_threads(self.intra_op_parallelism)
os.environ["OMP_NUM_THREADS"] = self.config.get("OMP_NUM_THREADS",
str(self.intra_op_parallelism))
os.environ["KMP_BLOCKING_TIME"] = self.config.get("KMP_BLOCKING_TIME",
os.environ.get("KMP_BLOCKING_TIME", "0"))
self.epoch = 0
self.verbose = verbose
def setup(self):
"""Initializes the model."""
logger.debug("Creating model")
self.model = self.model_creator(self.config)
self.model.compile(**self.compile_args_creator(self.config))
self.backend = "tf-local"
self.size = 1
self.rank = 0
from tensorflow.python.distribute import distribution_strategy_context as ds_context
self.strategy = ds_context.get_strategy()
def setup_horovod(self):
import horovod.tensorflow.keras as hvd
hvd.init()
self.model = self.model_creator(self.config)
compile_args = self.compile_args_creator(self.config)
compile_args["optimizer"] = hvd.DistributedOptimizer(compile_args["optimizer"])
self.model.compile(**compile_args)
self.backend = "horovod"
self.size = hvd.size()
self.rank = hvd.rank()
from tensorflow.python.distribute import distribution_strategy_context as ds_context
self.strategy = ds_context.get_strategy()
def setup_distributed(self, urls, world_rank, world_size):
"""Sets up TensorFLow distributed environment and initializes the model.
Args:
urls (str): the URLs that each node uses to connect.
world_rank (int): the index of the runner.
world_size (int): the total number of runners.
"""
assert len(urls) == world_size
tf_config = {
"cluster": {
"worker": urls
},
"task": {
"index": world_rank,
"type": "worker"
}
}
os.environ["TF_CONFIG"] = json.dumps(tf_config)
no_proxy = os.environ.get("no_proxy", "")
ips = [url.split(":")[0] for url in urls]
os.environ["no_proxy"] = ",".join(ips) + "," + no_proxy
MultiWorkerMirroredStrategy = _try_import_strategy()
# MultiWorkerMirroredStrategy handles everything for us, from
# sharding the dataset (or even sharding the data itself if the loader
# reads files from disk) to merging the metrics and weight updates
#
# worker 0 is the "chief" worker and will handle the map-reduce
# every worker ends up with the exact same metrics and model
# after model.fit
#
# because of this, we only really ever need to query its state
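        # Illustrative sketch (hypothetical addresses): with
        # urls == ["10.0.0.1:5005", "10.0.0.2:5005"] and world_rank == 1, the
        # TF_CONFIG exported above is
        #   {"cluster": {"worker": ["10.0.0.1:5005", "10.0.0.2:5005"]},
        #    "task": {"index": 1, "type": "worker"}}
        # and this rank-1 process is not the chief, so its metrics simply mirror worker 0's.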
self.strategy = MultiWorkerMirroredStrategy()
logger.debug("Creating model with MultiWorkerMirroredStrategy")
with self.strategy.scope():
self.model = self.model_creator(self.config)
# For use in model.evaluate()
self.local_model = None
self.backend = "tf-distributed"
self.size = world_size
self.rank = world_rank
def step(self, data_creator, epochs=1, batch_size=32, verbose=1,
callbacks=None, validation_data_creator=None, class_weight=None,
steps_per_epoch=None, validation_steps=None, validation_freq=1,
data_config=None):
"""Runs a training epoch and updates the model parameters."""
config = self.config.copy()
if data_config is not None:
config.update(data_config)
config['batch_size'] = batch_size
with self.strategy.scope():
dataset_handler = DatasetHandler.get_handler(self.backend, self.rank, self.size)
train_dataset, test_dataset = dataset_handler\
.handle_datasets_train(data_creator,
validation_data_creator,
config=config, epochs=epochs,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps)
# process other arguments
if self.backend == "horovod":
import horovod.tensorflow.keras as hvd
hvd_callbacks = [hvd.callbacks.BroadcastGlobalVariablesCallback(0),
hvd.callbacks.MetricAverageCallback()]
if hvd.rank() != 0:
verbose = 0
if callbacks is not None:
callbacks = hvd_callbacks + callbacks
else:
callbacks = hvd_callbacks
elif self.backend == "tf-distributed":
if self.strategy.cluster_resolver.task_id != 0:
verbose = 0
history = self.model.fit(train_dataset,
epochs=self.epoch + epochs,
verbose=verbose,
callbacks=callbacks,
validation_data=test_dataset,
class_weight=class_weight,
initial_epoch=self.epoch,
steps_per_epoch=steps_per_epoch,
validation_steps=validation_steps,
validation_freq=validation_freq)
if history is None:
stats = {}
else:
stats = {"train_" + k: v[-1] for k, v in history.history.items()}
self.epoch += epochs
return [stats]
def validate(self, data_creator, batch_size=32, verbose=1, sample_weight=None,
steps=None, callbacks=None, data_config=None):
"""Evaluates the model on the validation data set."""
config = self.config.copy()
if data_config is not None:
config.update(data_config)
config["batch_size"] = batch_size
with self.strategy.scope():
dataset_handler = DatasetHandler.get_handler(self.backend,
self.rank,
self.size)
dataset = dataset_handler.handle_dataset_validation(data_creator,
config=config,
steps=steps)
if self.backend == "horovod":
import horovod.tensorflow.keras as hvd
if hvd.rank() != 0:
verbose = 0
elif self.backend == "tf-distributed":
if self.strategy.cluster_resolver.task_id != 0:
verbose = 0
params = dict(
verbose=verbose,
sample_weight=sample_weight,
steps=steps,
callbacks=callbacks,
)
results = self.model.evaluate(dataset, **params)
if results is None:
# Using local Model since model.evaluate() returns None
# for MultiWorkerMirroredStrategy
logger.warning("Running a local model to get validation score.")
self.local_model = self.model_creator(self.config)
self.local_model.set_weights(self.model.get_weights())
results = self.local_model.evaluate(dataset, **params)
if isinstance(results, list):
stats = {
"validation_" + k: v
for k, v in zip(self.model.metrics_names, results)
}
else:
stats = {"results": results}
return [stats]
def predict(self, data_creator, batch_size, verbose, steps, callbacks, data_config):
config = self.config.copy()
if data_config is not None:
config.update(data_config)
dataset = data_creator(config)
if not isinstance(dataset, ray.ObjectID):
raise ValueError("Only xshards is supported for predict")
partition = ray.get(dataset)
params = dict(
batch_size=batch_size,
verbose=verbose,
steps=steps,
callbacks=callbacks,
)
if self.backend == "tf-distributed":
local_model = self.model_creator(self.config)
local_model.set_weights(self.model.get_weights())
else:
local_model = self.model
def predict_fn(shard):
y = local_model.predict(shard["x"], **params)
return {"prediction": y}
new_part = [predict_fn(shard) for shard in partition]
return new_part
def get_state(self):
"""Returns the state of the runner."""
return {
"epoch": self.epoch,
"weights": self.model.get_weights(),
"optimizer_weights": self.model.optimizer.get_weights()
}
def set_state(self, state):
"""Sets the state of the model."""
self.epoch = state["epoch"]
self.model.set_weights(state["weights"])
def shutdown(self):
"""Attempts to shut down the worker."""
del self.model
def get_node_ip(self):
"""Returns the IP address of the current node."""
return ray.services.get_node_ip_address()
def find_free_port(self):
"""Finds a free port on the current node."""
return find_free_port()
| 39.563674
| 100
| 0.598649
|
f7a6522272ca25f0b11966dffdd9996ab5aae982
| 29,283
|
py
|
Python
|
ml-agents-envs/mlagents/envs/environment.py
|
trainmachines/unity-ml-agents-cl
|
71ab07a0caf49ea73082f4f80b71951f2b10ff15
|
[
"Apache-2.0"
] | 58
|
2019-06-13T16:35:40.000Z
|
2021-12-30T03:16:45.000Z
|
ml-agents-envs/mlagents/envs/environment.py
|
trainmachines/unity-ml-agents-cl
|
71ab07a0caf49ea73082f4f80b71951f2b10ff15
|
[
"Apache-2.0"
] | 7
|
2020-09-26T00:43:02.000Z
|
2022-02-10T01:26:53.000Z
|
ml-agents-envs/mlagents/envs/environment.py
|
trainmachines/unity-ml-agents-cl
|
71ab07a0caf49ea73082f4f80b71951f2b10ff15
|
[
"Apache-2.0"
] | 23
|
2019-06-25T17:09:32.000Z
|
2021-03-18T06:44:17.000Z
|
import atexit
import glob
import logging
import numpy as np
import os
import subprocess
from typing import Dict, List, Optional, Any
from mlagents.envs.base_unity_environment import BaseUnityEnvironment
from mlagents.envs.timers import timed, hierarchical_timer
from .brain import AllBrainInfo, BrainInfo, BrainParameters
from .exception import (
UnityEnvironmentException,
UnityCommunicationException,
UnityActionException,
UnityTimeOutException,
)
from mlagents.envs.communicator_objects.unity_rl_input_pb2 import UnityRLInput
from mlagents.envs.communicator_objects.unity_rl_output_pb2 import UnityRLOutput
from mlagents.envs.communicator_objects.agent_action_proto_pb2 import AgentActionProto
from mlagents.envs.communicator_objects.environment_parameters_proto_pb2 import (
EnvironmentParametersProto,
)
from mlagents.envs.communicator_objects.unity_rl_initialization_input_pb2 import (
UnityRLInitializationInput,
)
from mlagents.envs.communicator_objects.unity_rl_initialization_output_pb2 import (
UnityRLInitializationOutput,
)
from mlagents.envs.communicator_objects.unity_input_pb2 import UnityInput
from mlagents.envs.communicator_objects.custom_action_pb2 import CustomAction
from .rpc_communicator import RpcCommunicator
from sys import platform
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("mlagents.envs")
class UnityEnvironment(BaseUnityEnvironment):
SCALAR_ACTION_TYPES = (int, np.int32, np.int64, float, np.float32, np.float64)
SINGLE_BRAIN_ACTION_TYPES = SCALAR_ACTION_TYPES + (list, np.ndarray)
SINGLE_BRAIN_TEXT_TYPES = list
def __init__(
self,
file_name: Optional[str] = None,
worker_id: int = 0,
base_port: int = 5005,
seed: int = 0,
docker_training: bool = False,
no_graphics: bool = False,
timeout_wait: int = 30,
args: Optional[List[str]] = None,
):
"""
Starts a new unity environment and establishes a connection with the environment.
Notice: Currently communication between Unity and Python takes place over an open socket without authentication.
Ensure that the network where training takes place is secure.
:string file_name: Name of Unity environment binary.
:int base_port: Baseline port number to connect to Unity environment over. worker_id increments over this.
:int worker_id: Number to add to communication port (5005) [0]. Used for asynchronous agent scenarios.
:bool docker_training: Informs this class whether the process is being run within a container.
:bool no_graphics: Whether to run the Unity simulator in no-graphics mode
:int timeout_wait: Time (in seconds) to wait for connection from environment.
        :list args: Additional Unity command line arguments
"""
args = args or []
atexit.register(self._close)
self.port = base_port + worker_id
self._buffer_size = 12000
self._version_ = "API-10"
self._loaded = (
False
) # If true, this means the environment was successfully loaded
self.proc1 = (
None
) # The process that is started. If None, no process was started
self.communicator = self.get_communicator(worker_id, base_port, timeout_wait)
self.worker_id = worker_id
# If the environment name is None, a new environment will not be launched
# and the communicator will directly try to connect to an existing unity environment.
# If the worker-id is not 0 and the environment name is None, an error is thrown
if file_name is None and worker_id != 0:
raise UnityEnvironmentException(
"If the environment name is None, "
"the worker-id must be 0 in order to connect with the Editor."
)
if file_name is not None:
self.executable_launcher(file_name, docker_training, no_graphics, args)
else:
logger.info(
"Start training by pressing the Play button in the Unity Editor."
)
self._loaded = True
rl_init_parameters_in = UnityRLInitializationInput(seed=seed)
try:
aca_params = self.send_academy_parameters(rl_init_parameters_in)
except UnityTimeOutException:
self._close()
raise
# TODO : think of a better way to expose the academyParameters
self._unity_version = aca_params.version
if self._unity_version != self._version_:
self._close()
raise UnityEnvironmentException(
"The API number is not compatible between Unity and python. Python API : {0}, Unity API : "
"{1}.\nPlease go to https://github.com/Unity-Technologies/ml-agents to download the latest version "
"of ML-Agents.".format(self._version_, self._unity_version)
)
self._n_agents: Dict[str, int] = {}
self._is_first_message = True
self._academy_name = aca_params.name
self._log_path = aca_params.log_path
self._brains: Dict[str, BrainParameters] = {}
self._brain_names: List[str] = []
self._external_brain_names: List[str] = []
for brain_param in aca_params.brain_parameters:
self._brain_names += [brain_param.brain_name]
self._brains[brain_param.brain_name] = BrainParameters.from_proto(
brain_param
)
if brain_param.is_training:
self._external_brain_names += [brain_param.brain_name]
self._num_brains = len(self._brain_names)
self._num_external_brains = len(self._external_brain_names)
self._resetParameters = dict(aca_params.environment_parameters.float_parameters)
logger.info(
"\n'{0}' started successfully!\n{1}".format(self._academy_name, str(self))
)
if self._num_external_brains == 0:
logger.warning(
" No Learning Brains set to train found in the Unity Environment. "
"You will not be able to pass actions to your agent(s)."
)
@property
def logfile_path(self):
return self._log_path
@property
def brains(self):
return self._brains
@property
def academy_name(self):
return self._academy_name
@property
def number_brains(self):
return self._num_brains
@property
def number_external_brains(self):
return self._num_external_brains
@property
def brain_names(self):
return self._brain_names
@property
def external_brain_names(self):
return self._external_brain_names
@staticmethod
def get_communicator(worker_id, base_port, timeout_wait):
return RpcCommunicator(worker_id, base_port, timeout_wait)
@property
def external_brains(self):
external_brains = {}
for brain_name in self.external_brain_names:
external_brains[brain_name] = self.brains[brain_name]
return external_brains
@property
def reset_parameters(self):
return self._resetParameters
def executable_launcher(self, file_name, docker_training, no_graphics, args):
cwd = os.getcwd()
file_name = (
file_name.strip()
.replace(".app", "")
.replace(".exe", "")
.replace(".x86_64", "")
.replace(".x86", "")
)
true_filename = os.path.basename(os.path.normpath(file_name))
logger.debug("The true file name is {}".format(true_filename))
launch_string = None
if platform == "linux" or platform == "linux2":
candidates = glob.glob(os.path.join(cwd, file_name) + ".x86_64")
if len(candidates) == 0:
candidates = glob.glob(os.path.join(cwd, file_name) + ".x86")
if len(candidates) == 0:
candidates = glob.glob(file_name + ".x86_64")
if len(candidates) == 0:
candidates = glob.glob(file_name + ".x86")
if len(candidates) > 0:
launch_string = candidates[0]
elif platform == "darwin":
candidates = glob.glob(
os.path.join(
cwd, file_name + ".app", "Contents", "MacOS", true_filename
)
)
if len(candidates) == 0:
candidates = glob.glob(
os.path.join(file_name + ".app", "Contents", "MacOS", true_filename)
)
if len(candidates) == 0:
candidates = glob.glob(
os.path.join(cwd, file_name + ".app", "Contents", "MacOS", "*")
)
if len(candidates) == 0:
candidates = glob.glob(
os.path.join(file_name + ".app", "Contents", "MacOS", "*")
)
if len(candidates) > 0:
launch_string = candidates[0]
elif platform == "win32":
candidates = glob.glob(os.path.join(cwd, file_name + ".exe"))
if len(candidates) == 0:
candidates = glob.glob(file_name + ".exe")
if len(candidates) > 0:
launch_string = candidates[0]
if launch_string is None:
self._close()
raise UnityEnvironmentException(
"Couldn't launch the {0} environment. "
"Provided filename does not match any environments.".format(
true_filename
)
)
else:
logger.debug("This is the launch string {}".format(launch_string))
# Launch Unity environment
if not docker_training:
subprocess_args = [launch_string]
if no_graphics:
subprocess_args += ["-nographics", "-batchmode"]
subprocess_args += ["--port", str(self.port)]
subprocess_args += args
try:
self.proc1 = subprocess.Popen(subprocess_args)
except PermissionError as perm:
# This is likely due to missing read or execute permissions on file.
raise UnityEnvironmentException(
f"Error when trying to launch environment - make sure "
f"permissions are set correctly. For example "
f'"chmod -R 755 {launch_string}"'
) from perm
else:
"""
Comments for future maintenance:
xvfb-run is a wrapper around Xvfb, a virtual xserver where all
rendering is done to virtual memory. It automatically creates a
            new virtual server, picking a server number via `auto-servernum`.
            The server is passed the arguments using `server-args`; we are telling
Xvfb to create Screen number 0 with width 640, height 480 and depth 24 bits.
Note that 640 X 480 are the default width and height. The main reason for
us to add this is because we'd like to change the depth from the default
of 8 bits to 24.
Unfortunately, this means that we will need to pass the arguments through
a shell which is why we set `shell=True`. Now, this adds its own
            complications. E.g. SIGINT can bounce off the shell and not get propagated
to the child processes. This is why we add `exec`, so that the shell gets
launched, the arguments are passed to `xvfb-run`. `exec` replaces the shell
we created with `xvfb`.
"""
docker_ls = (
"exec xvfb-run --auto-servernum"
" --server-args='-screen 0 640x480x24'"
" {0} --port {1}"
).format(launch_string, str(self.port))
self.proc1 = subprocess.Popen(
docker_ls,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
def __str__(self):
return (
"""Unity Academy name: {0}
Number of Brains: {1}
Number of Training Brains : {2}
Reset Parameters :\n\t\t{3}""".format(
self._academy_name,
str(self._num_brains),
str(self._num_external_brains),
"\n\t\t".join(
[
str(k) + " -> " + str(self._resetParameters[k])
for k in self._resetParameters
]
),
)
+ "\n"
+ "\n".join([str(self._brains[b]) for b in self._brains])
)
def reset(
self,
config: Dict = None,
train_mode: bool = True,
custom_reset_parameters: Any = None,
) -> AllBrainInfo:
"""
Sends a signal to reset the unity environment.
:return: AllBrainInfo : A data structure corresponding to the initial reset state of the environment.
"""
if config is None:
config = self._resetParameters
elif config:
logger.info(
"Academy reset with parameters: {0}".format(
", ".join([str(x) + " -> " + str(config[x]) for x in config])
)
)
for k in config:
if (k in self._resetParameters) and (isinstance(config[k], (int, float))):
self._resetParameters[k] = config[k]
elif not isinstance(config[k], (int, float)):
raise UnityEnvironmentException(
"The value for parameter '{0}'' must be an Integer or a Float.".format(
k
)
)
else:
raise UnityEnvironmentException(
"The parameter '{0}' is not a valid parameter.".format(k)
)
if self._loaded:
outputs = self.communicator.exchange(
self._generate_reset_input(train_mode, config, custom_reset_parameters)
)
if outputs is None:
raise UnityCommunicationException("Communicator has stopped.")
rl_output = outputs.rl_output
s = self._get_state(rl_output)
for _b in self._external_brain_names:
self._n_agents[_b] = len(s[_b].agents)
self._is_first_message = False
return s
else:
raise UnityEnvironmentException("No Unity environment is loaded.")
@timed
def step(
self,
vector_action: Dict[str, np.ndarray] = None,
memory: Optional[Dict[str, np.ndarray]] = None,
text_action: Optional[Dict[str, List[str]]] = None,
value: Optional[Dict[str, np.ndarray]] = None,
custom_action: Dict[str, Any] = None,
) -> AllBrainInfo:
"""
Provides the environment with an action, moves the environment dynamics forward accordingly,
and returns observation, state, and reward information to the agent.
:param value: Value estimates provided by agents.
:param vector_action: Agent's vector action. Can be a scalar or vector of int/floats.
:param memory: Vector corresponding to memory used for recurrent policies.
:param text_action: Text action to send to environment for.
:param custom_action: Optional instance of a CustomAction protobuf message.
:return: AllBrainInfo : A Data structure corresponding to the new state of the environment.
"""
if self._is_first_message:
return self.reset()
vector_action = {} if vector_action is None else vector_action
memory = {} if memory is None else memory
text_action = {} if text_action is None else text_action
value = {} if value is None else value
custom_action = {} if custom_action is None else custom_action
# Check that environment is loaded, and episode is currently running.
if not self._loaded:
raise UnityEnvironmentException("No Unity environment is loaded.")
else:
if isinstance(vector_action, self.SINGLE_BRAIN_ACTION_TYPES):
if self._num_external_brains == 1:
vector_action = {self._external_brain_names[0]: vector_action}
elif self._num_external_brains > 1:
raise UnityActionException(
"You have {0} brains, you need to feed a dictionary of brain names a keys, "
"and vector_actions as values".format(self._num_brains)
)
else:
raise UnityActionException(
"There are no external brains in the environment, "
"step cannot take a vector_action input"
)
if isinstance(memory, self.SINGLE_BRAIN_ACTION_TYPES):
if self._num_external_brains == 1:
memory = {self._external_brain_names[0]: memory}
elif self._num_external_brains > 1:
raise UnityActionException(
"You have {0} brains, you need to feed a dictionary of brain names as keys "
"and memories as values".format(self._num_brains)
)
else:
raise UnityActionException(
"There are no external brains in the environment, "
"step cannot take a memory input"
)
if isinstance(text_action, self.SINGLE_BRAIN_TEXT_TYPES):
if self._num_external_brains == 1:
text_action = {self._external_brain_names[0]: text_action}
elif self._num_external_brains > 1:
raise UnityActionException(
"You have {0} brains, you need to feed a dictionary of brain names as keys "
"and text_actions as values".format(self._num_brains)
)
else:
raise UnityActionException(
"There are no external brains in the environment, "
"step cannot take a value input"
)
if isinstance(value, self.SINGLE_BRAIN_ACTION_TYPES):
if self._num_external_brains == 1:
value = {self._external_brain_names[0]: value}
elif self._num_external_brains > 1:
raise UnityActionException(
"You have {0} brains, you need to feed a dictionary of brain names as keys "
"and state/action value estimates as values".format(
self._num_brains
)
)
else:
raise UnityActionException(
"There are no external brains in the environment, "
"step cannot take a value input"
)
if isinstance(custom_action, CustomAction):
if self._num_external_brains == 1:
custom_action = {self._external_brain_names[0]: custom_action}
elif self._num_external_brains > 1:
raise UnityActionException(
"You have {0} brains, you need to feed a dictionary of brain names as keys "
"and CustomAction instances as values".format(self._num_brains)
)
else:
raise UnityActionException(
"There are no external brains in the environment, "
"step cannot take a custom_action input"
)
for brain_name in (
list(vector_action.keys())
+ list(memory.keys())
+ list(text_action.keys())
):
if brain_name not in self._external_brain_names:
raise UnityActionException(
"The name {0} does not correspond to an external brain "
"in the environment".format(brain_name)
)
for brain_name in self._external_brain_names:
n_agent = self._n_agents[brain_name]
if brain_name not in vector_action:
if self._brains[brain_name].vector_action_space_type == "discrete":
vector_action[brain_name] = (
[0.0]
* n_agent
* len(self._brains[brain_name].vector_action_space_size)
)
else:
vector_action[brain_name] = (
[0.0]
* n_agent
* self._brains[brain_name].vector_action_space_size[0]
)
else:
vector_action[brain_name] = self._flatten(vector_action[brain_name])
if brain_name not in memory:
memory[brain_name] = []
else:
if memory[brain_name] is None:
memory[brain_name] = []
else:
memory[brain_name] = self._flatten(memory[brain_name])
if brain_name not in text_action:
text_action[brain_name] = [""] * n_agent
else:
if text_action[brain_name] is None:
text_action[brain_name] = [""] * n_agent
if brain_name not in custom_action:
custom_action[brain_name] = [None] * n_agent
else:
if custom_action[brain_name] is None:
custom_action[brain_name] = [None] * n_agent
if isinstance(custom_action[brain_name], CustomAction):
custom_action[brain_name] = [
custom_action[brain_name]
] * n_agent
number_text_actions = len(text_action[brain_name])
if not ((number_text_actions == n_agent) or number_text_actions == 0):
raise UnityActionException(
"There was a mismatch between the provided text_action and "
"the environment's expectation: "
"The brain {0} expected {1} text_action but was given {2}".format(
brain_name, n_agent, number_text_actions
)
)
discrete_check = (
self._brains[brain_name].vector_action_space_type == "discrete"
)
expected_discrete_size = n_agent * len(
self._brains[brain_name].vector_action_space_size
)
continuous_check = (
self._brains[brain_name].vector_action_space_type == "continuous"
)
expected_continuous_size = (
self._brains[brain_name].vector_action_space_size[0] * n_agent
)
if not (
(
discrete_check
and len(vector_action[brain_name]) == expected_discrete_size
)
or (
continuous_check
and len(vector_action[brain_name]) == expected_continuous_size
)
):
raise UnityActionException(
"There was a mismatch between the provided action and "
"the environment's expectation: "
"The brain {0} expected {1} {2} action(s), but was provided: {3}".format(
brain_name,
str(expected_discrete_size)
if discrete_check
else str(expected_continuous_size),
self._brains[brain_name].vector_action_space_type,
str(vector_action[brain_name]),
)
)
step_input = self._generate_step_input(
vector_action, memory, text_action, value, custom_action
)
with hierarchical_timer("communicator.exchange"):
outputs = self.communicator.exchange(step_input)
if outputs is None:
raise UnityCommunicationException("Communicator has stopped.")
rl_output = outputs.rl_output
state = self._get_state(rl_output)
for _b in self._external_brain_names:
self._n_agents[_b] = len(state[_b].agents)
return state
def close(self):
"""
Sends a shutdown signal to the unity environment, and closes the socket connection.
"""
if self._loaded:
self._close()
else:
raise UnityEnvironmentException("No Unity environment is loaded.")
def _close(self):
self._loaded = False
self.communicator.close()
if self.proc1 is not None:
self.proc1.kill()
@classmethod
def _flatten(cls, arr: Any) -> List[float]:
"""
Converts arrays to list.
:param arr: numpy vector.
:return: flattened list.
"""
if isinstance(arr, cls.SCALAR_ACTION_TYPES):
arr = [float(arr)]
if isinstance(arr, np.ndarray):
arr = arr.tolist()
if len(arr) == 0:
return arr
if isinstance(arr[0], np.ndarray):
arr = [item for sublist in arr for item in sublist.tolist()]
if isinstance(arr[0], list):
arr = [item for sublist in arr for item in sublist]
arr = [float(x) for x in arr]
return arr
def _get_state(self, output: UnityRLOutput) -> AllBrainInfo:
"""
Collects experience information from all external brains in environment at current step.
:return: a dictionary of BrainInfo objects.
"""
_data = {}
for brain_name in output.agentInfos:
agent_info_list = output.agentInfos[brain_name].value
_data[brain_name] = BrainInfo.from_agent_proto(
self.worker_id, agent_info_list, self.brains[brain_name]
)
return _data
@timed
def _generate_step_input(
self,
vector_action: Dict[str, np.ndarray],
memory: Dict[str, np.ndarray],
text_action: Dict[str, list],
value: Dict[str, np.ndarray],
custom_action: Dict[str, list],
) -> UnityInput:
rl_in = UnityRLInput()
for b in vector_action:
n_agents = self._n_agents[b]
if n_agents == 0:
continue
_a_s = len(vector_action[b]) // n_agents
_m_s = len(memory[b]) // n_agents
for i in range(n_agents):
action = AgentActionProto(
vector_actions=vector_action[b][i * _a_s : (i + 1) * _a_s],
memories=memory[b][i * _m_s : (i + 1) * _m_s],
text_actions=text_action[b][i],
custom_action=custom_action[b][i],
)
if b in value:
if value[b] is not None:
action.value = float(value[b][i])
rl_in.agent_actions[b].value.extend([action])
rl_in.command = 0
return self.wrap_unity_input(rl_in)
def _generate_reset_input(
self, training: bool, config: Dict, custom_reset_parameters: Any
) -> UnityInput:
rl_in = UnityRLInput()
rl_in.is_training = training
rl_in.environment_parameters.CopyFrom(EnvironmentParametersProto())
for key in config:
rl_in.environment_parameters.float_parameters[key] = config[key]
if custom_reset_parameters is not None:
rl_in.environment_parameters.custom_reset_parameters.CopyFrom(
custom_reset_parameters
)
rl_in.command = 1
return self.wrap_unity_input(rl_in)
def send_academy_parameters(
self, init_parameters: UnityRLInitializationInput
) -> UnityRLInitializationOutput:
inputs = UnityInput()
inputs.rl_initialization_input.CopyFrom(init_parameters)
return self.communicator.initialize(inputs).rl_initialization_output
@staticmethod
def wrap_unity_input(rl_input: UnityRLInput) -> UnityInput:
result = UnityInput()
result.rl_input.CopyFrom(rl_input)
return result
| 43.190265
| 120
| 0.563262
|
68a6faee5354dc73505617fbb80d35ae1721bcaa
| 916
|
py
|
Python
|
backblast/model.py
|
kickstandproject/backblast
|
a3b251afeba5798cccfcd3766f1ea3e55f78034c
|
[
"Apache-2.0"
] | 1
|
2016-03-26T21:30:19.000Z
|
2016-03-26T21:30:19.000Z
|
backblast/model.py
|
kickstandproject/backblast
|
a3b251afeba5798cccfcd3766f1ea3e55f78034c
|
[
"Apache-2.0"
] | null | null | null |
backblast/model.py
|
kickstandproject/backblast
|
a3b251afeba5798cccfcd3766f1ea3e55f78034c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class TriggerEvent(object):
def __init__(self):
self.type = None
self.channel = None
self.exten = None
def __repr__(self):
ret = 'TriggerEvent %s %s to %s' % (self.type,
self.channel,
self.exten)
return ret
| 33.925926
| 75
| 0.628821
|
e2092fb01863f91227a7a77f4cf5d7e031c0822a
| 1,365
|
py
|
Python
|
files/utils.py
|
SIBSIND/PHPMYADMINWEBSITE
|
e2112f0fb43f042be551ecaadb05b1cc79ba5360
|
[
"MIT"
] | 31
|
2015-05-26T23:13:06.000Z
|
2022-03-10T12:03:33.000Z
|
files/utils.py
|
SIBSIND/PHPMYADMINWEBSITE
|
e2112f0fb43f042be551ecaadb05b1cc79ba5360
|
[
"MIT"
] | 136
|
2015-01-15T23:30:23.000Z
|
2022-03-31T00:59:01.000Z
|
files/utils.py
|
SIBSIND/PHPMYADMINWEBSITE
|
e2112f0fb43f042be551ecaadb05b1cc79ba5360
|
[
"MIT"
] | 158
|
2015-01-15T23:25:26.000Z
|
2022-02-09T01:47:20.000Z
|
# -*- coding: UTF-8 -*-
# vim: set expandtab sw=4 ts=4 sts=4:
#
# phpMyAdmin web site
#
# Copyright (C) 2008 - 2016 Michal Cihar <michal@cihar.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from hashlib import sha1, sha256
def read_sum(filename, origfile=None):
try:
with open(filename, 'r') as handle:
return handle.read().split()[0]
except IOError:
if origfile is not None:
with open(origfile, 'r') as handle:
data = handle.read()
if filename.endswith('.sha1'):
return sha1(data).hexdigest()
if filename.endswith('.sha256'):
return sha256(data).hexdigest()
return ''
| 35
| 73
| 0.671795
|
be64e074af6729b6171d5eed328bc46d2d983abb
| 19,608
|
py
|
Python
|
tensorflow_probability/python/distributions/masked.py
|
mederrata/probability
|
bc6c411b0fbd83141f303f91a27343fe3c43a797
|
[
"Apache-2.0"
] | 1
|
2022-03-22T11:56:31.000Z
|
2022-03-22T11:56:31.000Z
|
tensorflow_probability/python/distributions/masked.py
|
robot0102/probability
|
89d248c420b8ecabfd9d6de4a1aa8d3886920049
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_probability/python/distributions/masked.py
|
robot0102/probability
|
89d248c420b8ecabfd9d6de4a1aa8d3886920049
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The MaskedIndependent distribution class."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.bijectors import bijector as bijector_lib
from tensorflow_probability.python.distributions import batch_broadcast
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.distributions import log_prob_ratio
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
def _add_event_dims_to_mask(validity_mask, *, dist=None, event_ndims=None):
validity_mask = tf.convert_to_tensor(validity_mask)
if event_ndims is None:
event_ndims = ps.rank_from_shape(dist.event_shape_tensor())
return tf.reshape(
validity_mask,
ps.concat([
ps.shape(validity_mask),
ps.ones(event_ndims, dtype=tf.int32)
], axis=0))
def _make_masked_fn(fn_name, n_event_shapes, safe_value,
make_arg0_safe=False):
"""Implements functions like mean, variance, etc.
Args:
fn_name: Name of the method called on the underlying distribution.
n_event_shapes: Number of event shape repeats in the shape of the underlying
function's output.
safe_value: The value to be placed in invalid locations. May be
`'safe_sample'` to specify we should use the "safe sample" value.
make_arg0_safe: If `True`, we will apply `self.safe_sample_fn` to ensure the
argument passed into the underlying routine is a "safe" sample.
Returns:
fn: Callable implementing the given function.
"""
def fn(self, *args, **kwargs):
if safe_value == 'safe_sample' or make_arg0_safe: # Only if needed.
safe_val = tf.stop_gradient(self.safe_sample_fn(self.distribution))
validity_mask = tf.convert_to_tensor(self.validity_mask)
if make_arg0_safe:
x = args[0]
safe_x = tf.where(
_add_event_dims_to_mask(validity_mask, dist=self), x, safe_val)
args = (safe_x,) + args[1:]
val = getattr(self.distribution, fn_name)(*args, **kwargs)
if n_event_shapes:
validity_mask = tf.reshape(
validity_mask,
ps.concat(
[ps.shape(validity_mask)] +
[ps.ones_like(self.event_shape_tensor())] * n_event_shapes,
axis=0))
if safe_value == 'safe_sample':
sentinel = tf.cast(safe_val, val.dtype)
else:
sentinel = tf.cast(safe_value, val.dtype)
return tf.where(validity_mask, val, sentinel)
fn.__name__ = f'_{fn_name}'
return fn
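# Illustrative (hypothetical) use of the factory above: a masked `mean` whose output
# repeats the event shape once and whose invalid batch members fall back to the safe
# sample could be built as
#   _masked_mean = _make_masked_fn('mean', n_event_shapes=1, safe_value='safe_sample')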
def _fixed_sample(d):
return d.sample(seed=samplers.zeros_seed())
class _Masked(distribution_lib.Distribution):
"""A distribution that masks invalid underlying distributions.
Sometimes we may want a way of masking out a subset of distributions. Perhaps
we have labels for only a subset of batch members and want to evaluate a
log_prob. Or we may want to encode a sparse random variable as a dense
random variable with a mask applied. In single-program/multiple-data regimes,
it can be necessary to pad Distributions and the samples thereof to a given
size in order to achieve the "single-program" desideratum.
When computing a probability density in this regime, we would like to mask out
the contributions of invalid batch members. We may also want to ensure that
the values being sampled are valid parameters for descendant distributions in
a hierarchical model, even if they are ultimately masked out. This
distribution answers those requirements. Specifically, for invalid batch
elements:
- `log_prob(x) == 0.` for all `x`, with no gradients back to `x`, nor any
gradients to the parameters of `distribution`.
- `sample() == tf.stop_gradient(safe_value_fn(distribution))`, with no
gradients back to the parameters of `distribution`.
The distribution accepts a mask specified by `validity_mask`, a boolean tensor
broadcastable with the underlying distribution's batch shape which specifies
for each batch element whether or not it is valid.
Entries in `validity_mask` which are `False` denote missing distributions,
which means that the corresponding entries in the measures (e.g. `prob`)
and statistics (e.g. `mean`) must not be treated as coming from some real
  distribution. Whenever doing a reduction across those quantities, make sure to
either mask out the invalid entries or make sure the returned value
corresponds to the identity element of the reduction. For a couple examples:
- OK: `reduce_sum(masked_dist.log_prob(x))`
- OK: `tfd.Independent(masked_dist, ...)`
- Not OK: `reduce_var(masked_dist.mean())` will underestimate the variance
because it uses too large an `N`.
- Not OK: `tf.linalg.cholesky(masked_dist.covariance())` will fail for invalid
batch elements.
  The default `safe_sample_fn` is to draw a fixed-seeded sample from the
underlying `distribution`. Since this may be expensive, it is suggested to
specify a computationally cheaper method. Some options might include:
- `tfd.Distribution.mode`
- `tfd.Distribution.mean`
- `lambda d: d.quantile(.5)` (median)
- `lambda _: 0.` (if zero is always in the support of d)
- `lambda d: d.experimental_default_event_space_bijector()(0.)`
  Besides the output of `sample`, results from `safe_sample_fn` may also appear
in (invalid batch members of) `masked.default_event_space_bijector().forward`.
#### Examples
```
# Use tf.sequence_mask for `range(n) < num_valid`.
num_valid = 3
num_entries = 4
d = tfd.Masked(
tfd.MultivariateNormalDiag(tf.zeros([2, num_entries, 5]), tf.ones([5])),
tf.sequence_mask(num_valid, num_entries))
d.batch_shape # [2, 4]
d.event_shape # [5]
d.log_prob(tf.zeros([5])) # shape [2, 4]
# => [[nonzero, nonzero, nonzero, 0.],
# [nonzero, nonzero, nonzero, 0.]]
# Explicitly denote which elements are valid, adding a new batch dim of 2.
d = tfd.Masked(tfd.MultivariateNormalDiag(tf.zeros([4, 5]), tf.ones([5])),
[[False], [True]])
d.batch_shape # [2, 4]
d.event_shape # [5]
d.log_prob(tf.zeros([5])) # shape [2, 4]
# => [[0., 0., 0., 0.],
# [nonzero, nonzero, nonzero, nonzero]]
# Use `BatchBroadcast` and `Independent` to achieve the equivalent of adding
# positional mask functionality to `tfd.Sample`.
# Suppose we wanted to achieve this:
# `tfd.Sample(tfd.Normal(tf.zeros(2), 1), [3, 4], validity_mask=mask)`
# We can write:
d = tfd.Independent(
tfd.Masked(tfd.BatchBroadcast(tfd.Normal(0, 1), [2, 3, 4]), mask),
reinterpreted_batch_ndims=2)
d.batch_shape # [2]
d.event_shape # [3, 4]
d.log_prob(tf.ones([3, 4])) # shape [2]
```
"""
def __init__(self,
distribution,
validity_mask,
safe_sample_fn=_fixed_sample,
validate_args=False,
allow_nan_stats=True,
name=None):
"""Constructs a Masked distribution.
Args:
distribution: The underlying distribution, which will be masked.
validity_mask: Boolean mask where `True` indicates an element is valid.
`validity_mask` must broadcast with the batch shape of the underlying
distribution. Invalid batch elements are masked so that sampling returns
`safe_sample_fn(dist)` in invalid positions and `log_prob(x)` returns
`0.` for invalid positions.
safe_sample_fn: A callable which takes a distribution (namely,
        the `distribution` argument) and returns a deterministic, safe sample
        value. This helps to avoid `nan` gradients and allows downstream usage
        of samples from a `Masked` distribution to assume a "safe" value even
        when the underlying value is invalid. (Be careful to ensure that such
        downstream usages are themselves masked!) Note that the result of this
        function will be wrapped in a `tf.stop_gradient` call.
validate_args: Boolean indicating whether argument assertions should be
run. May impose performance penalties.
allow_nan_stats: Boolean indicating whether statistical functions may
return `nan`, or should instead use asserts where possible.
name: Optional name for operation scoping.
"""
parameters = dict(locals())
with tf.name_scope(name or f'Masked{distribution.name}') as name:
self._distribution = distribution
self._validity_mask = tensor_util.convert_nonref_to_tensor(
validity_mask, dtype_hint=tf.bool)
self._safe_sample_fn = safe_sample_fn
super(_Masked, self).__init__(
dtype=distribution.dtype,
reparameterization_type=distribution.reparameterization_type,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
name=name)
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
distribution=parameter_properties.BatchedComponentProperties(),
validity_mask=parameter_properties.ParameterProperties(
shape_fn=parameter_properties.SHAPE_FN_NOT_IMPLEMENTED))
@property
def distribution(self):
return self._distribution
@property
def validity_mask(self):
return self._validity_mask
@property
def safe_sample_fn(self):
return self._safe_sample_fn
@property
def experimental_is_sharded(self):
return self.distribution.experimental_is_sharded
def _event_shape(self):
return self.distribution.event_shape
def _event_shape_tensor(self):
return self.distribution.event_shape_tensor()
def _sample_n(self, n, seed=None, **kwargs):
validity_mask = tf.convert_to_tensor(self.validity_mask)
# To avoid the shape gymnastics of drawing extra samples, we delegate
# sampling to the BatchBroadcast distribution.
bb = batch_broadcast.BatchBroadcast(self.distribution,
ps.shape(validity_mask))
samples = bb.sample(n, seed=seed, **kwargs)
safe_val = tf.stop_gradient(self.safe_sample_fn(self.distribution))
return tf.where(_add_event_dims_to_mask(validity_mask, dist=self),
samples, safe_val)
_log_prob = _make_masked_fn(
'log_prob', n_event_shapes=0, safe_value=0., make_arg0_safe=True)
_prob = _make_masked_fn(
'prob', n_event_shapes=0, safe_value=1., make_arg0_safe=True)
_log_cdf = _make_masked_fn(
'log_cdf', n_event_shapes=0, safe_value=0., make_arg0_safe=True)
_cdf = _make_masked_fn(
'cdf', n_event_shapes=0, safe_value=1., make_arg0_safe=True)
_log_survival_function = _make_masked_fn(
'log_survival_function', n_event_shapes=0, safe_value=-float('inf'),
make_arg0_safe=True)
_survival_function = _make_masked_fn(
'survival_function', n_event_shapes=0, safe_value=0.,
make_arg0_safe=True)
_entropy = _make_masked_fn(
'entropy', n_event_shapes=0, safe_value=0.)
_mode = _make_masked_fn(
'mode', n_event_shapes=1, safe_value='safe_sample')
_mean = _make_masked_fn(
'mean', n_event_shapes=1, safe_value='safe_sample')
_variance = _make_masked_fn(
'variance', n_event_shapes=1, safe_value=0.)
_stddev = _make_masked_fn(
'stddev', n_event_shapes=1, safe_value=0.)
_covariance = _make_masked_fn(
'covariance', n_event_shapes=2, safe_value=0.)
_quantile = _make_masked_fn(
'quantile', n_event_shapes=1, safe_value='safe_sample')
def _default_event_space_bijector(self, *args, **kwargs):
underlying_bijector = (
self.distribution.experimental_default_event_space_bijector())
if underlying_bijector is None:
return None
return _MaskedBijector(self, underlying_bijector)
class Masked(_Masked, distribution_lib.AutoCompositeTensorDistribution):
def __new__(cls, *args, **kwargs):
"""Maybe return a non-`CompositeTensor` `_Masked`."""
if cls is Masked:
if args:
distribution = args[0]
else:
distribution = kwargs.get('distribution')
if not isinstance(distribution, tf.__internal__.CompositeTensor):
return _Masked(*args, **kwargs)
return super(Masked, cls).__new__(cls)
Masked.__doc__ = _Masked.__doc__ + '\n' + (
'If `distribution` is a `CompositeTensor`, then the resulting `Masked` '
'instance is a `CompositeTensor` as well. Otherwise, a '
'non-`CompositeTensor` `_Masked` instance is created instead. Distribution '
'subclasses that inherit from `Masked` will also inherit from '
'`CompositeTensor`.')
@kullback_leibler.RegisterKL(_Masked, _Masked)
def _kl_masked_masked(a, b, name=None):
"""KL divergence between Masked distributions."""
with tf.name_scope(name or 'kl_masked_masked'):
a_valid = tf.convert_to_tensor(a.validity_mask)
b_valid = tf.convert_to_tensor(b.validity_mask)
underlying_kl = kullback_leibler.kl_divergence(
a.distribution, b.distribution)
# The treatment for KL is as follows:
# When both random variables are valid, the underlying KL applies.
# When neither random variable is valid, the KL is 0., i.e.
# `a log a - a log b = 0` because log a and log b are everywhere 0.
# When exactly one is valid, we (a) raise an assertion error, if either
# distribution's allow_nan_stats is set to False, or (b) return nan in
# such positions.
asserts = []
if not (a.allow_nan_stats and b.allow_nan_stats):
asserts.append(assert_util.assert_equal(
a_valid, b_valid,
message='KL is only valid for matching mask values'))
with tf.control_dependencies(asserts):
both_valid = (a_valid & b_valid)
neither_valid = (~a_valid) & (~b_valid)
dtype = underlying_kl.dtype
return tf.where(both_valid, underlying_kl,
tf.where(neither_valid,
tf.zeros([], dtype), float('nan')))
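# Illustrative sketch (not called anywhere in this module): the registered KL
# above yields the underlying KL where both sides are valid and 0. where
# neither side is valid. `normal_a` and `normal_b` are assumed to be
# distributions with batch shape [2].
def _demo_kl_masked_masked(normal_a, normal_b):
  a = _Masked(normal_a, validity_mask=[True, False])
  b = _Masked(normal_b, validity_mask=[True, False])
  # Entry 0 is kl_divergence(normal_a[0], normal_b[0]); entry 1 is 0.
  return kullback_leibler.kl_divergence(a, b)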
@log_prob_ratio.RegisterLogProbRatio(_Masked)
def _masked_log_prob_ratio(p, x, q, y, name=None):
"""Computes log p(x) - log q(y) for Masked p, q."""
with tf.name_scope(name or 'masked_log_prob_ratio'):
p_valid = tf.convert_to_tensor(p.validity_mask)
safe_x = tf.where(_add_event_dims_to_mask(p_valid, dist=p),
x, tf.stop_gradient(p.safe_sample_fn(p.distribution)))
q_valid = tf.convert_to_tensor(q.validity_mask)
safe_y = tf.where(_add_event_dims_to_mask(q_valid, dist=q),
y, tf.stop_gradient(q.safe_sample_fn(q.distribution)))
underlying = log_prob_ratio.log_prob_ratio(
p.distribution, safe_x, q.distribution, safe_y)
asserts = []
# As with KL, we return the underlying log_prob_ratio where both are valid,
# `0.` where neither is valid, and `nan` otherwise (or an assertion if
# either distribution does not `allow_nan_stats`).
    if not (p.allow_nan_stats and q.allow_nan_stats):
asserts.append(assert_util.assert_equal(
p_valid, q_valid,
message='Masked log_prob_ratio only valid for matching mask values'))
with tf.control_dependencies(asserts):
both_valid = (p_valid & q_valid)
neither_valid = (~p_valid) & (~q_valid)
return tf.where(both_valid, underlying,
tf.where(neither_valid,
tf.zeros([], dtype=underlying.dtype),
float('nan')))
class _NonCompositeTensorMaskedBijector(bijector_lib.Bijector):
"""Event space bijector for Masked distributions."""
def __init__(self, masked, underlying_bijector):
self._masked = masked
self._bijector = underlying_bijector
super(_NonCompositeTensorMaskedBijector, self).__init__(
validate_args=underlying_bijector.validate_args,
dtype=underlying_bijector.dtype,
forward_min_event_ndims=underlying_bijector.forward_min_event_ndims,
inverse_min_event_ndims=underlying_bijector.inverse_min_event_ndims)
def _forward_event_shape(self, x):
return self._bijector.forward_event_shape(x)
def _forward_event_shape_tensor(self, x):
return self._bijector.forward_event_shape_tensor(x)
def _inverse_event_shape(self, y):
return self._bijector.inverse_event_shape(y)
def _inverse_event_shape_tensor(self, y):
return self._bijector.inverse_event_shape_tensor(y)
def _make_safe_x(self, x, validity_mask):
bij = self._bijector
masked = self._masked
pullback_event_ndims = ps.rank_from_shape(
lambda: bij.inverse_event_shape_tensor(masked.event_shape_tensor()),
self._bijector.inverse_event_shape(masked.event_shape))
pullback_event_mask = _add_event_dims_to_mask(
validity_mask, event_ndims=pullback_event_ndims)
# We presume that 0 in unconstrained space is safe.
return tf.where(pullback_event_mask, x, 0.)
def _forward(self, x):
mask = self._masked.validity_mask
safe_x = self._make_safe_x(x, mask)
return self._make_safe_y(self._bijector.forward(safe_x), mask)
def _forward_log_det_jacobian(self, x):
validity_mask = tf.convert_to_tensor(self._masked.validity_mask)
safe_x = self._make_safe_x(x, validity_mask)
return tf.where(validity_mask,
self._bijector.forward_log_det_jacobian(safe_x),
0.)
def _make_safe_y(self, y, validity_mask):
safe_val = tf.stop_gradient(
self._masked.safe_sample_fn(self._masked.distribution))
event_mask = _add_event_dims_to_mask(validity_mask, dist=self._masked)
return tf.where(event_mask, y, safe_val)
def _inverse(self, y):
safe_y = self._make_safe_y(y, self._masked.validity_mask)
return self._bijector.inverse(safe_y)
def _inverse_log_det_jacobian(self, y):
validity_mask = tf.convert_to_tensor(self._masked.validity_mask)
safe_y = self._make_safe_y(y, validity_mask)
return tf.where(validity_mask,
self._bijector.inverse_log_det_jacobian(safe_y),
0.)
class _MaskedBijector(_NonCompositeTensorMaskedBijector,
bijector_lib.AutoCompositeTensorBijector):
"""Event space bijector for Masked distributions."""
def __new__(cls, *args, **kwargs):
"""Maybe return a `_NonCompositeTensorMaskedBijector`."""
if cls is _MaskedBijector:
if args:
masked = args[0]
else:
masked = kwargs.get('masked')
if len(args) > 1:
bijector = args[1]
else:
bijector = kwargs.get('underlying_bijector')
if not (isinstance(masked, tf.__internal__.CompositeTensor)
and isinstance(bijector, tf.__internal__.CompositeTensor)):
return _NonCompositeTensorMaskedBijector(*args, **kwargs)
return super(_MaskedBijector, cls).__new__(cls)
| 41.719149
| 88
| 0.708588
|
0ba40d41d92725b5dfdbb5f9f2d17cf5f19f4b7f
| 2,797
|
py
|
Python
|
tests/bel.py
|
dahlia/hangulize
|
903e07cb587670f80020818f5c384ceca29ed67b
|
[
"BSD-3-Clause"
] | 1
|
2020-10-18T20:28:54.000Z
|
2020-10-18T20:28:54.000Z
|
tests/bel.py
|
dahlia/hangulize
|
903e07cb587670f80020818f5c384ceca29ed67b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/bel.py
|
dahlia/hangulize
|
903e07cb587670f80020818f5c384ceca29ed67b
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from tests import HangulizeTestCase
from hangulize.langs.bel import Belarusian
class BelarusianTestCase(HangulizeTestCase):
lang = Belarusian()
def test_people(self):
self.assert_examples({
u'Аляксей Абалмасаў': u'알략세이 아발마사우',
u'Вікторыя Азарэнка': u'빅토리야 아자렌카',
u'Святлана Алексіевіч': u'스뱌틀라나 알렉시예비치',
u'Францішак Аляхновіч': u'프란치샤크 알랴흐노비치',
u'Андрэй Арамнаў': u'안드레이 아람나우',
u'Алег Ахрэм': u'알레크 아흐렘',
u'Максім Багдановіч': u'막심 바흐다노비치',
u'Святлана Багінская': u'스뱌틀라나 바힌스카야',
u'Францішак Багушэвіч': u'프란치샤크 바후셰비치',
u'Сымон Будны': u'시몬 부드니',
u'Аляксандр Глеб': u'알략산드르 흘레프',
u'Яўген Глебаў': u'야우헨 흘레바우',
u'Аляксей Грышын': u'알략세이 흐리신',
u'Вінцэнт Дунін-Марцінкевіч': u'빈첸트 두닌마르친케비치',
u'Ефрасіння Полацкая': u'예프라신냐 폴라츠카야',
u'Кастусь Каліноўскі': u'카스투스 칼리노우스키',
u'Кацярына Карстэн': u'카차리나 카르스텐',
u'Якуб Колас': u'야쿠프 콜라스',
u'Янка Купала': u'얀카 쿠팔라',
u'Вацлаў Ластоўскі': u'바츨라우 라스토우스키',
u'Аляксандр Лукашэнка': u'알략산드르 루카셴카',
u'Ігар Лучанок': u'이하르 루차노크',
u'Вадзім Махнеў': u'바짐 마흐네우',
u'Юлія Несцярэнка': u'율리야 네스차렌카',
u'Аляксандр Патупа': u'알략산드르 파투파',
u'Іпаці Пацей': u'이파치 파체이',
u'Алаіза Пашкевіч': u'알라이자 파슈케비치',
u'Наталля Пяткевіч': u'나탈랴 퍄트케비치',
u'Радзівіл': u'라지빌',
u'Максім Рамашчанка': u'막심 라마샨카',
u'Міхаіл Савіцкі': u'미하일 사비츠키',
u'Леў Сапега': u'레우 사페하',
u'Ян Серада': u'얀 세라다',
u'Францыск Скарына': u'프란치스크 스카리나',
u'Раман Скірмунт': u'라만 스키르문트',
u'Мялецій Сматрыцкі': u'먈레치 스마트리츠키',
u'Ян Станкевіч': u'얀 스탄케비치',
u'Фёдар Сумкін': u'표다르 숨킨',
u'Браніслаў Тарашкевіч': u'브라니슬라우 타라슈케비치',
u'Віктар Тураў': u'빅타르 투라우',
u'Мікалай Улашчык': u'미칼라이 울라시크',
u'Фёдар Фёдараў': u'표다르 표다라우',
u'Ян Чачот': u'얀 차초트',
})
def test_places(self):
self.assert_examples({
u'Бабруйск': u'바브루이스크',
u'Баранавічы': u'바라나비치',
u'Белавежская пушча': u'벨라베슈스카야 푸샤',
u'Беларусь': u'벨라루스',
u'Брэст': u'브레스트',
u'Віцебск': u'비쳅스크',
u'Гомель': u'호멜',
u'Гродна': u'흐로드나',
u'Камянец': u'카먀네츠',
u'Магілёў': u'마힐료우',
u'Мінск': u'민스크',
u'Мір': u'미르',
u'Мураванка': u'무라반카',
u'Нясвіж': u'냐스비시',
u'Полацк': u'폴라츠크',
u'Сынкавічы': u'신카비치',
})
| 37.293333
| 58
| 0.508759
|
d41e0912749c613a036317fd354ba93582c1bd0a
| 738
|
py
|
Python
|
value_investing/filter_results.py
|
bfan1256/fundamental-analysis-algorithms
|
96b41d46eb40124d0b2d74bd3f51b3b431e50be3
|
[
"Apache-2.0"
] | null | null | null |
value_investing/filter_results.py
|
bfan1256/fundamental-analysis-algorithms
|
96b41d46eb40124d0b2d74bd3f51b3b431e50be3
|
[
"Apache-2.0"
] | null | null | null |
value_investing/filter_results.py
|
bfan1256/fundamental-analysis-algorithms
|
96b41d46eb40124d0b2d74bd3f51b3b431e50be3
|
[
"Apache-2.0"
] | null | null | null |
import csv
with open('undervalued_good_buys.csv') as f:
reader = csv.reader(f)
data = []
for row in reader:
data.append(row)
data = data[1:]
filtered_data = []
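# The thresholds below appear to keep rows whose second column (likely the
# final weighted rating, judging by the header written further down) is at
# most 6 and whose third-from-last column (likely the dividend payout ratio)
# is below 1.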
for row in data:
if float(row[1]) <= 6 and float(row[-3]) < 1:
filtered_data.append(row)
final_data = []
for row in filtered_data:
new_row = [row[0]]
for value in row[1:]:
value = float(value)
new_row.append(round(value, 3))
final_data.append(new_row)
with open('./final_data/undervalued_good_buys_filtered.csv', 'w') as f:
writer = csv.writer(f)
writer.writerow(['Symbol', 'Final Weighted Rating', 'DCF/P', 'PEG', 'Dividend Payout Ratio', 'P/FV', 'Unweighted Rating'])
writer.writerows(final_data)
| 28.384615
| 126
| 0.636856
|
52c92dd011d770faa0b895db9b1fe44cd7a83c30
| 58
|
py
|
Python
|
api.py
|
ioggstream/rest-samples
|
03c9e524ea2dca7d56fcfdc134f285c118151d32
|
[
"MIT"
] | null | null | null |
api.py
|
ioggstream/rest-samples
|
03c9e524ea2dca7d56fcfdc134f285c118151d32
|
[
"MIT"
] | null | null | null |
api.py
|
ioggstream/rest-samples
|
03c9e524ea2dca7d56fcfdc134f285c118151d32
|
[
"MIT"
] | null | null | null |
def ping(*args, **kwargs):
return {"ciao": "belli"}
| 11.6
| 28
| 0.551724
|
fec50492c14d8a387b445f6ae03e433ec3d3116d
| 11,002
|
py
|
Python
|
hmtl/dataset_readers/coref_conll.py
|
rahular/joint-coref-srl
|
cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be
|
[
"MIT"
] | null | null | null |
hmtl/dataset_readers/coref_conll.py
|
rahular/joint-coref-srl
|
cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be
|
[
"MIT"
] | null | null | null |
hmtl/dataset_readers/coref_conll.py
|
rahular/joint-coref-srl
|
cd85fb4e11af1a1ea400ed657d0a4511c1d6c6be
|
[
"MIT"
] | null | null | null |
import sys
import logging
import collections
from typing import Any, Dict, List, Optional, Tuple, DefaultDict, Set
from overrides import overrides
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.fields import (
Field,
ListField,
TextField,
SpanField,
MetadataField,
SequenceLabelField,
)
from allennlp.data.instance import Instance
from allennlp.data.tokenizers import Token, PretrainedTransformerTokenizer
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.dataset_readers.dataset_utils import Ontonotes, enumerate_spans
logger = logging.getLogger(__name__)
def canonicalize_clusters(
clusters: DefaultDict[int, List[Tuple[int, int]]]
) -> List[List[Tuple[int, int]]]:
"""
The CONLL 2012 data includes 2 annotated spans which are identical,
but have different ids. This checks all clusters for spans which are
identical, and if it finds any, merges the clusters containing the
identical spans.
"""
merged_clusters: List[Set[Tuple[int, int]]] = []
for cluster in clusters.values():
cluster_with_overlapping_mention = None
for mention in cluster:
# Look at clusters we have already processed to
# see if they contain a mention in the current
# cluster for comparison.
for cluster2 in merged_clusters:
if mention in cluster2:
# first cluster in merged clusters
# which contains this mention.
cluster_with_overlapping_mention = cluster2
break
# Already encountered overlap - no need to keep looking.
if cluster_with_overlapping_mention is not None:
break
if cluster_with_overlapping_mention is not None:
# Merge cluster we are currently processing into
# the cluster in the processed list.
cluster_with_overlapping_mention.update(cluster)
else:
merged_clusters.append(set(cluster))
return [list(c) for c in merged_clusters]
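# Illustrative sketch (not called anywhere in this module): two clusters that
# share the identical span (3, 4) are merged into one cluster.
def _demo_canonicalize_clusters():
    clusters = collections.defaultdict(list)
    clusters[0] = [(1, 2), (3, 4)]
    clusters[1] = [(3, 4), (5, 6)]
    # Returns a single merged cluster containing (1, 2), (3, 4) and (5, 6).
    return canonicalize_clusters(clusters)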
@DatasetReader.register("coref_conll")
class CorefConllReader(DatasetReader):
"""
Reads a single CoNLL-formatted file. This is the same file format as used in the
:class:`~allennlp.data.dataset_readers.semantic_role_labelling.SrlReader`, but is preprocessed
to dump all documents into a single file per train, dev and test split. See
scripts/compile_coref_data.sh for more details of how to pre-process the Ontonotes 5.0 data
into the correct format.
Returns a `Dataset` where the `Instances` have four fields : `text`, a `TextField`
containing the full document text, `spans`, a `ListField[SpanField]` of inclusive start and
end indices for span candidates, and `metadata`, a `MetadataField` that stores the instance's
original text. For data with gold cluster labels, we also include the original `clusters`
(a list of list of index pairs) and a `SequenceLabelField` of cluster ids for every span
candidate.
# Parameters
max_span_width : `int`, required.
The maximum width of candidate spans to consider.
token_indexers : `Dict[str, TokenIndexer]`, optional
This is used to index the words in the document. See :class:`TokenIndexer`.
Default is `{"tokens": SingleIdTokenIndexer()}`.
wordpiece_modeling_tokenizer: `PretrainedTransformerTokenizer`, optional (default = None)
        If not None, this dataset reader does subword tokenization using the supplied tokenizer
        and distributes the labels to the resulting wordpieces. All the modeling will be based on
        wordpieces. If this is left as `None` (the default), the user is expected to use
        `PretrainedTransformerMismatchedIndexer` and `PretrainedTransformerMismatchedEmbedder`,
        and the modeling will be at the word level.
"""
def __init__(
self,
max_span_width: int,
token_indexers: Dict[str, TokenIndexer] = None,
wordpiece_modeling_tokenizer: Optional[PretrainedTransformerTokenizer] = None,
subset_size: int = sys.maxsize,
**kwargs,
) -> None:
super().__init__(**kwargs)
self._max_span_width = max_span_width
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._wordpiece_modeling_tokenizer = wordpiece_modeling_tokenizer
self._to_yield = subset_size
@overrides
def _read(self, file_path: str):
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
ontonotes_reader = Ontonotes()
for sentences in ontonotes_reader.dataset_document_iterator(file_path):
clusters: DefaultDict[int, List[Tuple[int, int]]] = collections.defaultdict(
list
)
if self._to_yield == 0:
break
total_tokens = 0
for sentence in sentences:
for typed_span in sentence.coref_spans:
# Coref annotations are on a _per sentence_
# basis, so we need to adjust them to be relative
# to the length of the document.
span_id, (start, end) = typed_span
clusters[span_id].append((start + total_tokens, end + total_tokens))
total_tokens += len(sentence.words)
canonical_clusters = canonicalize_clusters(clusters)
yield self.text_to_instance(
[s.words for s in sentences], canonical_clusters
)
self._to_yield -= 1
@overrides
def text_to_instance(
self, # type: ignore
sentences: List[List[str]],
gold_clusters: Optional[List[List[Tuple[int, int]]]] = None,
) -> Instance:
"""
# Parameters
sentences : `List[List[str]]`, required.
A list of lists representing the tokenised words and sentences in the document.
gold_clusters : `Optional[List[List[Tuple[int, int]]]]`, optional (default = None)
A list of all clusters in the document, represented as word spans. Each cluster
contains some number of spans, which can be nested and overlap, but will never
exactly match between clusters.
# Returns
An `Instance` containing the following `Fields`:
text : `TextField`
The text of the full document.
spans : `ListField[SpanField]`
A ListField containing the spans represented as `SpanFields`
with respect to the document text.
span_labels : `SequenceLabelField`, optional
The id of the cluster which each possible span belongs to, or -1 if it does
not belong to a cluster. As these labels have variable length (it depends on
                how many spans we are considering), we represent this as a `SequenceLabelField`
                with respect to the `spans` `ListField`.
"""
flattened_sentences = [
self._normalize_word(word) for sentence in sentences for word in sentence
]
if self._wordpiece_modeling_tokenizer is not None:
(
flat_sentences_tokens,
offsets,
) = self._wordpiece_modeling_tokenizer.intra_word_tokenize(
flattened_sentences
)
flattened_sentences = [t.text for t in flat_sentences_tokens]
else:
flat_sentences_tokens = [Token(word) for word in flattened_sentences]
text_field = TextField(flat_sentences_tokens, self._token_indexers)
cluster_dict = {}
if gold_clusters is not None:
if self._wordpiece_modeling_tokenizer is not None:
for cluster in gold_clusters:
for mention_id, mention in enumerate(cluster):
start = offsets[mention[0]][0]
end = offsets[mention[1]][1]
cluster[mention_id] = (start, end)
for cluster_id, cluster in enumerate(gold_clusters):
for mention in cluster:
cluster_dict[tuple(mention)] = cluster_id
spans: List[Field] = []
span_labels: Optional[List[int]] = [] if gold_clusters is not None else None
sentence_offset = 0
for sentence in sentences:
for start, end in enumerate_spans(
sentence, offset=sentence_offset, max_span_width=self._max_span_width
):
if self._wordpiece_modeling_tokenizer is not None:
start = offsets[start][0]
end = offsets[end][1]
# `enumerate_spans` uses word-level width limit; here we apply it to wordpieces
# We have to do this check here because we use a span width embedding that has
# only `self._max_span_width` entries, and since we are doing wordpiece
# modeling, the span width embedding operates on wordpiece lengths. So a check
# here is necessary or else we wouldn't know how many entries there would be.
if end - start + 1 > self._max_span_width:
continue
# We also don't generate spans that contain special tokens
if (
start
< self._wordpiece_modeling_tokenizer.num_added_start_tokens
):
continue
if (
end
>= len(flat_sentences_tokens)
- self._wordpiece_modeling_tokenizer.num_added_end_tokens
):
continue
if span_labels is not None:
if (start, end) in cluster_dict:
span_labels.append(cluster_dict[(start, end)])
else:
span_labels.append(-1)
spans.append(SpanField(start, end, text_field))
sentence_offset += len(sentence)
span_field = ListField(spans)
metadata: Dict[str, Any] = {"original_text": flattened_sentences}
if gold_clusters is not None:
metadata["clusters"] = gold_clusters
metadata_field = MetadataField(metadata)
fields: Dict[str, Field] = {
"text": text_field,
"spans": span_field,
"metadata": metadata_field,
}
if span_labels is not None:
fields["span_labels"] = SequenceLabelField(span_labels, span_field)
return Instance(fields)
@staticmethod
def _normalize_word(word):
if word in ("/.", "/?"):
return word[1:]
else:
return word
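# Illustrative usage sketch; the file path below is a placeholder for a
# CoNLL-formatted coreference file, e.g. one produced by
# scripts/compile_coref_data.sh.
if __name__ == "__main__":
    reader = CorefConllReader(max_span_width=10)
    for instance in reader.read("/path/to/train.english.v4_gold_conll"):
        print(instance)
        break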
| 43.65873
| 99
| 0.62225
|
3a313c3f567495cb262c97605f26e3a1118f04a8
| 7,275
|
py
|
Python
|
server.py
|
alexvbogdan/Assistant-for-People-with-Low-Vision
|
2c8d60a857a63ce516f33263e61313a3bad0695f
|
[
"MIT"
] | 1
|
2020-08-21T07:35:54.000Z
|
2020-08-21T07:35:54.000Z
|
server.py
|
alexvbogdan/Assistant-for-People-with-Low-Vision
|
2c8d60a857a63ce516f33263e61313a3bad0695f
|
[
"MIT"
] | null | null | null |
server.py
|
alexvbogdan/Assistant-for-People-with-Low-Vision
|
2c8d60a857a63ce516f33263e61313a3bad0695f
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from flask import Flask, request, redirect, url_for, jsonify, send_from_directory
import time
import numpy as np
import hashlib
import os
import sys
sys.path.append("./questionAnswering") # path to question answering module
from QuestionAnswering import QuestionAnswering
from im2txt.imgCaptioning import imgCap
from recognition.recognition import FaceRecognition
from emotion.tf_emotion_class import EmotionPredictor
from detection.Detectface import DetectFaceClass
from LSTM.lstm import SentencePredictor
from menu_recognition.menu_recog import ReadText
import cv2
VIZ_FOLDER = './viz/'
UPLOAD_FOLDER = './uploads/'
ALLOWED_EXTENSIONS = set(['jpg', 'jpeg', 'JPG', 'JPEG', 'png', 'PNG'])
# global variables
app = Flask(__name__, static_url_path='')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
im2txt = None
questionAnswering = None
recognizer = None
faceDetector = None
emotionDetector = None
sentencePredictor = None
textReader = None
feature_cache = {}
def format_image(image, face):
image = image[face[1]:face[3], face[0]:face[2]]
return image
# helpers
def setup():
global im2txt
global questionAnswering
global recognizer
global faceDetector
global emotionDetector
global sentencePredictor
global textReader
# uploads
if not os.path.exists(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
if not os.path.exists(VIZ_FOLDER):
os.makedirs(VIZ_FOLDER)
emotionDetector = EmotionPredictor()
faceDetector = DetectFaceClass(1, '/home/richard/Desktop/emotion-recognition-neural-networks-master/detection/mxnet-face-fr50', 0, 0.3, 0.001, 600, 1000)
im2txt = imgCap()
sentencePredictor = SentencePredictor()
questionAnswering = QuestionAnswering()
textReader = ReadText()
recognizer = FaceRecognition(1.0, faceDetector)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
# routes
@app.route('/', methods=['GET'])
def index():
return app.send_static_file('demo2.html')
@app.route('/api/upload_all', methods=['POST'])
def upload_all():
tick = time.clock()
file = request.files['image']
if not file:
return jsonify({'error': 'No file was uploaded.'})
if not allowed_file(file.filename):
return jsonify({'error': 'Please upload a JPG or PNG.'})
if request.form['request'] == "":
return jsonify({'answer': "Please, ask a question!"})
file_hash = hashlib.md5(file.read()).hexdigest()
save_path = os.path.join(app.config['UPLOAD_FOLDER'], file_hash + '.jpg')
file.seek(0)
file.save(save_path)
print("file:")
print(save_path)
question = request.form['request']
lst = question.split()
lst[0] = lst[0][0].upper() + lst[0][1:]
question = " ".join(lst)
result = np.squeeze(sentencePredictor.predict(question))
maxval = 0
idx = 0
for i in range(len(result)):
print(result[i])
if result[i] > maxval:
maxval = result[i]
idx = i
# handle image first
print("question:")
print(question)
print( str(idx) + "th NETWORK")
name = question[:question.find(" ")]
print(name)
if (name == 'name' or name == 'Name'):
print("adding person")
name = question[question.find(" ") + 1:]
Image = cv2.imread(save_path)
answer = recognizer.add(Image, name, file_hash + '.jpg')
if answer != "success":
print("could not save person " + name + ", because: " + answer)
return jsonify({'answer': answer})
# img captioning
if idx == 0:
answer = im2txt.feed_image(save_path)
tock = time.clock()
print(str(tock - tick) + "time used for iamge captioning")
return jsonify({'answer': answer})
# face
if (idx == 3):
try:
print("path = " + save_path)
answer = ""
Image = cv2.imread(save_path)
people = recognizer.recognize(Image)
if len(people) == 0:
answer = "I don't see anyone here!"
elif (len(people) == 1):
if people[0] == "I don't know :(":
answer = "I don't know :("
else:
answer = "It is " + people[0]
else:
known_people = ""
unknow_people = 0
for person in people:
if person == "I don't know :(":
unknow_people += 1
else:
if known_people == "":
known_people = person
else:
known_people = known_people + ", " + person
if known_people == "":
answer = "There are " + str(unknow_people) + " people. I don't know anyone here."
else:
answer = "There are " + known_people
if unknow_people > 0:
answer = answer + ". There are " + str(unknow_people) + " people, which I don't know."
except:
tock = time.clock()
print(str(tock - tick) + "time used for face")
return jsonify({'answer': "I don't see anyone here!"})
tock = time.clock()
print(str(tock - tick) + "time used for face")
return jsonify({'answer': answer})
# emotion
if (idx == 2):
img = cv2.imread(save_path)
faces = faceDetector.detect_Face(img)
if len(faces) == 0:
answer = "There are no people here!"
return jsonify({'answer': answer})
else:
answer = []
for face in faces:
answer.append(emotionDetector.predict(img, face))
tock = time.clock()
print(str(tock - tick) + "time used for emotion")
print(answer)
return jsonify({'answer': ",".join(answer)})
# question answering
if idx == 1:
feature = questionAnswering.img_handler(save_path)
if feature is None:
tock = time.clock()
print(str(tock - tick) + "time used for qa")
return jsonify({'error': 'Error reading image.'})
# image + question
img_ques_hash = hashlib.md5(file_hash + question).hexdigest()
json = questionAnswering.get_answers(question, feature, save_path, img_ques_hash, VIZ_FOLDER)
tock = time.clock()
print(str(tock - tick) + "time used for qa")
return jsonify(json)
# text
if idx == 4:
json = textReader.read(save_path)
print(type(json))
if json == '':
tock = time.clock()
print(str(tock - tick) + "time used for text")
return jsonify({'answer': "I don't see the text here!"})
tock = time.clock()
print(str(tock - tick) + "time used for text")
return jsonify({'answer':json})
else:
tock = time.clock()
print(str(tock - tick) + "time used for text")
return jsonify({'answer':"Error text"})
if __name__ == '__main__':
setup()
app.run(host='0.0.0.0', port=5000, debug=False)
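# Illustrative request against the endpoint above (placeholder file name); the
# server expects a multipart form with an `image` file and a `request` question:
#   curl -F "image=@photo.jpg" -F "request=What do you see?" \
#        http://localhost:5000/api/upload_all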
| 29.815574
| 157
| 0.581306
|
7f4b387e48575d16d887b7d9f601f06f4ea2b7b9
| 2,600
|
py
|
Python
|
clip_retrieval/clip_filter.py
|
techthiyanes/clip-retrieval
|
aa00e05704cc65e5fd91504216c6b6f3e991a0cc
|
[
"MIT"
] | 201
|
2021-06-08T10:58:25.000Z
|
2022-03-29T21:23:44.000Z
|
clip_retrieval/clip_filter.py
|
techthiyanes/clip-retrieval
|
aa00e05704cc65e5fd91504216c6b6f3e991a0cc
|
[
"MIT"
] | 88
|
2021-06-21T14:58:10.000Z
|
2022-03-24T10:20:32.000Z
|
clip_retrieval/clip_filter.py
|
techthiyanes/clip-retrieval
|
aa00e05704cc65e5fd91504216c6b6f3e991a0cc
|
[
"MIT"
] | 25
|
2021-07-31T21:49:56.000Z
|
2022-03-23T17:54:02.000Z
|
"""clip filter is a tool to use a knn index and a image/text collection to extract interesting subsets"""
import fire
def clip_filter(query, output_folder, indice_folder, num_results=100, threshold=None):
"""Entry point of clip filter"""
import faiss # pylint: disable=import-outside-toplevel
import torch # pylint: disable=import-outside-toplevel
import os # pylint: disable=import-outside-toplevel
import shutil # pylint: disable=import-outside-toplevel
from pathlib import Path # pylint: disable=import-outside-toplevel
import pandas as pd # pylint: disable=import-outside-toplevel
import clip # pylint: disable=import-outside-toplevel
device = "cuda" if torch.cuda.is_available() else "cpu"
model, _ = clip.load("ViT-B/32", device=device, jit=False)
data_dir = Path(indice_folder + "/metadata")
df = pd.concat(pd.read_parquet(parquet_file) for parquet_file in sorted(data_dir.glob("*.parquet")))
url_list = None
if "url" in df:
url_list = df["url"].tolist()
image_list = df["image_path"].tolist()
image_index = faiss.read_index(indice_folder + "/image.index")
indices_loaded = {
"image_list": image_list,
"image_index": image_index,
}
text_input = query
image_index = indices_loaded["image_index"]
image_list = indices_loaded["image_list"]
if not os.path.exists(output_folder):
os.mkdir(output_folder)
text = clip.tokenize([text_input]).to(device)
text_features = model.encode_text(text)
text_features /= text_features.norm(dim=-1, keepdim=True)
query = text_features.cpu().detach().numpy().astype("float32")
index = image_index
if threshold is not None:
_, d, i = index.range_search(query, threshold)
print(f"Found {i.shape} items with query '{text_input}' and threshold {threshold}")
else:
d, i = index.search(query, num_results)
print(f"Found {num_results} items with query '{text_input}'")
i = i[0]
d = d[0]
min_d = min(d)
max_d = max(d)
print(f"The minimum distance is {min_d:.2f} and the maximum is {max_d:.2f}")
print(
"You may want to use these numbers to increase your --num_results parameter. Or use the --threshold parameter."
)
print(f"Copying the images in {output_folder}")
for _, ei in zip(d, i):
path = image_list[ei]
if os.path.exists(path):
shutil.copy(path, output_folder)
if url_list is not None:
print(url_list[ei])
if __name__ == "__main__":
fire.Fire(clip_filter)
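# Illustrative invocation (placeholder paths); python-fire exposes the
# function's arguments as command-line flags:
#   python clip_filter.py --query "a photo of a dog" \
#       --output_folder ./dog_images --indice_folder ./my_index --num_results 50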
| 34.210526
| 119
| 0.666154
|
8aac816e61445226ff66ccdfbe8f17dd2e061801
| 8,404
|
py
|
Python
|
tests/syntax/test_specifiers.py
|
HaoranBai17/Scenic
|
65372e488ec9323e550ccc1f157369aad88ad94d
|
[
"BSD-3-Clause"
] | 1
|
2019-06-14T21:04:37.000Z
|
2019-06-14T21:04:37.000Z
|
tests/syntax/test_specifiers.py
|
yuul/Scenic
|
66fbf7aa67e649cf2379ee6e4d4273ff4980c04c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/syntax/test_specifiers.py
|
yuul/Scenic
|
66fbf7aa67e649cf2379ee6e4d4273ff4980c04c
|
[
"BSD-3-Clause"
] | 2
|
2020-01-02T12:37:46.000Z
|
2020-07-30T02:02:01.000Z
|
import math
import pytest
from scenic.syntax.translator import InterpreterParseError, InvalidScenarioError
from scenic.core.vectors import Vector
from tests.utils import compileScenic, sampleEgoFrom
## Dependencies and lazy evaluation
def test_double_specification():
with pytest.raises(InterpreterParseError):
compileScenic('ego = Object at 0 @ 0, at 1 @ 1')
def test_cyclic_dependency():
with pytest.raises(InterpreterParseError):
compileScenic('ego = Object left of 0 @ 0, facing toward 1 @ 1')
def test_lazy_cyclic_dependency():
with pytest.raises(InterpreterParseError):
compileScenic(
'vf = VectorField("Foo", lambda pos: 3 * pos.x)\n'
'ego = Object at 0 @ (0 relative to vf)'
)
def test_default_dependency():
ego = sampleEgoFrom('ego = Object facing toward -1 @ 1')
assert tuple(ego.position) == (0, 0)
assert ego.heading == pytest.approx(math.radians(45))
def test_missing_dependency():
with pytest.raises(InterpreterParseError):
compileScenic('Point left of 0 @ 0 by 5\n' 'ego = Object')
def test_lazy_value_in_param():
with pytest.raises(InvalidScenarioError):
compileScenic(
'vf = VectorField("Foo", lambda pos: 3 * pos.x)\n'
'param X = 0 relative to vf\n'
'ego = Object\n'
)
def test_lazy_value_in_requirement():
# Case where we can statically detect the use of a lazy value
with pytest.raises(InvalidScenarioError):
compileScenic(
'vf = VectorField("Foo", lambda pos: 3 * pos.x)\n'
'x = 0 relative to vf\n'
'require x >= 0\n'
'ego = Object\n'
)
def test_lazy_value_in_requirement_2():
# Case where the lazy value is detected during requirement evaluation
scenario = compileScenic(
'vf = VectorField("Foo", lambda pos: 3 * pos.x)\n'
'require 0 relative to vf\n'
'ego = Object\n'
)
with pytest.raises(InterpreterParseError):
scenario.generate(maxIterations=1)
## Generic specifiers
def test_with():
ego = sampleEgoFrom('ego = Object with flubber 37')
assert ego.flubber == 37
## Position specifiers
def test_at():
ego = sampleEgoFrom('ego = Object at 149 @ 42')
assert tuple(ego.position) == pytest.approx((149, 42))
def test_offset_by():
ego = sampleEgoFrom(
'ego = Object at 10 @ 40, facing 90 deg\n'
'ego = Object offset by 5 @ 15'
)
assert tuple(ego.position) == pytest.approx((-5, 45))
def test_offset_by_no_ego():
with pytest.raises(InterpreterParseError):
compileScenic('ego = Object offset by 10 @ 40')
def test_offset_along():
ego = sampleEgoFrom(
'ego = Object at 10 @ 40\n'
'ego = Object offset along -90 deg by -10 @ 5'
)
assert tuple(ego.position) == pytest.approx((15, 50))
def test_offset_along_no_ego():
with pytest.raises(InterpreterParseError):
compileScenic('ego = Object offset along 0 by 10 @ 0')
def test_left_of_vector():
ego = sampleEgoFrom('ego = Object left of 10 @ 20, facing 90 deg')
assert tuple(ego.position) == pytest.approx((10, 19.5))
ego = sampleEgoFrom('ego = Object left of 10 @ 20, with width 10')
assert tuple(ego.position) == pytest.approx((5, 20))
def test_left_of_vector_by():
ego = sampleEgoFrom('ego = Object left of 10 @ 20 by 20')
assert tuple(ego.position) == pytest.approx((-10.5, 20))
ego = sampleEgoFrom('ego = Object left of 10 @ 20 by 20 @ 5')
assert tuple(ego.position) == pytest.approx((-10.5, 25))
def test_right_of_vector():
ego = sampleEgoFrom('ego = Object right of 10 @ 20, facing 90 deg')
assert tuple(ego.position) == pytest.approx((10, 20.5))
ego = sampleEgoFrom('ego = Object right of 10 @ 20, with width 10')
assert tuple(ego.position) == pytest.approx((15, 20))
def test_right_of_vector_by():
ego = sampleEgoFrom('ego = Object right of 10 @ 20 by 20')
assert tuple(ego.position) == pytest.approx((30.5, 20))
ego = sampleEgoFrom('ego = Object right of 10 @ 20 by 20 @ 5')
assert tuple(ego.position) == pytest.approx((30.5, 25))
def test_ahead_of_vector():
ego = sampleEgoFrom('ego = Object ahead of 10 @ 20, facing 90 deg')
assert tuple(ego.position) == pytest.approx((9.5, 20))
ego = sampleEgoFrom('ego = Object ahead of 10 @ 20, with height 10')
assert tuple(ego.position) == pytest.approx((10, 25))
def test_ahead_of_vector_by():
ego = sampleEgoFrom('ego = Object ahead of 10 @ 20 by 20')
assert tuple(ego.position) == pytest.approx((10, 40.5))
ego = sampleEgoFrom('ego = Object ahead of 10 @ 20 by 20 @ 5')
assert tuple(ego.position) == pytest.approx((30, 25.5))
def test_behind_vector():
ego = sampleEgoFrom('ego = Object behind 10 @ 20, facing 90 deg')
assert tuple(ego.position) == pytest.approx((10.5, 20))
ego = sampleEgoFrom('ego = Object behind 10 @ 20, with height 10')
assert tuple(ego.position) == pytest.approx((10, 15))
def test_behind_vector_by():
ego = sampleEgoFrom('ego = Object behind 10 @ 20 by 20')
assert tuple(ego.position) == pytest.approx((10, -0.5))
ego = sampleEgoFrom('ego = Object behind 10 @ 20 by 20 @ 5')
assert tuple(ego.position) == pytest.approx((30, 14.5))
def test_beyond():
ego = sampleEgoFrom(
'ego = Object at 10 @ 5\n'
'ego = Object beyond 4 @ 13 by 5'
)
assert tuple(ego.position) == pytest.approx((1, 17))
ego = sampleEgoFrom(
'ego = Object at 10 @ 5\n'
'ego = Object beyond 4 @ 13 by 10 @ 5'
)
assert tuple(ego.position) == pytest.approx((9, 23))
def test_beyond_no_ego():
with pytest.raises(InterpreterParseError):
compileScenic('ego = Object beyond 10 @ 10 by 5')
def test_beyond_from():
ego = sampleEgoFrom('ego = Object beyond 5 @ 0 by 20 from 5 @ 10')
assert tuple(ego.position) == pytest.approx((5, -20))
ego = sampleEgoFrom('ego = Object beyond 5 @ 0 by 15 @ 20 from 5 @ 10')
assert tuple(ego.position) == pytest.approx((-10, -20))
def test_visible():
scenario = compileScenic(
'ego = Object at 100 @ 200, facing -45 deg,\n'
' with visibleDistance 10, with viewAngle 90 deg\n'
'ego = Object visible'
)
for i in range(30):
scene, iterations = scenario.generate(maxIterations=50)
ego, base = scene.objects
assert ego.position.distanceTo(base.position) <= 10
assert ego.position.x >= base.position.x
assert ego.position.y >= base.position.y
def test_visible_no_ego():
with pytest.raises(InterpreterParseError):
compileScenic('ego = Object visible')
def test_visible_from_point():
scenario = compileScenic(
'x = Point at 300@200, with visibleDistance 2\n'
'ego = Object visible from x'
)
for i in range(30):
scene, iterations = scenario.generate(maxIterations=1)
assert scene.egoObject.position.distanceTo(Vector(300, 200)) <= 2
def test_visible_from_oriented_point():
scenario = compileScenic(
'op = OrientedPoint at 100 @ 200, facing 45 deg,\n'
' with visibleDistance 5, with viewAngle 90 deg\n'
'ego = Object visible from op'
)
base = Vector(100, 200)
for i in range(30):
scene, iterations = scenario.generate(maxIterations=1)
pos = scene.egoObject.position
assert pos.distanceTo(base) <= 5
assert pos.x <= base.x
assert pos.y >= base.y
## Position specifiers optionally specifying heading
def test_in():
scenario = compileScenic(
'r = RectangularRegion(100 @ 200, 90 deg, 50, 10)\n'
'ego = Object in r'
)
for i in range(30):
scene, iterations = scenario.generate(maxIterations=1)
pos = scene.egoObject.position
assert 95 <= pos.x <= 105
assert 150 <= pos.y <= 250
assert scene.egoObject.heading == 0
def test_in_heading():
scenario = compileScenic(
'r = PolylineRegion([50 @ -50, -20 @ 20])\n'
'ego = Object on r'
)
for i in range(30):
scene, iterations = scenario.generate(maxIterations=1)
pos = scene.egoObject.position
assert -20 <= pos.x <= 50
assert -50 <= pos.y <= 50
assert pos.x == pytest.approx(-pos.y)
assert scene.egoObject.heading == pytest.approx(math.radians(45))
| 36.06867
| 80
| 0.639457
|
9706a8a0cfe6e79d68086c481e13af8756c61d91
| 47,453
|
py
|
Python
|
Lib/distutils/ccompiler.py
|
gerph/cpython
|
98813cb03c2371789669c3d8debf8fca2a344de9
|
[
"CNRI-Python-GPL-Compatible"
] | 5
|
2020-01-25T19:30:31.000Z
|
2021-03-05T20:34:57.000Z
|
Lib/distutils/ccompiler.py
|
gerph/cpython
|
98813cb03c2371789669c3d8debf8fca2a344de9
|
[
"CNRI-Python-GPL-Compatible"
] | 18
|
2019-12-09T17:05:24.000Z
|
2021-06-09T15:19:49.000Z
|
Lib/distutils/ccompiler.py
|
gerph/cpython
|
98813cb03c2371789669c3d8debf8fca2a344de9
|
[
"CNRI-Python-GPL-Compatible"
] | 3
|
2020-05-15T22:25:58.000Z
|
2021-03-05T20:35:00.000Z
|
"""distutils.ccompiler
Contains CCompiler, an abstract base class that defines the interface
for the Distutils compiler abstraction model."""
import sys, os, re
from distutils.errors import *
from distutils.spawn import spawn
from distutils.file_util import move_file
from distutils.dir_util import mkpath
from distutils.dep_util import newer_pairwise, newer_group
from distutils.util import split_quoted, execute
from distutils import log
class CCompiler:
"""Abstract base class to define the interface that must be implemented
by real compiler classes. Also has some utility methods used by
several compiler classes.
The basic idea behind a compiler abstraction class is that each
instance can be used for all the compile/link steps in building a
single project. Thus, attributes common to all of those compile and
link steps -- include directories, macros to define, libraries to link
against, etc. -- are attributes of the compiler instance. To allow for
variability in how individual files are treated, most of those
attributes may be varied on a per-compilation or per-link basis.
"""
# 'compiler_type' is a class attribute that identifies this class. It
# keeps code that wants to know what kind of compiler it's dealing with
# from having to import all possible compiler classes just to do an
# 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'
# should really, really be one of the keys of the 'compiler_class'
# dictionary (see below -- used by the 'new_compiler()' factory
# function) -- authors of new compiler interface classes are
# responsible for updating 'compiler_class'!
compiler_type = None
# XXX things not handled by this compiler abstraction model:
# * client can't provide additional options for a compiler,
# e.g. warning, optimization, debugging flags. Perhaps this
# should be the domain of concrete compiler abstraction classes
# (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
# class should have methods for the common ones.
    #   * can't completely override the include or library search
# path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
# I'm not sure how widely supported this is even by Unix
# compilers, much less on other platforms. And I'm even less
# sure how useful it is; maybe for cross-compiling, but
# support for that is a ways off. (And anyways, cross
# compilers probably have a dedicated binary with the
# right paths compiled in. I hope.)
# * can't do really freaky things with the library list/library
# dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
# different versions of libfoo.a in different locations. I
# think this is useless without the ability to null out the
# library search path anyways.
# Subclasses that rely on the standard filename generation methods
# implemented below should override these; see the comment near
# those methods ('object_filenames()' et. al.) for details:
src_extensions = None # list of strings
obj_extension = None # string
static_lib_extension = None
shared_lib_extension = None # string
static_lib_format = None # format string
shared_lib_format = None # prob. same as static_lib_format
exe_extension = None # string
# Default language settings. language_map is used to detect a source
# file or Extension target language, checking source filenames.
# language_order is used to detect the language precedence, when deciding
# what language to use when mixing source types. For example, if some
# extension has two files with ".c" extension, and one with ".cpp", it
# is still linked as c++.
language_map = {".c" : "c",
".cc" : "c++",
".cpp" : "c++",
".cxx" : "c++",
".m" : "objc",
}
language_order = ["c++", "objc", "c"]
def __init__(self, verbose=0, dry_run=0, force=0):
self.dry_run = dry_run
self.force = force
self.verbose = verbose
# 'output_dir': a common output directory for object, library,
# shared object, and shared library files
self.output_dir = None
# 'macros': a list of macro definitions (or undefinitions). A
# macro definition is a 2-tuple (name, value), where the value is
# either a string or None (no explicit value). A macro
# undefinition is a 1-tuple (name,).
self.macros = []
# 'include_dirs': a list of directories to search for include files
self.include_dirs = []
# 'libraries': a list of libraries to include in any link
# (library names, not filenames: eg. "foo" not "libfoo.a")
self.libraries = []
# 'library_dirs': a list of directories to search for libraries
self.library_dirs = []
# 'runtime_library_dirs': a list of directories to search for
# shared libraries/objects at runtime
self.runtime_library_dirs = []
# 'objects': a list of object files (or similar, such as explicitly
# named library files) to include on any link
self.objects = []
for key in self.executables.keys():
self.set_executable(key, self.executables[key])
def set_executables(self, **kwargs):
"""Define the executables (and options for them) that will be run
to perform the various stages of compilation. The exact set of
executables that may be specified here depends on the compiler
class (via the 'executables' class attribute), but most will have:
compiler the C/C++ compiler
linker_so linker used to create shared objects and libraries
linker_exe linker used to create binary executables
archiver static library creator
On platforms with a command-line (Unix, DOS/Windows), each of these
is a string that will be split into executable name and (optional)
list of arguments. (Splitting the string is done similarly to how
Unix shells operate: words are delimited by spaces, but quotes and
backslashes can override this. See
'distutils.util.split_quoted()'.)
"""
# Note that some CCompiler implementation classes will define class
# attributes 'cpp', 'cc', etc. with hard-coded executable names;
# this is appropriate when a compiler class is for exactly one
# compiler/OS combination (eg. MSVCCompiler). Other compiler
# classes (UnixCCompiler, in particular) are driven by information
# discovered at run-time, since there are many different ways to do
# basically the same things with Unix C compilers.
for key in kwargs:
if key not in self.executables:
raise ValueError("unknown executable '%s' for class %s" %
(key, self.__class__.__name__))
self.set_executable(key, kwargs[key])
def set_executable(self, key, value):
if isinstance(value, str):
setattr(self, key, split_quoted(value))
else:
setattr(self, key, value)
def _find_macro(self, name):
i = 0
for defn in self.macros:
if defn[0] == name:
return i
i += 1
return None
def _check_macro_definitions(self, definitions):
"""Ensures that every element of 'definitions' is a valid macro
definition, ie. either (name,value) 2-tuple or a (name,) tuple. Do
nothing if all definitions are OK, raise TypeError otherwise.
"""
for defn in definitions:
if not (isinstance(defn, tuple) and
(len(defn) in (1, 2) and
(isinstance (defn[1], str) or defn[1] is None)) and
isinstance (defn[0], str)):
raise TypeError(("invalid macro definition '%s': " % defn) + \
"must be tuple (string,), (string, string), or " + \
"(string, None)")
# -- Bookkeeping methods -------------------------------------------
def define_macro(self, name, value=None):
"""Define a preprocessor macro for all compilations driven by this
compiler object. The optional parameter 'value' should be a
string; if it is not supplied, then the macro will be defined
without an explicit value and the exact outcome depends on the
compiler used (XXX true? does ANSI say anything about this?)
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
self.macros.append((name, value))
def undefine_macro(self, name):
"""Undefine a preprocessor macro for all compilations driven by
this compiler object. If the same macro is defined by
'define_macro()' and undefined by 'undefine_macro()' the last call
takes precedence (including multiple redefinitions or
undefinitions). If the macro is redefined/undefined on a
per-compilation basis (ie. in the call to 'compile()'), then that
takes precedence.
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
undefn = (name,)
self.macros.append(undefn)
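    # Illustrative sketch of the macro bookkeeping above on a concrete compiler
    # instance `cc` (e.g. one obtained from `new_compiler()`), assuming no
    # prior macros:
    #   cc.define_macro("NDEBUG")          # macros == [("NDEBUG", None)]
    #   cc.define_macro("VERSION", "1.0")  # appends ("VERSION", "1.0")
    #   cc.undefine_macro("NDEBUG")        # re-adds it as the 1-tuple ("NDEBUG",)
    #   # cc.macros == [("VERSION", "1.0"), ("NDEBUG",)]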
def add_include_dir(self, dir):
"""Add 'dir' to the list of directories that will be searched for
header files. The compiler is instructed to search directories in
the order in which they are supplied by successive calls to
'add_include_dir()'.
"""
self.include_dirs.append(dir)
def set_include_dirs(self, dirs):
"""Set the list of directories that will be searched to 'dirs' (a
list of strings). Overrides any preceding calls to
        'add_include_dir()'; subsequent calls to 'add_include_dir()' add
to the list passed to 'set_include_dirs()'. This does not affect
any list of standard include directories that the compiler may
search by default.
"""
self.include_dirs = dirs[:]
def add_library(self, libname):
"""Add 'libname' to the list of libraries that will be included in
all links driven by this compiler object. Note that 'libname'
should *not* be the name of a file containing a library, but the
name of the library itself: the actual filename will be inferred by
the linker, the compiler, or the compiler class (depending on the
platform).
The linker will be instructed to link against libraries in the
order they were supplied to 'add_library()' and/or
'set_libraries()'. It is perfectly valid to duplicate library
names; the linker will be instructed to link against libraries as
many times as they are mentioned.
"""
self.libraries.append(libname)
def set_libraries(self, libnames):
"""Set the list of libraries to be included in all links driven by
this compiler object to 'libnames' (a list of strings). This does
not affect any standard system libraries that the linker may
include by default.
"""
self.libraries = libnames[:]
def add_library_dir(self, dir):
"""Add 'dir' to the list of directories that will be searched for
libraries specified to 'add_library()' and 'set_libraries()'. The
linker will be instructed to search for libraries in the order they
are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
"""
self.library_dirs.append(dir)
def set_library_dirs(self, dirs):
"""Set the list of library search directories to 'dirs' (a list of
strings). This does not affect any standard library search path
that the linker may search by default.
"""
self.library_dirs = dirs[:]
def add_runtime_library_dir(self, dir):
"""Add 'dir' to the list of directories that will be searched for
shared libraries at runtime.
"""
self.runtime_library_dirs.append(dir)
def set_runtime_library_dirs(self, dirs):
"""Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
"""
self.runtime_library_dirs = dirs[:]
def add_link_object(self, object):
"""Add 'object' to the list of object files (or analogues, such as
explicitly named library files or the output of "resource
compilers") to be included in every link driven by this compiler
object.
"""
self.objects.append(object)
def set_link_objects(self, objects):
"""Set the list of object files (or analogues) to be included in
every link to 'objects'. This does not affect any standard object
files that the linker may include by default (such as system
libraries).
"""
self.objects = objects[:]
# -- Private utility methods --------------------------------------
# (here for the convenience of subclasses)
# Helper method to prep compiler in subclass compile() methods
def _setup_compile(self, outdir, macros, incdirs, sources, depends,
extra):
"""Process arguments and decide which source files to compile."""
if outdir is None:
outdir = self.output_dir
elif not isinstance(outdir, str):
raise TypeError("'output_dir' must be a string or None")
if macros is None:
macros = self.macros
elif isinstance(macros, list):
macros = macros + (self.macros or [])
else:
raise TypeError("'macros' (if supplied) must be a list of tuples")
if incdirs is None:
incdirs = self.include_dirs
elif isinstance(incdirs, (list, tuple)):
incdirs = list(incdirs) + (self.include_dirs or [])
else:
raise TypeError(
"'include_dirs' (if supplied) must be a list of strings")
if extra is None:
extra = []
# Get the list of expected output (object) files
objects = self.object_filenames(sources, strip_dir=0,
output_dir=outdir)
assert len(objects) == len(sources)
pp_opts = gen_preprocess_options(macros, incdirs)
build = {}
for i in range(len(sources)):
src = sources[i]
obj = objects[i]
ext = os.path.splitext(src)[1]
self.mkpath(os.path.dirname(obj))
build[obj] = (src, ext)
return macros, objects, extra, pp_opts, build
def _get_cc_args(self, pp_opts, debug, before):
# works for unixccompiler, cygwinccompiler
cc_args = pp_opts + ['-c']
if debug:
cc_args[:0] = ['-g']
if before:
cc_args[:0] = before
return cc_args
def _fix_compile_args(self, output_dir, macros, include_dirs):
"""Typecheck and fix-up some of the arguments to the 'compile()'
method, and return fixed-up values. Specifically: if 'output_dir'
is None, replaces it with 'self.output_dir'; ensures that 'macros'
is a list, and augments it with 'self.macros'; ensures that
'include_dirs' is a list, and augments it with 'self.include_dirs'.
Guarantees that the returned values are of the correct type,
i.e. for 'output_dir' either string or None, and for 'macros' and
'include_dirs' either list or None.
"""
if output_dir is None:
output_dir = self.output_dir
elif not isinstance(output_dir, str):
raise TypeError("'output_dir' must be a string or None")
if macros is None:
macros = self.macros
elif isinstance(macros, list):
macros = macros + (self.macros or [])
else:
raise TypeError("'macros' (if supplied) must be a list of tuples")
if include_dirs is None:
include_dirs = self.include_dirs
elif isinstance(include_dirs, (list, tuple)):
include_dirs = list(include_dirs) + (self.include_dirs or [])
else:
raise TypeError(
"'include_dirs' (if supplied) must be a list of strings")
return output_dir, macros, include_dirs
def _prep_compile(self, sources, output_dir, depends=None):
"""Decide which souce files must be recompiled.
Determine the list of object files corresponding to 'sources',
and figure out which ones really need to be recompiled.
Return a list of all object files and a dictionary telling
which source files can be skipped.
"""
# Get the list of expected output (object) files
objects = self.object_filenames(sources, output_dir=output_dir)
assert len(objects) == len(sources)
# Return an empty dict for the "which source files can be skipped"
# return value to preserve API compatibility.
return objects, {}
def _fix_object_args(self, objects, output_dir):
"""Typecheck and fix up some arguments supplied to various methods.
Specifically: ensure that 'objects' is a list; if output_dir is
None, replace with self.output_dir. Return fixed versions of
'objects' and 'output_dir'.
"""
if not isinstance(objects, (list, tuple)):
raise TypeError("'objects' must be a list or tuple of strings")
objects = list(objects)
if output_dir is None:
output_dir = self.output_dir
elif not isinstance(output_dir, str):
raise TypeError("'output_dir' must be a string or None")
return (objects, output_dir)
def _fix_lib_args(self, libraries, library_dirs, runtime_library_dirs):
"""Typecheck and fix up some of the arguments supplied to the
'link_*' methods. Specifically: ensure that all arguments are
lists, and augment them with their permanent versions
(eg. 'self.libraries' augments 'libraries'). Return a tuple with
fixed versions of all arguments.
"""
if libraries is None:
libraries = self.libraries
elif isinstance(libraries, (list, tuple)):
            libraries = list(libraries) + (self.libraries or [])
else:
raise TypeError(
"'libraries' (if supplied) must be a list of strings")
if library_dirs is None:
library_dirs = self.library_dirs
elif isinstance(library_dirs, (list, tuple)):
            library_dirs = list(library_dirs) + (self.library_dirs or [])
else:
raise TypeError(
"'library_dirs' (if supplied) must be a list of strings")
if runtime_library_dirs is None:
runtime_library_dirs = self.runtime_library_dirs
elif isinstance(runtime_library_dirs, (list, tuple)):
runtime_library_dirs = (list(runtime_library_dirs) +
(self.runtime_library_dirs or []))
else:
raise TypeError("'runtime_library_dirs' (if supplied) "
"must be a list of strings")
return (libraries, library_dirs, runtime_library_dirs)
def _need_link(self, objects, output_file):
"""Return true if we need to relink the files listed in 'objects'
to recreate 'output_file'.
"""
if self.force:
return True
else:
if self.dry_run:
                newer = newer_group(objects, output_file, missing='newer')
            else:
                newer = newer_group(objects, output_file)
return newer
def detect_language(self, sources):
"""Detect the language of a given file, or list of files. Uses
language_map, and language_order to do the job.
"""
if not isinstance(sources, list):
sources = [sources]
lang = None
index = len(self.language_order)
for source in sources:
base, ext = os.path.splitext(source)
extlang = self.language_map.get(ext)
try:
extindex = self.language_order.index(extlang)
if extindex < index:
lang = extlang
index = extindex
except ValueError:
pass
return lang
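    # Illustrative sketch of detect_language() with the default language_map
    # and language_order (file names are hypothetical): the highest-priority
    # language found among the sources wins.
    #
    #     compiler.detect_language('main.c')                  # -> 'c'
    #     compiler.detect_language(['main.c', 'helper.cpp'])  # -> 'c++'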
# -- Worker methods ------------------------------------------------
# (must be implemented by subclasses)
def preprocess(self, source, output_file=None, macros=None,
include_dirs=None, extra_preargs=None, extra_postargs=None):
"""Preprocess a single C/C++ source file, named in 'source'.
Output will be written to file named 'output_file', or stdout if
'output_file' not supplied. 'macros' is a list of macro
definitions as for 'compile()', which will augment the macros set
with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a
list of directory names that will be added to the default list.
Raises PreprocessError on failure.
"""
pass
def compile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
"""Compile one or more source files.
'sources' must be a list of filenames, most likely C/C++
files, but in reality anything that can be handled by a
particular compiler and compiler class (eg. MSVCCompiler can
handle resource files in 'sources'). Return a list of object
filenames, one per source filename in 'sources'. Depending on
the implementation, not all source files will necessarily be
compiled, but all corresponding object filenames will be
returned.
If 'output_dir' is given, object files will be put under it, while
retaining their original path component. That is, "foo/bar.c"
normally compiles to "foo/bar.o" (for a Unix implementation); if
'output_dir' is "build", then it would compile to
"build/foo/bar.o".
'macros', if given, must be a list of macro definitions. A macro
definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
The former defines a macro; if the value is None, the macro is
defined without an explicit value. The 1-tuple case undefines a
        macro. Later definitions/redefinitions/undefinitions take
precedence.
'include_dirs', if given, must be a list of strings, the
directories to add to the default include file search path for this
compilation only.
'debug' is a boolean; if true, the compiler will be instructed to
output debug symbols in (or alongside) the object file(s).
        'extra_preargs' and 'extra_postargs' are implementation-dependent.
On platforms that have the notion of a command-line (e.g. Unix,
DOS/Windows), they are most likely lists of strings: extra
command-line arguments to prepend/append to the compiler command
line. On other platforms, consult the implementation class
documentation. In any event, they are intended as an escape hatch
for those occasions when the abstract compiler framework doesn't
cut the mustard.
'depends', if given, is a list of filenames that all targets
depend on. If a source file is older than any file in
depends, then the source file will be recompiled. This
supports dependency tracking, but only at a coarse
granularity.
Raises CompileError on failure.
"""
# A concrete compiler class can either override this method
# entirely or implement _compile().
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# Return *all* object filenames, not just the ones we just built.
return objects
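    # Illustrative sketch of a compile() call (paths and macro values are
    # hypothetical): 2-tuples define macros, 1-tuples undefine them, and the
    # result lists one object file per source file.
    #
    #     objects = compiler.compile(
    #         ['src/foo.c'],
    #         output_dir='build',
    #         macros=[('NDEBUG', '1'), ('WITH_SSL', None), ('DEBUG',)],
    #         include_dirs=['include'],
    #         debug=0)
    #     # -> ['build/src/foo.o'] with a Unix-style implementation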
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compile 'src' to product 'obj'."""
# A concrete compiler class that does not override compile()
# should implement _compile().
pass
def create_static_lib(self, objects, output_libname, output_dir=None,
debug=0, target_lang=None):
"""Link a bunch of stuff together to create a static library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects', the extra object files supplied to
'add_link_object()' and/or 'set_link_objects()', the libraries
supplied to 'add_library()' and/or 'set_libraries()', and the
libraries supplied as 'libraries' (if any).
'output_libname' should be a library name, not a filename; the
filename will be inferred from the library name. 'output_dir' is
the directory where the library file will be put.
'debug' is a boolean; if true, debugging information will be
included in the library (note that on most platforms, it is the
compile step where this matters: the 'debug' flag is included here
just for consistency).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LibError on failure.
"""
pass
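    # Illustrative sketch (hypothetical names): 'foo' is a library *name*;
    # the concrete filename, e.g. 'build/libfoo.a' on Unix, is derived by the
    # implementation via library_filename().
    #
    #     compiler.create_static_lib(objects, 'foo', output_dir='build')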
# values for target_desc parameter in link()
SHARED_OBJECT = "shared_object"
SHARED_LIBRARY = "shared_library"
EXECUTABLE = "executable"
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
"""Link a bunch of stuff together to create an executable or
shared library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects'. 'output_filename' should be a filename. If
'output_dir' is supplied, 'output_filename' is relative to it
(i.e. 'output_filename' can provide directory components if
needed).
'libraries' is a list of libraries to link against. These are
library names, not filenames, since they're translated into
filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
on Unix and "foo.lib" on DOS/Windows). However, they can include a
directory component, which means the linker will look in that
specific directory rather than searching all the normal locations.
'library_dirs', if supplied, should be a list of directories to
search for libraries that were specified as bare library names
(ie. no directory component). These are on top of the system
default and those supplied to 'add_library_dir()' and/or
'set_library_dirs()'. 'runtime_library_dirs' is a list of
directories that will be embedded into the shared library and used
to search for other shared libraries that *it* depends on at
run-time. (This may only be relevant on Unix.)
'export_symbols' is a list of symbols that the shared library will
export. (This appears to be relevant only on Windows.)
'debug' is as for 'compile()' and 'create_static_lib()', with the
slight distinction that it actually matters on most platforms (as
opposed to 'create_static_lib()', which includes a 'debug' flag
mostly for form's sake).
'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
of course that they supply command-line arguments for the
particular linker being used).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LinkError on failure.
"""
raise NotImplementedError
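    # A hedged sketch of a link() call (names and paths are hypothetical):
    # 'libraries' holds bare names such as 'ssl', which the implementation
    # maps to 'libssl.so'/'ssl.lib' as appropriate for the platform.
    #
    #     compiler.link(
    #         CCompiler.SHARED_OBJECT,
    #         objects,
    #         'foo.so',
    #         output_dir='build',
    #         libraries=['ssl', 'crypto'],
    #         library_dirs=['/usr/local/lib'])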
# Old 'link_*()' methods, rewritten to use the new 'link()' method.
def link_shared_lib(self,
objects,
output_libname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_LIBRARY, objects,
self.library_filename(output_libname, lib_type='shared'),
output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_shared_object(self,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_OBJECT, objects,
output_filename, output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_executable(self,
objects,
output_progname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
target_lang=None):
self.link(CCompiler.EXECUTABLE, objects,
self.executable_filename(output_progname), output_dir,
libraries, library_dirs, runtime_library_dirs, None,
debug, extra_preargs, extra_postargs, None, target_lang)
# -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options()' function; there is
# no appropriate default implementation so subclasses should
# implement all of these.
def library_dir_option(self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for libraries.
"""
raise NotImplementedError
def runtime_library_dir_option(self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for runtime libraries.
"""
raise NotImplementedError
def library_option(self, lib):
"""Return the compiler option to add 'lib' to the list of libraries
linked into the shared library or executable.
"""
raise NotImplementedError
def has_function(self, funcname, includes=None, include_dirs=None,
libraries=None, library_dirs=None):
"""Return a boolean indicating whether funcname is supported on
the current platform. The optional arguments can be used to
augment the compilation environment.
"""
# this can't be included at module scope because it tries to
# import math which might not be available at that point - maybe
# the necessary logic should just be inlined?
import tempfile
if includes is None:
includes = []
if include_dirs is None:
include_dirs = []
if libraries is None:
libraries = []
if library_dirs is None:
library_dirs = []
fd, fname = tempfile.mkstemp(".c", funcname, text=True)
f = os.fdopen(fd, "w")
try:
for incl in includes:
f.write("""#include "%s"\n""" % incl)
f.write("""\
int main (int argc, char **argv) {
%s();
return 0;
}
""" % funcname)
finally:
f.close()
try:
objects = self.compile([fname], include_dirs=include_dirs)
except CompileError:
return False
try:
self.link_executable(objects, "a.out",
libraries=libraries,
library_dirs=library_dirs)
except (LinkError, TypeError):
return False
return True
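    # Illustrative sketch of feature probing (function and library names are
    # hypothetical for this compiler object): compile or link failures simply
    # yield False.
    #
    #     if compiler.has_function('clock_gettime', libraries=['rt']):
    #         macros.append(('HAVE_CLOCK_GETTIME', '1'))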
    def find_library_file(self, dirs, lib, debug=0):
"""Search the specified list of directories for a static or shared
library file 'lib' and return the full path to that file. If
'debug' true, look for a debugging version (if that makes sense on
the current platform). Return None if 'lib' wasn't found in any of
the specified directories.
"""
raise NotImplementedError
# -- Filename generation methods -----------------------------------
# The default implementation of the filename generating methods are
# prejudiced towards the Unix/DOS/Windows view of the world:
# * object files are named by replacing the source file extension
# (eg. .c/.cpp -> .o/.obj)
# * library files (shared or static) are named by plugging the
# library name and extension into a format string, eg.
# "lib%s.%s" % (lib_name, ".a") for Unix static libraries
# * executables are named by appending an extension (possibly
# empty) to the program name: eg. progname + ".exe" for
# Windows
#
# To reduce redundant code, these methods expect to find
# several attributes in the current object (presumably defined
# as class attributes):
# * src_extensions -
# list of C/C++ source file extensions, eg. ['.c', '.cpp']
# * obj_extension -
# object file extension, eg. '.o' or '.obj'
# * static_lib_extension -
# extension for static library files, eg. '.a' or '.lib'
# * shared_lib_extension -
# extension for shared library/object files, eg. '.so', '.dll'
# * static_lib_format -
# format string for generating static library filenames,
# eg. 'lib%s.%s' or '%s.%s'
# * shared_lib_format
# format string for generating shared library filenames
# (probably same as static_lib_format, since the extension
# is one of the intended parameters to the format string)
# * exe_extension -
# extension for executable files, eg. '' or '.exe'
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(src_name)
if os.name == 'riscos':
base = os.path.join('o', os.path.split(base)[-1])
if ext not in self.src_extensions:
raise UnknownFileError(
"unknown file type '%s' (from '%s')" % (ext, src_name))
if strip_dir:
base = os.path.basename(base)
obj_names.append(os.path.join(output_dir,
base + self.obj_extension))
return obj_names
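    # Sketch of the default object-file naming (Unix-style '.o' extension
    # assumed): the source extension is replaced and the path component is
    # kept unless strip_dir is true.
    #
    #     compiler.object_filenames(['foo/bar.c'], output_dir='build')
    #     # -> ['build/foo/bar.o']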
def shared_object_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename(basename)
return os.path.join(output_dir, basename + self.shared_lib_extension)
def executable_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename(basename)
return os.path.join(output_dir, basename + (self.exe_extension or ''))
def library_filename(self, libname, lib_type='static', # or 'shared'
strip_dir=0, output_dir=''):
assert output_dir is not None
if lib_type not in ("static", "shared", "dylib", "xcode_stub"):
raise ValueError(
"'lib_type' must be \"static\", \"shared\", \"dylib\", or \"xcode_stub\"")
fmt = getattr(self, lib_type + "_lib_format")
ext = getattr(self, lib_type + "_lib_extension")
dir, base = os.path.split(libname)
filename = fmt % (base, ext)
if strip_dir:
dir = ''
return os.path.join(output_dir, dir, filename)
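    # Sketch of library filename generation, assuming Unix-style format
    # strings ('lib%s%s') and extensions:
    #
    #     compiler.library_filename('foo')                     # -> 'libfoo.a'
    #     compiler.library_filename('foo', lib_type='shared')  # -> 'libfoo.so'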
# -- Utility methods -----------------------------------------------
def announce(self, msg, level=1):
log.debug(msg)
def debug_print(self, msg):
from distutils.debug import DEBUG
if DEBUG:
print(msg)
def warn(self, msg):
sys.stderr.write("warning: %s\n" % msg)
def execute(self, func, args, msg=None, level=1):
execute(func, args, msg, self.dry_run)
def spawn(self, cmd):
spawn(cmd, dry_run=self.dry_run)
def move_file(self, src, dst):
return move_file(src, dst, dry_run=self.dry_run)
    def mkpath(self, name, mode=0o777):
mkpath(name, mode, dry_run=self.dry_run)
# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
# type for that platform. Keys are interpreted as re match
# patterns. Order is important; platform mappings are preferred over
# OS names.
_default_compilers = (
# Platform string mappings
# on a cygwin built python we can use gcc like an ordinary UNIXish
# compiler
('cygwin.*', 'unix'),
# OS name mappings
('posix', 'unix'),
('nt', 'msvc'),
# RISC OS can use a UNIXish thing
('riscos', 'unix'),
)
def get_default_compiler(osname=None, platform=None):
"""Determine the default compiler to use for the given platform.
osname should be one of the standard Python OS names (i.e. the
ones returned by os.name) and platform the common value
returned by sys.platform for the platform in question.
The default values are os.name and sys.platform in case the
parameters are not given.
"""
if osname is None:
osname = os.name
if platform is None:
platform = sys.platform
for pattern, compiler in _default_compilers:
if re.match(pattern, platform) is not None or \
re.match(pattern, osname) is not None:
return compiler
# Default to Unix compiler
return 'unix'
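# A minimal sketch of the mapping above (values shown are illustrative and
# depend on the running platform):
#
#     get_default_compiler('posix', 'linux')   # -> 'unix'
#     get_default_compiler('nt', 'win32')      # -> 'msvc'
#     get_default_compiler('posix', 'cygwin')  # -> 'unix' (pattern 'cygwin.*')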
# Map compiler types to (module_name, class_name) pairs -- ie. where to
# find the code that implements an interface to this compiler. (The module
# is assumed to be in the 'distutils' package.)
compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler',
"standard UNIX-style compiler"),
'msvc': ('_msvccompiler', 'MSVCCompiler',
"Microsoft Visual C++"),
'cygwin': ('cygwinccompiler', 'CygwinCCompiler',
"Cygwin port of GNU C Compiler for Win32"),
'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
"Mingw32 port of GNU C Compiler for Win32"),
'bcpp': ('bcppcompiler', 'BCPPCompiler',
"Borland C++ Compiler"),
}
def show_compilers():
"""Print list of available compilers (used by the "--help-compiler"
options to "build", "build_ext", "build_clib").
"""
# XXX this "knows" that the compiler option it's describing is
# "--compiler", which just happens to be the case for the three
# commands that use it.
from distutils.fancy_getopt import FancyGetopt
compilers = []
for compiler in compiler_class.keys():
compilers.append(("compiler="+compiler, None,
compiler_class[compiler][2]))
compilers.sort()
pretty_printer = FancyGetopt(compilers)
pretty_printer.print_help("List of available compilers:")
def new_compiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0):
"""Generate an instance of some CCompiler subclass for the supplied
platform/compiler combination. 'plat' defaults to 'os.name'
(eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
for that platform. Currently only 'posix' and 'nt' are supported, and
the default compilers are "traditional Unix interface" (UnixCCompiler
class) and Visual C++ (MSVCCompiler class). Note that it's perfectly
possible to ask for a Unix compiler object under Windows, and a
Microsoft compiler object under Unix -- if you supply a value for
'compiler', 'plat' is ignored.
"""
if plat is None:
plat = os.name
try:
if compiler is None:
compiler = get_default_compiler(plat)
(module_name, class_name, long_description) = compiler_class[compiler]
except KeyError:
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler" % compiler
raise DistutilsPlatformError(msg)
try:
module_name = "distutils." + module_name
        __import__(module_name)
module = sys.modules[module_name]
klass = vars(module)[class_name]
except ImportError:
raise DistutilsModuleError(
"can't compile C/C++ code: unable to load module '%s'" % \
module_name)
except KeyError:
raise DistutilsModuleError(
"can't compile C/C++ code: unable to find class '%s' "
"in module '%s'" % (class_name, module_name))
# XXX The None is necessary to preserve backwards compatibility
# with classes that expect verbose to be the first positional
# argument.
return klass(None, dry_run, force)
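# Illustrative end-to-end sketch (paths and names are hypothetical): obtain a
# compiler object for the current platform and drive a compile/link cycle
# through the abstract interface defined above.
#
#     compiler = new_compiler()            # platform default, e.g. UnixCCompiler
#     compiler.add_include_dir('include')
#     objs = compiler.compile(['src/foo.c'], output_dir='build')
#     compiler.link_shared_lib(objs, 'foo', output_dir='build')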
def gen_preprocess_options(macros, include_dirs):
"""Generate C pre-processor options (-D, -U, -I) as used by at least
two types of compilers: the typical Unix compiler and Visual C++.
'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
means undefine (-U) macro 'name', and (name,value) means define (-D)
macro 'name' to 'value'. 'include_dirs' is just a list of directory
names to be added to the header file search path (-I). Returns a list
of command-line options suitable for either Unix compilers or Visual
C++.
"""
# XXX it would be nice (mainly aesthetic, and so we don't generate
# stupid-looking command lines) to go over 'macros' and eliminate
# redundant definitions/undefinitions (ie. ensure that only the
# latest mention of a particular macro winds up on the command
# line). I don't think it's essential, though, since most (all?)
# Unix C compilers only pay attention to the latest -D or -U
# mention of a macro on their command line. Similar situation for
# 'include_dirs'. I'm punting on both for now. Anyways, weeding out
# redundancies like this should probably be the province of
# CCompiler, since the data structures used are inherited from it
# and therefore common to all CCompiler classes.
pp_opts = []
for macro in macros:
if not (isinstance(macro, tuple) and 1 <= len(macro) <= 2):
raise TypeError(
"bad macro definition '%s': "
"each element of 'macros' list must be a 1- or 2-tuple"
% macro)
if len(macro) == 1: # undefine this macro
pp_opts.append("-U%s" % macro[0])
elif len(macro) == 2:
if macro[1] is None: # define with no explicit value
pp_opts.append("-D%s" % macro[0])
else:
# XXX *don't* need to be clever about quoting the
# macro value here, because we're going to avoid the
# shell at all costs when we spawn the command!
pp_opts.append("-D%s=%s" % macro)
for dir in include_dirs:
pp_opts.append("-I%s" % dir)
return pp_opts
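# Sketch of the option generation above (macro values and directories are
# hypothetical):
#
#     gen_preprocess_options(
#         [('NDEBUG', '1'), ('WITH_FOO', None), ('DEBUG',)],
#         ['include', '/usr/local/include'])
#     # -> ['-DNDEBUG=1', '-DWITH_FOO', '-UDEBUG',
#     #     '-Iinclude', '-I/usr/local/include']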
def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
"""Generate linker options for searching library directories and
linking with specific libraries. 'libraries' and 'library_dirs' are,
respectively, lists of library names (not filenames!) and search
directories. Returns a list of command-line options suitable for use
with some compiler (depending on the two format strings passed in).
"""
lib_opts = []
for dir in library_dirs:
lib_opts.append(compiler.library_dir_option(dir))
for dir in runtime_library_dirs:
opt = compiler.runtime_library_dir_option(dir)
if isinstance(opt, list):
lib_opts = lib_opts + opt
else:
lib_opts.append(opt)
# XXX it's important that we *not* remove redundant library mentions!
# sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
# resolve all symbols. I just hope we never have to say "-lfoo obj.o
# -lbar" to get things to work -- that's certainly a possibility, but a
# pretty nasty way to arrange your C code.
for lib in libraries:
(lib_dir, lib_name) = os.path.split(lib)
if lib_dir:
lib_file = compiler.find_library_file([lib_dir], lib_name)
if lib_file:
lib_opts.append(lib_file)
else:
compiler.warn("no library file corresponding to "
"'%s' found (skipping)" % lib)
else:
            lib_opts.append(compiler.library_option(lib))
return lib_opts
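# Sketch of the resulting linker options (hypothetical inputs; the exact
# spellings come from the compiler object's *_option() methods, e.g. '-L' and
# '-l' for a Unix-style compiler):
#
#     gen_lib_options(compiler, ['/opt/lib'], [], ['ssl', 'crypto'])
#     # -> ['-L/opt/lib', '-lssl', '-lcrypto'] with UnixCCompiler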
| 42.406613
| 92
| 0.615578
|
50975694cfa84bd032eaef252e7a3a1686329d87
| 124
|
py
|
Python
|
shop/apps.py
|
marcopuccio/mpbb
|
18e303308865493886af7667c79720eee766641c
|
[
"MIT"
] | null | null | null |
shop/apps.py
|
marcopuccio/mpbb
|
18e303308865493886af7667c79720eee766641c
|
[
"MIT"
] | 12
|
2019-10-02T17:18:09.000Z
|
2022-03-11T23:54:53.000Z
|
shop/apps.py
|
marcopuccio/mpbb
|
18e303308865493886af7667c79720eee766641c
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.apps import AppConfig
class ShopConfig(AppConfig):
name = 'shop'
| 15.5
| 39
| 0.782258
|
c096097a57fecd741f4c67dc473f40962f4d8f74
| 7,785
|
py
|
Python
|
python_modules/libraries/dagster-databricks/dagster_databricks_tests/test_pyspark.py
|
makotonium/dagster
|
f5d56514b7e7c5bca28ea14060316d242f51b71b
|
[
"Apache-2.0"
] | 4,606
|
2018-06-21T17:45:20.000Z
|
2022-03-31T23:39:42.000Z
|
python_modules/libraries/dagster-databricks/dagster_databricks_tests/test_pyspark.py
|
makotonium/dagster
|
f5d56514b7e7c5bca28ea14060316d242f51b71b
|
[
"Apache-2.0"
] | 6,221
|
2018-06-12T04:36:01.000Z
|
2022-03-31T21:43:05.000Z
|
python_modules/libraries/dagster-databricks/dagster_databricks_tests/test_pyspark.py
|
makotonium/dagster
|
f5d56514b7e7c5bca28ea14060316d242f51b71b
|
[
"Apache-2.0"
] | 619
|
2018-08-22T22:43:09.000Z
|
2022-03-31T22:48:06.000Z
|
import os
from unittest import mock
import pytest
from dagster import (
InputDefinition,
ModeDefinition,
OutputDefinition,
execute_pipeline,
fs_io_manager,
pipeline,
reconstructable,
solid,
)
from dagster.core.definitions.no_step_launcher import no_step_launcher
from dagster.utils.merger import deep_merge_dicts
from dagster_aws.s3 import s3_pickle_io_manager, s3_resource
from dagster_azure.adls2 import adls2_pickle_io_manager, adls2_resource
from dagster_databricks import databricks_pyspark_step_launcher
from dagster_pyspark import DataFrame, pyspark_resource
from pyspark.sql import Row
from pyspark.sql.types import IntegerType, StringType, StructField, StructType
S3_BUCKET = "dagster-databricks-tests"
ADLS2_STORAGE_ACCOUNT = "dagsterdatabrickstests"
ADLS2_CONTAINER = "dagster-databricks-tests"
BASE_DATABRICKS_PYSPARK_STEP_LAUNCHER_CONFIG = {
"databricks_host": os.environ.get("DATABRICKS_HOST"),
"databricks_token": os.environ.get("DATABRICKS_TOKEN"),
"local_pipeline_package_path": os.path.abspath(os.path.dirname(__file__)),
"staging_prefix": "/dagster-databricks-tests",
"run_config": {
"cluster": {
"new": {
"size": {"num_workers": 1},
"spark_version": "6.5.x-scala2.11",
"nodes": {
"node_types": {"node_type_id": "Standard_DS3_v2"},
},
},
},
"libraries": [
{"pypi": {"package": "azure-storage-file-datalake~=12.0.1"}},
{"pypi": {"package": "dagster-aws"}},
{"pypi": {"package": "dagster-azure"}},
{"pypi": {"package": "databricks-api"}},
{"pypi": {"package": "pytest"}},
],
},
"secrets_to_env_variables": [],
"storage": {
"s3": {
"secret_scope": "dagster-databricks-tests",
"access_key_key": "aws-access-key",
"secret_key_key": "aws-secret-key",
}
},
}
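# A hedged sketch (hypothetical host value) of how the tests below specialize
# this base config: deep_merge_dicts() overrides only the keys supplied in the
# second dict and keeps everything else from the base.
#
#     cfg = deep_merge_dicts(
#         BASE_DATABRICKS_PYSPARK_STEP_LAUNCHER_CONFIG,
#         {"databricks_host": "https://example.cloud.databricks.com"},
#     )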
@solid(
output_defs=[OutputDefinition(DataFrame)],
required_resource_keys={"pyspark_step_launcher", "pyspark"},
)
def make_df_solid(context):
schema = StructType([StructField("name", StringType()), StructField("age", IntegerType())])
rows = [Row(name="John", age=19), Row(name="Jennifer", age=29), Row(name="Henry", age=50)]
return context.resources.pyspark.spark_session.createDataFrame(rows, schema)
@solid(
name="blah",
description="this is a test",
config_schema={"foo": str, "bar": int},
input_defs=[InputDefinition("people", DataFrame)],
output_defs=[OutputDefinition(DataFrame)],
required_resource_keys={"pyspark_step_launcher"},
)
def filter_df_solid(_, people):
return people.filter(people["age"] < 30)
MODE_DEFS = [
ModeDefinition(
"prod_adls2",
resource_defs={
"pyspark_step_launcher": databricks_pyspark_step_launcher,
"pyspark": pyspark_resource,
"adls2": adls2_resource,
"io_manager": adls2_pickle_io_manager,
},
),
ModeDefinition(
"prod_s3",
resource_defs={
"pyspark_step_launcher": databricks_pyspark_step_launcher,
"pyspark": pyspark_resource,
"s3": s3_resource,
"io_manager": s3_pickle_io_manager,
},
),
ModeDefinition(
"test",
resource_defs={
"pyspark_step_launcher": databricks_pyspark_step_launcher,
"pyspark": pyspark_resource,
"io_manager": fs_io_manager,
},
),
ModeDefinition(
"local",
resource_defs={"pyspark_step_launcher": no_step_launcher, "pyspark": pyspark_resource},
),
]
@pipeline(mode_defs=MODE_DEFS)
def pyspark_pipe():
filter_df_solid(make_df_solid())
def define_pyspark_pipe():
return pyspark_pipe
@solid(
required_resource_keys={"pyspark_step_launcher", "pyspark"},
)
def do_nothing_solid(_):
pass
@pipeline(mode_defs=MODE_DEFS)
def do_nothing_pipe():
do_nothing_solid()
def define_do_nothing_pipe():
return do_nothing_pipe
def test_local():
result = execute_pipeline(
pipeline=reconstructable(define_pyspark_pipe),
mode="local",
run_config={"solids": {"blah": {"config": {"foo": "a string", "bar": 123}}}},
)
assert result.success
@mock.patch("dagster_databricks.databricks.DatabricksClient.submit_run")
@mock.patch("dagster_databricks.databricks.DatabricksClient.put_file")
@mock.patch("dagster_databricks.DatabricksPySparkStepLauncher.get_step_events")
@mock.patch("dagster_databricks.databricks.DatabricksJobRunner.wait_for_run_to_complete")
def test_pyspark_databricks(mock_wait, mock_get_step_events, mock_put_file, mock_submit_run):
mock_get_step_events.return_value = execute_pipeline(
pipeline=reconstructable(define_do_nothing_pipe), mode="local"
).events_by_step_key["do_nothing_solid"]
result = execute_pipeline(
pipeline=reconstructable(define_do_nothing_pipe),
mode="test",
run_config={
"resources": {
"pyspark_step_launcher": {
"config": deep_merge_dicts(
BASE_DATABRICKS_PYSPARK_STEP_LAUNCHER_CONFIG,
{"databricks_host": "", "databricks_token": ""},
),
},
},
},
)
assert result.success
assert mock_wait.call_count == 1
assert mock_get_step_events.call_count == 1
assert mock_put_file.call_count == 4
assert mock_submit_run.call_count == 1
@pytest.mark.skipif(
"DATABRICKS_TEST_DO_IT_LIVE_S3" not in os.environ,
reason="This test is slow and requires a Databricks cluster; run only upon explicit request",
)
def test_do_it_live_databricks_s3():
result = execute_pipeline(
reconstructable(define_pyspark_pipe),
mode="prod_s3",
run_config={
"solids": {"blah": {"config": {"foo": "a string", "bar": 123}}},
"resources": {
"pyspark_step_launcher": {"config": BASE_DATABRICKS_PYSPARK_STEP_LAUNCHER_CONFIG},
"io_manager": {
"config": {
"s3_bucket": "dagster-databricks-tests",
"s3_prefix": "dagster-databricks-tests",
}
},
},
},
)
assert result.success
@pytest.mark.skipif(
"DATABRICKS_TEST_DO_IT_LIVE_ADLS2" not in os.environ,
reason="This test is slow and requires a Databricks cluster; run only upon explicit request",
)
def test_do_it_live_databricks_adls2():
config = BASE_DATABRICKS_PYSPARK_STEP_LAUNCHER_CONFIG.copy()
config["storage"] = {
"adls2": {
"secret_scope": "dagster-databricks-tests",
"storage_account_name": ADLS2_STORAGE_ACCOUNT,
"storage_account_key_key": "adls2-storage-key",
}
}
result = execute_pipeline(
reconstructable(define_pyspark_pipe),
mode="prod_adls2",
run_config={
"solids": {"blah": {"config": {"foo": "a string", "bar": 123}}},
"resources": {
"pyspark_step_launcher": {"config": config},
"adls2": {
"config": {
"storage_account": ADLS2_STORAGE_ACCOUNT,
"credential": {"key": os.environ.get("AZURE_STORAGE_ACCOUNT_KEY")},
}
},
"io_manager": {
"config": {
"adls2_file_system": ADLS2_CONTAINER,
"adls2_prefix": "dagster-databricks-tests",
}
},
},
},
)
assert result.success
| 31.905738
| 98
| 0.618882
|
6e2269ca705745e9caeb3c22251ca2bf252d3fc9
| 6,728
|
py
|
Python
|
grl/rl_apps/psro/general_psro_eval.py
|
indylab/xdo
|
1ddd92aa56ba10fa468396de8f8824c83ba9d0ba
|
[
"MIT"
] | 12
|
2021-03-12T07:18:52.000Z
|
2022-03-15T22:30:44.000Z
|
grl/rl_apps/psro/general_psro_eval.py
|
indylab/xdo
|
1ddd92aa56ba10fa468396de8f8824c83ba9d0ba
|
[
"MIT"
] | 1
|
2021-11-22T16:39:46.000Z
|
2022-02-02T22:13:03.000Z
|
grl/rl_apps/psro/general_psro_eval.py
|
indylab/xdo
|
1ddd92aa56ba10fa468396de8f8824c83ba9d0ba
|
[
"MIT"
] | 4
|
2021-06-21T03:54:45.000Z
|
2022-01-13T10:28:26.000Z
|
import argparse
import logging
import time
import numpy as np
import ray
from ray.rllib.agents.trainer import with_common_config
from grl.algos.p2sro.eval_dispatcher.remote import RemoteEvalDispatcherClient
from grl.rl_apps import GRL_SEED
from grl.rl_apps.scenarios.catalog import scenario_catalog
from grl.rl_apps.scenarios.psro_scenario import PSROScenario
from grl.rl_apps.scenarios.ray_setup import init_ray_for_scenario
from grl.rllib_tools.policy_checkpoints import load_pure_strat
from grl.utils.port_listings import get_client_port_for_service
def run_episode(env, policies_for_each_player) -> np.ndarray:
num_players = len(policies_for_each_player)
obs = env.reset()
dones = {}
game_length = 0
policy_states = [policy.get_initial_state() for policy in policies_for_each_player]
payoffs_per_player_this_episode = np.zeros(shape=num_players, dtype=np.float64)
while True:
if "__all__" in dones:
if dones["__all__"]:
break
game_length += 1
action_dict = {}
for player in range(num_players):
if player in obs:
action_index, new_policy_state, action_info = policies_for_each_player[player].compute_single_action(
obs=obs[player], state=policy_states[player])
policy_states[player] = new_policy_state
action_dict[player] = action_index
obs, rewards, dones, infos = env.step(action_dict=action_dict)
for player in range(num_players):
payoff_so_far = payoffs_per_player_this_episode[player]
payoffs_per_player_this_episode[player] = payoff_so_far + rewards.get(player, 0.0)
return payoffs_per_player_this_episode
@ray.remote(num_cpus=0, num_gpus=0)
def run_poker_evaluation_loop(scenario_name: str, eval_dispatcher_port: int, eval_dispatcher_host: str):
scenario: PSROScenario = scenario_catalog.get(scenario_name=scenario_name)
if not isinstance(scenario, PSROScenario):
raise TypeError(f"Only instances of {PSROScenario} can be used here. {scenario.name} is a {type(scenario)}.")
eval_dispatcher = RemoteEvalDispatcherClient(port=eval_dispatcher_port, remote_server_host=eval_dispatcher_host)
env = scenario.env_class(env_config=scenario.env_config)
num_players = 2
trainer_config = scenario.get_trainer_config(env)
trainer_config["explore"] = scenario.allow_stochastic_best_responses
policies = [scenario.policy_classes["eval"](env.observation_space,
env.action_space,
with_common_config(trainer_config))
for _ in range(num_players)]
while True:
policy_specs_for_each_player, required_games_to_play = eval_dispatcher.take_eval_job()
if policy_specs_for_each_player is None:
time.sleep(2)
else:
if len(policy_specs_for_each_player) != 2:
raise NotImplementedError(f"This evaluation code only supports two player games. "
f"{len(policy_specs_for_each_player)} players were requested.")
# print(f"Got eval matchup:")
# for spec in policy_specs_for_each_player:
# print(f"spec: {spec.to_json()}")
for policy, spec in zip(policies, policy_specs_for_each_player):
load_pure_strat(policy=policy, pure_strat_spec=spec)
total_payoffs_per_player = np.zeros(shape=num_players, dtype=np.float64)
# max_reward = None
# min_reward = None
# time_since_last_output = time.time()
for game in range(required_games_to_play):
# if game % 1000 == 0:
# now = time.time()
# print(f"{policy_specs_for_each_player[0].id} vs "
# f"{policy_specs_for_each_player[1].id}: "
# f"{game}/{required_games_to_play} games played, {now - time_since_last_output} seconds")
# time_since_last_output = now
payoffs_per_player_this_episode = run_episode(env=env, policies_for_each_player=policies)
total_payoffs_per_player += payoffs_per_player_this_episode
# if max_reward is None or max(payoffs_per_player_this_episode) > max_reward:
# max_reward = max(payoffs_per_player_this_episode)
# if min_reward is None or min(payoffs_per_player_this_episode) < min_reward:
# min_reward = min(payoffs_per_player_this_episode)
payoffs_per_player = total_payoffs_per_player / required_games_to_play
print(f"payoffs per player:"
f"{policy_specs_for_each_player[0].id} vs "
f"{policy_specs_for_each_player[1].id}: "
f"{payoffs_per_player}")
eval_dispatcher.submit_eval_job_result(
policy_specs_for_each_player_tuple=policy_specs_for_each_player,
payoffs_for_each_player=payoffs_per_player,
games_played=required_games_to_play
)
def launch_evals(scenario_name: str,
eval_dispatcher_port: int,
eval_dispatcher_host: str,
block=True,
ray_head_address=None):
scenario: PSROScenario = scenario_catalog.get(scenario_name=scenario_name)
init_ray_for_scenario(scenario=scenario, head_address=ray_head_address, logging_level=logging.INFO)
num_workers = scenario.num_eval_workers
evaluator_refs = [run_poker_evaluation_loop.remote(scenario_name, eval_dispatcher_port, eval_dispatcher_host)
for _ in range(num_workers)]
if block:
ray.wait(evaluator_refs, num_returns=num_workers)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--scenario', type=str)
parser.add_argument('--ray_head', type=str, required=False, default=None)
parser.add_argument('--eval_port', type=int, required=False, default=None)
parser.add_argument('--eval_host', type=str, required=False, default='localhost')
commandline_args = parser.parse_args()
scenario_name = commandline_args.scenario
eval_port = commandline_args.eval_port
if eval_port is None:
eval_port = get_client_port_for_service(service_name=f"seed_{GRL_SEED}_{scenario_name}_evals")
launch_evals(scenario_name=scenario_name,
eval_dispatcher_port=eval_port,
eval_dispatcher_host=commandline_args.eval_host,
block=True,
ray_head_address=commandline_args.ray_head)
| 43.128205
| 117
| 0.676724
|
95b57e53cb69e7a0365a16294e049be2082f6de2
| 3,574
|
py
|
Python
|
samples/kubeflow-tf/kubeflow-training-classification.py
|
pamarquez/pipelineHW
|
5a5e39dc51add22c02e91222daa88fae0d82da9d
|
[
"Apache-2.0"
] | 1
|
2019-07-02T01:58:17.000Z
|
2019-07-02T01:58:17.000Z
|
samples/kubeflow-tf/kubeflow-training-classification.py
|
kweinmeister/pipelines
|
a819506dbfdd188077b160f2cc77b17807e5cc8a
|
[
"Apache-2.0"
] | 21
|
2020-01-28T22:48:55.000Z
|
2022-03-08T22:48:12.000Z
|
samples/kubeflow-tf/kubeflow-training-classification.py
|
pamarquez/pipelineHW
|
5a5e39dc51add22c02e91222daa88fae0d82da9d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kfp
from kfp import components
from kfp import dsl
from kfp import gcp
dataflow_tf_transform_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/d0aa15dfb3ff618e8cd1b03f86804ec4307fd9c2/components/dataflow/tft/component.yaml')
kubeflow_tf_training_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/d0aa15dfb3ff618e8cd1b03f86804ec4307fd9c2/components/kubeflow/dnntrainer/component.yaml')
dataflow_tf_predict_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/d0aa15dfb3ff618e8cd1b03f86804ec4307fd9c2/components/dataflow/predict/component.yaml')
confusion_matrix_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/d0aa15dfb3ff618e8cd1b03f86804ec4307fd9c2/components/local/confusion_matrix/component.yaml')
@dsl.pipeline(
name='TF training and prediction pipeline',
description=''
)
def kubeflow_training(output, project,
evaluation='gs://ml-pipeline-playground/flower/eval100.csv',
train='gs://ml-pipeline-playground/flower/train200.csv',
schema='gs://ml-pipeline-playground/flower/schema.json',
learning_rate=0.1,
hidden_layer_size='100,50',
steps=2000,
target='label',
workers=0,
pss=0,
preprocess_mode='local',
predict_mode='local',
):
output_template = str(output) + '/{{workflow.uid}}/{{pod.name}}/data'
# set the flag to use GPU trainer
use_gpu = False
preprocess = dataflow_tf_transform_op(
training_data_file_pattern=train,
evaluation_data_file_pattern=evaluation,
schema=schema,
gcp_project=project,
run_mode=preprocess_mode,
preprocessing_module='',
transformed_data_dir=output_template
).apply(gcp.use_gcp_secret('user-gcp-sa'))
training = kubeflow_tf_training_op(
transformed_data_dir=preprocess.output,
schema=schema,
learning_rate=learning_rate,
hidden_layer_size=hidden_layer_size,
steps=steps,
target=target,
preprocessing_module='',
training_output_dir=output_template
).apply(gcp.use_gcp_secret('user-gcp-sa'))
if use_gpu:
        training.image = 'gcr.io/ml-pipeline/ml-pipeline-kubeflow-tf-trainer-gpu:d4960d3379af4735fd04dc7167fab5fff82d0f22'
training.set_gpu_limit(1)
prediction = dataflow_tf_predict_op(
data_file_pattern=evaluation,
schema=schema,
target_column=target,
model=training.output,
run_mode=predict_mode,
gcp_project=project,
predictions_dir=output_template
).apply(gcp.use_gcp_secret('user-gcp-sa'))
confusion_matrix = confusion_matrix_op(
predictions=prediction.output,
output_dir=output_template
).apply(gcp.use_gcp_secret('user-gcp-sa'))
if __name__ == '__main__':
kfp.compiler.Compiler().compile(kubeflow_training, __file__ + '.zip')
| 38.847826
| 207
| 0.745663
|
c75d0756d81bcd50299c361bf16765d1de87c2bb
| 1,706
|
py
|
Python
|
vagrant/vagrant-brozzler-new-job.py
|
wolfgang42/brozzler
|
0f27c9995ad0251a22f238bd7a01653e0ef3b7b9
|
[
"Apache-2.0"
] | 519
|
2016-04-25T20:11:23.000Z
|
2022-03-30T10:25:38.000Z
|
vagrant/vagrant-brozzler-new-job.py
|
wolfgang42/brozzler
|
0f27c9995ad0251a22f238bd7a01653e0ef3b7b9
|
[
"Apache-2.0"
] | 102
|
2016-05-17T17:17:30.000Z
|
2022-02-25T23:26:17.000Z
|
vagrant/vagrant-brozzler-new-job.py
|
wolfgang42/brozzler
|
0f27c9995ad0251a22f238bd7a01653e0ef3b7b9
|
[
"Apache-2.0"
] | 94
|
2016-05-06T01:03:06.000Z
|
2021-12-30T20:57:30.000Z
|
#!/usr/bin/env python
'''
vagrant-brozzler-new-job.py - runs brozzler-new-job inside the vagrant vm to
queue a job for your vagrant brozzler deployment.
This is a standalone script with no dependencies other than python, and should
work with python 2.7 or python 3.2+. The only reason it's not a bash script is
so we can use the argparse library.
Copyright (C) 2016-2019 Internet Archive
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import os
import argparse
import subprocess
def main(argv=[]):
arg_parser = argparse.ArgumentParser(prog=os.path.basename(argv[0]))
arg_parser.add_argument(
'job_conf_file', metavar='JOB_CONF_FILE',
help='brozzler job configuration file in yaml')
args = arg_parser.parse_args(args=argv[1:])
# cd to path with Vagrantfile so "vagrant ssh" knows what to do
os.chdir(os.path.dirname(__file__))
with open(args.job_conf_file, 'rb') as f:
subprocess.call([
'vagrant', 'ssh', '--',
'f=`mktemp` && cat > $f && '
'/home/vagrant/brozzler-ve3/bin/python '
'/home/vagrant/brozzler-ve3/bin/brozzler-new-job $f'],
stdin=f)
if __name__ == '__main__':
main(sys.argv)
| 34.12
| 78
| 0.705158
|
db8c782e0ee725eda53f3f770587326c4e826e54
| 532
|
py
|
Python
|
Algo and DSA/LeetCode-Solutions-master/Python/reverse-words-in-a-string-ii.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 3,269
|
2018-10-12T01:29:40.000Z
|
2022-03-31T17:58:41.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/reverse-words-in-a-string-ii.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 53
|
2018-12-16T22:54:20.000Z
|
2022-02-25T08:31:20.000Z
|
Algo and DSA/LeetCode-Solutions-master/Python/reverse-words-in-a-string-ii.py
|
Sourav692/FAANG-Interview-Preparation
|
f523e5c94d582328b3edc449ea16ac6ab28cdc81
|
[
"Unlicense"
] | 1,236
|
2018-10-12T02:51:40.000Z
|
2022-03-30T13:30:37.000Z
|
# Time: O(n)
# Space: O(1)
class Solution(object):
def reverseWords(self, s):
"""
:type s: a list of 1 length strings (List[str])
:rtype: nothing
"""
def reverse(s, begin, end):
for i in xrange((end - begin) / 2):
s[begin + i], s[end - 1 - i] = s[end - 1 - i], s[begin + i]
reverse(s, 0, len(s))
i = 0
for j in xrange(len(s) + 1):
if j == len(s) or s[j] == ' ':
reverse(s, i, j)
i = j + 1
| 24.181818
| 75
| 0.411654
|
b3c5fe35b37fa38a081633042560f1a1d9c857aa
| 309
|
py
|
Python
|
sabueso/protein/is_protein.py
|
dprada/sabueso
|
14843cf3522b5b89db5b61c1541a7015f114dd53
|
[
"MIT"
] | null | null | null |
sabueso/protein/is_protein.py
|
dprada/sabueso
|
14843cf3522b5b89db5b61c1541a7015f114dd53
|
[
"MIT"
] | 2
|
2022-01-31T21:22:17.000Z
|
2022-02-04T20:20:12.000Z
|
sabueso/protein/is_protein.py
|
dprada/sabueso
|
14843cf3522b5b89db5b61c1541a7015f114dd53
|
[
"MIT"
] | 1
|
2021-07-20T15:01:14.000Z
|
2021-07-20T15:01:14.000Z
|
def is_protein(molecular_system, indices='all'):
from sabueso import get_form
from sabueso.forms import dict_is_protein
form = get_form(molecular_system)
    output = dict_is_protein[form](molecular_system, indices=indices)
if len(output)==1:
output = output[0]
return output
| 19.3125
| 67
| 0.702265
|
206896c8381e63fbd15895ce9e336363cd8b7627
| 32,367
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/importexport/v20161101/outputs.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/importexport/v20161101/outputs.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/importexport/v20161101/outputs.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'DriveBitLockerKeyResponseResult',
'DriveStatusResponse',
'ExportResponse',
'JobDetailsResponse',
'PackageInfomationResponse',
'ReturnAddressResponse',
'ReturnShippingResponse',
'ShippingInformationResponse',
]
@pulumi.output_type
class DriveBitLockerKeyResponseResult(dict):
"""
BitLocker recovery key or password to the specified drive
"""
def __init__(__self__, *,
bit_locker_key: Optional[str] = None,
drive_id: Optional[str] = None):
"""
BitLocker recovery key or password to the specified drive
:param str bit_locker_key: BitLocker recovery key or password
:param str drive_id: Drive ID
"""
if bit_locker_key is not None:
pulumi.set(__self__, "bit_locker_key", bit_locker_key)
if drive_id is not None:
pulumi.set(__self__, "drive_id", drive_id)
@property
@pulumi.getter(name="bitLockerKey")
def bit_locker_key(self) -> Optional[str]:
"""
BitLocker recovery key or password
"""
return pulumi.get(self, "bit_locker_key")
@property
@pulumi.getter(name="driveId")
def drive_id(self) -> Optional[str]:
"""
Drive ID
"""
return pulumi.get(self, "drive_id")
@pulumi.output_type
class DriveStatusResponse(dict):
"""
Provides information about the drive's status
"""
def __init__(__self__, *,
bit_locker_key: Optional[str] = None,
bytes_succeeded: Optional[int] = None,
copy_status: Optional[str] = None,
drive_header_hash: Optional[str] = None,
drive_id: Optional[str] = None,
error_log_uri: Optional[str] = None,
manifest_file: Optional[str] = None,
manifest_hash: Optional[str] = None,
manifest_uri: Optional[str] = None,
percent_complete: Optional[int] = None,
state: Optional[str] = None,
verbose_log_uri: Optional[str] = None):
"""
Provides information about the drive's status
:param str bit_locker_key: The BitLocker key used to encrypt the drive.
:param int bytes_succeeded: Bytes successfully transferred for the drive.
:param str copy_status: Detailed status about the data transfer process. This field is not returned in the response until the drive is in the Transferring state.
:param str drive_header_hash: The drive header hash value.
:param str drive_id: The drive's hardware serial number, without spaces.
:param str error_log_uri: A URI that points to the blob containing the error log for the data transfer operation.
:param str manifest_file: The relative path of the manifest file on the drive.
:param str manifest_hash: The Base16-encoded MD5 hash of the manifest file on the drive.
:param str manifest_uri: A URI that points to the blob containing the drive manifest file.
:param int percent_complete: Percentage completed for the drive.
:param str state: The drive's current state.
:param str verbose_log_uri: A URI that points to the blob containing the verbose log for the data transfer operation.
"""
if bit_locker_key is not None:
pulumi.set(__self__, "bit_locker_key", bit_locker_key)
if bytes_succeeded is not None:
pulumi.set(__self__, "bytes_succeeded", bytes_succeeded)
if copy_status is not None:
pulumi.set(__self__, "copy_status", copy_status)
if drive_header_hash is not None:
pulumi.set(__self__, "drive_header_hash", drive_header_hash)
if drive_id is not None:
pulumi.set(__self__, "drive_id", drive_id)
if error_log_uri is not None:
pulumi.set(__self__, "error_log_uri", error_log_uri)
if manifest_file is not None:
pulumi.set(__self__, "manifest_file", manifest_file)
if manifest_hash is not None:
pulumi.set(__self__, "manifest_hash", manifest_hash)
if manifest_uri is not None:
pulumi.set(__self__, "manifest_uri", manifest_uri)
if percent_complete is not None:
pulumi.set(__self__, "percent_complete", percent_complete)
if state is not None:
pulumi.set(__self__, "state", state)
if verbose_log_uri is not None:
pulumi.set(__self__, "verbose_log_uri", verbose_log_uri)
@property
@pulumi.getter(name="bitLockerKey")
def bit_locker_key(self) -> Optional[str]:
"""
The BitLocker key used to encrypt the drive.
"""
return pulumi.get(self, "bit_locker_key")
@property
@pulumi.getter(name="bytesSucceeded")
def bytes_succeeded(self) -> Optional[int]:
"""
Bytes successfully transferred for the drive.
"""
return pulumi.get(self, "bytes_succeeded")
@property
@pulumi.getter(name="copyStatus")
def copy_status(self) -> Optional[str]:
"""
Detailed status about the data transfer process. This field is not returned in the response until the drive is in the Transferring state.
"""
return pulumi.get(self, "copy_status")
@property
@pulumi.getter(name="driveHeaderHash")
def drive_header_hash(self) -> Optional[str]:
"""
The drive header hash value.
"""
return pulumi.get(self, "drive_header_hash")
@property
@pulumi.getter(name="driveId")
def drive_id(self) -> Optional[str]:
"""
The drive's hardware serial number, without spaces.
"""
return pulumi.get(self, "drive_id")
@property
@pulumi.getter(name="errorLogUri")
def error_log_uri(self) -> Optional[str]:
"""
A URI that points to the blob containing the error log for the data transfer operation.
"""
return pulumi.get(self, "error_log_uri")
@property
@pulumi.getter(name="manifestFile")
def manifest_file(self) -> Optional[str]:
"""
The relative path of the manifest file on the drive.
"""
return pulumi.get(self, "manifest_file")
@property
@pulumi.getter(name="manifestHash")
def manifest_hash(self) -> Optional[str]:
"""
The Base16-encoded MD5 hash of the manifest file on the drive.
"""
return pulumi.get(self, "manifest_hash")
@property
@pulumi.getter(name="manifestUri")
def manifest_uri(self) -> Optional[str]:
"""
A URI that points to the blob containing the drive manifest file.
"""
return pulumi.get(self, "manifest_uri")
@property
@pulumi.getter(name="percentComplete")
def percent_complete(self) -> Optional[int]:
"""
Percentage completed for the drive.
"""
return pulumi.get(self, "percent_complete")
@property
@pulumi.getter
def state(self) -> Optional[str]:
"""
The drive's current state.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="verboseLogUri")
def verbose_log_uri(self) -> Optional[str]:
"""
A URI that points to the blob containing the verbose log for the data transfer operation.
"""
return pulumi.get(self, "verbose_log_uri")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ExportResponse(dict):
"""
A property containing information about the blobs to be exported for an export job. This property is required for export jobs, but must not be specified for import jobs.
"""
def __init__(__self__, *,
blob_listblob_path: Optional[str] = None,
blob_path: Optional[Sequence[str]] = None,
blob_path_prefix: Optional[Sequence[str]] = None):
"""
A property containing information about the blobs to be exported for an export job. This property is required for export jobs, but must not be specified for import jobs.
:param str blob_listblob_path: The relative URI to the block blob that contains the list of blob paths or blob path prefixes as defined above, beginning with the container name. If the blob is in root container, the URI must begin with $root.
:param Sequence[str] blob_path: A collection of blob-path strings.
:param Sequence[str] blob_path_prefix: A collection of blob-prefix strings.
"""
if blob_listblob_path is not None:
pulumi.set(__self__, "blob_listblob_path", blob_listblob_path)
if blob_path is not None:
pulumi.set(__self__, "blob_path", blob_path)
if blob_path_prefix is not None:
pulumi.set(__self__, "blob_path_prefix", blob_path_prefix)
@property
@pulumi.getter(name="blobListblobPath")
def blob_listblob_path(self) -> Optional[str]:
"""
The relative URI to the block blob that contains the list of blob paths or blob path prefixes as defined above, beginning with the container name. If the blob is in root container, the URI must begin with $root.
"""
return pulumi.get(self, "blob_listblob_path")
@property
@pulumi.getter(name="blobPath")
def blob_path(self) -> Optional[Sequence[str]]:
"""
A collection of blob-path strings.
"""
return pulumi.get(self, "blob_path")
@property
@pulumi.getter(name="blobPathPrefix")
def blob_path_prefix(self) -> Optional[Sequence[str]]:
"""
A collection of blob-prefix strings.
"""
return pulumi.get(self, "blob_path_prefix")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class JobDetailsResponse(dict):
"""
Specifies the job properties
"""
def __init__(__self__, *,
backup_drive_manifest: Optional[bool] = None,
cancel_requested: Optional[bool] = None,
delivery_package: Optional['outputs.PackageInfomationResponse'] = None,
diagnostics_path: Optional[str] = None,
drive_list: Optional[Sequence['outputs.DriveStatusResponse']] = None,
export: Optional['outputs.ExportResponse'] = None,
incomplete_blob_list_uri: Optional[str] = None,
job_type: Optional[str] = None,
log_level: Optional[str] = None,
percent_complete: Optional[int] = None,
provisioning_state: Optional[str] = None,
return_address: Optional['outputs.ReturnAddressResponse'] = None,
return_package: Optional['outputs.PackageInfomationResponse'] = None,
return_shipping: Optional['outputs.ReturnShippingResponse'] = None,
shipping_information: Optional['outputs.ShippingInformationResponse'] = None,
state: Optional[str] = None,
storage_account_id: Optional[str] = None):
"""
Specifies the job properties
:param bool backup_drive_manifest: Default value is false. Indicates whether the manifest files on the drives should be copied to block blobs.
:param bool cancel_requested: Indicates whether a request has been submitted to cancel the job.
:param 'PackageInfomationResponseArgs' delivery_package: Contains information about the package being shipped by the customer to the Microsoft data center.
:param str diagnostics_path: The virtual blob directory to which the copy logs and backups of drive manifest files (if enabled) will be stored.
:param Sequence['DriveStatusResponseArgs'] drive_list: List of up to ten drives that comprise the job. The drive list is a required element for an import job; it is not specified for export jobs.
:param 'ExportResponseArgs' export: A property containing information about the blobs to be exported for an export job. This property is included for export jobs only.
:param str incomplete_blob_list_uri: A blob path that points to a block blob containing a list of blob names that were not exported due to insufficient drive space. If all blobs were exported successfully, then this element is not included in the response.
:param str job_type: The type of job
:param str log_level: Default value is Error. Indicates whether error logging or verbose logging will be enabled.
:param int percent_complete: Overall percentage completed for the job.
:param str provisioning_state: Specifies the provisioning state of the job.
:param 'ReturnAddressResponseArgs' return_address: Specifies the return address information for the job.
:param 'PackageInfomationResponseArgs' return_package: Contains information about the package being shipped from the Microsoft data center to the customer to return the drives. The format is the same as the deliveryPackage property above. This property is not included if the drives have not yet been returned.
:param 'ReturnShippingResponseArgs' return_shipping: Specifies the return carrier and customer's account with the carrier.
:param 'ShippingInformationResponseArgs' shipping_information: Contains information about the Microsoft datacenter to which the drives should be shipped.
:param str state: Current state of the job.
:param str storage_account_id: The resource identifier of the storage account where data will be imported to or exported from.
"""
if backup_drive_manifest is not None:
pulumi.set(__self__, "backup_drive_manifest", backup_drive_manifest)
if cancel_requested is not None:
pulumi.set(__self__, "cancel_requested", cancel_requested)
if delivery_package is not None:
pulumi.set(__self__, "delivery_package", delivery_package)
if diagnostics_path is not None:
pulumi.set(__self__, "diagnostics_path", diagnostics_path)
if drive_list is not None:
pulumi.set(__self__, "drive_list", drive_list)
if export is not None:
pulumi.set(__self__, "export", export)
if incomplete_blob_list_uri is not None:
pulumi.set(__self__, "incomplete_blob_list_uri", incomplete_blob_list_uri)
if job_type is not None:
pulumi.set(__self__, "job_type", job_type)
if log_level is not None:
pulumi.set(__self__, "log_level", log_level)
if percent_complete is not None:
pulumi.set(__self__, "percent_complete", percent_complete)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if return_address is not None:
pulumi.set(__self__, "return_address", return_address)
if return_package is not None:
pulumi.set(__self__, "return_package", return_package)
if return_shipping is not None:
pulumi.set(__self__, "return_shipping", return_shipping)
if shipping_information is not None:
pulumi.set(__self__, "shipping_information", shipping_information)
if state is not None:
pulumi.set(__self__, "state", state)
if storage_account_id is not None:
pulumi.set(__self__, "storage_account_id", storage_account_id)
@property
@pulumi.getter(name="backupDriveManifest")
def backup_drive_manifest(self) -> Optional[bool]:
"""
Default value is false. Indicates whether the manifest files on the drives should be copied to block blobs.
"""
return pulumi.get(self, "backup_drive_manifest")
@property
@pulumi.getter(name="cancelRequested")
def cancel_requested(self) -> Optional[bool]:
"""
Indicates whether a request has been submitted to cancel the job.
"""
return pulumi.get(self, "cancel_requested")
@property
@pulumi.getter(name="deliveryPackage")
def delivery_package(self) -> Optional['outputs.PackageInfomationResponse']:
"""
Contains information about the package being shipped by the customer to the Microsoft data center.
"""
return pulumi.get(self, "delivery_package")
@property
@pulumi.getter(name="diagnosticsPath")
def diagnostics_path(self) -> Optional[str]:
"""
The virtual blob directory to which the copy logs and backups of drive manifest files (if enabled) will be stored.
"""
return pulumi.get(self, "diagnostics_path")
@property
@pulumi.getter(name="driveList")
def drive_list(self) -> Optional[Sequence['outputs.DriveStatusResponse']]:
"""
List of up to ten drives that comprise the job. The drive list is a required element for an import job; it is not specified for export jobs.
"""
return pulumi.get(self, "drive_list")
@property
@pulumi.getter
def export(self) -> Optional['outputs.ExportResponse']:
"""
A property containing information about the blobs to be exported for an export job. This property is included for export jobs only.
"""
return pulumi.get(self, "export")
@property
@pulumi.getter(name="incompleteBlobListUri")
def incomplete_blob_list_uri(self) -> Optional[str]:
"""
A blob path that points to a block blob containing a list of blob names that were not exported due to insufficient drive space. If all blobs were exported successfully, then this element is not included in the response.
"""
return pulumi.get(self, "incomplete_blob_list_uri")
@property
@pulumi.getter(name="jobType")
def job_type(self) -> Optional[str]:
"""
The type of job
"""
return pulumi.get(self, "job_type")
@property
@pulumi.getter(name="logLevel")
def log_level(self) -> Optional[str]:
"""
Default value is Error. Indicates whether error logging or verbose logging will be enabled.
"""
return pulumi.get(self, "log_level")
@property
@pulumi.getter(name="percentComplete")
def percent_complete(self) -> Optional[int]:
"""
Overall percentage completed for the job.
"""
return pulumi.get(self, "percent_complete")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Specifies the provisioning state of the job.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="returnAddress")
def return_address(self) -> Optional['outputs.ReturnAddressResponse']:
"""
Specifies the return address information for the job.
"""
return pulumi.get(self, "return_address")
@property
@pulumi.getter(name="returnPackage")
def return_package(self) -> Optional['outputs.PackageInfomationResponse']:
"""
Contains information about the package being shipped from the Microsoft data center to the customer to return the drives. The format is the same as the deliveryPackage property above. This property is not included if the drives have not yet been returned.
"""
return pulumi.get(self, "return_package")
@property
@pulumi.getter(name="returnShipping")
def return_shipping(self) -> Optional['outputs.ReturnShippingResponse']:
"""
Specifies the return carrier and customer's account with the carrier.
"""
return pulumi.get(self, "return_shipping")
@property
@pulumi.getter(name="shippingInformation")
def shipping_information(self) -> Optional['outputs.ShippingInformationResponse']:
"""
Contains information about the Microsoft datacenter to which the drives should be shipped.
"""
return pulumi.get(self, "shipping_information")
@property
@pulumi.getter
def state(self) -> Optional[str]:
"""
Current state of the job.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="storageAccountId")
def storage_account_id(self) -> Optional[str]:
"""
The resource identifier of the storage account where data will be imported to or exported from.
"""
return pulumi.get(self, "storage_account_id")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
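# Illustrative sketch (an assumption, not part of the generated SDK): how a
# caller might summarize one of these JobDetailsResponse outputs once it has
# been retrieved, e.g. from an import/export job resource.
def summarize_job_details(job_details: JobDetailsResponse) -> str:
    parts = [
        "type=%s" % (job_details.job_type or "unknown"),
        "state=%s" % (job_details.state or "unknown"),
        "complete=%s%%" % (job_details.percent_complete or 0),
    ]
    if job_details.drive_list:
        parts.append("drives=%d" % len(job_details.drive_list))
    return ", ".join(parts)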
@pulumi.output_type
class PackageInfomationResponse(dict):
"""
Contains information about the package being shipped by the customer to the Microsoft data center.
"""
def __init__(__self__, *,
carrier_name: str,
drive_count: int,
ship_date: str,
tracking_number: str):
"""
Contains information about the package being shipped by the customer to the Microsoft data center.
:param str carrier_name: The name of the carrier that is used to ship the import or export drives.
:param int drive_count: The number of drives included in the package.
:param str ship_date: The date when the package is shipped.
:param str tracking_number: The tracking number of the package.
"""
pulumi.set(__self__, "carrier_name", carrier_name)
pulumi.set(__self__, "drive_count", drive_count)
pulumi.set(__self__, "ship_date", ship_date)
pulumi.set(__self__, "tracking_number", tracking_number)
@property
@pulumi.getter(name="carrierName")
def carrier_name(self) -> str:
"""
The name of the carrier that is used to ship the import or export drives.
"""
return pulumi.get(self, "carrier_name")
@property
@pulumi.getter(name="driveCount")
def drive_count(self) -> int:
"""
The number of drives included in the package.
"""
return pulumi.get(self, "drive_count")
@property
@pulumi.getter(name="shipDate")
def ship_date(self) -> str:
"""
The date when the package is shipped.
"""
return pulumi.get(self, "ship_date")
@property
@pulumi.getter(name="trackingNumber")
def tracking_number(self) -> str:
"""
The tracking number of the package.
"""
return pulumi.get(self, "tracking_number")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ReturnAddressResponse(dict):
"""
Specifies the return address information for the job.
"""
def __init__(__self__, *,
city: str,
country_or_region: str,
email: str,
phone: str,
postal_code: str,
recipient_name: str,
street_address1: str,
state_or_province: Optional[str] = None,
street_address2: Optional[str] = None):
"""
Specifies the return address information for the job.
:param str city: The city name to use when returning the drives.
:param str country_or_region: The country or region to use when returning the drives.
:param str email: Email address of the recipient of the returned drives.
:param str phone: Phone number of the recipient of the returned drives.
:param str postal_code: The postal code to use when returning the drives.
:param str recipient_name: The name of the recipient who will receive the hard drives when they are returned.
:param str street_address1: The first line of the street address to use when returning the drives.
:param str state_or_province: The state or province to use when returning the drives.
:param str street_address2: The second line of the street address to use when returning the drives.
"""
pulumi.set(__self__, "city", city)
pulumi.set(__self__, "country_or_region", country_or_region)
pulumi.set(__self__, "email", email)
pulumi.set(__self__, "phone", phone)
pulumi.set(__self__, "postal_code", postal_code)
pulumi.set(__self__, "recipient_name", recipient_name)
pulumi.set(__self__, "street_address1", street_address1)
if state_or_province is not None:
pulumi.set(__self__, "state_or_province", state_or_province)
if street_address2 is not None:
pulumi.set(__self__, "street_address2", street_address2)
@property
@pulumi.getter
def city(self) -> str:
"""
The city name to use when returning the drives.
"""
return pulumi.get(self, "city")
@property
@pulumi.getter(name="countryOrRegion")
def country_or_region(self) -> str:
"""
The country or region to use when returning the drives.
"""
return pulumi.get(self, "country_or_region")
@property
@pulumi.getter
def email(self) -> str:
"""
Email address of the recipient of the returned drives.
"""
return pulumi.get(self, "email")
@property
@pulumi.getter
def phone(self) -> str:
"""
Phone number of the recipient of the returned drives.
"""
return pulumi.get(self, "phone")
@property
@pulumi.getter(name="postalCode")
def postal_code(self) -> str:
"""
The postal code to use when returning the drives.
"""
return pulumi.get(self, "postal_code")
@property
@pulumi.getter(name="recipientName")
def recipient_name(self) -> str:
"""
The name of the recipient who will receive the hard drives when they are returned.
"""
return pulumi.get(self, "recipient_name")
@property
@pulumi.getter(name="streetAddress1")
def street_address1(self) -> str:
"""
The first line of the street address to use when returning the drives.
"""
return pulumi.get(self, "street_address1")
@property
@pulumi.getter(name="stateOrProvince")
def state_or_province(self) -> Optional[str]:
"""
The state or province to use when returning the drives.
"""
return pulumi.get(self, "state_or_province")
@property
@pulumi.getter(name="streetAddress2")
def street_address2(self) -> Optional[str]:
"""
The second line of the street address to use when returning the drives.
"""
return pulumi.get(self, "street_address2")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ReturnShippingResponse(dict):
"""
Specifies the return carrier and customer's account with the carrier.
"""
def __init__(__self__, *,
carrier_account_number: str,
carrier_name: str):
"""
Specifies the return carrier and customer's account with the carrier.
:param str carrier_account_number: The customer's account number with the carrier.
:param str carrier_name: The carrier's name.
"""
pulumi.set(__self__, "carrier_account_number", carrier_account_number)
pulumi.set(__self__, "carrier_name", carrier_name)
@property
@pulumi.getter(name="carrierAccountNumber")
def carrier_account_number(self) -> str:
"""
The customer's account number with the carrier.
"""
return pulumi.get(self, "carrier_account_number")
@property
@pulumi.getter(name="carrierName")
def carrier_name(self) -> str:
"""
The carrier's name.
"""
return pulumi.get(self, "carrier_name")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ShippingInformationResponse(dict):
"""
Contains information about the Microsoft datacenter to which the drives should be shipped.
"""
def __init__(__self__, *,
city: str,
country_or_region: str,
postal_code: str,
recipient_name: str,
state_or_province: str,
street_address1: str,
phone: Optional[str] = None,
street_address2: Optional[str] = None):
"""
Contains information about the Microsoft datacenter to which the drives should be shipped.
:param str city: The city name to use when returning the drives.
:param str country_or_region: The country or region to use when returning the drives.
:param str postal_code: The postal code to use when returning the drives.
:param str recipient_name: The name of the recipient who will receive the hard drives when they are returned.
:param str state_or_province: The state or province to use when returning the drives.
:param str street_address1: The first line of the street address to use when returning the drives.
:param str phone: Phone number of the recipient of the returned drives.
:param str street_address2: The second line of the street address to use when returning the drives.
"""
pulumi.set(__self__, "city", city)
pulumi.set(__self__, "country_or_region", country_or_region)
pulumi.set(__self__, "postal_code", postal_code)
pulumi.set(__self__, "recipient_name", recipient_name)
pulumi.set(__self__, "state_or_province", state_or_province)
pulumi.set(__self__, "street_address1", street_address1)
if phone is not None:
pulumi.set(__self__, "phone", phone)
if street_address2 is not None:
pulumi.set(__self__, "street_address2", street_address2)
@property
@pulumi.getter
def city(self) -> str:
"""
The city name to use when returning the drives.
"""
return pulumi.get(self, "city")
@property
@pulumi.getter(name="countryOrRegion")
def country_or_region(self) -> str:
"""
The country or region to use when returning the drives.
"""
return pulumi.get(self, "country_or_region")
@property
@pulumi.getter(name="postalCode")
def postal_code(self) -> str:
"""
The postal code to use when returning the drives.
"""
return pulumi.get(self, "postal_code")
@property
@pulumi.getter(name="recipientName")
def recipient_name(self) -> str:
"""
The name of the recipient who will receive the hard drives when they are returned.
"""
return pulumi.get(self, "recipient_name")
@property
@pulumi.getter(name="stateOrProvince")
def state_or_province(self) -> str:
"""
The state or province to use when returning the drives.
"""
return pulumi.get(self, "state_or_province")
@property
@pulumi.getter(name="streetAddress1")
def street_address1(self) -> str:
"""
The first line of the street address to use when returning the drives.
"""
return pulumi.get(self, "street_address1")
@property
@pulumi.getter
def phone(self) -> Optional[str]:
"""
Phone number of the recipient of the returned drives.
"""
return pulumi.get(self, "phone")
@property
@pulumi.getter(name="streetAddress2")
def street_address2(self) -> Optional[str]:
"""
The second line of the street address to use when returning the drives.
"""
return pulumi.get(self, "street_address2")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| 40.611041
| 319
| 0.651157
|
78ec3c249f67bc2058141e704634f42967c3cc0d
| 653
|
py
|
Python
|
python/sorting/quicksort_test.py
|
giubueno/algorithms
|
950730befc4db7e85838e93d2bd4068abaa176a7
|
[
"MIT"
] | 2
|
2021-02-26T11:51:29.000Z
|
2021-02-26T11:51:43.000Z
|
python/sorting/quicksort_test.py
|
giubueno/algorithms
|
950730befc4db7e85838e93d2bd4068abaa176a7
|
[
"MIT"
] | null | null | null |
python/sorting/quicksort_test.py
|
giubueno/algorithms
|
950730befc4db7e85838e93d2bd4068abaa176a7
|
[
"MIT"
] | null | null | null |
import unittest
from Quicksort import sortArray
class TestStringMethods(unittest.TestCase):
def test_quicksort_simple(self):
self.assertListEqual(sortArray([1,3,5,0]), [0,1,3,5])
    def test_quicksort_repetition(self):
self.assertListEqual(sortArray([0,3,5,0]), [0,0,3,5])
def test_quicksort_sorted(self):
self.assertListEqual(sortArray([0,1,2,3,4]), [0,1,2,3,4])
def test_quicksort_reverse(self):
self.assertListEqual(sortArray([4,3,2,1,0]), [0,1,2,3,4])
def test_quicksort_bin(self):
self.assertListEqual(sortArray([1,0,0,1,1]), [0,0,1,1,1])
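# Hedged sketch (assumption): the Quicksort module imported above is not part of
# this record, so its implementation is unknown. A minimal quicksort with the same
# interface as sortArray might look like this reference version; the real module
# may differ.
def _reference_sort_array(nums):
    # Return a new sorted list, using the first element as the pivot.
    if len(nums) <= 1:
        return list(nums)
    pivot, rest = nums[0], nums[1:]
    smaller = [x for x in rest if x < pivot]
    larger_or_equal = [x for x in rest if x >= pivot]
    return _reference_sort_array(smaller) + [pivot] + _reference_sort_array(larger_or_equal)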
if __name__ == '__main__':
unittest.main()
| 29.681818
| 65
| 0.669219
|
ceee26367273e233da6b1a8de4405aa732359f5a
| 1,409
|
py
|
Python
|
datastore/shared/util/__init__.py
|
jsangmeister/openslides-datastore-service
|
7170f008ccac0b31c37ffeee083b972bc314660d
|
[
"MIT"
] | 2
|
2020-01-20T13:56:28.000Z
|
2020-02-17T10:56:26.000Z
|
datastore/shared/util/__init__.py
|
jsangmeister/openslides-datastore-service
|
7170f008ccac0b31c37ffeee083b972bc314660d
|
[
"MIT"
] | 122
|
2020-01-16T15:13:37.000Z
|
2022-03-17T10:32:47.000Z
|
datastore/shared/util/__init__.py
|
jsangmeister/openslides-datastore-service
|
7170f008ccac0b31c37ffeee083b972bc314660d
|
[
"MIT"
] | 7
|
2020-02-20T12:04:17.000Z
|
2021-11-23T17:54:33.000Z
|
from ..typing import JSON, Collection, Field, Fqid, Id, Model, Position # noqa
from .deleted_models_behaviour import ( # noqa
DeletedModelsBehaviour,
get_exception_for_deleted_models_behaviour,
)
from .exceptions import ( # noqa
BadCodingError,
DatastoreException,
DatastoreNotEmpty,
InvalidDatastoreState,
InvalidFormat,
ModelDoesNotExist,
ModelExists,
ModelLocked,
ModelNotDeleted,
)
from .filter import ( # noqa
And,
Filter,
FilterOperator,
Not,
Or,
filter_definitions_schema,
)
from .key_strings import ( # noqa
KEYSEPARATOR,
META_DELETED,
META_FIELD_PREFIX,
META_POSITION,
is_reserved_field,
strip_reserved_fields,
)
from .key_transforms import ( # noqa
collection_and_id_from_fqid,
collection_from_collectionfield,
collection_from_fqid,
collectionfield_and_fqid_from_fqfield,
collectionfield_from_fqid_and_field,
field_from_collectionfield,
fqfield_from_fqid_and_field,
fqid_from_collection_and_id,
id_from_fqid,
)
from .key_types import ( # noqa
KEY_TYPE,
InvalidKeyFormat,
assert_is_collection,
assert_is_collectionfield,
assert_is_field,
assert_is_fqfield,
assert_is_fqid,
assert_is_id,
assert_string,
get_key_type,
)
from .logging import logger # noqa
from .self_validating_dataclass import SelfValidatingDataclass # noqa
| 24.293103
| 79
| 0.741661
|
7e9a70a8f79e27679402473b54012b84c6e87357
| 1,725
|
py
|
Python
|
backend-tests/tests/api/deviceauth.py
|
spockfish/mender-integration
|
09441e4ec88c837605aa12d70db4faa8f1a16b59
|
[
"Apache-2.0"
] | null | null | null |
backend-tests/tests/api/deviceauth.py
|
spockfish/mender-integration
|
09441e4ec88c837605aa12d70db4faa8f1a16b59
|
[
"Apache-2.0"
] | null | null | null |
backend-tests/tests/api/deviceauth.py
|
spockfish/mender-integration
|
09441e4ec88c837605aa12d70db4faa8f1a16b59
|
[
"Apache-2.0"
] | 1
|
2019-05-10T14:25:13.000Z
|
2019-05-10T14:25:13.000Z
|
# Copyright 2018 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA256
from base64 import b64encode, urlsafe_b64decode, urlsafe_b64encode
import json
import api.client
URL_MGMT = api.client.GATEWAY_URL + '/api/management/v1/devauth'
URL_DEVICES = api.client.GATEWAY_URL + '/api/devices/v1/authentication'
URL_LIST_DEVICES = '/devices'
URL_AUTH_REQS = '/auth_requests'
def auth_req(id_data, pubkey, privkey, tenant_token=''):
payload = {
"id_data": json.dumps(id_data),
"tenant_token": tenant_token,
"pubkey": pubkey,
}
signature = sign_data(json.dumps(payload), privkey)
return payload, {'X-MEN-Signature': signature}
def get_keypair():
private = RSA.generate(1024)
public = private.publickey()
return private.exportKey().decode(), public.exportKey().decode()
def sign_data(data, privkey):
rsakey = RSA.importKey(privkey)
signer = PKCS1_v1_5.new(rsakey)
digest = SHA256.new()
if type(data) is str:
data = data.encode()
digest.update(data)
sign = signer.sign(digest)
return b64encode(sign)
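# Illustrative usage sketch (an assumption, not part of the original helpers):
# build and submit a device authentication request. `requests_session` is a
# hypothetical requests.Session-like object supplied by the caller.
def example_submit_auth_request(requests_session, id_data=None, tenant_token=''):
    id_data = id_data or {'mac': 'de:ad:be:ef:00:01'}
    privkey, pubkey = get_keypair()
    payload, signature_header = auth_req(id_data, pubkey, privkey, tenant_token)
    return requests_session.post(URL_DEVICES + URL_AUTH_REQS,
                                 data=json.dumps(payload),
                                 headers=signature_header,
                                 verify=False)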
| 33.173077
| 77
| 0.717101
|
e94ef6a926e4f152f4b62d0b6c4e3cb379ebd6bc
| 3,065
|
py
|
Python
|
graphviz/lang.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1
|
2022-01-25T22:52:58.000Z
|
2022-01-25T22:52:58.000Z
|
graphviz/lang.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
graphviz/lang.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
# lang.py - dot language creation helpers
"""Quote strings to be valid DOT identifiers, assemble attribute lists."""
import re
from . import tools
__all__ = ['quote', 'quote_edge', 'a_list', 'attr_list']
# http://www.graphviz.org/doc/info/lang.html
ID = re.compile(r'([a-zA-Z_][a-zA-Z0-9_]*|-?(\.\d+|\d+(\.\d*)?))$')
KEYWORD = re.compile(r'((node)|(edge)|(graph)|(digraph)|(subgraph)|(strict))$', re.IGNORECASE)
HTML_STRING = re.compile(r'<.*>$', re.DOTALL)
COMPASS = re.compile(r'((n)|(ne)|(e)|(se)|(s)|(sw)|(w)|(nw)|(c)|(_))$')
def quote(identifier,
valid_id=ID.match, dot_keyword=KEYWORD.match, html=HTML_STRING.match):
"""Return DOT identifier from string, quote if needed.
>>> quote('')
'""'
>>> quote('spam')
'spam'
>>> quote('spam spam')
'"spam spam"'
>>> quote('-4.2')
'-4.2'
>>> quote('.42')
'.42'
>>> quote('<<b>spam</b>>')
'<<b>spam</b>>'
"""
if html(identifier):
pass
elif not valid_id(identifier) or dot_keyword(identifier):
return '"%s"' % identifier.replace('"', '\\"')
return identifier
def quote_edge(identifier):
"""Return DOT edge statement node_id from string, quote if needed.
>>> quote_edge('spam')
'spam'
>>> quote_edge('spam spam:eggs eggs')
'"spam spam":"eggs eggs"'
>>> quote_edge('spam:eggs:s')
'spam:eggs:s'
"""
node, _, rest = identifier.partition(':')
parts = [quote(node)]
if rest:
port, _, compass = rest.partition(':')
parts.append(quote(port))
if compass:
parts.append(compass)
return ':'.join(parts)
def a_list(label=None, kwargs=None, attributes=None):
"""Return assembled DOT a_list string.
>>> a_list('spam', {'spam': None, 'ham': 'ham ham', 'eggs': ''})
'label=spam eggs="" ham="ham ham"'
"""
result = ['label=%s' % quote(label)] if label is not None else []
if kwargs:
items = ['%s=%s' % (quote(k), quote(v))
for k, v in tools.mapping_items(kwargs) if v is not None]
result.extend(items)
if attributes:
if hasattr(attributes, 'items'):
attributes = tools.mapping_items(attributes)
items = ['%s=%s' % (quote(k), quote(v))
for k, v in attributes if v is not None]
result.extend(items)
return ' '.join(result)
def attr_list(label=None, kwargs=None, attributes=None):
"""Return assembled DOT attribute list string.
Sorts kwargs and attributes if they are plain dicts (to avoid
unpredictable order from hash randomization in Python 3 versions).
>>> attr_list()
''
>>> attr_list('spam spam', kwargs={'eggs': 'eggs', 'ham': 'ham ham'})
' [label="spam spam" eggs=eggs ham="ham ham"]'
>>> attr_list(kwargs={'spam': None, 'eggs': ''})
' [eggs=""]'
"""
content = a_list(label, kwargs, attributes)
if not content:
return ''
return ' [%s]' % content
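# Illustrative usage sketch (an assumption, not part of the module): combining
# the helpers above to assemble a complete DOT node statement.
def example_node_statement():
    name = quote_edge('spam spam:eggs eggs')   # -> '"spam spam":"eggs eggs"'
    attrs = attr_list(label='hello world', kwargs={'shape': 'box'})
    return name + attrs   # -> '"spam spam":"eggs eggs" [label="hello world" shape=box]'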
| 27.366071
| 95
| 0.555628
|
901c98564713e19b7ad2c3415461ff46eae08ecc
| 8,664
|
py
|
Python
|
python/cusignal/convolution/correlate.py
|
sean-frye/cusignal
|
5e12771ca47e7ee653ebe79b236f86ce428ace84
|
[
"Apache-2.0"
] | null | null | null |
python/cusignal/convolution/correlate.py
|
sean-frye/cusignal
|
5e12771ca47e7ee653ebe79b236f86ce428ace84
|
[
"Apache-2.0"
] | null | null | null |
python/cusignal/convolution/correlate.py
|
sean-frye/cusignal
|
5e12771ca47e7ee653ebe79b236f86ce428ace84
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cupy as cp
from . import _convolution_cuda
from .convolve import convolve
from .convolution_utils import _reverse_and_conj, _inputs_swap_needed
_modedict = {"valid": 0, "same": 1, "full": 2}
def correlate(
in1, in2, mode="full", method="auto",
):
r"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
method : str {'auto', 'direct', 'fft'}, optional
A string indicating which method to use to calculate the correlation.
``direct``
The correlation is determined directly from sums, the definition of
correlation.
``fft``
The Fast Fourier Transform is used to perform the correlation more
quickly (only available for numerical arrays.)
``auto``
Automatically chooses direct or Fourier method based on an estimate
of which is faster (default). See `convolve` Notes for more detail.
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
See Also
--------
choose_conv_method : contains more documentation on `method`.
Notes
-----
The correlation z of two d-dimensional arrays x and y is defined as::
z[...,k,...] =
sum[..., i_l, ...] x[..., i_l,...] * conj(y[..., i_l - k,...])
This way, if x and y are 1-D arrays and ``z = correlate(x, y, 'full')``
then
.. math::
z[k] = (x * y)(k - N + 1)
= \sum_{l=0}^{||x||-1}x_l y_{l-k+N-1}^{*}
for :math:`k = 0, 1, ..., ||x|| + ||y|| - 2`
where :math:`||x||` is the length of ``x``, :math:`N = \max(||x||,||y||)`,
and :math:`y_m` is 0 when m is outside the range of y.
``method='fft'`` only works for numerical arrays as it relies on
`fftconvolve`. In certain cases (i.e., arrays of objects or when
rounding integers can lose precision), ``method='direct'`` is always used.
Examples
--------
Implement a matched filter using cross-correlation, to recover a signal
that has passed through a noisy channel.
>>> import cusignal
>>> import cupy as cp
>>> sig = cp.repeat([0., 1., 1., 0., 1., 0., 0., 1.], 128)
>>> sig_noise = sig + cp.random.randn(len(sig))
>>> corr = cusignal.correlate(sig_noise, cp.ones(128), mode='same') / 128
>>> import matplotlib.pyplot as plt
>>> clock = cp.arange(64, len(sig), 128)
    >>> fig, (ax_orig, ax_noise, ax_corr) = plt.subplots(3, 1, sharex=True)
>>> ax_orig.plot(sig)
>>> ax_orig.plot(clock, sig[clock], 'ro')
>>> ax_orig.set_title('Original signal')
>>> ax_noise.plot(cp.asnumpy(sig_noise))
>>> ax_noise.set_title('Signal with noise')
>>> ax_corr.plot(cp.asnumpy(corr))
>>> ax_corr.plot(cp.asnumpy(clock), cp.asnumpy(corr[clock]), 'ro')
>>> ax_corr.axhline(0.5, ls=':')
>>> ax_corr.set_title('Cross-correlated with rectangular pulse')
>>> ax_orig.margins(0, 0.1)
>>> fig.tight_layout()
>>> fig.show()
"""
in1 = cp.asarray(in1)
in2 = cp.asarray(in2)
if in1.ndim == in2.ndim == 0:
return in1 * in2.conj()
elif in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
# this either calls fftconvolve or this function with method=='direct'
if method in ("fft", "auto"):
return convolve(in1, _reverse_and_conj(in2), mode, method)
elif method == "direct":
if in1.ndim > 1:
raise ValueError("Direct method is only implemented for 1D")
swapped_inputs = in2.size > in1.size
if swapped_inputs:
in1, in2 = in2, in1
return _convolution_cuda._convolve(
in1, in2, False, swapped_inputs, mode
)
else:
raise ValueError(
"Acceptable method flags are 'auto'," " 'direct', or 'fft'."
)
def correlate2d(
in1, in2, mode="full", boundary="fill", fillvalue=0,
):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
must be at least as large as the other in every dimension.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Examples
--------
Use 2D cross-correlation to find the location of a template in a noisy
image:
>>> import cusignal
>>> import cupy as cp
>>> from scipy import misc
>>> face = cp.asarray(misc.face(gray=True) - misc.face(gray=True).mean())
>>> template = cp.copy(face[300:365, 670:750]) # right eye
>>> template -= template.mean()
>>> face = face + cp.random.randn(*face.shape) * 50 # add noise
>>> corr = cusignal.correlate2d(face, template, boundary='symm', \
mode='same')
>>> y, x = cp.unravel_index(cp.argmax(corr), corr.shape) # find the match
>>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1, figsize=(6, 15))
>>> ax_orig.imshow(cp.asnumpy(face), cmap='gray')
>>> ax_orig.set_title('Original')
>>> ax_orig.set_axis_off()
>>> ax_template.imshow(cp.asnumpy(template), cmap='gray')
>>> ax_template.set_title('Template')
>>> ax_template.set_axis_off()
>>> ax_corr.imshow(cp.asnumpy(corr), cmap='gray')
>>> ax_corr.set_title('Cross-correlation')
>>> ax_corr.set_axis_off()
>>> ax_orig.plot(cp.asnumpy(x), cp.asnumpy(y), 'ro')
>>> fig.show()
"""
in1 = cp.asarray(in1)
in2 = cp.asarray(in2)
if not in1.ndim == in2.ndim == 2:
raise ValueError("correlate2d inputs must both be 2D arrays")
swapped_inputs = _inputs_swap_needed(mode, in1.shape, in2.shape)
if swapped_inputs:
in1, in2 = in2, in1
out = _convolution_cuda._convolve2d(
in1, in2.conj(), 0, mode, boundary, fillvalue,
)
if swapped_inputs:
out = out[::-1, ::-1]
return out
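# Minimal usage sketch (assumption: a CUDA-capable GPU with cupy and cusignal
# installed). It mirrors the docstring examples above in a compact, runnable form.
def _example_correlations():
    a = cp.asarray([1.0, 2.0, 3.0])
    b = cp.asarray([0.0, 1.0, 0.5])
    one_d = correlate(a, b, mode="full", method="direct")
    image = cp.random.randn(32, 32)
    template = image[8:16, 8:16]
    two_d = correlate2d(image, template, mode="same", boundary="symm")
    return one_d, two_d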
| 34.245059
| 79
| 0.610688
|
e756467804095e36c04994392f239e08007f49cf
| 331
|
py
|
Python
|
own_practice/one_eleven.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | null | null | null |
own_practice/one_eleven.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | 4
|
2019-11-07T12:32:19.000Z
|
2020-07-19T14:04:44.000Z
|
own_practice/one_eleven.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | 5
|
2019-12-04T15:56:55.000Z
|
2022-01-14T06:19:18.000Z
|
u"""
Programming exercise 1-11: population projection.
The US Census Bureau projects the population based on the following assumptions:
- one birth every 7 seconds
- one death every 13 seconds
- one new immigrant every 45 seconds
Write a program that displays the population for each of the next five years.
Assume the current population is 312,032,486 and that a year has 365 days.
Hint: in Python, dividing two integers with // keeps only the integer part.
For example, 5 // 4 is 1 (not 1.25) and 10 // 4 is 2 (not 2.5).
"""
SECONDS_PER_YEAR = 365 * 24 * 3600
population = 312032486
for year in range(1, 6):
    population += (SECONDS_PER_YEAR // 7
                   - SECONDS_PER_YEAR // 13
                   + SECONDS_PER_YEAR // 45)
    print("Year %d: %d" % (year, population))
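# Worked check (illustrative): one year has 365 * 24 * 3600 = 31,536,000 seconds, so
#   births   = 31,536,000 // 7  = 4,505,142
#   deaths   = 31,536,000 // 13 = 2,425,846
#   arrivals = 31,536,000 // 45 = 700,800
# net change per year = 4,505,142 - 2,425,846 + 700,800 = 2,780,096,
# giving 312,032,486 + 2,780,096 = 314,812,582 after the first year.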
| 20.6875
| 71
| 0.664653
|
4363d545194d4331bed7de71c0b6e6f8c4d9718e
| 1,805
|
py
|
Python
|
FeiZhai/OK/ZiXun/dongmanzixun/dongmanzixun/spiders/dongzi.py
|
FSen0/FeiZhai
|
5fa635551066a1ba2866b345b39ecf13ef070103
|
[
"Apache-2.0"
] | null | null | null |
FeiZhai/OK/ZiXun/dongmanzixun/dongmanzixun/spiders/dongzi.py
|
FSen0/FeiZhai
|
5fa635551066a1ba2866b345b39ecf13ef070103
|
[
"Apache-2.0"
] | null | null | null |
FeiZhai/OK/ZiXun/dongmanzixun/dongmanzixun/spiders/dongzi.py
|
FSen0/FeiZhai
|
5fa635551066a1ba2866b345b39ecf13ef070103
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
from dongmanzixun.items import DongmanzixunItem
class DongziSpider(scrapy.Spider):
name = 'dongzi'
allowed_domains = ['news.comicst.com']
start_urls = ['http://news.comicst.com/']
page = 1
url = 'http://news.comicst.com/index.php?page={}'
def parse(self, response):
        # create an Item object
item = DongmanzixunItem()
        # first find all the news entries; /div[3]/div[1]/text() holds all the titles
li_list = response.xpath('//dl[@class="bbdayy cl"]')
        # iterate over the entries
for li in li_list:
            # grab the summary text
dong_jian = response.xpath('//dd[@class="xs2 cl"]/a/text()').extract_first()
            # link from the news index page to the detail page
u = li.xpath('./dt/a/@href').extract_first()
yield scrapy.Request(url=u,callback=self.parse_detail,meta={'item':item})
if self.page < 5:
self.page += 1
hou_url = self.url.format(self.page)
yield scrapy.Request(url=hou_url,callback=self.parse)
def parse_detail(self,response):
        # the item passed along via meta
item = response.meta['item']
        # news category
item['dong_type'] = '1'
        # image on the news index page
# item['dong_first_url'] = response.xpath('./div[2]/a/img/@src').extract_first()
        # news title
item['title'] = response.xpath('//div[@class="h hm"]/h1[1]/text()').extract_first()
        # publication time
item['dong_tiem'] = response.xpath('//div[@class="h hm"]/p/text()').extract_first()
        # author
item['dong_author'] = response.xpath('//div[@class="h hm"]/p/a/text()').extract_first()
        # article content
item['dong_content'] = response.xpath('//td[@id="article_content"]//text()').extract()
        # image URL on the article page
item['dong_url'] = response.xpath('//td[@id="article_content"]/p[2]/font/img/@src').extract_first()
yield item
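# Usage sketch (assumption): run the spider from the Scrapy project root, e.g.
#   scrapy crawl dongzi -o dongzi_news.json
# which writes the yielded items to a JSON feed file.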
| 35.392157
| 107
| 0.573407
|
cd5af835269313f16b34f389955041a7ae7e45d5
| 41,423
|
py
|
Python
|
tensorflow/python/framework/tensor_util.py
|
TOT0RoKR/tensorflow
|
12c2babf7dccc00c13d6e297c0f792f89f7408aa
|
[
"Apache-2.0"
] | 10
|
2021-05-25T17:43:04.000Z
|
2022-03-08T10:46:09.000Z
|
tensorflow/python/framework/tensor_util.py
|
CaptainGizzy21/tensorflow
|
3457a2b122e50b4d44ceaaed5a663d635e5c22df
|
[
"Apache-2.0"
] | 1,056
|
2019-12-15T01:20:31.000Z
|
2022-02-10T02:06:28.000Z
|
tensorflow/python/framework/tensor_util.py
|
CaptainGizzy21/tensorflow
|
3457a2b122e50b4d44ceaaed5a663d635e5c22df
|
[
"Apache-2.0"
] | 6
|
2016-09-07T04:00:15.000Z
|
2022-01-12T01:47:38.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to create TensorProtos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.types import core
from tensorflow.python.types import internal
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
# Fallback in case fast_tensor_util is not properly compiled.
# pylint: disable=g-import-not-at-top
try:
from tensorflow.python.framework import fast_tensor_util
_FAST_TENSOR_UTIL_AVAILABLE = True
except ImportError:
_FAST_TENSOR_UTIL_AVAILABLE = False
# pylint: enable=g-import-not-at-top
def ExtractBitsFromFloat16(x):
return np.asarray(x, dtype=np.float16).view(np.uint16).item()
def SlowAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.half_val.extend(
[ExtractBitsFromFloat16(x) for x in proto_values])
def _MediumAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
# TODO: Remove the conversion if cython supports np.float16_t
fast_tensor_util.AppendFloat16ArrayToTensorProto(
tensor_proto,
np.asarray(proto_values, dtype=np.float16).view(np.uint16))
def ExtractBitsFromBFloat16(x):
return np.asarray(
x, dtype=dtypes.bfloat16.as_numpy_dtype).view(np.uint16).item()
def SlowAppendBFloat16ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.half_val.extend(
[ExtractBitsFromBFloat16(x) for x in proto_values])
def FastAppendBFloat16ArrayToTensorProto(tensor_proto, proto_values):
fast_tensor_util.AppendBFloat16ArrayToTensorProto(
tensor_proto, np.asarray(
proto_values, dtype=dtypes.bfloat16.as_numpy_dtype).view(np.uint16))
if _FAST_TENSOR_UTIL_AVAILABLE:
_NP_TO_APPEND_FN = {
dtypes.bfloat16.as_numpy_dtype:
FastAppendBFloat16ArrayToTensorProto,
np.float16:
_MediumAppendFloat16ArrayToTensorProto,
np.float32:
fast_tensor_util.AppendFloat32ArrayToTensorProto,
np.float64:
fast_tensor_util.AppendFloat64ArrayToTensorProto,
np.int32:
fast_tensor_util.AppendInt32ArrayToTensorProto,
np.int64:
fast_tensor_util.AppendInt64ArrayToTensorProto,
np.uint8:
fast_tensor_util.AppendUInt8ArrayToTensorProto,
np.uint16:
fast_tensor_util.AppendUInt16ArrayToTensorProto,
np.uint32:
fast_tensor_util.AppendUInt32ArrayToTensorProto,
np.uint64:
fast_tensor_util.AppendUInt64ArrayToTensorProto,
np.int8:
fast_tensor_util.AppendInt8ArrayToTensorProto,
np.int16:
fast_tensor_util.AppendInt16ArrayToTensorProto,
np.complex64:
fast_tensor_util.AppendComplex64ArrayToTensorProto,
np.complex128:
fast_tensor_util.AppendComplex128ArrayToTensorProto,
np.object:
fast_tensor_util.AppendObjectArrayToTensorProto,
np.bool:
fast_tensor_util.AppendBoolArrayToTensorProto,
dtypes.qint8.as_numpy_dtype:
fast_tensor_util.AppendInt8ArrayToTensorProto,
dtypes.quint8.as_numpy_dtype:
fast_tensor_util.AppendUInt8ArrayToTensorProto,
dtypes.qint16.as_numpy_dtype:
fast_tensor_util.AppendInt16ArrayToTensorProto,
dtypes.quint16.as_numpy_dtype:
fast_tensor_util.AppendUInt16ArrayToTensorProto,
dtypes.qint32.as_numpy_dtype:
fast_tensor_util.AppendInt32ArrayToTensorProto,
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
}
else:
def SlowAppendFloat32ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.float_val.extend([x.item() for x in proto_values])
def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.double_val.extend([x.item() for x in proto_values])
def SlowAppendIntArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.int_val.extend([x.item() for x in proto_values])
def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.int64_val.extend([x.item() for x in proto_values])
def SlowAppendQIntArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.int_val.extend([x.item()[0] for x in proto_values])
def SlowAppendUInt32ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.uint32_val.extend([x.item() for x in proto_values])
def SlowAppendUInt64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.uint64_val.extend([x.item() for x in proto_values])
def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.scomplex_val.extend(
[v.item() for x in proto_values for v in [x.real, x.imag]])
def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.dcomplex_val.extend(
[v.item() for x in proto_values for v in [x.real, x.imag]])
def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
def SlowAppendBoolArrayToTensorProto(tensor_proto, proto_values):
tensor_proto.bool_val.extend([x.item() for x in proto_values])
_NP_TO_APPEND_FN = {
dtypes.bfloat16.as_numpy_dtype: SlowAppendBFloat16ArrayToTensorProto,
np.float16: SlowAppendFloat16ArrayToTensorProto,
np.float32: SlowAppendFloat32ArrayToTensorProto,
np.float64: SlowAppendFloat64ArrayToTensorProto,
np.int32: SlowAppendIntArrayToTensorProto,
np.int64: SlowAppendInt64ArrayToTensorProto,
np.uint8: SlowAppendIntArrayToTensorProto,
np.uint16: SlowAppendIntArrayToTensorProto,
np.uint32: SlowAppendUInt32ArrayToTensorProto,
np.uint64: SlowAppendUInt64ArrayToTensorProto,
np.int8: SlowAppendIntArrayToTensorProto,
np.int16: SlowAppendIntArrayToTensorProto,
np.complex64: SlowAppendComplex64ArrayToTensorProto,
np.complex128: SlowAppendComplex128ArrayToTensorProto,
np.object: SlowAppendObjectArrayToTensorProto,
np.bool: SlowAppendBoolArrayToTensorProto,
dtypes.qint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.quint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.qint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.quint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
dtypes.qint32.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
# NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
}
def GetFromNumpyDTypeDict(dtype_dict, dtype):
# NOTE: dtype_dict.get(dtype) always returns None.
for key, val in six.iteritems(dtype_dict):
if key == dtype:
return val
return None
def GetNumpyAppendFn(dtype):
  # numpy dtypes for strings are variable length. We cannot compare
  # dtype with a single constant (np.string does not exist) to decide
  # whether dtype is a "string" type. We need to compare dtype.type to be
  # sure it's a string type.
if dtype.type == np.string_ or dtype.type == np.unicode_:
if _FAST_TENSOR_UTIL_AVAILABLE:
return fast_tensor_util.AppendObjectArrayToTensorProto
else:
return SlowAppendObjectArrayToTensorProto
return GetFromNumpyDTypeDict(_NP_TO_APPEND_FN, dtype)
def TensorShapeProtoToList(shape):
"""Convert a TensorShape to a list.
Args:
shape: A TensorShapeProto.
Returns:
List of integers representing the dimensions of the tensor.
"""
return [dim.size for dim in shape.dim]
def _GetDenseDimensions(list_of_lists):
"""Returns the inferred dense dimensions of a list of lists."""
if not isinstance(list_of_lists, (list, tuple)):
return []
elif not list_of_lists:
return [0]
else:
return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])
def _FlattenToStrings(nested_strings):
if isinstance(nested_strings, (list, tuple)):
for inner in nested_strings:
for flattened_string in _FlattenToStrings(inner):
yield flattened_string
else:
yield nested_strings
_TENSOR_CONTENT_TYPES = frozenset([
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8,
dtypes.int16, dtypes.int8, dtypes.int64, dtypes.qint8, dtypes.quint8,
dtypes.qint16, dtypes.quint16, dtypes.qint32, dtypes.uint32, dtypes.uint64
])
# pylint: disable=invalid-name
def _check_failed(v):
# NB. none of the _check_* functions could raise a ValueError, so
# it is safe to use here.
raise ValueError(v)
def _check_quantized(values):
# Cannot rely on `nest` because the leaves are tuples.
if not isinstance(values, (list, tuple)):
_check_failed(values)
if isinstance(values, tuple):
_ = [_check_int(v) for v in values]
else:
_ = [_check_quantized(v) for v in values]
def _generate_isinstance_check(expected_types):
def inner(values):
for v in nest.flatten(values):
if not (isinstance(v, expected_types) or
(isinstance(v, np.ndarray) and
issubclass(v.dtype.type, expected_types))):
_check_failed(v)
return inner
_check_int = _generate_isinstance_check(
(compat.integral_types, tensor_shape.Dimension))
_check_float = _generate_isinstance_check(compat.real_types)
_check_complex = _generate_isinstance_check(compat.complex_types)
_check_str = _generate_isinstance_check(compat.bytes_or_text_types)
_check_bool = _generate_isinstance_check(bool)
def _check_not_tensor(values):
_ = [_check_failed(v) for v in nest.flatten(values)
if isinstance(v, ops.Tensor)]
# pylint: enable=invalid-name
_TF_TO_IS_OK = {
dtypes.bool: _check_bool,
dtypes.complex128: _check_complex,
dtypes.complex64: _check_complex,
dtypes.float16: _check_float,
dtypes.float32: _check_float,
dtypes.float64: _check_float,
dtypes.int16: _check_int,
dtypes.int32: _check_int,
dtypes.int64: _check_int,
dtypes.int8: _check_int,
dtypes.qint16: _check_quantized,
dtypes.qint32: _check_quantized,
dtypes.qint8: _check_quantized,
dtypes.quint16: _check_quantized,
dtypes.quint8: _check_quantized,
dtypes.string: _check_str,
dtypes.uint16: _check_int,
dtypes.uint8: _check_int,
dtypes.uint32: _check_int,
dtypes.uint64: _check_int,
}
def _AssertCompatible(values, dtype):
if dtype is None:
fn = _check_not_tensor
else:
try:
fn = _TF_TO_IS_OK[dtype]
except KeyError:
# There isn't a specific fn, so we try to do the best possible.
if dtype.is_integer:
fn = _check_int
elif dtype.is_floating:
fn = _check_float
elif dtype.is_complex:
fn = _check_complex
elif dtype.is_quantized:
fn = _check_quantized
else:
fn = _check_not_tensor
try:
fn(values)
except ValueError as e:
[mismatch] = e.args
if dtype is None:
raise TypeError("Expected any non-tensor type, got a tensor instead.")
else:
raise TypeError("Expected %s, got %s of type '%s' instead." %
(dtype.name, repr(mismatch), type(mismatch).__name__))
def _is_array_like(obj): # pylint: disable=invalid-name
"""Check if a given object is array-like."""
if isinstance(obj, ops.Tensor) and not isinstance(obj, ops._EagerTensorBase): # pylint: disable=protected-access
# Tensor implements __array__ only so it can inform the user that it is not
# a valid array.
return False
# TODO(slebedev): an object could also implement C-level array interface.
if (callable(getattr(obj, "__array__", None)) or
isinstance(getattr(obj, "__array_interface__", None), dict)):
return True
try:
memoryview(obj)
except TypeError:
return False
else:
return not isinstance(obj, bytes)
# pylint: disable=invalid-name
@tf_export("make_tensor_proto")
def make_tensor_proto(values, dtype=None, shape=None, verify_shape=False,
allow_broadcast=False):
"""Create a TensorProto.
In TensorFlow 2.0, representing tensors as protos should no longer be a
common workflow. That said, this utility function is still useful for
generating TF Serving request protos:
```python
request = tensorflow_serving.apis.predict_pb2.PredictRequest()
request.model_spec.name = "my_model"
request.model_spec.signature_name = "serving_default"
request.inputs["images"].CopyFrom(tf.make_tensor_proto(X_new))
```
`make_tensor_proto` accepts "values" of a python scalar, a python list, a
numpy ndarray, or a numpy scalar.
If "values" is a python scalar or a python list, make_tensor_proto
  first converts it to a numpy ndarray. If dtype is None, the
  conversion tries its best to infer the right numpy data
  type. Otherwise, the resulting numpy array has a data type
  compatible with the given dtype.
  In either case above, the numpy ndarray (either the one the caller provided
  or the auto-converted one) must have a type compatible with dtype.
`make_tensor_proto` then converts the numpy array to a tensor proto.
If "shape" is None, the resulting tensor proto represents the numpy
array precisely.
Otherwise, "shape" specifies the tensor's shape and the numpy array
can not have more elements than what "shape" specifies.
Args:
values: Values to put in the TensorProto.
dtype: Optional tensor_pb2 DataType value.
shape: List of integers representing the dimensions of tensor.
verify_shape: Boolean that enables verification of a shape of values.
allow_broadcast: Boolean that enables allowing scalars and 1 length vector
broadcasting. Cannot be true when verify_shape is true.
Returns:
A `TensorProto`. Depending on the type, it may contain data in the
"tensor_content" attribute, which is not directly useful to Python programs.
To access the values you should convert the proto back to a numpy ndarray
with `tf.make_ndarray(proto)`.
If `values` is a `TensorProto`, it is immediately returned; `dtype` and
`shape` are ignored.
Raises:
TypeError: if unsupported types are provided.
ValueError: if arguments have inappropriate values or if verify_shape is
True and shape of values is not equals to a shape from the argument.
"""
if allow_broadcast and verify_shape:
raise ValueError("allow_broadcast and verify_shape are not both allowed.")
if isinstance(values, tensor_pb2.TensorProto):
return values
if dtype:
dtype = dtypes.as_dtype(dtype)
is_quantized = (
dtype in [
dtypes.qint8, dtypes.quint8, dtypes.qint16, dtypes.quint16,
dtypes.qint32
])
if _is_array_like(values):
values = np.asarray(values)
# We first convert value to a numpy array or scalar.
if isinstance(values, (np.ndarray, np.generic)):
if dtype and dtype.is_numpy_compatible:
nparray = values.astype(dtype.as_numpy_dtype)
else:
nparray = values
else:
if values is None:
raise ValueError("None values not supported.")
# if dtype is provided, forces numpy array to be the type
# provided if possible.
if dtype and dtype.is_numpy_compatible:
np_dt = dtype.as_numpy_dtype
else:
np_dt = None
# If shape is None, numpy.prod returns None when dtype is not set, but
# raises exception when dtype is set to np.int64
if shape is not None and np.prod(shape, dtype=np.int64) == 0:
nparray = np.empty(shape, dtype=np_dt)
else:
_AssertCompatible(values, dtype)
nparray = np.array(values, dtype=np_dt)
    # We need to pass in quantized values as tuples, so don't apply the shape
    # check to them.
if (list(nparray.shape) != _GetDenseDimensions(values) and
not is_quantized):
raise ValueError("""Argument must be a dense tensor: %s"""
""" - got shape %s, but wanted %s.""" %
(values, list(nparray.shape),
_GetDenseDimensions(values)))
# python/numpy default float type is float64. We prefer float32 instead.
if (nparray.dtype == np.float64) and dtype is None:
nparray = nparray.astype(np.float32)
# python/numpy default int type is int64. We prefer int32 instead.
elif (nparray.dtype == np.int64) and dtype is None:
downcasted_array = nparray.astype(np.int32)
# Do not down cast if it leads to precision loss.
if np.array_equal(downcasted_array, nparray):
nparray = downcasted_array
# if dtype is provided, it must be compatible with what numpy
# conversion says.
numpy_dtype = dtypes.as_dtype(nparray.dtype)
if numpy_dtype is None:
raise TypeError("Unrecognized data type: %s" % nparray.dtype)
# If dtype was specified and is a quantized type, we convert
# numpy_dtype back into the quantized version.
if is_quantized:
numpy_dtype = dtype
if dtype is not None and (not hasattr(dtype, "base_dtype") or
dtype.base_dtype != numpy_dtype.base_dtype):
raise TypeError("Incompatible types: %s vs. %s. Value is %s" %
(dtype, nparray.dtype, values))
# If shape is not given, get the shape from the numpy array.
if shape is None:
shape = nparray.shape
is_same_size = True
shape_size = nparray.size
else:
shape = [int(dim) for dim in shape]
shape_size = np.prod(shape, dtype=np.int64)
is_same_size = shape_size == nparray.size
if allow_broadcast:
if nparray.shape == (1,) or nparray.shape == tuple():
pass
elif nparray.size != shape_size:
raise TypeError("Expected Tensor's shape: %s, got %s." %
(tuple(shape), nparray.shape))
else:
if verify_shape and nparray.shape != tuple(shape):
raise TypeError("Expected Tensor's shape: %s, got %s." %
(tuple(shape), nparray.shape))
if nparray.size > shape_size:
raise ValueError(
"Too many elements provided. Needed at most %d, but received %d" %
(shape_size, nparray.size))
tensor_proto = tensor_pb2.TensorProto(
dtype=numpy_dtype.as_datatype_enum,
tensor_shape=tensor_shape.as_shape(shape).as_proto())
if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
if nparray.size * nparray.itemsize >= (1 << 31):
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB.")
tensor_proto.tensor_content = nparray.tobytes()
return tensor_proto
# If we were not given values as a numpy array, compute the proto_values
# from the given values directly, to avoid numpy trimming nulls from the
# strings. Since values could be a list of strings, or a multi-dimensional
# list of lists that might or might not correspond to the given shape,
# we flatten it conservatively.
if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
proto_values = _FlattenToStrings(values)
# At this point, values may be a list of objects that we could not
# identify a common type for (hence it was inferred as
# np.object/dtypes.string). If we are unable to convert it to a
# string, we raise a more helpful error message.
#
# Ideally, we'd be able to convert the elements of the list to a
# common type, but this type inference requires some thinking and
# so we defer it for now.
try:
str_values = [compat.as_bytes(x) for x in proto_values]
except TypeError:
raise TypeError("Failed to convert object of type %s to Tensor. "
"Contents: %s. Consider casting elements to a "
"supported type." % (type(values), values))
tensor_proto.string_val.extend(str_values)
return tensor_proto
# TensorFlow expects C order (a.k.a., eigen row major).
proto_values = nparray.ravel()
append_fn = GetNumpyAppendFn(proto_values.dtype)
if append_fn is None:
raise TypeError(
"Element type not supported in TensorProto: %s" % numpy_dtype.name)
append_fn(tensor_proto, proto_values)
return tensor_proto
# pylint: enable=invalid-name
@tf_export("make_ndarray")
def MakeNdarray(tensor):
"""Create a numpy ndarray from a tensor.
Create a numpy ndarray with the same shape and data as the tensor.
For example:
```python
# Tensor a has shape (2,3)
a = tf.constant([[1,2,3],[4,5,6]])
proto_tensor = tf.make_tensor_proto(a) # convert `tensor a` to a proto tensor
tf.make_ndarray(proto_tensor) # output: array([[1, 2, 3],
# [4, 5, 6]], dtype=int32)
# output has shape (2,3)
```
Args:
tensor: A TensorProto.
Returns:
A numpy array with the tensor contents.
Raises:
TypeError: if tensor has unsupported type.
"""
shape = [d.size for d in tensor.tensor_shape.dim]
num_elements = np.prod(shape, dtype=np.int64)
tensor_dtype = dtypes.as_dtype(tensor.dtype)
dtype = tensor_dtype.as_numpy_dtype
if tensor.tensor_content:
return (np.frombuffer(tensor.tensor_content,
dtype=dtype).copy().reshape(shape))
if tensor_dtype == dtypes.string:
# np.pad throws on these arrays of type np.object.
values = list(tensor.string_val)
padding = num_elements - len(values)
if padding > 0:
last = values[-1] if values else ""
values.extend([last] * padding)
return np.array(values, dtype=dtype).reshape(shape)
if tensor_dtype == dtypes.float16 or tensor_dtype == dtypes.bfloat16:
# the half_val field of the TensorProto stores the binary representation
# of the fp16: we need to reinterpret this as a proper float16
values = np.fromiter(tensor.half_val, dtype=np.uint16)
values.dtype = tensor_dtype.as_numpy_dtype
elif tensor_dtype == dtypes.float32:
values = np.fromiter(tensor.float_val, dtype=dtype)
elif tensor_dtype == dtypes.float64:
values = np.fromiter(tensor.double_val, dtype=dtype)
elif tensor_dtype in [
dtypes.int32, dtypes.uint8, dtypes.uint16, dtypes.int16, dtypes.int8,
dtypes.qint32, dtypes.quint8, dtypes.qint8, dtypes.qint16, dtypes.quint16
]:
values = np.fromiter(tensor.int_val, dtype=dtype)
elif tensor_dtype == dtypes.int64:
values = np.fromiter(tensor.int64_val, dtype=dtype)
elif tensor_dtype == dtypes.uint32:
values = np.fromiter(tensor.uint32_val, dtype=dtype)
elif tensor_dtype == dtypes.uint64:
values = np.fromiter(tensor.uint64_val, dtype=dtype)
elif tensor_dtype == dtypes.complex64:
it = iter(tensor.scomplex_val)
values = np.array([complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype)
elif tensor_dtype == dtypes.complex128:
it = iter(tensor.dcomplex_val)
values = np.array([complex(x[0], x[1]) for x in zip(it, it)], dtype=dtype)
elif tensor_dtype == dtypes.bool:
values = np.fromiter(tensor.bool_val, dtype=dtype)
else:
raise TypeError("Unsupported tensor type: %s" % tensor.dtype)
if values.size == 0:
return np.zeros(shape, dtype)
if values.size != num_elements:
values = np.pad(values, (0, num_elements - values.size), "edge")
return values.reshape(shape)
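# Editor's sketch (not part of the original module): `make_tensor_proto` and
# `MakeNdarray` round-trip an array, assuming a dtype with a dense proto encoding:
#
#   proto = make_tensor_proto(np.arange(6, dtype=np.float32).reshape(2, 3))
#   arr = MakeNdarray(proto)   # -> float32 ndarray of shape (2, 3)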
def ShapeEquals(tensor_proto, shape):
"""Returns True if "tensor_proto" has the given "shape".
Args:
tensor_proto: A TensorProto.
shape: A tensor shape, expressed as a TensorShape, list, or tuple.
Returns:
True if "tensor_proto" has the given "shape", otherwise False.
Raises:
TypeError: If "tensor_proto" is not a TensorProto, or shape is not a
TensorShape, list, or tuple.
"""
if not isinstance(tensor_proto, tensor_pb2.TensorProto):
raise TypeError("tensor_proto is not a tensor_pb2.TensorProto object")
if isinstance(shape, tensor_shape_pb2.TensorShapeProto):
shape = [d.size for d in shape.dim]
elif not isinstance(shape, (list, tuple)):
raise TypeError("shape is not a list or tuple")
tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]
return all(x == y for x, y in zip(tensor_shape_list, shape))
def _ConstantValue(tensor, partial):
# TODO(touts): Support Variables?
if not isinstance(tensor, ops.Tensor):
raise TypeError("%r is not a Tensor, has type %s" % (tensor, type(tensor)))
if tensor.op.type == "Const":
return MakeNdarray(tensor.op.get_attr("value"))
elif tensor.op.type == "Shape":
input_shape = tensor.op.inputs[0].get_shape()
if input_shape.is_fully_defined():
return np.array(
[dim.value for dim in input_shape.dims],
dtype=tensor.dtype.as_numpy_dtype)
else:
return None
elif tensor.op.type == "Size":
input_shape = tensor.op.inputs[0].get_shape()
if input_shape.is_fully_defined():
return np.prod([dim.value for dim in input_shape.dims], dtype=np.int32)
else:
return None
elif tensor.op.type == "Rank":
input_shape = tensor.op.inputs[0].get_shape()
if input_shape.ndims is not None:
return np.ndarray(
shape=(),
buffer=np.array([input_shape.ndims], dtype=np.int32),
dtype=np.int32)
else:
return None
elif tensor.op.type == "Range":
start = constant_value(tensor.op.inputs[0])
if start is None:
return None
limit = constant_value(tensor.op.inputs[1])
if limit is None:
return None
delta = constant_value(tensor.op.inputs[2])
if delta is None:
return None
return np.arange(start, limit, delta, dtype=tensor.dtype.as_numpy_dtype)
elif tensor.op.type == "Cast":
pre_cast = constant_value(tensor.op.inputs[0])
if pre_cast is None:
return None
cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
return pre_cast.astype(cast_dtype.as_numpy_dtype)
elif tensor.op.type == "Concat":
dim = constant_value(tensor.op.inputs[0])
if dim is None:
return None
values = []
for x in tensor.op.inputs[1:]:
value = constant_value(x)
if value is None:
return None
values.append(value)
return np.concatenate(values, axis=dim)
elif tensor.op.type == "ConcatV2":
dim = constant_value(tensor.op.inputs[-1])
if dim is None:
return None
values = []
for x in tensor.op.inputs[:-1]:
value = constant_value(x)
if value is None:
return None
values.append(value)
return np.concatenate(values, axis=dim)
elif tensor.op.type == "Pack":
values = []
# Some imported GraphDefs have Pack ops with zero inputs. Those are invalid
# and shouldn't be produced, but to deal sensibly with them here we check
# and return None.
if not tensor.op.inputs:
return None
# We can't handle axis != 0 Packs at the moment.
if tensor.op.get_attr("axis") != 0:
return None
for x in tensor.op.inputs:
value = constant_value(x, partial)
if value is None and not partial:
return None
values.append(value)
return np.array(values)
elif tensor.op.type == "Unpack":
# We can't handle axis != 0 Unpacks at the moment.
if tensor.op.get_attr("axis") != 0:
return None
value = constant_value(tensor.op.inputs[0], partial)
if value is None:
return None
return value[tensor.value_index]
elif tensor.op.type == "Split":
dim = constant_value(tensor.op.inputs[0])
value = constant_value(tensor.op.inputs[1], partial)
if value is None or dim is None:
return None
split = np.split(value, tensor.op.get_attr("num_split"), dim)
return split[tensor.value_index]
elif tensor.op.type == "Fill":
fill_shape = tensor.shape
fill_value = constant_value(tensor.op.inputs[1])
if fill_shape.is_fully_defined() and fill_value is not None:
return np.full(fill_shape.as_list(), fill_value, dtype=fill_value.dtype)
else:
return None
elif tensor.op.type == "Equal":
value1 = constant_value(tensor.op.inputs[0])
if value1 is None:
return None
value2 = constant_value(tensor.op.inputs[1])
if value2 is None:
return None
return np.equal(value1, value2)
elif tensor.op.type == "NotEqual":
value1 = constant_value(tensor.op.inputs[0])
if value1 is None:
return None
value2 = constant_value(tensor.op.inputs[1])
if value2 is None:
return None
return np.not_equal(value1, value2)
elif tensor.op.type == "StopGradient":
return constant_value(tensor.op.inputs[0], partial)
elif tensor.op.type in ("CheckNumericsV2", "DebugIdentityV2", "Identity"):
return constant_value(tensor.op.inputs[0], partial)
else:
return None
@tf_export("get_static_value")
def constant_value(tensor, partial=False): # pylint: disable=invalid-name
"""Returns the constant value of the given tensor, if efficiently calculable.
This function attempts to partially evaluate the given tensor, and
returns its value as a numpy ndarray if this succeeds.
Example usage:
>>> a = tf.constant(10)
>>> tf.get_static_value(a)
10
>>> b = tf.constant(20)
>>> tf.get_static_value(tf.add(a, b))
30
>>> # `tf.Variable` is not supported.
>>> c = tf.Variable(30)
>>> print(tf.get_static_value(c))
None
Using `partial` option is most relevant when calling `get_static_value` inside
a `tf.function`. Setting it to `True` will return the results but for the
values that cannot be evaluated will be `None`. For example:
```python
class Foo(object):
def __init__(self):
self.a = tf.Variable(1)
self.b = tf.constant(2)
@tf.function
def bar(self, partial):
packed = tf.raw_ops.Pack(values=[self.a, self.b])
static_val = tf.get_static_value(packed, partial=partial)
tf.print(static_val)
f = Foo()
f.bar(partial=True) # `array([None, array(2, dtype=int32)], dtype=object)`
f.bar(partial=False) # `None`
```
Compatibility(V1): If `constant_value(tensor)` returns a non-`None` result, it
will no longer be possible to feed a different value for `tensor`. This allows
the result of this function to influence the graph that is constructed, and
permits static shape optimizations.
Args:
tensor: The Tensor to be evaluated.
partial: If True, the returned numpy array is allowed to have partially
evaluated values. Values that can't be evaluated will be None.
Returns:
A numpy ndarray containing the constant value of the given `tensor`,
or None if it cannot be calculated.
Raises:
TypeError: if tensor is not an ops.Tensor.
"""
if isinstance(tensor, ops.EagerTensor):
try:
return tensor.numpy()
except errors_impl.UnimplementedError:
# Some EagerTensors may not implement .numpy/resolve, e.g. parallel
# tensors with multiple components on different devices.
return None
if not is_tensor(tensor):
return tensor
if not isinstance(tensor, ops.Tensor):
return None
ret = _ConstantValue(tensor, partial)
if ret is not None:
# The caller may now depend on the constant value of `tensor`, so we
# conservatively prevent it from being fed.
tensor.graph.prevent_feeding(tensor)
return ret
def constant_value_as_shape(tensor): # pylint: disable=invalid-name
"""A version of `constant_value()` that returns a `TensorShape`.
This version should be used when a constant tensor value is
interpreted as a (possibly partial) shape, e.g. in the shape
function for `tf.reshape()`. By explicitly requesting a
`TensorShape` as the return value, it is possible to represent
unknown dimensions; by contrast, `constant_value()` is
all-or-nothing.
Args:
tensor: The rank-0 or rank-1 Tensor to be evaluated.
Returns:
A `TensorShape` based on the constant value of the given `tensor`.
Raises:
ValueError: If the shape is rank-0 and is not statically known to be -1.
"""
if isinstance(tensor, ops.EagerTensor):
return tensor_shape.TensorShape(
[dim if dim != -1 else None for dim in tensor.numpy()])
if tensor.get_shape().ndims == 0:
value = constant_value(tensor)
if value is None:
raise ValueError(
"Received a scalar with unknown value as shape; require a statically "
"known scalar with value '-1' to describe an unknown shape.")
if value != -1:
raise ValueError(
"Received a scalar value '%s' as shape; require a statically known "
"scalar with value '-1' to describe an unknown shape." % value)
return tensor_shape.unknown_shape()
shape = tensor.get_shape().with_rank(1)
if shape == [0]:
return tensor_shape.TensorShape([])
elif tensor.op.type == "Cast":
pre_cast = constant_value_as_shape(tensor.op.inputs[0])
if pre_cast.dims is None:
# the input to cast has a totally undefined shape; just return that.
return pre_cast
cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
if cast_dtype not in (dtypes.int32, dtypes.int64):
return tensor_shape.unknown_shape(shape.dims[0].value)
dest_dtype_shape_array = np.array(
[x if x is not None else -1 for x in pre_cast.as_list()]).astype(
cast_dtype.as_numpy_dtype)
return tensor_shape.TensorShape([
x if x >= 0 else None
for x in dest_dtype_shape_array])
elif tensor.op.type == "Shape":
return tensor.op.inputs[0].get_shape()
elif tensor.op.type == "Pack":
ret = tensor_shape.TensorShape([]) # Empty list.
# Since we expect rank 1 inputs, Pack's axis must be zero, otherwise it
# would not be rank 1.
assert tensor.op.get_attr("axis") == 0
for pack_input in tensor.op.inputs:
# `pack_input` must be a scalar. Attempt to evaluate it, and append it
# to `ret`.
pack_input_val = constant_value(pack_input)
if pack_input_val is None or pack_input_val < 0:
new_dim = tensor_shape.Dimension(None)
else:
new_dim = tensor_shape.Dimension(pack_input_val)
ret = ret.concatenate([new_dim])
return ret
elif tensor.op.type == "Concat":
# We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
# the only legal value when concatenating vectors, and it will
# have been checked by a previous shape function.
ret = tensor_shape.TensorShape([]) # Empty list.
for concat_input in tensor.op.inputs[1:]:
# `concat_input` must be a vector. Attempt to evaluate it as a shape,
# and concatenate it with `ret`.
ret = ret.concatenate(constant_value_as_shape(concat_input))
return ret
elif tensor.op.type == "ConcatV2":
# We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is
# the only legal value when concatenating vectors, and it will
# have been checked by a previous shape function.
ret = tensor_shape.TensorShape([]) # Empty list.
for concat_input in tensor.op.inputs[:-1]:
# `concat_input` must be a vector. Attempt to evaluate it as a shape,
# and concatenate it with `ret`.
ret = ret.concatenate(constant_value_as_shape(concat_input))
return ret
elif tensor.op.type == "StridedSlice":
try:
begin = constant_value(tensor.op.inputs[1])
end = constant_value(tensor.op.inputs[2])
strides = constant_value(tensor.op.inputs[3])
if begin is not None and end is not None and strides is not None:
begin = begin[0]
end = end[0]
strides = strides[0]
begin_mask = tensor.op.get_attr("begin_mask")
if begin_mask == 1:
begin = None
end_mask = tensor.op.get_attr("end_mask")
if end_mask == 1:
end = None
ellipsis_mask = tensor.op.get_attr("ellipsis_mask")
new_axis_mask = tensor.op.get_attr("new_axis_mask")
shrink_axis_mask = tensor.op.get_attr("shrink_axis_mask")
valid_attributes = (not ellipsis_mask and not new_axis_mask and
not shrink_axis_mask and (not begin_mask or
(begin_mask == 1)) and
(not end_mask or (end_mask == 1)))
if valid_attributes: # additional inputs not supported
prev = constant_value_as_shape(tensor.op.inputs[0])
prev = prev[begin:end:strides]
ret = tensor_shape.TensorShape(prev)
return ret
except ValueError: # Could come from get_attr or slicing prev.
pass
except TypeError: # Could come from slicing prev.
pass
elif (tensor.op.type == "Placeholder" and
tensor.op.graph.building_function and
hasattr(tensor.op.graph, "internal_captures")):
# If we are inside a FuncGraph try to lookup the constant value of the
# corresponding external capture. Note that we only look at captures and
# not the fed inputs because those can be fed different values in different
# instantiations of the function call or different iterations of a
# tf.while_loop.
for i, capture in enumerate(tensor.op.graph.internal_captures):
if capture is tensor:
external_capture = tensor.op.graph.external_captures[i]
return constant_value_as_shape(external_capture)
ret = tensor_shape.unknown_shape(shape.dims[0].value)
value = constant_value(tensor)
if value is not None:
ret = ret.merge_with(
tensor_shape.TensorShape([d if d >= 0 else None for d in value]))
return ret
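# Editor's sketch (not part of the original module): unlike `constant_value`,
# which is all-or-nothing, this helper keeps the statically known dimensions.
# Assuming `x` has an unknown leading dimension:
#
#   s = tf.stack([2, 3, tf.shape(x)[0]])
#   constant_value(s)            # -> None, because one element of `s` is dynamic
#   constant_value_as_shape(s)   # -> TensorShape([2, 3, None])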
# TODO(mdan): Deprecate in favor of more static-friendly types.
@tf_export("is_tensor")
def is_tf_type(x): # pylint: disable=invalid-name
"""Checks whether `x` is a TF-native type that can be passed to many TF ops.
Use `is_tensor` to differentiate types that can ingested by TensorFlow ops
without any conversion (e.g., `tf.Tensor`, `tf.SparseTensor`, and
`tf.RaggedTensor`) from types that need to be converted into tensors before
they are ingested (e.g., numpy `ndarray` and Python scalars).
For example, in the following code block:
```python
if not tf.is_tensor(t):
t = tf.convert_to_tensor(t)
return t.shape, t.dtype
```
we check to make sure that `t` is a tensor (and convert it if not) before
accessing its `shape` and `dtype`. (But note that not all TensorFlow native
types have shapes or dtypes; `tf.data.Dataset` is an example of a TensorFlow
native type that has neither shape nor dtype.)
Args:
x: A python object to check.
Returns:
`True` if `x` is a TensorFlow-native type.
"""
return (isinstance(x, internal.NativeObject) or
isinstance(x, core.Tensor) or
getattr(x, "is_tensor_like", False))
# Deprecated alias for tensor_util.is_tf_type.
is_tensor = is_tf_type
def shape_tensor(shape): # pylint: disable=invalid-name
"""Convert to an int32 or int64 tensor, defaulting to int32 if empty."""
dtype = None
if isinstance(shape, (tuple, list)):
if not shape:
dtype = dtypes.int32
else:
# If there are Dimension objects in the shape, unwrap them. This can be a
# problem if v1 and v2 TensorShape objects get mixed up in partial
# conversions, leading to shapes such as (1, 2, Dimension(5)), which are
# not convertible to Tensors because of mixed content.
shape = tuple(map(tensor_shape.dimension_value, shape))
return ops.convert_to_tensor(shape, dtype=dtype, name="shape")
# DO NOT USE: For testing only.
_ENABLE_MAYBE_SET_STATIC_SHAPE = True
def maybe_set_static_shape(tensor, shape): # pylint: disable=invalid-name
"""Sets the shape of `tensor` to the `shape`'s constant value, if inferrable.
This is a temporary workaround to fix shape inference across functional op
boundaries. E.g.
```python
shape = tf.constant([3])
@tf.function
def f():
u = tf.random_uniform(shape)
return u
```
If we were to rely solely on C++ shape inference, the shape of `u` inside
`f` would be unknown because C++ shape inference is not aware of the outer
graph and all it sees is a Placeholder node when backtracing the captured
tensor for `shape`. `maybe_set_static_shape` computes the static shape value
of `shape` by traversing the `FuncGraph` boundaries and sets the correct
shape.
A longer term solution would be to fix C++ shape inference.
Args:
tensor: A tensor.
shape: A shape tensor.
"""
if (_ENABLE_MAYBE_SET_STATIC_SHAPE and not context.executing_eagerly() and
ops.get_default_graph().building_function and
not tensor.shape.is_fully_defined() and is_tensor(shape)):
shape = shape_tensor(shape)
const_shape = constant_value_as_shape(shape)
tensor.set_shape(const_shape)
| 36.984821 | 115 | 0.702508 |
10ec64423763f715851c20ce5c6252de470dc841 | 4,361 | py | Python |
Chapter04/Chapter_4_ARIMA.py | stciaischoolrnn/Practical-Time-Series-Analysis | 72eeabbcf2a3af742b2a114026cfd841b0ea9184 | ["MIT"] | 267 | 2017-10-04T10:10:39.000Z | 2022-03-26T03:54:44.000Z |
Chapter04/Chapter_4_ARIMA.py | stciaischoolrnn/Practical-Time-Series-Analysis | 72eeabbcf2a3af742b2a114026cfd841b0ea9184 | ["MIT"] | 5 | 2018-03-08T10:11:26.000Z | 2022-01-22T07:48:48.000Z |
Chapter04/Chapter_4_ARIMA.py | stciaischoolrnn/Practical-Time-Series-Analysis | 72eeabbcf2a3af742b2a114026cfd841b0ea9184 | ["MIT"] | 215 | 2017-09-28T13:52:06.000Z | 2022-03-27T14:14:37.000Z |
# Load Modules
from __future__ import print_function
import os
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from statsmodels.tsa.arima_model import ARIMA
import statsmodels.api as sm
import statsmodels.tsa.api as smtsa
# Function to plot signal, ACF and PACF
def plotds(xt, nlag=30, fig_size=(12, 10)):
if not isinstance(xt, pd.Series):
xt = pd.Series(xt)
plt.figure(figsize=fig_size)
layout = (2, 2)
# Assign axes
ax_xt = plt.subplot2grid(layout, (0, 0), colspan=2)
ax_acf= plt.subplot2grid(layout, (1, 0))
ax_pacf = plt.subplot2grid(layout, (1, 1))
# Plot graphs
xt.plot(ax=ax_xt)
ax_xt.set_title('Time Series')
    plot_acf(xt, lags=nlag, ax=ax_acf)    # use the nlag argument instead of a hard-coded 50
    plot_pacf(xt, lags=nlag, ax=ax_pacf)
plt.tight_layout()
return None
############# DJIA EXAMPLE for ARIMA
# Change working Directory
os.chdir('/data')
#Read data from Excel file
djia_df = pd.read_excel('datasets/DJIA_Jan2016_Dec2016.xlsx')
# Inspect the first few rows
djia_df.head(10)
#Let us parse the Date column and use as row index for the DataFrame and drop it as a column
djia_df['Date'] = pd.to_datetime(djia_df['Date'], format='%Y-%m-%d')
djia_df.index = djia_df['Date']
djia_df.drop('Date', axis=1, inplace=True)
#Let us see first few rows of the modified DataFrame
djia_df.head(10)
# Plot ACF and PACF
djia_df=djia_df.dropna()
plotds(djia_df['Close'], nlag=50)
# Evaluate mean and variance at mid values
mean1, mean2 =djia_df.iloc[:125].Close.mean(), djia_df.iloc[125:].Close.mean()
var1, var2 = djia_df.iloc[:125].Close.var(), djia_df.iloc[125:].Close.var()
print('mean1=%f, mean2=%f' % (mean1, mean2))
print('variance1=%f, variance2=%f' % (var1, var2))
# ADF Test
from statsmodels.tsa.stattools import adfuller
adf_result= adfuller(djia_df.Close.tolist())
print('ADF Statistic: %f' % adf_result[0])
print('p-value: %f' % adf_result[1])
# QQ plot and probability plot
sm.qqplot(djia_df['Close'], line='s')
# Optimize ARMA parameters (Will return a non-stationary error)
arma_obj = smtsa.ARMA(djia_df['Close'].tolist(), order=(1, 1)).fit(maxlag=30, method='mle', trend='nc')
#Let us plot the original time series and first-differences
first_order_diff = djia_df['Close'].diff(1).dropna()
fig, ax = plt.subplots(2, sharex=True)
fig.set_size_inches(5.5, 5.5)
djia_df['Close'].plot(ax=ax[0], color='b')
ax[0].set_title('Close values of DJIA during Jan 2016-Dec 2016')
first_order_diff.plot(ax=ax[1], color='r')
ax[1].set_title('First-order differences of DJIA during Jan 2016-Dec 2016')
# plot signal
plotds(first_order_diff, nlag=50)
adf_result= adfuller(first_order_diff)
print('ADF Statistic: %f' % adf_result[0])
print('p-value: %f' % adf_result[1])
# Optimize ARMA parameters
aicVal=[]
for d in range(1,3):
for ari in range(0, 3):
for maj in range(0,3):
try:
arima_obj = ARIMA(djia_df['Close'].tolist(), order=(ari,d,maj))
arima_obj_fit=arima_obj.fit()
aicVal.append([ari, d, maj, arima_obj_fit.aic])
except ValueError:
pass
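# Editor's sketch (not in the original script): pick the order with the lowest AIC
# from the grid search results collected above.
if aicVal:
    best_ari, best_d, best_maj, best_aic = min(aicVal, key=lambda entry: entry[3])
    print('Best ARIMA order by AIC: (%d, %d, %d), AIC=%.2f'
          % (best_ari, best_d, best_maj, best_aic))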
# Optimal ARIMA model
arima_obj = ARIMA(djia_df['Close'].tolist(), order=(0,2,1))
arima_obj_fit = arima_obj.fit(disp=0)
arima_obj_fit.summary()
# Evaluate prediction
pred=np.append([0,0],arima_obj_fit.fittedvalues.tolist())
djia_df['ARIMA']=pred
diffval=np.append([0,0], arima_obj_fit.resid+arima_obj_fit.fittedvalues)
djia_df['diffval']=diffval
# QQ plot and probability plot
sm.qqplot(arima_obj_fit.resid, line='s')
# Plot output
f, axarr = plt.subplots(1, sharex=True)
f.set_size_inches(5.5, 5.5)
djia_df['diffval'].iloc[2:].plot(color='b', linestyle = '-', ax=axarr)
djia_df['ARIMA'].iloc[2:].plot(color='r', linestyle = '--', ax=axarr)
axarr.set_title('ARIMA(0,2,1)')
plt.xlabel('Index')
plt.ylabel('Closing')
# Forecasting: forecast() returns point forecasts, standard errors and confidence intervals
fcst, err, ci = arima_obj_fit.forecast(40)
fcst_index = pd.date_range(djia_df.index[-1], periods=41, freq='B')[1:]
forecast_series = pd.Series(fcst, index=fcst_index, name='forecast')
pd.concat([djia_df['Close'], forecast_series], axis=1).plot(figsize=(12, 8))
##############
# SARIMAX
##############
# Seasonality (based on first difference ACF shows significance at 42 lag)
x=djia_df['Close']-djia_df['Close'].shift(42)
mod = sm.tsa.statespace.SARIMAX(djia_df['Close'], trend='n', order=(0,2,1), seasonal_order=(1,1,1,42))
sarimax= mod.fit()
sarimax.summary()
| 30.929078 | 103 | 0.690667 |
1d67756efa059874c1c33a0e2b4a7a42273ee83e | 180 | py | Python |
fudgeit/recommendation/admin.py | fahimtran/hackgt-8 | 2746cd334b73268ea1f5872c796873125056e61d | ["MIT"] | null | null | null |
fudgeit/recommendation/admin.py | fahimtran/hackgt-8 | 2746cd334b73268ea1f5872c796873125056e61d | ["MIT"] | null | null | null |
fudgeit/recommendation/admin.py | fahimtran/hackgt-8 | 2746cd334b73268ea1f5872c796873125056e61d | ["MIT"] | null | null | null |
from django.contrib import admin
from recommendation.models import Restaurant, FoodItem
# Register your models here.
admin.site.register(Restaurant)
admin.site.register(FoodItem)
| 25.714286 | 54 | 0.833333 |
bf578b99fd912ddc1437dbe9a6b1bcbdc031b3e5 | 833 | py | Python |
V8_SensorDistancia_Recepcion.py | Cellista33/Trabajo_9 | c00abfaf909df3aeb168fc7468dc54edb50bad8b | ["MIT"] | null | null | null |
V8_SensorDistancia_Recepcion.py | Cellista33/Trabajo_9 | c00abfaf909df3aeb168fc7468dc54edb50bad8b | ["MIT"] | null | null | null |
V8_SensorDistancia_Recepcion.py | Cellista33/Trabajo_9 | c00abfaf909df3aeb168fc7468dc54edb50bad8b | ["MIT"] | null | null | null |
import serial
import time
from turtle import *
def grafico(dis):
x1 = 217-233-dis
x4 = 217-dis
x3 = x4
x2 = x1
y1 = -69
y2 = 69
y4 = y1
y3 = y2
setup (434, 200, 0, 0)
screensize(433, 200 )
title("EL TWIZY QUE APARCA")
hideturtle()
pencolor("red")
pensize(5)
begin_fill()
goto (x4, y1)
goto (x3, y3)
goto (x2, y2)
goto (x1, y1)
    end_fill()
return
arduino=serial.Serial('/dev/ttyUSB0', baudrate=9600, timeout = 3.0)
"""arduino.open()"""
txt=''
while True:
    time.sleep(0.01)
    # accumulate the bytes sent by the Arduino and interpret them as a distance reading
    while arduino.inWaiting() > 0:
        txt += arduino.read(1).decode('ascii', errors='ignore')
    if txt:
        try:
            distancia = float(txt.strip())
        except ValueError:
            distancia = 0.0
        if distancia > 2:
            distancia = 2
        grafico(distancia)
        print(txt)
        txt = ''
arduino.close()
| 12.815385 | 67 | 0.519808 |
347472d387f62c0f0e1c4919f0e49d214eac0d4d | 1,025 | py | Python |
cli_app/engine/notifier.py | namuan/news-rider | 2f8f5204eda717e39ab7d4c048692d5ec2eb5449 | ["MIT"] | 5 | 2021-04-26T20:46:30.000Z | 2021-05-03T07:29:31.000Z |
cli_app/engine/notifier.py | namuan/news-rider | 2f8f5204eda717e39ab7d4c048692d5ec2eb5449 | ["MIT"] | null | null | null |
cli_app/engine/notifier.py | namuan/news-rider | 2f8f5204eda717e39ab7d4c048692d5ec2eb5449 | ["MIT"] | null | null | null |
import json
import os
import sys
import requests
from dotenv import load_dotenv
from .log_helper import logger
sys.path.append(os.getcwd())
load_dotenv(verbose=True)
PUSHOVER_TOKEN = os.getenv("PUSHOVER_TOKEN")
PUSHOVER_USER_KEY = os.getenv("PUSHOVER_USER_KEY")
def notify_user(title, msg):
try:
response = requests.post(
url="https://api.pushover.net/1/messages.json",
headers={
"Content-Type": "application/json; charset=utf-8",
},
data=json.dumps({
"message": msg,
"title": title,
"token": PUSHOVER_TOKEN,
"user": PUSHOVER_USER_KEY
})
)
logger.info('Response HTTP Status Code: {status_code}'.format(
status_code=response.status_code))
logger.info('Response HTTP Response Body: {content}'.format(
content=response.content))
except requests.exceptions.RequestException:
logger.error('HTTP Request failed')
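# Usage sketch (editor's addition): both environment variables above must be set,
# e.g. via the .env file loaded by load_dotenv(); the message text is hypothetical.
#   notify_user("News Rider", "3 new stories matched your filters")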
| 27.702703 | 70 | 0.612683 |
5b0febff823bed0ec74bed39945cfbfa6f4e237f | 3,442 | py | Python |
tests/gallery/test_summarystatsagg.py | krisHans3n/geoalchemy2-mysql | 38a44d51c242d867f40d4c5503c91f52a8269ff4 | ["MIT"] | null | null | null |
tests/gallery/test_summarystatsagg.py | krisHans3n/geoalchemy2-mysql | 38a44d51c242d867f40d4c5503c91f52a8269ff4 | ["MIT"] | null | null | null |
tests/gallery/test_summarystatsagg.py | krisHans3n/geoalchemy2-mysql | 38a44d51c242d867f40d4c5503c91f52a8269ff4 | ["MIT"] | null | null | null |
"""
Use CompositeType
=================
Some functions return composite types. This example shows how to deal with this
kind of functions.
"""
import pytest
from pkg_resources import parse_version
from sqlalchemy import Column
from sqlalchemy import Float
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import __version__ as SA_VERSION
from sqlalchemy.ext.declarative import declarative_base
from geoalchemy2 import Raster
from geoalchemy2 import WKTElement
from geoalchemy2.functions import GenericFunction
from geoalchemy2.types import CompositeType
# Tests imports
from tests import select
from tests import test_only_with_dialects
class SummaryStatsCustomType(CompositeType):
"""Define the composite type returned by the function ST_SummaryStatsAgg."""
typemap = {
'count': Integer,
'sum': Float,
'mean': Float,
'stddev': Float,
'min': Float,
'max': Float,
}
cache_ok = True
class ST_SummaryStatsAgg(GenericFunction):
type = SummaryStatsCustomType
# Set a specific identifier to not override the actual ST_SummaryStatsAgg function
identifier = "ST_SummaryStatsAgg_custom"
inherit_cache = True
metadata = MetaData()
Base = declarative_base(metadata=metadata)
class Ocean(Base):
__tablename__ = 'ocean'
id = Column(Integer, primary_key=True)
rast = Column(Raster)
def __init__(self, rast):
self.rast = rast
@test_only_with_dialects("postgresql")
class TestSTSummaryStatsAgg():
@pytest.mark.skipif(
parse_version(SA_VERSION) < parse_version("1.4"),
reason="requires SQLAlchely>1.4",
)
def test_st_summary_stats_agg(self, session, conn):
metadata.drop_all(conn, checkfirst=True)
metadata.create_all(conn)
# Create a new raster
polygon = WKTElement('POLYGON((0 0,1 1,0 1,0 0))', srid=4326)
o = Ocean(polygon.ST_AsRaster(5, 6))
session.add(o)
session.flush()
# Define the query to compute stats
stats_agg = select([
Ocean.rast.ST_SummaryStatsAgg_custom(1, True, 1).label("stats")
])
stats_agg_alias = stats_agg.alias("stats_agg")
# Use these stats
query = select([
stats_agg_alias.c.stats.count.label("count"),
stats_agg_alias.c.stats.sum.label("sum"),
stats_agg_alias.c.stats.mean.label("mean"),
stats_agg_alias.c.stats.stddev.label("stddev"),
stats_agg_alias.c.stats.min.label("min"),
stats_agg_alias.c.stats.max.label("max")
])
# Check the query
assert str(query.compile(dialect=session.bind.dialect)) == (
"SELECT "
"(stats_agg.stats).count AS count, "
"(stats_agg.stats).sum AS sum, "
"(stats_agg.stats).mean AS mean, "
"(stats_agg.stats).stddev AS stddev, "
"(stats_agg.stats).min AS min, "
"(stats_agg.stats).max AS max \n"
"FROM ("
"SELECT "
"ST_SummaryStatsAgg("
"ocean.rast, "
"%(ST_SummaryStatsAgg_1)s, %(ST_SummaryStatsAgg_2)s, %(ST_SummaryStatsAgg_3)s"
") AS stats \n"
"FROM ocean) AS stats_agg"
)
# Execute the query
res = session.execute(query).fetchall()
# Check the result
assert res == [(15, 15.0, 1.0, 0.0, 1.0, 1.0)]
| 29.169492 | 90 | 0.639454 |
5c944b92c4fa28a2cc2f05731987a9d9239eb590 | 6,583 | py | Python |
lib/gui/gridview.py | frontinc-ayau/dsce | 39051752f8f2e75f912903b0b07f7ad0aba680d8 | ["Apache-2.0"] | null | null | null |
lib/gui/gridview.py | frontinc-ayau/dsce | 39051752f8f2e75f912903b0b07f7ad0aba680d8 | ["Apache-2.0"] | null | null | null |
lib/gui/gridview.py | frontinc-ayau/dsce | 39051752f8f2e75f912903b0b07f7ad0aba680d8 | ["Apache-2.0"] | null | null | null |
# This file is part of the DomainSharedContactsEditor (DSCE) application.
#
# DSCE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DSCE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DSCE. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (c) 2010 Klaus Melcher (melcher.kla@gmail.com)
"""Grid table interface to the contacts.
"""
import wx
import wx.grid
import domaindata
from domaindata import metadata
import observer
from observer import *
from emaileditor import EmailEditDialog
from emailcellrenderer import EmailCellRenderer
from addresseditor import AddressEditDialog
from addressrenderer import AddressCellRenderer
from phonerenderer import PhoneCellRenderer
from phoneeditor import PhoneEditDialog
from orgeditor import OrgEditDialog
from orgrenderer import OrgCellRenderer
from grouprenderer import GroupCellRenderer
from groupcelleditor import GroupCellEditDialog
import logging
class GridView(wx.grid.Grid):
def __init__(self, parent, id=-1):
wx.grid.Grid.__init__(self,parent,id, wx.Point(0, 0), wx.DefaultSize,
wx.NO_BORDER | wx.WANTS_CHARS)
self.SetRowMinimalAcceptableHeight(0)
self.table = domaindata.get_grid_table(self)
self.hiddenRows = []
self.SetTable(self.table, True)
self.setRenderer()
self.setEditors()
self.bind()
self.subscribe()
def bind(self):
self.Bind(wx.grid.EVT_GRID_EDITOR_SHOWN, self.gridEditorRequest, self)
self.Bind(wx.grid.EVT_GRID_CELL_CHANGE, self.gridCellChanged, self)
def subscribe(self):
observer.subscribe(self.appendRow, pmsg.CONTACT_ADDED) # interested if contact added
observer.subscribe(self.forceRefresh, pmsg.DATA_UPLOADED) # because of label changes
observer.subscribe(self.forceRefresh, pmsg.CONTACT_DELETED) # because of label changes
observer.subscribe(self.hideRows, pmsg.HIDE_ROWS) # used by search
observer.subscribe(self.unhideAll, pmsg.UNHIDE_ROWS) # used by search
def appendRow(self, event):
logging.debug("In Grid.appendRow())")
self.ProcessTableMessage(wx.grid.GridTableMessage(self.table,
wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED, 1)
)
logging.debug(self.table.GetNumberRows())
# position the cursor and scroll to the end of the grid
self.SetFocus()
self.SetGridCursor((self.table.GetNumberRows()-1),0)
self.scrollToBottom()
def hideRows(self, event):
self.BeginBatch()
self.unhideRows()
self.SetRowLabelSize(0)
self.SetColLabelSize(0)
for r in event.data:
self.HideRow(r)
self.hiddenRows += event.data
self.EndBatch()
def HideRow(self, row):
self.SetRowSize(row, 0)
def unhideRows(self):
for i in self.hiddenRows:
self.SetRowSize(i, self.GetDefaultRowSize())
self.hiddenRows = []
def unhideLabels(self):
self.SetRowLabelSize(self.GetDefaultRowLabelSize())
self.SetColLabelSize(self.GetDefaultColLabelSize())
def unhideAll(self, event):
self.BeginBatch()
self.unhideLabels()
self.unhideRows()
self.EndBatch()
def getActiveRows(self):
"""Returns the first row where any kind of selection or cursor is found
"""
rows = []
if self.IsSelection():
rows = self.GetSelectedRows()
logging.debug("Rows Sel %s" % str(rows))
else:
rows.append(self.GetGridCursorRow())
logging.debug("Rows Cur %s" % str(rows))
return rows
def gridCellChanged(self, evt):
logging.debug("Cell changed")
self.forceRefresh(None)
def gridEditorRequest(self, evt):
"""Used when others than PyGridCellEditors have to be used.
"""
c = evt.GetCol()
if c == metadata.get_col_idx("email"):
EmailEditDialog(self, -1, self.table, evt.GetRow(), c)
evt.Veto()
elif c == metadata.get_col_idx("postal_address"):
AddressEditDialog(self, -1, self.table, evt.GetRow(), c)
evt.Veto()
elif c == metadata.get_col_idx("phone"):
PhoneEditDialog(self, -1, self.table, evt.GetRow(), c)
evt.Veto()
elif c == metadata.get_col_idx("organization"):
OrgEditDialog(self, -1, self.table, evt.GetRow(), c)
evt.Veto()
elif c == metadata.get_col_idx("groups"):
GroupCellEditDialog(self, -1, self.table, evt.GetRow(), c)
evt.Veto()
else:
evt.Skip()
def scrollToBottom(self):
r = self.GetScrollRange(wx.VERTICAL)
self.Scroll(0, r)
def forceRefresh(self, evt):
logging.debug("Force Refresh() number of rows %d", self.GetNumberRows())
self.ForceRefresh()
def setRenderer(self):
attr = wx.grid.GridCellAttr()
attr.SetRenderer(EmailCellRenderer())
self.SetColAttr(metadata.get_col_idx("email"), attr)
attr = wx.grid.GridCellAttr()
attr.SetRenderer(AddressCellRenderer())
self.SetColAttr(metadata.get_col_idx("postal_address"), attr)
attr = wx.grid.GridCellAttr()
attr.SetRenderer(PhoneCellRenderer())
self.SetColAttr(metadata.get_col_idx("phone"), attr)
attr = wx.grid.GridCellAttr()
attr.SetRenderer(OrgCellRenderer())
self.SetColAttr(metadata.get_col_idx("organization"), attr)
attr = wx.grid.GridCellAttr()
attr.SetRenderer(GroupCellRenderer())
self.SetColAttr(metadata.get_col_idx("groups"), attr)
def setEditors(self):
attr = wx.grid.GridCellAttr()
# attr.SetEditor(wx.grid.GridCellAutoWrapStringEditor())
# self.SetColAttr(metadata.get_col_idx("postal_address"), attr)
| 35.972678 | 100 | 0.634969 |
1072433d27472659e823d353b8c2d85c0d1ecbd9 | 17,273 | py | Python |
nova/virt/libvirt/utils.py | vasart/nova | bca5004d367e0418e35f8a72fe0f2e106e977ab0 | ["Apache-2.0"] | 1 | 2021-09-10T15:29:02.000Z | 2021-09-10T15:29:02.000Z |
nova/virt/libvirt/utils.py | PFZheng/nova | 84be8abbccb5ddc2d7c5a7db59019ed1edb19e7f | ["Apache-2.0"] | null | null | null |
nova/virt/libvirt/utils.py | PFZheng/nova | 84be8abbccb5ddc2d7c5a7db59019ed1edb19e7f | ["Apache-2.0"] | null | null | null |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import platform
from lxml import etree
from oslo.config import cfg
from nova.openstack.common.gettextutils import _
from nova.openstack.common.gettextutils import _LI
from nova.openstack.common.gettextutils import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
from nova.virt import images
from nova.virt import volumeutils
libvirt_opts = [
cfg.BoolOpt('snapshot_compression',
default=False,
help='Compress snapshot images when possible. This '
'currently applies exclusively to qcow2 images'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts, 'libvirt')
CONF.import_opt('instances_path', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
def execute(*args, **kwargs):
return utils.execute(*args, **kwargs)
def get_iscsi_initiator():
return volumeutils.get_iscsi_initiator()
def get_fc_hbas():
"""Get the Fibre Channel HBA information."""
out = None
try:
out, err = execute('systool', '-c', 'fc_host', '-v',
run_as_root=True)
except processutils.ProcessExecutionError as exc:
# This handles the case where rootwrap is used
# and systool is not installed
# 96 = nova.cmd.rootwrap.RC_NOEXECFOUND:
if exc.exit_code == 96:
LOG.warn(_LW("systool is not installed"))
return []
except OSError as exc:
# This handles the case where rootwrap is NOT used
# and systool is not installed
if exc.errno == errno.ENOENT:
LOG.warn(_LW("systool is not installed"))
return []
if out is None:
raise RuntimeError(_("Cannot find any Fibre Channel HBAs"))
lines = out.split('\n')
# ignore the first 2 lines
lines = lines[2:]
hbas = []
hba = {}
lastline = None
for line in lines:
line = line.strip()
# 2 newlines denotes a new hba port
if line == '' and lastline == '':
if len(hba) > 0:
hbas.append(hba)
hba = {}
else:
val = line.split('=')
if len(val) == 2:
key = val[0].strip().replace(" ", "")
value = val[1].strip()
hba[key] = value.replace('"', '')
lastline = line
return hbas
def get_fc_hbas_info():
"""Get Fibre Channel WWNs and device paths from the system, if any."""
# Note modern linux kernels contain the FC HBA's in /sys
# and are obtainable via the systool app
hbas = get_fc_hbas()
hbas_info = []
for hba in hbas:
wwpn = hba['port_name'].replace('0x', '')
wwnn = hba['node_name'].replace('0x', '')
device_path = hba['ClassDevicepath']
device = hba['ClassDevice']
hbas_info.append({'port_name': wwpn,
'node_name': wwnn,
'host_device': device,
'device_path': device_path})
return hbas_info
def get_fc_wwpns():
"""Get Fibre Channel WWPNs from the system, if any."""
# Note modern linux kernels contain the FC HBA's in /sys
# and are obtainable via the systool app
hbas = get_fc_hbas()
wwpns = []
if hbas:
for hba in hbas:
if hba['port_state'] == 'Online':
wwpn = hba['port_name'].replace('0x', '')
wwpns.append(wwpn)
return wwpns
def get_fc_wwnns():
"""Get Fibre Channel WWNNs from the system, if any."""
# Note modern linux kernels contain the FC HBA's in /sys
# and are obtainable via the systool app
hbas = get_fc_hbas()
wwnns = []
if hbas:
for hba in hbas:
if hba['port_state'] == 'Online':
wwnn = hba['node_name'].replace('0x', '')
wwnns.append(wwnn)
return wwnns
def create_image(disk_format, path, size):
"""Create a disk image
:param disk_format: Disk image format (as known by qemu-img)
:param path: Desired location of the disk image
:param size: Desired size of disk image. May be given as an int or
a string. If given as an int, it will be interpreted
as bytes. If it's a string, it should consist of a number
with an optional suffix ('K' for Kibibytes,
M for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
If no suffix is given, it will be interpreted as bytes.
"""
execute('qemu-img', 'create', '-f', disk_format, path, size)
def create_cow_image(backing_file, path, size=None):
"""Create COW image
Creates a COW image with the given backing file
:param backing_file: Existing image on which to base the COW image
:param path: Desired location of the COW image
"""
base_cmd = ['qemu-img', 'create', '-f', 'qcow2']
cow_opts = []
if backing_file:
cow_opts += ['backing_file=%s' % backing_file]
base_details = images.qemu_img_info(backing_file)
else:
base_details = None
# This doesn't seem to get inherited so force it to...
# http://paste.ubuntu.com/1213295/
# TODO(harlowja) probably file a bug against qemu-img/qemu
if base_details and base_details.cluster_size is not None:
cow_opts += ['cluster_size=%s' % base_details.cluster_size]
# For now don't inherit this due the following discussion...
# See: http://www.gossamer-threads.com/lists/openstack/dev/10592
# if 'preallocation' in base_details:
# cow_opts += ['preallocation=%s' % base_details['preallocation']]
if base_details and base_details.encrypted:
cow_opts += ['encryption=%s' % base_details.encrypted]
if size is not None:
cow_opts += ['size=%s' % size]
if cow_opts:
# Format as a comma separated list
csv_opts = ",".join(cow_opts)
cow_opts = ['-o', csv_opts]
cmd = base_cmd + cow_opts + [path]
execute(*cmd)
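# Usage sketch (editor's addition, hypothetical paths): create a qcow2 overlay on
# top of a cached base image, capping the virtual size at 10 GiB.
#   create_cow_image('/var/lib/nova/instances/_base/abc123',
#                    '/var/lib/nova/instances/uuid/disk', size='10G')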
def import_rbd_image(*args):
execute('rbd', 'import', *args)
def _run_rbd(*args, **kwargs):
total = list(args)
if CONF.libvirt.rbd_user:
total.extend(['--id', str(CONF.libvirt.rbd_user)])
if CONF.libvirt.images_rbd_ceph_conf:
total.extend(['--conf', str(CONF.libvirt.images_rbd_ceph_conf)])
return utils.execute(*total, **kwargs)
def list_rbd_volumes(pool):
"""List volumes names for given ceph pool.
:param pool: ceph pool name
"""
try:
out, err = _run_rbd('rbd', '-p', pool, 'ls')
except processutils.ProcessExecutionError:
# No problem when no volume in rbd pool
return []
return [line.strip() for line in out.splitlines()]
def remove_rbd_volumes(pool, *names):
"""Remove one or more rbd volume."""
for name in names:
rbd_remove = ['rbd', '-p', pool, 'rm', name]
try:
_run_rbd(*rbd_remove, attempts=3, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.warn(_LW("rbd remove %(name)s in pool %(pool)s failed"),
{'name': name, 'pool': pool})
def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
"""Pick the libvirt primary backend driver name
If the hypervisor supports multiple backend drivers, then the name
attribute selects the primary backend driver name, while the optional
type attribute provides the sub-type. For example, xen supports a name
of "tap", "tap2", "phy", or "file", with a type of "aio" or "qcow2",
while qemu only supports a name of "qemu", but multiple types including
"raw", "bochs", "qcow2", and "qed".
:param is_block_dev:
:returns: driver_name or None
"""
if CONF.libvirt.virt_type == "xen":
if is_block_dev:
return "phy"
else:
# 4000000 == 4.0.0
if hypervisor_version == 4000000:
return "tap"
else:
return "tap2"
elif CONF.libvirt.virt_type in ('kvm', 'qemu'):
return "qemu"
else:
# UML doesn't want a driver_name set
return None
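# Behaviour sketch (editor's addition), assuming CONF.libvirt.virt_type = "xen":
#   pick_disk_driver_name(4000000, is_block_dev=True)    # -> "phy"
#   pick_disk_driver_name(4000000, is_block_dev=False)   # -> "tap"   (xen 4.0.0 exactly)
#   pick_disk_driver_name(4002000, is_block_dev=False)   # -> "tap2"
# With virt_type "kvm" or "qemu" the name is always "qemu"; otherwise None.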
def get_disk_size(path):
"""Get the (virtual) size of a disk image
:param path: Path to the disk image
:returns: Size (in bytes) of the given disk image as it would be seen
by a virtual machine.
"""
size = images.qemu_img_info(path).virtual_size
return int(size)
def get_disk_backing_file(path, basename=True):
"""Get the backing file of a disk image
:param path: Path to the disk image
:returns: a path to the image's backing store
"""
backing_file = images.qemu_img_info(path).backing_file
if backing_file and basename:
backing_file = os.path.basename(backing_file)
return backing_file
def copy_image(src, dest, host=None):
"""Copy a disk image to an existing directory
:param src: Source image
:param dest: Destination path
:param host: Remote host
"""
if not host:
# We shell out to cp because that will intelligently copy
# sparse files. I.E. holes will not be written to DEST,
# rather recreated efficiently. In addition, since
# coreutils 8.11, holes can be read efficiently too.
execute('cp', src, dest)
else:
dest = "%s:%s" % (host, dest)
# Try rsync first as that can compress and create sparse dest files.
# Note however that rsync currently doesn't read sparse files
# efficiently: https://bugzilla.samba.org/show_bug.cgi?id=8918
# At least network traffic is mitigated with compression.
try:
# Do a relatively light weight test first, so that we
# can fall back to scp, without having run out of space
# on the destination for example.
execute('rsync', '--sparse', '--compress', '--dry-run', src, dest)
except processutils.ProcessExecutionError:
execute('scp', src, dest)
else:
execute('rsync', '--sparse', '--compress', src, dest)
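# Usage sketch (editor's addition, hypothetical paths):
#   copy_image('/var/lib/nova/instances/_base/img', '/tmp/img')                 # local, sparse-aware cp
#   copy_image('/var/lib/nova/instances/_base/img', '/tmp/img', host='node2')   # rsync with scp fallback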
def write_to_file(path, contents, umask=None):
"""Write the given contents to a file
:param path: Destination file
:param contents: Desired contents of the file
:param umask: Umask to set when creating this file (will be reset)
"""
if umask:
saved_umask = os.umask(umask)
try:
with open(path, 'w') as f:
f.write(contents)
finally:
if umask:
os.umask(saved_umask)
def chown(path, owner):
"""Change ownership of file or directory
:param path: File or directory whose ownership to change
:param owner: Desired new owner (given as uid or username)
"""
execute('chown', owner, path, run_as_root=True)
def extract_snapshot(disk_path, source_fmt, out_path, dest_fmt):
"""Extract a snapshot from a disk image.
Note that nobody should write to the disk image during this operation.
:param disk_path: Path to disk image
:param out_path: Desired path of extracted snapshot
"""
# NOTE(markmc): ISO is just raw to qemu-img
if dest_fmt == 'iso':
dest_fmt = 'raw'
qemu_img_cmd = ('qemu-img', 'convert', '-f', source_fmt, '-O', dest_fmt)
# Conditionally enable compression of snapshots.
if CONF.libvirt.snapshot_compression and dest_fmt == "qcow2":
qemu_img_cmd += ('-c',)
qemu_img_cmd += (disk_path, out_path)
execute(*qemu_img_cmd)
def load_file(path):
"""Read contents of file
:param path: File to read
"""
with open(path, 'r') as fp:
return fp.read()
def file_open(*args, **kwargs):
"""Open file
see built-in file() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return file(*args, **kwargs)
def file_delete(path):
"""Delete (unlink) file
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return os.unlink(path)
def find_disk(virt_dom):
"""Find root device path for instance
May be file or device
"""
xml_desc = virt_dom.XMLDesc(0)
domain = etree.fromstring(xml_desc)
if CONF.libvirt.virt_type == 'lxc':
source = domain.find('devices/filesystem/source')
disk_path = source.get('dir')
disk_path = disk_path[0:disk_path.rfind('rootfs')]
disk_path = os.path.join(disk_path, 'disk')
else:
source = domain.find('devices/disk/source')
disk_path = source.get('file') or source.get('dev')
if not disk_path and CONF.libvirt.images_type == 'rbd':
disk_path = source.get('name')
if disk_path:
disk_path = 'rbd:' + disk_path
if not disk_path:
raise RuntimeError(_("Can't retrieve root device path "
"from instance libvirt configuration"))
return disk_path
def get_disk_type(path):
"""Retrieve disk type (raw, qcow2, lvm) for given file."""
if path.startswith('/dev'):
return 'lvm'
elif path.startswith('rbd:'):
return 'rbd'
return images.qemu_img_info(path).file_format
def get_fs_info(path):
"""Get free/used/total space info for a filesystem
:param path: Any dirent on the filesystem
:returns: A dict containing:
:free: How much space is free (in bytes)
:used: How much space is used (in bytes)
:total: How big the filesystem is (in bytes)
"""
hddinfo = os.statvfs(path)
total = hddinfo.f_frsize * hddinfo.f_blocks
free = hddinfo.f_frsize * hddinfo.f_bavail
used = hddinfo.f_frsize * (hddinfo.f_blocks - hddinfo.f_bfree)
return {'total': total,
'free': free,
'used': used}
def fetch_image(context, target, image_id, user_id, project_id, max_size=0):
"""Grab image."""
images.fetch_to_raw(context, image_id, target, user_id, project_id,
max_size=max_size)
def get_instance_path(instance, forceold=False, relative=False):
"""Determine the correct path for instance storage.
This method determines the directory name for instance storage, while
handling the fact that we changed the naming style to something more
unique in the grizzly release.
:param instance: the instance we want a path for
:param forceold: force the use of the pre-grizzly format
:param relative: if True, just the relative path is returned
:returns: a path to store information about that instance
"""
pre_grizzly_name = os.path.join(CONF.instances_path, instance['name'])
if forceold or os.path.exists(pre_grizzly_name):
if relative:
return instance['name']
return pre_grizzly_name
if relative:
return instance['uuid']
return os.path.join(CONF.instances_path, instance['uuid'])
def get_arch(image_meta):
"""Determine the architecture of the guest (or host).
This method determines the CPU architecture that must be supported by
the hypervisor. It gets the (guest) arch info from image_meta properties,
and it will fallback to the nova-compute (host) arch if no architecture
info is provided in image_meta.
:param image_meta: the metadata associated with the instance image
:returns: guest (or host) architecture
"""
if image_meta:
arch = image_meta.get('properties', {}).get('architecture')
if arch is not None:
return arch
return platform.processor()
def is_mounted(mount_path, source=None):
"""Check if the given source is mounted at given destination point."""
try:
check_cmd = ['findmnt', '--target', mount_path]
if source:
check_cmd.extend(['--source', source])
utils.execute(*check_cmd)
return True
    except processutils.ProcessExecutionError:
        return False
    except OSError as exc:
        # findmnt is not required to be installed, so only log at info level.
if exc.errno == errno.ENOENT:
LOG.info(_LI("findmnt tool is not installed"))
return False
| 32.468045 | 78 | 0.634111 |
2b47a6dee3d34acd702ffeff9b13940608c41b21 | 3,699 | py | Python |
Desktop/cs61a/lab/lab10/reader.py | cpvb13/cal-hack-5-proj | 13e31fff3f56b57030c34147b04cef1d6309c62b | ["MIT"] | 6 | 2018-09-01T15:11:11.000Z | 2022-03-23T00:34:31.000Z |
Desktop/cs61a/lab/lab10/reader.py | cpvb13/cal-hack-5-proj | 13e31fff3f56b57030c34147b04cef1d6309c62b | ["MIT"] | null | null | null |
Desktop/cs61a/lab/lab10/reader.py | cpvb13/cal-hack-5-proj | 13e31fff3f56b57030c34147b04cef1d6309c62b | ["MIT"] | 3 | 2020-07-25T22:03:58.000Z | 2022-01-05T18:54:52.000Z |
import string
from buffer import Buffer
from expr import *
SYMBOL_STARTS = set(string.ascii_lowercase + string.ascii_uppercase + '_')
SYMBOL_INNERS = SYMBOL_STARTS | set(string.digits)
NUMERAL = set(string.digits + '-.')
WHITESPACE = set(' \t\n\r')
DELIMITERS = set('(),:')
def read(s):
"""Parse an expression from a string. If the string does not contain an
expression, None is returned. If the string cannot be parsed, a SyntaxError
is raised.
>>> read('lambda f: f(0)')
LambdaExpr(['f'], CallExpr(Name('f'), [Literal(0)]))
>>> read('(lambda x: x)(5)')
CallExpr(LambdaExpr(['x'], Name('x')), [Literal(5)])
>>> read('(lambda: 5)()')
CallExpr(LambdaExpr([], Literal(5)), [])
>>> read('lambda x y: 10')
Traceback (most recent call last):
...
SyntaxError: expected ':' but got 'y'
>>> read(' ') # returns None
"""
src = Buffer(tokenize(s))
if src.current() is not None:
return read_expr(src)
###########
## Lexer ##
###########
def tokenize(s):
"""Splits the string s into tokens and returns a list of them.
>>> tokenize('lambda f: f(0, 4.2)')
['lambda', 'f', ':', 'f', '(', 0, ',', 4.2, ')']
"""
src = Buffer(s)
tokens = []
while True:
token = next_token(src)
if token is None:
return tokens
tokens.append(token)
def take(src, allowed_characters):
result = ''
while src.current() in allowed_characters:
result += src.remove_front()
return result
def next_token(src):
take(src, WHITESPACE) # skip whitespace
c = src.current()
if c is None:
return None
elif c in NUMERAL:
literal = take(src, NUMERAL)
try:
return int(literal)
except ValueError:
try:
return float(literal)
except ValueError:
raise SyntaxError("'{}' is not a numeral".format(literal))
elif c in SYMBOL_STARTS:
return take(src, SYMBOL_INNERS)
elif c in DELIMITERS:
src.remove_front()
return c
else:
raise SyntaxError("'{}' is not a token".format(c))
def is_literal(s):
return isinstance(s, int) or isinstance(s, float)
def is_name(s):
return isinstance(s, str) and s not in DELIMITERS and s != 'lambda'
############
## Parser ##
############
def read_expr(src):
token = src.remove_front()
if token is None:
raise SyntaxError('Incomplete expression')
elif is_literal(token):
return read_call_expr(src, Literal(token))
elif is_name(token):
return read_call_expr(src, Name(token))
elif token == 'lambda':
params = read_comma_separated(src, read_param)
src.expect(':')
body = read_expr(src)
return LambdaExpr(params, body)
elif token == '(':
inner_expr = read_expr(src)
src.expect(')')
return read_call_expr(src, inner_expr)
else:
raise SyntaxError("'{}' is not the start of an expression".format(token))
def read_comma_separated(src, reader):
if src.current() in (':', ')'):
return []
else:
s = [reader(src)]
while src.current() == ',':
src.remove_front()
s.append(reader(src))
return s
def read_call_expr(src, operator):
while src.current() == '(':
src.remove_front()
operands = read_comma_separated(src, read_expr)
src.expect(')')
operator = CallExpr(operator, operands)
return operator
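# Parsing sketch (editor's addition): read_call_expr keeps consuming call
# parentheses, so chained calls nest left-to-right, e.g.
#   read('f(0)(1)')  # -> CallExpr(CallExpr(Name('f'), [Literal(0)]), [Literal(1)])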
def read_param(src):
token = src.remove_front()
if is_name(token):
return token
else:
raise SyntaxError("Expected parameter name but got '{}'".format(token))
| 28.453846 | 81 | 0.583401 |
d0877eb244ce75015a69bff9c7ebf32d79f3262d | 1,008 | py | Python |
2018/codemotion/cnd/demo/vote/app.py | pchico83/talks | 8335e8740e764a4e45c443597b70bca684ba0238 | ["Apache-2.0"] | null | null | null |
2018/codemotion/cnd/demo/vote/app.py | pchico83/talks | 8335e8740e764a4e45c443597b70bca684ba0238 | ["Apache-2.0"] | null | null | null |
2018/codemotion/cnd/demo/vote/app.py | pchico83/talks | 8335e8740e764a4e45c443597b70bca684ba0238 | ["Apache-2.0"] | null | null | null |
from flask import Flask, render_template, request, make_response, g
from redis import Redis
import os
import socket
import random
import json
option_a = os.getenv('OPTION_A', "AAA")
option_b = os.getenv('OPTION_B', "Dogs")
hostname = socket.gethostname()
app = Flask(__name__)
def get_redis():
if not hasattr(g, 'redis'):
g.redis = Redis(host="redis", db=0, socket_timeout=5)
return g.redis
@app.route("/", methods=['POST','GET'])
def hello():
    voter_id = hex(random.getrandbits(64))[2:]  # strip only the '0x' prefix (the old [2:-1] also dropped a digit on Python 3)
vote = None
if request.method == 'POST':
redis = get_redis()
vote = request.form['vote']
data = json.dumps({'voter_id': voter_id, 'vote': vote})
redis.rpush('votes', data)
resp = make_response(render_template(
'index.html',
option_a=option_a,
option_b=option_b,
hostname=hostname,
vote=vote,
))
return resp
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, debug=True, threaded=True)
| 24 | 67 | 0.632937 |
5c4dfd3d25d55e3ed71a388ba4c9a33ce854e381 | 684 | py | Python |
8ball.py | cccepc/dpr228 | 175613d086d2c544d6bee1e3482294326979f9ae | ["Apache-2.0"] | null | null | null |
8ball.py | cccepc/dpr228 | 175613d086d2c544d6bee1e3482294326979f9ae | ["Apache-2.0"] | null | null | null |
8ball.py | cccepc/dpr228 | 175613d086d2c544d6bee1e3482294326979f9ae | ["Apache-2.0"] | null | null | null |
import random
def getAnswer(answerNumber):
if answerNumber == 1:
return 'It is certain'
elif answerNumber == 2:
return 'It is decidedly so'
elif answerNumber == 3:
return 'Yes'
elif answerNumber == 4:
return 'Reply hazy try again'
elif answerNumber == 5:
return 'Ask again later'
elif answerNumber == 6:
return 'Concentrate and ask again'
elif answerNumber == 7:
return 'My reply is no'
    elif answerNumber == 8:
return 'Outlook not so good'
elif answerNumber == 9:
return 'Very doubtful'
r = random.randint(1, 9)
fortune = getAnswer(r)
print(fortune)
| 26.307692
| 43
| 0.593567
|
8ca93981a5ebe08e6349e061d96013b8fae414f8
| 961
|
py
|
Python
|
isi_sdk_9_0_0/test/test_dedupe_settings_settings.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_9_0_0/test/test_dedupe_settings_settings.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_9_0_0/test/test_dedupe_settings_settings.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_9_0_0
from isi_sdk_9_0_0.models.dedupe_settings_settings import DedupeSettingsSettings # noqa: E501
from isi_sdk_9_0_0.rest import ApiException
class TestDedupeSettingsSettings(unittest.TestCase):
"""DedupeSettingsSettings unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDedupeSettingsSettings(self):
"""Test DedupeSettingsSettings"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_9_0_0.models.dedupe_settings_settings.DedupeSettingsSettings() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.439024
| 102
| 0.723205
|
2aec9d11c3117c74123550aab275ffd0a5c9dac5
| 1,879
|
py
|
Python
|
examples/gallery/embellishments/colorbars_multiple.py
|
arleaman/pygmt
|
7f53b9dae66fae3f0cd91c7feb92ef53bf7d9f42
|
[
"BSD-3-Clause"
] | 1
|
2021-11-16T01:29:59.000Z
|
2021-11-16T01:29:59.000Z
|
examples/gallery/embellishments/colorbars_multiple.py
|
PeiyanXi/pygmt
|
1b74259a0346f45ff4e42244185450d0e70cc2ff
|
[
"BSD-3-Clause"
] | 18
|
2021-11-02T21:16:06.000Z
|
2022-03-22T21:15:40.000Z
|
examples/gallery/embellishments/colorbars_multiple.py
|
geodeepak/Pygmt
|
77949bba289102d3077cfa9b7fda26f74ef6aed0
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Multiple colormaps
------------------
This gallery example shows how to create multiple colormaps for different
subplots. To better understand how GMT modern mode maintains several levels of
colormaps, please refer to
:gmt-docs:`cookbook/features.html#gmt-modern-mode-hierarchical-levels` for
details.
"""
import pygmt
fig = pygmt.Figure()
# Load Earth relief data for the entire globe and a subset region
grid_globe = pygmt.datasets.load_earth_relief(resolution="01d")
subset_region = [-14, 30, 35, 60]
grid_subset = pygmt.datasets.load_earth_relief(resolution="10m", region=subset_region)
# Define a 1-row, 2-column subplot layout. The overall figure dimensions are set
# to be 15 cm wide and 8 cm high. Each subplot is automatically labelled.
# The space between the subplots is set to be 0.5 cm.
with fig.subplot(
nrows=1, ncols=2, figsize=("15c", "8c"), autolabel=True, margins="0.5c"
):
# Activate the first panel so that the colormap created by the makecpt
# method is a panel-level CPT
with fig.set_panel(panel=0):
pygmt.makecpt(cmap="geo", series=[-8000, 8000])
# "R?" means Winkel Tripel projection with map width automatically
# determined from the subplot width.
fig.grdimage(grid=grid_globe, projection="R?", region="g", frame=True)
fig.colorbar(frame=["a4000f2000", "x+lElevation", "y+lm"])
# Activate the second panel so that the colormap created by the makecpt
# method is a panel-level CPT
with fig.set_panel(panel=1):
pygmt.makecpt(cmap="globe", series=[-6000, 3000])
# "M?" means Mercator projection with map width also automatically
# determined from the subplot width.
fig.grdimage(
grid=grid_subset, projection="M?", region=subset_region, frame=True
)
fig.colorbar(frame=["a2000f1000", "x+lElevation", "y+lm"])
fig.show()
| 41.755556
| 86
| 0.700905
|
e98774dc3bf7d3d1a4dbd13092bd5cc1d058e576
| 296
|
py
|
Python
|
Players/AIPlayer.py
|
Lunalulululu/Cardgame
|
97756464d8ea5ed252ae6817e6c121590c6b4130
|
[
"Apache-2.0"
] | null | null | null |
Players/AIPlayer.py
|
Lunalulululu/Cardgame
|
97756464d8ea5ed252ae6817e6c121590c6b4130
|
[
"Apache-2.0"
] | null | null | null |
Players/AIPlayer.py
|
Lunalulululu/Cardgame
|
97756464d8ea5ed252ae6817e6c121590c6b4130
|
[
"Apache-2.0"
] | null | null | null |
from OptimalDiscard import OptimalDiscard
from OptimalGrouping import OptimalGrouping
from Players.Player import Player
class AIPlayer(Player):
def do_discard(self):
OptimalDiscard(self)
def do_grouping(self):
if len(self.hand) == 10:
OptimalGrouping(self)
| 22.769231
| 43
| 0.719595
|
7daa81a0ac0e13b744a6b44d3531c130a2a4092f
| 2,598
|
py
|
Python
|
src/poetry/console/commands/publish.py
|
robin92/poetry
|
7cc684981983963dc202e1a249a4b66667b468bd
|
[
"MIT"
] | null | null | null |
src/poetry/console/commands/publish.py
|
robin92/poetry
|
7cc684981983963dc202e1a249a4b66667b468bd
|
[
"MIT"
] | null | null | null |
src/poetry/console/commands/publish.py
|
robin92/poetry
|
7cc684981983963dc202e1a249a4b66667b468bd
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from typing import Optional
from cleo.helpers import option
from poetry.console.commands.command import Command
class PublishCommand(Command):
name = "publish"
description = "Publishes a package to a remote repository."
options = [
option(
"repository", "r", "The repository to publish the package to.", flag=False
),
option("username", "u", "The username to access the repository.", flag=False),
option("password", "p", "The password to access the repository.", flag=False),
option(
"cert", None, "Certificate authority to access the repository.", flag=False
),
option(
"client-cert",
None,
"Client certificate to access the repository.",
flag=False,
),
option("build", None, "Build the package before publishing."),
option("dry-run", None, "Perform all actions except upload the package."),
]
help = """The publish command builds and uploads the package to a remote repository.
By default, it will upload to PyPI but if you pass the --repository option it will
upload to it instead.
The --repository option should match the name of a configured repository using
the config command.
"""
loggers = ["poetry.masonry.publishing.publisher"]
def handle(self) -> Optional[int]:
from poetry.publishing.publisher import Publisher
publisher = Publisher(self.poetry, self.io)
# Building package first, if told
if self.option("build"):
if publisher.files and not self.confirm(
f"There are <info>{len(publisher.files)}</info> files ready for"
" publishing. Build anyway?"
):
self.line_error("<error>Aborted!</error>")
return 1
self.call("build")
files = publisher.files
if not files:
self.line_error(
"<error>No files to publish. "
"Run poetry build first or use the --build option.</error>"
)
return 1
self.line("")
cert = Path(self.option("cert")) if self.option("cert") else None
client_cert = (
Path(self.option("client-cert")) if self.option("client-cert") else None
)
publisher.publish(
self.option("repository"),
self.option("username"),
self.option("password"),
cert,
client_cert,
self.option("dry-run"),
)
return None
| 29.862069
| 88
| 0.585835
|
9cdab62b48749bea48ad417b986b14e915053aae
| 12,168
|
py
|
Python
|
google/cloud/firestore_v1/async_client.py
|
tswast/python-firestore
|
1f44a45419a85d8646ded5f22d6cbab697761651
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/firestore_v1/async_client.py
|
tswast/python-firestore
|
1f44a45419a85d8646ded5f22d6cbab697761651
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/firestore_v1/async_client.py
|
tswast/python-firestore
|
1f44a45419a85d8646ded5f22d6cbab697761651
|
[
"Apache-2.0"
] | 1
|
2020-10-04T12:11:36.000Z
|
2020-10-04T12:11:36.000Z
|
# Copyright 2020 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google Cloud Firestore API.
This is the base from which all interactions with the API occur.
In the hierarchy of API concepts
* a :class:`~google.cloud.firestore_v1.client.Client` owns a
:class:`~google.cloud.firestore_v1.async_collection.AsyncCollectionReference`
* a :class:`~google.cloud.firestore_v1.client.Client` owns a
:class:`~google.cloud.firestore_v1.async_document.AsyncDocumentReference`
"""
from google.cloud.firestore_v1.base_client import (
BaseClient,
DEFAULT_DATABASE,
_CLIENT_INFO,
_reference_info, # type: ignore
_parse_batch_get, # type: ignore
_get_doc_mask,
_path_helper,
)
from google.cloud.firestore_v1 import _helpers
from google.cloud.firestore_v1.async_query import AsyncQuery
from google.cloud.firestore_v1.async_batch import AsyncWriteBatch
from google.cloud.firestore_v1.async_collection import AsyncCollectionReference
from google.cloud.firestore_v1.async_document import (
AsyncDocumentReference,
DocumentSnapshot,
)
from google.cloud.firestore_v1.async_transaction import AsyncTransaction
from google.cloud.firestore_v1.services.firestore import (
async_client as firestore_client,
)
from google.cloud.firestore_v1.services.firestore.transports import (
grpc_asyncio as firestore_grpc_transport,
)
from typing import Any, AsyncGenerator
class AsyncClient(BaseClient):
"""Client for interacting with Google Cloud Firestore API.
.. note::
Since the Cloud Firestore API requires the gRPC transport, no
``_http`` argument is accepted by this class.
Args:
project (Optional[str]): The project which the client acts on behalf
of. If not passed, falls back to the default inferred
from the environment.
credentials (Optional[~google.auth.credentials.Credentials]): The
OAuth2 Credentials to use for this client. If not passed, falls
back to the default inferred from the environment.
database (Optional[str]): The database name that the client targets.
For now, :attr:`DEFAULT_DATABASE` (the default value) is the
only valid database.
client_info (Optional[google.api_core.gapic_v1.client_info.ClientInfo]):
The client info used to send a user-agent string along with API
requests. If ``None``, then default info will be used. Generally,
you only need to set this if you're developing your own library
or partner tool.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
def __init__(
self,
project=None,
credentials=None,
database=DEFAULT_DATABASE,
client_info=_CLIENT_INFO,
client_options=None,
) -> None:
super(AsyncClient, self).__init__(
project=project,
credentials=credentials,
database=database,
client_info=client_info,
client_options=client_options,
)
@property
def _firestore_api(self):
"""Lazy-loading getter GAPIC Firestore API.
Returns:
:class:`~google.cloud.gapic.firestore.v1`.async_firestore_client.FirestoreAsyncClient:
The GAPIC client with the credentials of the current client.
"""
return self._firestore_api_helper(
firestore_grpc_transport.FirestoreGrpcAsyncIOTransport,
firestore_client.FirestoreAsyncClient,
firestore_client,
)
@property
def _target(self):
"""Return the target (where the API is).
Eg. "firestore.googleapis.com"
Returns:
str: The location of the API.
"""
return self._target_helper(firestore_client.FirestoreAsyncClient)
def collection(self, *collection_path) -> AsyncCollectionReference:
"""Get a reference to a collection.
For a top-level collection:
.. code-block:: python
>>> client.collection('top')
For a sub-collection:
.. code-block:: python
>>> client.collection('mydocs/doc/subcol')
>>> # is the same as
>>> client.collection('mydocs', 'doc', 'subcol')
Sub-collections can be nested deeper in a similar fashion.
Args:
collection_path (Tuple[str, ...]): Can either be
* A single ``/``-delimited path to a collection
* A tuple of collection path segments
Returns:
:class:`~google.cloud.firestore_v1.async_collection.AsyncCollectionReference`:
A reference to a collection in the Firestore database.
"""
return AsyncCollectionReference(*_path_helper(collection_path), client=self)
def collection_group(self, collection_id) -> AsyncQuery:
"""
Creates and returns a new AsyncQuery that includes all documents in the
database that are contained in a collection or subcollection with the
given collection_id.
.. code-block:: python
>>> query = client.collection_group('mygroup')
Args:
collection_id (str) Identifies the collections to query over.
Every collection or subcollection with this ID as the last segment of its
path will be included. Cannot contain a slash.
Returns:
:class:`~google.cloud.firestore_v1.async_query.AsyncQuery`:
The created AsyncQuery.
"""
return AsyncQuery(
self._get_collection_reference(collection_id), all_descendants=True
)
def document(self, *document_path) -> AsyncDocumentReference:
"""Get a reference to a document in a collection.
For a top-level document:
.. code-block:: python
>>> client.document('collek/shun')
>>> # is the same as
>>> client.document('collek', 'shun')
For a document in a sub-collection:
.. code-block:: python
>>> client.document('mydocs/doc/subcol/child')
>>> # is the same as
>>> client.document('mydocs', 'doc', 'subcol', 'child')
Documents in sub-collections can be nested deeper in a similar fashion.
Args:
document_path (Tuple[str, ...]): Can either be
* A single ``/``-delimited path to a document
* A tuple of document path segments
Returns:
:class:`~google.cloud.firestore_v1.document.AsyncDocumentReference`:
A reference to a document in a collection.
"""
return AsyncDocumentReference(
*self._document_path_helper(*document_path), client=self
)
async def get_all(
self, references, field_paths=None, transaction=None
) -> AsyncGenerator[DocumentSnapshot, Any]:
"""Retrieve a batch of documents.
.. note::
Documents returned by this method are not guaranteed to be
returned in the same order that they are given in ``references``.
.. note::
If multiple ``references`` refer to the same document, the server
will only return one result.
See :meth:`~google.cloud.firestore_v1.client.Client.field_path` for
more information on **field paths**.
If a ``transaction`` is used and it already has write operations
added, this method cannot be used (i.e. read-after-write is not
allowed).
Args:
references (List[.AsyncDocumentReference, ...]): Iterable of document
references to be retrieved.
field_paths (Optional[Iterable[str, ...]]): An iterable of field
paths (``.``-delimited list of field names) to use as a
projection of document fields in the returned results. If
no value is provided, all fields will be returned.
transaction (Optional[:class:`~google.cloud.firestore_v1.async_transaction.AsyncTransaction`]):
An existing transaction that these ``references`` will be
retrieved in.
Yields:
.DocumentSnapshot: The next document snapshot that fulfills the
query, or :data:`None` if the document does not exist.
"""
document_paths, reference_map = _reference_info(references)
mask = _get_doc_mask(field_paths)
response_iterator = await self._firestore_api.batch_get_documents(
request={
"database": self._database_string,
"documents": document_paths,
"mask": mask,
"transaction": _helpers.get_transaction_id(transaction),
},
metadata=self._rpc_metadata,
)
async for get_doc_response in response_iterator:
yield _parse_batch_get(get_doc_response, reference_map, self)
async def collections(self) -> AsyncGenerator[AsyncCollectionReference, Any]:
"""List top-level collections of the client's database.
Returns:
Sequence[:class:`~google.cloud.firestore_v1.async_collection.AsyncCollectionReference`]:
iterator of subcollections of the current document.
"""
iterator = await self._firestore_api.list_collection_ids(
request={"parent": "{}/documents".format(self._database_string)},
metadata=self._rpc_metadata,
)
while True:
for i in iterator.collection_ids:
yield self.collection(i)
if iterator.next_page_token:
iterator = await self._firestore_api.list_collection_ids(
request={
"parent": "{}/documents".format(self._database_string),
"page_token": iterator.next_page_token,
},
metadata=self._rpc_metadata,
)
else:
return
# TODO(microgen): currently this method is rewritten to iterate/page itself.
# https://github.com/googleapis/gapic-generator-python/issues/516
# it seems the generator ought to be able to do this itself.
# iterator.client = self
# iterator.item_to_value = _item_to_collection_ref
# return iterator
def batch(self) -> AsyncWriteBatch:
"""Get a batch instance from this client.
Returns:
:class:`~google.cloud.firestore_v1.async_batch.AsyncWriteBatch`:
A "write" batch to be used for accumulating document changes and
sending the changes all at once.
"""
return AsyncWriteBatch(self)
def transaction(self, **kwargs) -> AsyncTransaction:
"""Get a transaction that uses this client.
See :class:`~google.cloud.firestore_v1.async_transaction.AsyncTransaction` for
more information on transactions and the constructor arguments.
Args:
kwargs (Dict[str, Any]): The keyword arguments (other than
``client``) to pass along to the
:class:`~google.cloud.firestore_v1.async_transaction.AsyncTransaction`
constructor.
Returns:
:class:`~google.cloud.firestore_v1.async_transaction.AsyncTransaction`:
A transaction attached to this client.
"""
return AsyncTransaction(self, **kwargs)
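# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of this module): a minimal async driver for
# the client defined above.  The project id and document path are placeholders
# and real credentials are required, so the snippet is left as a comment.
#
#     import asyncio
#     from google.cloud import firestore_v1
#
#     async def main():
#         client = firestore_v1.AsyncClient(project="my-project")
#         doc = client.document("users/alice")
#         async for snapshot in client.get_all([doc]):
#             print(snapshot.id, snapshot.exists)
#
#     asyncio.run(main())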
| 37.555556
| 107
| 0.644313
|
7e65a59da65ea9219aba1b59c7958de1b4b9109c
| 2,818
|
py
|
Python
|
quorum/quorum_algorithm.py
|
Blackjack92/distributed-decision-trees-corrution-analysis
|
fb12a29eb8b767996e1cdcbb0a73bb7f0ad8de75
|
[
"MIT"
] | null | null | null |
quorum/quorum_algorithm.py
|
Blackjack92/distributed-decision-trees-corrution-analysis
|
fb12a29eb8b767996e1cdcbb0a73bb7f0ad8de75
|
[
"MIT"
] | null | null | null |
quorum/quorum_algorithm.py
|
Blackjack92/distributed-decision-trees-corrution-analysis
|
fb12a29eb8b767996e1cdcbb0a73bb7f0ad8de75
|
[
"MIT"
] | null | null | null |
import os
import sys
import functools
import itertools
import random
scriptpath = "../"
# Add the directory containing your module to the Python path (wants absolute paths)
sys.path.append(os.path.abspath(scriptpath))
import dt_algorithm
def calculate_quorum_tree_len(quorum_tree):
return functools.reduce(lambda count, node: count + len(node), quorum_tree, 0)
def calculate_node_number_of_all_depths_for_quorum_tree(max_depth, quorums):
return dt_algorithm.calculate_node_number_of_all_depths(max_depth) + (2 * quorums)
def quorum_corruption_validator(node):
return (sum(node) / len(node)) > 0.5
def build_all_quorum_tree_combinations(max_depth, number_of_quorums):
size = dt_algorithm.calculate_node_number_of_all_depths(max_depth)
for positions in itertools.combinations(range(size), number_of_quorums):
p = [[0] for _ in range(size)]
for i in positions:
p[i] = [0, 0, 0]
yield p
def corrupt_node_for_quorum_tree(quorum_tree, positions):
size = calculate_quorum_tree_len(quorum_tree)
currentPosition = 0
for i, node in enumerate(quorum_tree):
for j, subnode in enumerate(node):
if currentPosition in positions:
quorum_tree[i][j] = 1
currentPosition += 1
def build_all_corrupted_tree_combinations_for_quorum_tree(quorum_tree, number_of_corrupted_nodes):
size = calculate_quorum_tree_len(quorum_tree)
for positions in itertools.combinations(range(size), number_of_corrupted_nodes):
cp = [x[:] for x in quorum_tree]
corrupt_node_for_quorum_tree(cp, positions)
yield cp
def build_all_corrupted_quorum_tree_combinations(max_depth, number_of_quorums, number_of_corrupted_nodes):
quorum_tree_combinations = build_all_quorum_tree_combinations(max_depth, number_of_quorums)
for combination in quorum_tree_combinations:
corrupted_combinations = build_all_corrupted_tree_combinations_for_quorum_tree(combination, number_of_corrupted_nodes)
for corrupted_combination in corrupted_combinations:
yield corrupted_combination
def build_random_corrupted_quorum_tree_combinations(max_depth, number_of_quorums, number_of_corrupted_nodes, iterations):
size = dt_algorithm.calculate_node_number_of_all_depths(max_depth)
for i in range(iterations):
p = [[0] for _ in range(size)]
# Set quorum at random position
for quorum_position in random.sample(range(size), number_of_quorums):
p[quorum_position] = [0, 0, 0]
# Set corrupted nodes at random positions
overall_size = calculate_node_number_of_all_depths_for_quorum_tree(max_depth, number_of_quorums)
corrupt_node_for_quorum_tree(p, list(random.sample(range(overall_size), number_of_corrupted_nodes)))
yield p
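# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): draw a few random
# corrupted quorum trees and count the quorum nodes that the majority
# validator above flags as corrupted.  It needs dt_algorithm on the path, so
# the snippet is left as an illustrative comment and all literals are
# arbitrary.
#
#     for tree in build_random_corrupted_quorum_tree_combinations(
#             max_depth=2, number_of_quorums=1,
#             number_of_corrupted_nodes=2, iterations=5):
#         flagged = [node for node in tree
#                    if len(node) > 1 and quorum_corruption_validator(node)]
#         print(tree, '->', len(flagged), 'corrupted quorum(s)')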
| 40.257143
| 126
| 0.75763
|
a5f8a33d1408ca06ddf63d241f325859f87bc8e4
| 291
|
py
|
Python
|
1066.py
|
heltonricardo/URI
|
160cca22d94aa667177c9ebf2a1c9864c5e55b41
|
[
"MIT"
] | 6
|
2021-04-13T00:33:43.000Z
|
2022-02-10T10:23:59.000Z
|
1066.py
|
heltonricardo/URI
|
160cca22d94aa667177c9ebf2a1c9864c5e55b41
|
[
"MIT"
] | null | null | null |
1066.py
|
heltonricardo/URI
|
160cca22d94aa667177c9ebf2a1c9864c5e55b41
|
[
"MIT"
] | 3
|
2021-03-23T18:42:24.000Z
|
2022-02-10T10:24:07.000Z
|
pa = im = po = ne = 0
for i in range(5):
n = int(input())
if n % 2 == 0: pa += 1
else: im += 1
if n > 0: po += 1
elif n < 0: ne += 1
print(pa , 'valor(es) par(es)')
print(im , 'valor(es) impar(es)')
print(po , 'valor(es) positivo(s)')
print(ne , 'valor(es) negativo(s)')
| 24.25
| 35
| 0.505155
|
45e46b8e066aa7f46a3d3a93ad1781a92a5ad9d6
| 5,615
|
py
|
Python
|
zvt/recorders/eastmoney/meta/china_stock_category_recorder.py
|
doncat99/zvt
|
831183bdf7a6d0fc3acd3ea51984df590078eec6
|
[
"MIT"
] | 10
|
2020-08-08T04:43:00.000Z
|
2021-07-23T05:38:11.000Z
|
zvt/recorders/eastmoney/meta/china_stock_category_recorder.py
|
doncat99/zvt
|
831183bdf7a6d0fc3acd3ea51984df590078eec6
|
[
"MIT"
] | 1
|
2021-08-14T12:19:18.000Z
|
2021-09-30T06:44:04.000Z
|
zvt/recorders/eastmoney/meta/china_stock_category_recorder.py
|
doncat99/zvt
|
831183bdf7a6d0fc3acd3ea51984df590078eec6
|
[
"MIT"
] | 1
|
2021-12-16T01:57:37.000Z
|
2021-12-16T01:57:37.000Z
|
# -*- coding: utf-8 -*-
import pandas as pd
from numba import njit
from zvt import zvt_config
from zvt.api.data_type import Region, Provider, EntityType
from zvt.api.quote import china_stock_code_to_id
from zvt.domain import BlockStock, BlockCategory, Block
from zvt.contract.api import df_to_db
from zvt.contract.recorder import RecorderForEntities, TimeSeriesDataRecorder
from zvt.networking.request import sync_get
from zvt.utils.time_utils import now_pd_timestamp, PD_TIME_FORMAT_DAY
from zvt.utils.utils import json_callback_param
class EastmoneyChinaBlockRecorder(RecorderForEntities):
provider = Provider.EastMoney
data_schema = Block
region = Region.CHN
    # URLs used to fetch the industry / concept / region block lists
category_map_url = {
BlockCategory.industry: 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKHY&sty=DCRRBKCPAL&st=(ChangePercent)&sr=-1&p=1&ps=200&lvl=&cb=jsonp_F1A61014DE5E45B7A50068EA290BC918&token=4f1862fc3b5e77c150a2b985b12db0fd&_=08766',
BlockCategory.concept: 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKGN&sty=DCRRBKCPAL&st=(ChangePercent)&sr=-1&p=1&ps=300&lvl=&cb=jsonp_3071689CC1E6486A80027D69E8B33F26&token=4f1862fc3b5e77c150a2b985b12db0fd&_=08251',
# BlockCategory.area: 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C._BKDY&sty=DCRRBKCPAL&st=(ChangePercent)&sr=-1&p=1&ps=200&lvl=&cb=jsonp_A597D4867B3D4659A203AADE5B3B3AD5&token=4f1862fc3b5e77c150a2b985b12db0fd&_=02443'
}
def init_entities(self):
self.entities = [(category, url) for category, url in self.category_map_url.items()]
def process_loop(self, entity, http_session):
category, url = entity
text = sync_get(http_session, url, return_type='text')
if text is None:
return
results = json_callback_param(text)
        # @njit(nopython=True)  # disabled, like the second recorder below: nopython mode cannot compile this f-string / dict-of-strings building code
def numba_boost_up(results):
the_list = []
for result in results:
items = result.split(',')
code = items[1]
name = items[2]
entity_id = f'block_cn_{code}'
the_list.append({
'id': entity_id,
'entity_id': entity_id,
'entity_type': EntityType.Block.value,
'exchange': 'cn',
'code': code,
'name': name,
'category': category.value
})
return the_list
the_list = numba_boost_up(results)
if the_list:
df = pd.DataFrame.from_records(the_list)
df_to_db(df=df, ref_df=None, region=Region.CHN, data_schema=self.data_schema, provider=self.provider)
self.logger.info(f"finish record sina blocks:{category.value}")
class EastmoneyChinaBlockStockRecorder(TimeSeriesDataRecorder):
region = Region.CHN
provider = Provider.EastMoney
entity_schema = Block
data_schema = BlockStock
    # URL used to fetch the stocks contained in a given block
category_stocks_url = 'https://nufm.dfcfw.com/EM_Finance2014NumericApplication/JS.aspx?type=CT&cmd=C.{}{}&sty=SFCOO&st=(Close)&sr=-1&p=1&ps=300&cb=jsonp_B66B5BAA1C1B47B5BB9778045845B947&token=7bc05d0d4c3c22ef9fca8c2a912d779c'
def __init__(self, exchanges=None, entity_ids=None, codes=None, batch_size=10, force_update=False, sleeping_time=5,
default_size=zvt_config['batch_size'], real_time=False, fix_duplicate_way='add',
start_timestamp=None, end_timestamp=None, close_hour=0, close_minute=0) -> None:
super().__init__(EntityType.Block, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute)
def generate_domain_id(self, entity, df, time_fmt=PD_TIME_FORMAT_DAY):
return entity.id + '_' + df['stock_id']
def record(self, entity, start, end, size, timestamps, http_session):
url = self.category_stocks_url.format(entity.code, '1')
text = sync_get(http_session, url, return_type='text')
if text is None:
return None
results = json_callback_param(text)
# @njit(nopython=True)
def numba_boost_up(results):
the_list = []
for result in results:
items = result.split(',')
stock_code = items[1]
stock_id = china_stock_code_to_id(stock_code)
the_list.append({
'stock_id': stock_id,
'stock_code': stock_code,
'stock_name': items[2],
})
return the_list
the_list = numba_boost_up(results)
if the_list:
df = pd.DataFrame.from_records(the_list)
return df
self.sleep()
return None
def format(self, entity, df):
df['timestamp'] = now_pd_timestamp(Region.CHN)
df['entity_id'] = entity.id
df['provider'] = self.provider.value
df['code'] = entity.code
df['name'] = entity.name
df['level'] = self.level.value
df['exchange'] = entity.exchange
df['entity_type'] = EntityType.Block.value
df['id'] = self.generate_domain_id(entity, df)
return df
__all__ = ['EastmoneyChinaBlockRecorder', 'EastmoneyChinaBlockStockRecorder']
if __name__ == '__main__':
# init_log('china_stock_category.log')
recorder = EastmoneyChinaBlockStockRecorder(codes=['BK0727'])
recorder.run()
| 40.395683
| 263
| 0.654675
|
58feb55017462e7efa4d72ed4df75abfd5f318ad
| 16,784
|
py
|
Python
|
backend/modules/watson_developer_cloud/watson_service.py
|
RaitzeR/FinnBros
|
a2d7e3e755af7bb22bb2ce779ea1f36c6bed961b
|
[
"MIT"
] | null | null | null |
backend/modules/watson_developer_cloud/watson_service.py
|
RaitzeR/FinnBros
|
a2d7e3e755af7bb22bb2ce779ea1f36c6bed961b
|
[
"MIT"
] | 10
|
2020-06-05T18:08:03.000Z
|
2022-03-11T23:19:52.000Z
|
backend/modules/watson_developer_cloud/watson_service.py
|
RaitzeR/FinnBros
|
a2d7e3e755af7bb22bb2ce779ea1f36c6bed961b
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# Copyright 2017 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json as json_import
import platform
import os
import requests
import sys
from requests.structures import CaseInsensitiveDict
import dateutil.parser as date_parser
from .iam_token_manager import IAMTokenManager
try:
from http.cookiejar import CookieJar # Python 3
except ImportError:
from cookielib import CookieJar # Python 2
from .version import __version__
BEARER = 'Bearer'
# Uncomment this to enable http debugging
# try:
# import http.client as http_client
# except ImportError:
# # Python 2
# import httplib as http_client
# http_client.HTTPConnection.debuglevel = 1
def load_from_vcap_services(service_name):
vcap_services = os.getenv("VCAP_SERVICES")
if vcap_services is not None:
services = json_import.loads(vcap_services)
if service_name in services:
return services[service_name][0]["credentials"]
else:
return None
class WatsonException(Exception):
"""
Custom exception class for Watson Services.
"""
pass
class WatsonApiException(WatsonException):
"""
Custom exception class for errors returned from Watson APIs.
:param int code: The HTTP status code returned.
:param str message: A message describing the error.
:param dict info: A dictionary of additional information about the error.
    :param requests.Response httpResponse: the raw HTTP response returned by the service
"""
def __init__(self, code, message, info=None, httpResponse=None):
# Call the base class constructor with the parameters it needs
super(WatsonApiException, self).__init__(message)
self.message = message
self.code = code
self.info = info
self.httpResponse = httpResponse
self.transactionId = None
self.globalTransactionId = None
if httpResponse is not None:
self.transactionId = httpResponse.headers.get('X-DP-Watson-Tran-ID')
self.globalTransactionId = httpResponse.headers.get('X-Global-Transaction-ID')
def __str__(self):
msg = 'Error: ' + str(self.message) + ', Code: ' + str(self.code)
if self.transactionId is not None:
msg += ' , X-dp-watson-tran-id: ' + str(self.transactionId)
if self.globalTransactionId is not None:
msg += ' , X-global-transaction-id: ' + str(self.globalTransactionId)
return msg
class WatsonInvalidArgument(WatsonException):
pass
def datetime_to_string(datetime):
"""
Serializes a datetime to a string.
:param datetime: datetime value
:return: string. containing iso8601 format date string
"""
return datetime.isoformat().replace('+00:00', 'Z')
def string_to_datetime(string):
"""
Deserializes string to datetime.
:param string: string containing datetime in iso8601 format
:return: datetime.
"""
return date_parser.parse(string)
def _cleanup_param_value(value):
if isinstance(value, bool):
return 'true' if value else 'false'
return value
def _cleanup_param_values(dictionary):
if isinstance(dictionary, dict):
return dict(
[(k, _cleanup_param_value(v)) for k, v in dictionary.items()])
return dictionary
def _remove_null_values(dictionary):
if isinstance(dictionary, dict):
return dict([(k, v) for k, v in dictionary.items() if v is not None])
return dictionary
def _convert_boolean_value(value):
if isinstance(value, bool):
return 1 if value else 0
return value
def _convert_boolean_values(dictionary):
if isinstance(dictionary, dict):
return dict(
[(k, _convert_boolean_value(v)) for k, v in dictionary.items()])
return dictionary
def get_error_message(response):
"""
Gets the error message from a JSON response.
:return: the error message
:rtype: string
"""
error_message = 'Unknown error'
try:
error_json = response.json()
if 'error' in error_json:
if isinstance(error_json['error'], dict) and 'description' in \
error_json['error']:
error_message = error_json['error']['description']
else:
error_message = error_json['error']
elif 'error_message' in error_json:
error_message = error_json['error_message']
elif 'errorMessage' in error_json:
error_message = error_json['errorMessage']
elif 'msg' in error_json:
error_message = error_json['msg']
elif 'statusInfo' in error_json:
error_message = error_json['statusInfo']
return error_message
except:
return response.text or error_message
class DetailedResponse(object):
"""
Custom class for detailed response returned from Watson APIs.
:param Response response: Either json response or http Response as requested.
:param dict headers: A dict of response headers
"""
def __init__(self, response=None, headers=None):
self.result = response
self.headers = headers
def get_result(self):
return self.result
def get_headers(self):
return self.headers
def _to_dict(self):
_dict = {}
if hasattr(self, 'result') and self.result is not None:
_dict['result'] = self.result if isinstance(self.result, dict) else 'HTTP response'
if hasattr(self, 'headers') and self.headers is not None:
_dict['headers'] = self.headers
return _dict
def __str__(self):
return json_import.dumps(self._to_dict(), indent=2, default=lambda o: o.__dict__)
class WatsonService(object):
def __init__(self, vcap_services_name, url, username=None, password=None,
use_vcap_services=True, api_key=None,
x_watson_learning_opt_out=False,
iam_api_key=None, iam_access_token=None, iam_url=None):
"""
Loads credentials from the VCAP_SERVICES environment variable if
available, preferring credentials explicitly
set in the request.
If VCAP_SERVICES is not found (or use_vcap_services is set to False),
username and password credentials must
be specified.
"""
self.url = url
self.jar = None
self.api_key = None
self.username = None
self.password = None
self.default_headers = None
self.http_config = {}
self.detailed_response = False
self.iam_api_key = None
self.iam_access_token = None
self.iam_url = None
self.token_manager = None
user_agent_string = 'watson-apis-python-sdk-' + __version__ # SDK version
user_agent_string += ' ' + platform.system() # OS
user_agent_string += ' ' + platform.release() # OS version
user_agent_string += ' ' + platform.python_version() # Python version
self.user_agent_header = {'user-agent': user_agent_string}
if x_watson_learning_opt_out:
self.default_headers = {'x-watson-learning-opt-out': 'true'}
if api_key is not None:
self.set_api_key(api_key)
elif username is not None and password is not None:
self.set_username_and_password(username, password)
elif iam_access_token is not None or iam_api_key is not None:
self.set_token_manager(iam_api_key, iam_access_token, iam_url)
if use_vcap_services and not self.username and not self.api_key:
self.vcap_service_credentials = load_from_vcap_services(
vcap_services_name)
if self.vcap_service_credentials is not None and isinstance(
self.vcap_service_credentials, dict):
self.url = self.vcap_service_credentials['url']
if 'username' in self.vcap_service_credentials:
self.username = self.vcap_service_credentials['username']
if 'password' in self.vcap_service_credentials:
self.password = self.vcap_service_credentials['password']
if 'apikey' in self.vcap_service_credentials:
self.api_key = self.vcap_service_credentials['apikey']
if 'api_key' in self.vcap_service_credentials:
self.api_key = self.vcap_service_credentials['api_key']
                if 'iam_api_key' in self.vcap_service_credentials or 'apikey' in self.vcap_service_credentials:
self.iam_api_key = self.vcap_service_credentials.get('iam_api_key') or self.vcap_service_credentials.get('apikey')
if 'iam_access_token' in self.vcap_service_credentials:
self.iam_access_token = self.vcap_service_credentials['iam_access_token']
if 'iam_url' in self.vcap_service_credentials:
self.iam_url = self.vcap_service_credentials['iam_url']
if (self.username is None or self.password is None)\
and self.api_key is None and self.token_manager is None:
raise ValueError(
'You must specify your IAM api key or username and password service '
'credentials (Note: these are different from your Bluemix id)')
def set_username_and_password(self, username=None, password=None):
if username == 'YOUR SERVICE USERNAME':
username = None
if password == 'YOUR SERVICE PASSWORD':
password = None
self.username = username
self.password = password
self.jar = CookieJar()
def set_api_key(self, api_key):
if api_key == 'YOUR API KEY':
api_key = None
self.api_key = api_key
self.jar = CookieJar()
def set_token_manager(self, iam_api_key, iam_access_token, iam_url):
if iam_api_key == 'YOUR IAM API KEY':
iam_api_key = None
self.iam_api_key = iam_api_key
self.iam_access_token = iam_access_token
self.iam_url = iam_url
self.token_manager = IAMTokenManager(iam_api_key, iam_access_token, iam_url)
self.jar = CookieJar()
def set_iam_access_token(self, iam_access_token):
if self.token_manager:
self.token_manager.set_access_token(iam_access_token)
else:
self.token_manager = IAMTokenManager(iam_access_token=iam_access_token)
self.iam_access_token = iam_access_token
def set_url(self, url):
self.url = url
def set_default_headers(self, headers):
"""
Set http headers to be sent in every request.
:param headers: A dictionary of header names and values
"""
if isinstance(headers, dict):
self.default_headers = headers
else:
raise TypeError("headers parameter must be a dictionary")
def set_http_config(self, http_config):
"""
Sets the http client config like timeout, proxies, etc.
"""
if isinstance(http_config, dict):
self.http_config = http_config
else:
raise TypeError("http_config parameter must be a dictionary")
def set_detailed_response(self, detailed_response):
self.detailed_response = detailed_response
# Could make this compute the label_id based on the variable name of the
# dictionary passed in (using **kwargs), but
# this might be confusing to understand.
@staticmethod
def unpack_id(dictionary, label_id):
if isinstance(dictionary, dict) and label_id in dictionary:
return dictionary[label_id]
return dictionary
@staticmethod
def _convert_model(val, classname=None):
if classname is not None and not hasattr(val, "_from_dict"):
if isinstance(val, str):
val = json_import.loads(val)
val = classname._from_dict(dict(val))
if hasattr(val, "_to_dict"):
return val._to_dict()
return val
@staticmethod
def _convert_list(val):
if isinstance(val, list):
return ",".join(val)
return val
@staticmethod
def _encode_path_vars(*args):
return (requests.utils.quote(x, safe='') for x in args)
@staticmethod
def _get_error_info(response):
"""
Gets the error info (if any) from a JSON response.
:return: A `dict` containing additional information about the error.
:rtype: dict
"""
info_keys = ['code_description', 'description', 'errors', 'help',
'sub_code', 'warnings']
error_info = {}
try:
error_json = response.json()
error_info = {k:v for k, v in error_json.items() if k in info_keys}
except:
pass
return error_info if any(error_info) else None
def request(self, method, url, accept_json=False, headers=None,
params=None, json=None, data=None, files=None, **kwargs):
full_url = self.url + url
input_headers = _remove_null_values(headers) if headers else {}
headers = CaseInsensitiveDict(self.user_agent_header)
if self.default_headers is not None:
headers.update(self.default_headers)
if accept_json:
headers['accept'] = 'application/json'
headers.update(input_headers)
# Remove keys with None values
params = _remove_null_values(params)
params = _cleanup_param_values(params)
json = _remove_null_values(json)
data = _remove_null_values(data)
files = _remove_null_values(files)
if sys.version_info >= (3, 0) and isinstance(data, str):
data = data.encode('utf-8')
# Support versions of requests older than 2.4.2 without the json input
if not data and json is not None:
data = json_import.dumps(json)
headers.update({'content-type': 'application/json'})
auth = None
if self.token_manager:
access_token = self.token_manager.get_token()
headers['Authorization'] = '{0} {1}'.format(BEARER, access_token)
if self.username and self.password:
auth = (self.username, self.password)
if self.api_key is not None:
if params is None:
params = {}
if full_url.startswith(
'https://gateway-a.watsonplatform.net/calls'):
params['apikey'] = self.api_key
else:
params['api_key'] = self.api_key
kwargs = dict(kwargs, **self.http_config)
response = requests.request(method=method, url=full_url,
cookies=self.jar, auth=auth,
headers=headers,
params=params, data=data, files=files,
**kwargs)
if 200 <= response.status_code <= 299:
if response.status_code == 204:
return None
if accept_json:
response_json = response.json()
if 'status' in response_json and response_json['status'] \
== 'ERROR':
status_code = 400
error_message = 'Unknown error'
if 'statusInfo' in response_json:
error_message = response_json['statusInfo']
if error_message == 'invalid-api-key':
status_code = 401
raise WatsonApiException(status_code, error_message, httpResponse=response)
return DetailedResponse(response_json, response.headers) if self.detailed_response else response_json
return DetailedResponse(response, response.headers) if self.detailed_response else response
else:
if response.status_code == 401:
error_message = 'Unauthorized: Access is denied due to ' \
'invalid credentials '
else:
error_message = get_error_message(response)
error_info = self._get_error_info(response)
raise WatsonApiException(response.status_code, error_message,
info=error_info, httpResponse=response)
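# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of this module): WatsonService is normally
# subclassed by the concrete service clients, but request() can be exercised
# directly.  The service name, URL, path and credentials below are
# placeholders, so the snippet is left as a comment.
#
#     service = WatsonService(vcap_services_name='my_service',
#                             url='https://gateway.watsonplatform.net/my-service/api',
#                             iam_api_key='YOUR-IAM-API-KEY-GOES-HERE')
#     service.set_default_headers({'x-custom-header': 'value'})
#     pong = service.request('GET', '/v1/ping', accept_json=True)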
| 37.380846
| 134
| 0.633878
|
e12825b9a5af5d59a3a50a92c8c06b6e0e080535
| 575
|
py
|
Python
|
deepbgc/output/pfam_tsv.py
|
gkapatai/deepbgc
|
977fa56972a38b9405725315f566a939d5d21759
|
[
"MIT"
] | 60
|
2019-02-01T14:40:32.000Z
|
2022-03-10T14:15:01.000Z
|
deepbgc/output/pfam_tsv.py
|
gkapatai/deepbgc
|
977fa56972a38b9405725315f566a939d5d21759
|
[
"MIT"
] | 43
|
2019-01-31T17:17:47.000Z
|
2022-03-22T21:14:43.000Z
|
deepbgc/output/pfam_tsv.py
|
gkapatai/deepbgc
|
977fa56972a38b9405725315f566a939d5d21759
|
[
"MIT"
] | 24
|
2019-01-14T19:12:16.000Z
|
2021-11-02T08:32:02.000Z
|
import logging
from deepbgc import util
from deepbgc.output.writer import TSVWriter
class PfamTSVWriter(TSVWriter):
@classmethod
def get_description(cls):
return 'Table of Pfam domains (pfam_id) from given sequence (sequence_id) in genomic order, with BGC detection scores'
@classmethod
def get_name(cls):
return 'pfam-tsv'
def record_to_df(self, record):
df = util.create_pfam_dataframe(record, add_scores=True, add_in_cluster=True)
logging.debug('Writing %s Pfams to: %s', len(df), self.out_path)
return df
| 27.380952
| 126
| 0.707826
|
5dd1ccf8332fc9a9227f9a245c243895e11fd837
| 39
|
py
|
Python
|
QUANTAXIS_Trade/QA_status_center/__init__.py
|
xiongyixiaoyang/QUANTAXIS
|
08441ce711e55385e2b01f80df17d34e7e89f564
|
[
"MIT"
] | 92
|
2017-03-22T07:27:21.000Z
|
2021-04-04T06:59:26.000Z
|
QUANTAXIS_Trade/QA_status_center/__init__.py
|
xiongyixiaoyang/QUANTAXIS
|
08441ce711e55385e2b01f80df17d34e7e89f564
|
[
"MIT"
] | 2
|
2017-12-27T02:34:32.000Z
|
2018-04-18T02:50:13.000Z
|
QUANTAXIS_Trade/QA_status_center/__init__.py
|
xiongyixiaoyang/QUANTAXIS
|
08441ce711e55385e2b01f80df17d34e7e89f564
|
[
"MIT"
] | 7
|
2017-03-22T07:27:25.000Z
|
2020-04-28T08:44:03.000Z
|
#coding:utf-8
"""
Status maintenance center: determines order status and related state.
"""
| 6.5
| 15
| 0.615385
|
0990e38f2a1cf6f0d6f25d16cd680e2208a55f3e
| 11,696
|
py
|
Python
|
external/vcm/vcm/cubedsphere/regridz.py
|
jacnugent/fv3net
|
84958651bdd17784fdab98f87ad0d65414c03368
|
[
"MIT"
] | null | null | null |
external/vcm/vcm/cubedsphere/regridz.py
|
jacnugent/fv3net
|
84958651bdd17784fdab98f87ad0d65414c03368
|
[
"MIT"
] | null | null | null |
external/vcm/vcm/cubedsphere/regridz.py
|
jacnugent/fv3net
|
84958651bdd17784fdab98f87ad0d65414c03368
|
[
"MIT"
] | null | null | null |
import dask
import numpy as np
import xarray as xr
from typing import Tuple, Union
import vcm.mappm
from ..calc.thermo import pressure_at_interface
from ..cubedsphere import edge_weighted_block_average, weighted_block_average
from ..cubedsphere.coarsen import block_upsample_like
from ..cubedsphere.constants import (
RESTART_Z_CENTER,
RESTART_Z_OUTER,
FV_CORE_X_CENTER,
FV_CORE_X_OUTER,
FV_CORE_Y_CENTER,
FV_CORE_Y_OUTER,
)
from .xgcm import create_fv3_grid
def regrid_to_area_weighted_pressure(
ds: xr.Dataset,
delp: xr.DataArray,
area: xr.DataArray,
coarsening_factor: int,
x_dim: str = FV_CORE_X_CENTER,
y_dim: str = FV_CORE_Y_CENTER,
z_dim: str = RESTART_Z_CENTER,
) -> Tuple[xr.Dataset, xr.DataArray]:
""" Vertically regrid a dataset of cell-centered quantities to coarsened
pressure levels.
Args:
ds: input Dataset
delp: pressure thicknesses
area: area weights
coarsening_factor: coarsening-factor for pressure levels
x_dim (optional): x-dimension name. Defaults to "xaxis_1"
y_dim (optional): y-dimension name. Defaults to "yaxis_2"
z_dim (optional): z-dimension name. Defaults to "zaxis_1"
Returns:
tuple of regridded input Dataset and area masked wherever coarse
pressure bottom interfaces are below fine surface pressure
"""
delp_coarse = weighted_block_average(
delp, area, coarsening_factor, x_dim=x_dim, y_dim=y_dim
)
return _regrid_given_delp(
ds, delp, delp_coarse, area, x_dim=x_dim, y_dim=y_dim, z_dim=z_dim
)
def regrid_to_edge_weighted_pressure(
ds: xr.Dataset,
delp: xr.DataArray,
length: xr.DataArray,
coarsening_factor: int,
x_dim: str = FV_CORE_X_CENTER,
y_dim: str = FV_CORE_Y_OUTER,
z_dim: str = RESTART_Z_CENTER,
edge: str = "x",
) -> Tuple[xr.Dataset, xr.DataArray]:
""" Vertically regrid a dataset of edge-valued quantities to coarsened
pressure levels.
Args:
ds: input Dataset
delp: pressure thicknesses
length: edge length weights
coarsening_factor: coarsening-factor for pressure levels
x_dim (optional): x-dimension name. Defaults to "xaxis_1"
y_dim (optional): y-dimension name. Defaults to "yaxis_1"
z_dim (optional): z-dimension name. Defaults to "zaxis_1"
edge (optional): grid cell side to coarse-grain along {"x", "y"}
Returns:
tuple of regridded input Dataset and length masked wherever coarse
pressure bottom interfaces are below fine surface pressure
"""
hor_dims = {"x": x_dim, "y": y_dim}
grid = create_fv3_grid(
xr.Dataset({"delp": delp}),
x_center=FV_CORE_X_CENTER,
x_outer=FV_CORE_X_OUTER,
y_center=FV_CORE_Y_CENTER,
y_outer=FV_CORE_Y_OUTER,
)
interp_dim = "x" if edge == "y" else "y"
delp_staggered = grid.interp(delp, interp_dim).assign_coords(
{hor_dims[interp_dim]: np.arange(1, delp.sizes[hor_dims[edge]] + 2)}
)
delp_staggered_coarse = edge_weighted_block_average(
delp_staggered, length, coarsening_factor, x_dim=x_dim, y_dim=y_dim, edge=edge
)
return _regrid_given_delp(
ds,
delp_staggered,
delp_staggered_coarse,
length,
x_dim=x_dim,
y_dim=y_dim,
z_dim=z_dim,
)
def _regrid_given_delp(
ds,
delp_fine,
delp_coarse,
weights,
x_dim: str = FV_CORE_X_CENTER,
y_dim: str = FV_CORE_Y_CENTER,
z_dim: str = RESTART_Z_CENTER,
):
"""Given a fine and coarse delp, do vertical regridding to coarse pressure levels
and mask weights below fine surface pressure.
"""
delp_coarse_on_fine = block_upsample_like(
delp_coarse, delp_fine, x_dim=x_dim, y_dim=y_dim
)
phalf_coarse_on_fine = pressure_at_interface(
delp_coarse_on_fine, dim_center=z_dim, dim_outer=RESTART_Z_OUTER
)
phalf_fine = pressure_at_interface(
delp_fine, dim_center=z_dim, dim_outer=RESTART_Z_OUTER
)
ds_regrid = xr.zeros_like(ds)
for var in ds:
ds_regrid[var] = regrid_vertical(
phalf_fine, ds[var], phalf_coarse_on_fine, z_dim_center=z_dim
)
masked_weights = _mask_weights(
weights, phalf_coarse_on_fine, phalf_fine, dim_center=z_dim
)
return ds_regrid, masked_weights
def _mask_weights(
weights,
phalf_coarse_on_fine,
phalf_fine,
dim_center=RESTART_Z_CENTER,
dim_outer=RESTART_Z_OUTER,
):
return weights.where(
phalf_coarse_on_fine.isel({dim_outer: slice(1, None)}).variable
< phalf_fine.isel({dim_outer: -1}).variable,
other=0.0,
).rename({dim_outer: dim_center})
def regrid_vertical(
p_in: xr.DataArray,
f_in: xr.DataArray,
p_out: xr.DataArray,
iv: int = 1,
kord: int = 1,
z_dim_center: str = RESTART_Z_CENTER,
z_dim_outer: str = RESTART_Z_OUTER,
) -> xr.DataArray:
"""Do vertical regridding using Fortran mappm subroutine.
Args:
p_in: pressure at layer edges in original vertical coordinate
f_in: variable to be regridded, defined for layer averages
p_out: pressure at layer edges in new vertical coordinate
iv (optional): flag for monotinicity conservation method. Defaults to 1.
comments from mappm indicate that iv should be chosen depending on variable:
iv = -2: vertical velocity
iv = -1: winds
iv = 0: positive definite scalars
iv = 1: others
iv = 2: temperature
kord (optional): method number for vertical regridding. Defaults to 1.
z_dim_center (optional): name of centered z-dimension. Defaults to "zaxis_1".
z_dim_outer (optional): name of staggered z-dimension. Defaults to "zaxis_2".
Returns:
f_in regridded to p_out pressure levels
Raises:
ValueError: if the vertical dimensions for cell centers and cell edges have
the same name.
ValueError: if the number of columns in each input array does not
match.
ValueError: if the length of the vertical dimension in input field is
not one less than the length of the dimension of the input pressure
field.
"""
if z_dim_center == z_dim_outer:
raise ValueError("'z_dim_center' and 'z_dim_outer' must not be equal.")
original_dim_order = f_in.dims
dims_except_z = f_in.isel({z_dim_center: 0}).dims
# Ensure dims are in same order for all inputs, with the vertical dimension
# at the end.
p_in = p_in.transpose(*dims_except_z, z_dim_outer)
f_in = f_in.transpose(*dims_except_z, z_dim_center)
p_out = p_out.transpose(*dims_except_z, z_dim_outer)
# Rename vertical dimension in p_out temporarily to allow for it to have a
# different size than in p_in.
z_dim_outer_p_out = f"{z_dim_outer}_p_out"
p_out = p_out.rename({z_dim_outer: z_dim_outer_p_out}) # type: ignore
# Provide a temporary name for the output vertical dimension, again
# allowing for it to have a different size than the input vertical
# dimension.
z_dim_center_f_out = f"{z_dim_center}_f_out"
_assert_equal_number_of_columns(p_in, f_in, p_out)
_assert_valid_vertical_dimension_sizes(p_in, f_in, z_dim_outer, z_dim_center)
return (
xr.apply_ufunc(
_columnwise_mappm,
p_in,
f_in,
p_out,
input_core_dims=[[z_dim_outer], [z_dim_center], [z_dim_outer_p_out]],
output_core_dims=[[z_dim_center_f_out]],
dask="allowed",
kwargs={"iv": iv, "kord": kord},
)
.rename({z_dim_center_f_out: z_dim_center})
.transpose(*original_dim_order)
.assign_attrs(f_in.attrs)
)
def _columnwise_mappm(
p_in: Union[np.ndarray, dask.array.Array],
f_in: Union[np.ndarray, dask.array.Array],
p_out: Union[np.ndarray, dask.array.Array],
iv: int = 1,
kord: int = 1,
) -> Union[np.ndarray, dask.array.Array]:
"""An internal function to apply mappm along all columns. Assumes the
vertical dimension is the last dimension of each array."""
if any(isinstance(arg, dask.array.Array) for arg in [p_in, f_in, p_out]):
p_in, f_in, p_out = _adjust_chunks_for_mappm(p_in, f_in, p_out)
output_chunks = _output_chunks_for_mappm(f_in, p_out)
return dask.array.map_blocks(
_columnwise_mappm,
p_in,
f_in,
p_out,
dtype=f_in.dtype,
chunks=output_chunks,
iv=iv,
kord=kord,
)
else:
output_shape = _output_shape_for_mappm(p_out)
p_in, f_in, p_out = _reshape_for_mappm(p_in, f_in, p_out)
dummy_ptop = 0.0 # Not used by mappm, but required as an argument
n_columns = p_in.shape[0]
return vcm.mappm.mappm(
p_in, f_in, p_out, 1, n_columns, iv, kord, dummy_ptop
).reshape(output_shape)
def _adjust_chunks_for_mappm(
p_in: dask.array.Array, f_in: dask.array.Array, p_out: dask.array.Array
) -> Tuple[dask.array.Array, dask.array.Array, dask.array.Array]:
"""Adjusts the chunks of the input arguments to _columnwise_mappm.
Ensures that chunks are vertically-contiguous and that chunks across
columns are aligned for p_in, f_in, and p_out."""
# Align non-vertical chunks.
p_in_dims_tuple = tuple(range(p_in.ndim))
f_in_dims_tuple = p_in_dims_tuple[:-1] + (p_in.ndim + 1,)
p_out_dims_tuple = p_in_dims_tuple[:-1] + (p_in.ndim + 2,)
_, (p_in, f_in, p_out) = dask.array.core.unify_chunks(
p_in, p_in_dims_tuple, f_in, f_in_dims_tuple, p_out, p_out_dims_tuple
)
# Ensure vertical chunks are contiguous.
p_in = p_in.rechunk({-1: -1})
f_in = f_in.rechunk({-1: -1})
p_out = p_out.rechunk({-1: -1})
return p_in, f_in, p_out
def _output_chunks_for_mappm(
f_in: dask.array.Array, p_out: dask.array.Array
) -> Tuple[Tuple[int]]:
"""Determine the chunks of the output field of mappm applied to dask arrays."""
return f_in.chunks[:-1] + (p_out.shape[-1] - 1,)
def _output_shape_for_mappm(p_out: np.ndarray) -> Tuple[int]:
"""Calculate the shape of the expected output field of mappm."""
return p_out.shape[:-1] + (p_out.shape[-1] - 1,)
def _reshape_for_mappm(
p_in: np.ndarray, f_in: np.ndarray, p_out: np.ndarray
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Reshape input arrays to have a single 'column' dimension and a
'vertical' dimension."""
p_in = p_in.reshape((-1, p_in.shape[-1]))
f_in = f_in.reshape((-1, f_in.shape[-1]))
p_out = p_out.reshape((-1, p_out.shape[-1]))
return p_in, f_in, p_out
def _n_columns(da: xr.DataArray) -> int:
"""Determine the number of columns in a DataArray, assuming the last
dimension is the vertical dimension."""
    return np.prod(da.shape[:-1])  # np.product was removed in NumPy 2.0; np.prod is the supported spelling
def _assert_equal_number_of_columns(
p_in: xr.DataArray, f_in: xr.DataArray, p_out: xr.DataArray
):
"""Ensure the number of columns in each of the inputs is the same."""
n_columns = _n_columns(p_in)
other_arguments = [f_in, p_out]
if any(_n_columns(da) != n_columns for da in other_arguments):
raise ValueError(
"All dimensions except vertical must be same size for p_in, f_in and p_out"
)
def _assert_valid_vertical_dimension_sizes(
p_in: xr.DataArray, f_in: xr.DataArray, z_dim_outer: str, z_dim_center: str
):
if f_in.sizes[z_dim_center] != p_in.sizes[z_dim_outer] - 1:
raise ValueError("f_in must have a vertical dimension one shorter than p_in")
| 34.501475
| 88
| 0.671939
|
3409dfa4972d6dae58406ce1cd4526ddc8d2a72b
| 29,771
|
py
|
Python
|
tensorflow/python/keras/testing_utils.py
|
ouakif/tensorflow
|
63c45aacf30e819b00e74b85bd1c9f11b0760cd3
|
[
"Apache-2.0"
] | 27
|
2020-02-29T04:13:22.000Z
|
2022-02-07T21:54:50.000Z
|
tensorflow/python/keras/testing_utils.py
|
top-on/tensorflow
|
6efce9a74d4ba2ba2182d92ac1e4f144b5d755d2
|
[
"Apache-2.0"
] | 5
|
2020-06-01T18:50:38.000Z
|
2021-07-16T07:13:52.000Z
|
tensorflow/python/keras/testing_utils.py
|
top-on/tensorflow
|
6efce9a74d4ba2ba2182d92ac1e4f144b5d755d2
|
[
"Apache-2.0"
] | 10
|
2020-12-15T03:55:24.000Z
|
2021-12-17T23:14:11.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for unit-testing Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import threading
import numpy as np
from tensorflow.python import keras
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.optimizer_v2 import adadelta as adadelta_v2
from tensorflow.python.keras.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.python.keras.optimizer_v2 import adam as adam_v2
from tensorflow.python.keras.optimizer_v2 import adamax as adamax_v2
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.keras.optimizer_v2 import nadam as nadam_v2
from tensorflow.python.keras.optimizer_v2 import rmsprop as rmsprop_v2
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def get_test_data(train_samples,
test_samples,
input_shape,
num_classes,
random_seed=None):
"""Generates test data to train a model on.
Arguments:
train_samples: Integer, how many training samples to generate.
test_samples: Integer, how many test samples to generate.
input_shape: Tuple of integers, shape of the inputs.
num_classes: Integer, number of classes for the data and targets.
random_seed: Integer, random seed used by numpy to generate data.
Returns:
A tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
"""
if random_seed is not None:
np.random.seed(random_seed)
num_sample = train_samples + test_samples
templates = 2 * num_classes * np.random.random((num_classes,) + input_shape)
y = np.random.randint(0, num_classes, size=(num_sample,))
x = np.zeros((num_sample,) + input_shape, dtype=np.float32)
for i in range(num_sample):
x[i] = templates[y[i]] + np.random.normal(loc=0, scale=1., size=input_shape)
return ((x[:train_samples], y[:train_samples]),
(x[train_samples:], y[train_samples:]))
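# Hedged usage sketch (not part of this module): generating a tiny synthetic
# classification set with get_test_data above; all sizes are arbitrary.
#
#     (x_train, y_train), (x_test, y_test) = get_test_data(
#         train_samples=20, test_samples=10, input_shape=(8,),
#         num_classes=3, random_seed=1337)
#     # x_train.shape == (20, 8); y_train.shape == (20,)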
@test_util.use_deterministic_cudnn
def layer_test(layer_cls, kwargs=None, input_shape=None, input_dtype=None,
input_data=None, expected_output=None,
expected_output_dtype=None, expected_output_shape=None,
validate_training=True, adapt_data=None):
"""Test routine for a layer with a single input and single output.
Arguments:
layer_cls: Layer class object.
kwargs: Optional dictionary of keyword arguments for instantiating the
layer.
input_shape: Input shape tuple.
input_dtype: Data type of the input data.
input_data: Numpy array of input data.
expected_output: Numpy array of the expected output.
expected_output_dtype: Data type expected for the output.
expected_output_shape: Shape tuple for the expected shape of the output.
validate_training: Whether to attempt to validate training on this layer.
This might be set to False for non-differentiable layers that output
string or integer values.
adapt_data: Optional data for an 'adapt' call. If None, adapt() will not
be tested for this layer. This is only relevant for PreprocessingLayers.
Returns:
The output data (Numpy array) returned by the layer, for additional
checks to be done by the calling code.
Raises:
ValueError: if `input_shape is None`.
"""
if input_data is None:
if input_shape is None:
raise ValueError('input_shape is None')
if not input_dtype:
input_dtype = 'float32'
input_data_shape = list(input_shape)
for i, e in enumerate(input_data_shape):
if e is None:
input_data_shape[i] = np.random.randint(1, 4)
input_data = 10 * np.random.random(input_data_shape)
if input_dtype[:5] == 'float':
input_data -= 0.5
input_data = input_data.astype(input_dtype)
elif input_shape is None:
input_shape = input_data.shape
if input_dtype is None:
input_dtype = input_data.dtype
if expected_output_dtype is None:
expected_output_dtype = input_dtype
# instantiation
kwargs = kwargs or {}
layer = layer_cls(**kwargs)
# Test adapt, if data was passed.
if adapt_data is not None:
layer.adapt(adapt_data)
# test get_weights, set_weights at the layer level
weights = layer.get_weights()
layer.set_weights(weights)
# test instantiation from weights
if 'weights' in tf_inspect.getargspec(layer_cls.__init__):
kwargs['weights'] = weights
layer = layer_cls(**kwargs)
# test in functional API
x = keras.layers.Input(shape=input_shape[1:], dtype=input_dtype)
y = layer(x)
if keras.backend.dtype(y) != expected_output_dtype:
raise AssertionError('When testing layer %s, for input %s, found output '
'dtype=%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
keras.backend.dtype(y),
expected_output_dtype,
kwargs))
def assert_shapes_equal(expected, actual):
"""Asserts that the output shape from the layer matches the actual shape."""
if len(expected) != len(actual):
raise AssertionError(
'When testing layer %s, for input %s, found output_shape='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual, expected, kwargs))
for expected_dim, actual_dim in zip(expected, actual):
if isinstance(expected_dim, tensor_shape.Dimension):
expected_dim = expected_dim.value
if isinstance(actual_dim, tensor_shape.Dimension):
actual_dim = actual_dim.value
if expected_dim is not None and expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s, for input %s, found output_shape='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual, expected, kwargs))
if expected_output_shape is not None:
assert_shapes_equal(tensor_shape.TensorShape(expected_output_shape),
y.shape)
# check shape inference
model = keras.models.Model(x, y)
computed_output_shape = tuple(
layer.compute_output_shape(
tensor_shape.TensorShape(input_shape)).as_list())
computed_output_signature = layer.compute_output_signature(
tensor_spec.TensorSpec(shape=input_shape, dtype=input_dtype))
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
assert_shapes_equal(computed_output_shape, actual_output_shape)
assert_shapes_equal(computed_output_signature.shape, actual_output_shape)
if computed_output_signature.dtype != actual_output.dtype:
raise AssertionError(
'When testing layer %s, for input %s, found output_dtype='
'%s but expected to find %s.\nFull kwargs: %s' %
(layer_cls.__name__, x, actual_output.dtype,
computed_output_signature.dtype, kwargs))
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output,
rtol=1e-3, atol=1e-6)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = keras.models.Model.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=1e-3, atol=1e-6)
# test training mode (e.g. useful for dropout tests)
# Rebuild the model to avoid the graph being reused between predict() and
# train_on_batch(). See b/120160788 for more details. This should be
# mitigated after 2.0.
if validate_training:
model = keras.models.Model(x, layer(x))
if _thread_local_data.run_eagerly is not None:
model.compile(
'rmsprop',
'mse',
weighted_metrics=['acc'],
run_eagerly=should_run_eagerly())
else:
model.compile('rmsprop', 'mse', weighted_metrics=['acc'])
model.train_on_batch(input_data, actual_output)
# test as first layer in Sequential API
layer_config = layer.get_config()
layer_config['batch_input_shape'] = input_shape
layer = layer.__class__.from_config(layer_config)
# Test adapt, if data was passed.
if adapt_data is not None:
layer.adapt(adapt_data)
model = keras.models.Sequential()
model.add(layer)
actual_output = model.predict(input_data)
actual_output_shape = actual_output.shape
for expected_dim, actual_dim in zip(computed_output_shape,
actual_output_shape):
if expected_dim is not None:
if expected_dim != actual_dim:
raise AssertionError(
'When testing layer %s **after deserialization**, '
'for input %s, found output_shape='
'%s but expected to find inferred shape %s.\nFull kwargs: %s' %
(layer_cls.__name__,
x,
actual_output_shape,
computed_output_shape,
kwargs))
if expected_output is not None:
np.testing.assert_allclose(actual_output, expected_output,
rtol=1e-3, atol=1e-6)
# test serialization, weight setting at model level
model_config = model.get_config()
recovered_model = keras.models.Sequential.from_config(model_config)
if model.weights:
weights = model.get_weights()
recovered_model.set_weights(weights)
output = recovered_model.predict(input_data)
np.testing.assert_allclose(output, actual_output, rtol=1e-3, atol=1e-6)
# for further checks in the caller function
return actual_output
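# Hedged usage sketch (added for illustration; not part of the original module).
# It exercises `layer_test` on a plain Dense layer; the kwargs and shapes are
# assumptions, not values taken from any existing test.
def _example_layer_test():
  return layer_test(
      keras.layers.Dense,
      kwargs={'units': 3},
      input_shape=(2, 4),
      expected_output_shape=(None, 3))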
_thread_local_data = threading.local()
_thread_local_data.model_type = None
_thread_local_data.run_eagerly = None
_thread_local_data.experimental_run_tf_function = None
@tf_contextlib.contextmanager
def model_type_scope(value):
"""Provides a scope within which the model type to test is equal to `value`.
The model type gets restored to its original value upon exiting the scope.
Arguments:
value: model type value
Yields:
The provided value.
"""
previous_value = _thread_local_data.model_type
try:
_thread_local_data.model_type = value
yield value
finally:
# Restore model type to initial value.
_thread_local_data.model_type = previous_value
@tf_contextlib.contextmanager
def run_eagerly_scope(value):
"""Provides a scope within which we compile models to run eagerly or not.
The boolean gets restored to its original value upon exiting the scope.
Arguments:
value: Bool specifying if we should run models eagerly in the active test.
Should be True or False.
Yields:
The provided value.
"""
previous_value = _thread_local_data.run_eagerly
try:
_thread_local_data.run_eagerly = value
yield value
finally:
# Restore run_eagerly to its initial value.
_thread_local_data.run_eagerly = previous_value
def should_run_eagerly():
"""Returns whether the models we are testing should be run eagerly."""
if _thread_local_data.run_eagerly is None:
raise ValueError('Cannot call `should_run_eagerly()` outside of a '
'`run_eagerly_scope()` or `run_all_keras_modes` '
'decorator.')
return _thread_local_data.run_eagerly and context.executing_eagerly()
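# Hedged usage sketch (added for illustration; not part of the original module):
# pairing `run_eagerly_scope` with `should_run_eagerly`. Whether the flag is
# honored also depends on eager execution being enabled.
def _example_run_eagerly_scope():
  with run_eagerly_scope(True):
    return should_run_eagerly()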
@tf_contextlib.contextmanager
def experimental_run_tf_function_scope(value):
"""Provides a scope within which we compile models to run with distribution.
The boolean gets restored to its original value upon exiting the scope.
Arguments:
value: Bool specifying if we should run models with default distribution
in the active test. Should be True or False.
Yields:
The provided value.
"""
previous_value = _thread_local_data.experimental_run_tf_function
try:
_thread_local_data.experimental_run_tf_function = value
yield value
finally:
# Restore experimental_run_tf_function to its initial value.
_thread_local_data.experimental_run_tf_function = previous_value
def should_run_tf_function():
"""Returns whether the models we are testing should be run distributed."""
if _thread_local_data.experimental_run_tf_function is None:
raise ValueError(
'Cannot call `should_run_tf_function()` outside of a '
'`experimental_run_tf_function_scope()` or `run_all_keras_modes` '
'decorator.')
return (_thread_local_data.experimental_run_tf_function and
context.executing_eagerly())
def get_model_type():
"""Gets the model type that should be tested."""
if _thread_local_data.model_type is None:
raise ValueError('Cannot call `get_model_type()` outside of a '
'`model_type_scope()` or `run_with_all_model_types` '
'decorator.')
return _thread_local_data.model_type
def get_small_sequential_mlp(num_hidden, num_classes, input_dim=None):
model = keras.models.Sequential()
if input_dim:
model.add(keras.layers.Dense(num_hidden, activation='relu',
input_dim=input_dim))
else:
model.add(keras.layers.Dense(num_hidden, activation='relu'))
activation = 'sigmoid' if num_classes == 1 else 'softmax'
model.add(keras.layers.Dense(num_classes, activation=activation))
return model
def get_small_functional_mlp(num_hidden, num_classes, input_dim):
inputs = keras.Input(shape=(input_dim,))
outputs = keras.layers.Dense(num_hidden, activation='relu')(inputs)
activation = 'sigmoid' if num_classes == 1 else 'softmax'
outputs = keras.layers.Dense(num_classes, activation=activation)(outputs)
return keras.Model(inputs, outputs)
class _SmallSubclassMLP(keras.Model):
"""A subclass model based small MLP."""
def __init__(self, num_hidden, num_classes):
super(_SmallSubclassMLP, self).__init__()
self.layer_a = keras.layers.Dense(num_hidden, activation='relu')
activation = 'sigmoid' if num_classes == 1 else 'softmax'
self.layer_b = keras.layers.Dense(num_classes, activation=activation)
def call(self, inputs, **kwargs):
x = self.layer_a(inputs)
return self.layer_b(x)
class _SmallSubclassMLPCustomBuild(keras.Model):
"""A subclass model small MLP that uses a custom build method."""
def __init__(self, num_hidden, num_classes):
super(_SmallSubclassMLPCustomBuild, self).__init__()
self.layer_a = None
self.layer_b = None
self.num_hidden = num_hidden
self.num_classes = num_classes
def build(self, input_shape):
self.layer_a = keras.layers.Dense(self.num_hidden, activation='relu')
activation = 'sigmoid' if self.num_classes == 1 else 'softmax'
self.layer_b = keras.layers.Dense(self.num_classes, activation=activation)
def call(self, inputs, **kwargs):
x = self.layer_a(inputs)
return self.layer_b(x)
def get_small_subclass_mlp(num_hidden, num_classes):
return _SmallSubclassMLP(num_hidden, num_classes)
def get_small_subclass_mlp_with_custom_build(num_hidden, num_classes):
return _SmallSubclassMLPCustomBuild(num_hidden, num_classes)
def get_small_mlp(num_hidden, num_classes, input_dim):
"""Get a small mlp of the model type specified by `get_model_type`."""
model_type = get_model_type()
if model_type == 'subclass':
return get_small_subclass_mlp(num_hidden, num_classes)
if model_type == 'subclass_custom_build':
return get_small_subclass_mlp_with_custom_build(num_hidden, num_classes)
if model_type == 'sequential':
return get_small_sequential_mlp(num_hidden, num_classes, input_dim)
if model_type == 'functional':
return get_small_functional_mlp(num_hidden, num_classes, input_dim)
raise ValueError('Unknown model type {}'.format(model_type))
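# Hedged usage sketch (added for illustration; not part of the original module):
# building one of the small MLPs through the model-type scope machinery. The
# layer sizes below are arbitrary assumptions.
def _example_get_small_mlp():
  with model_type_scope('functional'):
    return get_small_mlp(num_hidden=8, num_classes=3, input_dim=4)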
class _SubclassModel(keras.Model):
"""A Keras subclass model."""
def __init__(self, layers, *args, **kwargs):
"""Instantiate a model.
Args:
layers: a list of layers to be added to the model.
*args: Model's args
**kwargs: Model's keyword args, at most one of
input_tensor -> the input tensor required for ragged/sparse input.
"""
inputs = kwargs.pop('input_tensor', None)
super(_SubclassModel, self).__init__(*args, **kwargs)
# Note that clone and build don't support lists of layers in subclassed
# models. Adding each layer directly here.
for i, layer in enumerate(layers):
setattr(self, self._layer_name_for_i(i), layer)
self.num_layers = len(layers)
if inputs is not None:
self._set_inputs(inputs)
def _layer_name_for_i(self, i):
return 'layer{}'.format(i)
def call(self, inputs, **kwargs):
x = inputs
for i in range(self.num_layers):
layer = getattr(self, self._layer_name_for_i(i))
x = layer(x)
return x
class _SubclassModelCustomBuild(keras.Model):
"""A Keras subclass model that uses a custom build method."""
def __init__(self, layer_generating_func, *args, **kwargs):
super(_SubclassModelCustomBuild, self).__init__(*args, **kwargs)
self.all_layers = None
self._layer_generating_func = layer_generating_func
def build(self, input_shape):
layers = []
for layer in self._layer_generating_func():
layers.append(layer)
self.all_layers = layers
def call(self, inputs, **kwargs):
x = inputs
for layer in self.all_layers:
x = layer(x)
return x
def get_model_from_layers(layers,
input_shape=None,
input_dtype=None,
name=None,
input_ragged=None,
input_sparse=None):
"""Builds a model from a sequence of layers.
Args:
layers: The layers used to build the network.
input_shape: Shape tuple of the input or 'TensorShape' instance.
input_dtype: Datatype of the input.
name: Name for the model.
input_ragged: Boolean, whether the input data is a ragged tensor.
input_sparse: Boolean, whether the input data is a sparse tensor.
Returns:
A Keras model.
"""
model_type = get_model_type()
if model_type == 'subclass':
inputs = None
if input_ragged or input_sparse:
inputs = keras.Input(
shape=input_shape,
dtype=input_dtype,
ragged=input_ragged,
sparse=input_sparse)
return _SubclassModel(layers, name=name, input_tensor=inputs)
if model_type == 'subclass_custom_build':
layer_generating_func = lambda: layers
return _SubclassModelCustomBuild(layer_generating_func, name=name)
if model_type == 'sequential':
model = keras.models.Sequential(name=name)
if input_shape:
model.add(
keras.layers.InputLayer(
input_shape=input_shape,
dtype=input_dtype,
ragged=input_ragged,
sparse=input_sparse))
for layer in layers:
model.add(layer)
return model
if model_type == 'functional':
if not input_shape:
raise ValueError('Cannot create a functional model from layers with no '
'input shape.')
inputs = keras.Input(
shape=input_shape,
dtype=input_dtype,
ragged=input_ragged,
sparse=input_sparse)
outputs = inputs
for layer in layers:
outputs = layer(outputs)
return keras.Model(inputs, outputs, name=name)
raise ValueError('Unknown model type {}'.format(model_type))
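# Hedged usage sketch (added for illustration; not part of the original module):
# assembling a model from a plain list of layers under the 'sequential' model
# type. The layer sizes and input shape are assumptions.
def _example_get_model_from_layers():
  layers = [keras.layers.Dense(4, activation='relu'), keras.layers.Dense(1)]
  with model_type_scope('sequential'):
    return get_model_from_layers(layers, input_shape=(3,))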
class _MultiIOSubclassModel(keras.Model):
"""Multi IO Keras subclass model."""
def __init__(self, branch_a, branch_b, shared_input_branch=None,
shared_output_branch=None):
super(_MultiIOSubclassModel, self).__init__()
self._shared_input_branch = shared_input_branch
self._branch_a = branch_a
self._branch_b = branch_b
self._shared_output_branch = shared_output_branch
def call(self, inputs, **kwargs):
if self._shared_input_branch:
for layer in self._shared_input_branch:
inputs = layer(inputs)
a = inputs
b = inputs
else:
a, b = inputs
for layer in self._branch_a:
a = layer(a)
for layer in self._branch_b:
b = layer(b)
outs = [a, b]
if self._shared_output_branch:
for layer in self._shared_output_branch:
outs = layer(outs)
return outs
class _MultiIOSubclassModelCustomBuild(keras.Model):
"""Multi IO Keras subclass model that uses a custom build method."""
def __init__(self, branch_a_func, branch_b_func,
shared_input_branch_func=None,
shared_output_branch_func=None):
super(_MultiIOSubclassModelCustomBuild, self).__init__()
self._shared_input_branch_func = shared_input_branch_func
self._branch_a_func = branch_a_func
self._branch_b_func = branch_b_func
self._shared_output_branch_func = shared_output_branch_func
self._shared_input_branch = None
self._branch_a = None
self._branch_b = None
self._shared_output_branch = None
def build(self, input_shape):
if self._shared_input_branch_func():
self._shared_input_branch = self._shared_input_branch_func()
self._branch_a = self._branch_a_func()
self._branch_b = self._branch_b_func()
if self._shared_output_branch_func():
self._shared_output_branch = self._shared_output_branch_func()
def call(self, inputs, **kwargs):
if self._shared_input_branch:
for layer in self._shared_input_branch:
inputs = layer(inputs)
a = inputs
b = inputs
else:
a, b = inputs
for layer in self._branch_a:
a = layer(a)
for layer in self._branch_b:
b = layer(b)
outs = a, b
if self._shared_output_branch:
for layer in self._shared_output_branch:
outs = layer(outs)
return outs
def get_multi_io_model(
branch_a,
branch_b,
shared_input_branch=None,
shared_output_branch=None):
"""Builds a multi-io model that contains two branches.
The produced model will be of the type specified by `get_model_type`.
To build a two-input, two-output model:
Specify a list of layers for branch a and branch b, but do not specify any
shared input branch or shared output branch. The resulting model will apply
each branch to a different input, to produce two outputs.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
model = get_multi_io_model(branch_a, branch_b)
```
To build a two-input, one-output model:
Specify a list of layers for branch a and branch b, and specify a
shared output branch. The resulting model will apply
each branch to a different input. It will then apply the shared output
branch to a tuple containing the intermediate outputs of each branch,
to produce a single output. The first layer in the shared_output_branch
must be able to merge a tuple of two tensors.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
input_branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
input_branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
shared_output_branch = [Concatenate(), Dense(), Dense()]
model = get_multi_io_model(input_branch_a, input_branch_b,
shared_output_branch=shared_output_branch)
```
To build a one-input, two-output model:
Specify a list of layers for branch a and branch b, and specify a
shared input branch. The resulting model will take one input, and apply
the shared input branch to it. It will then respectively apply each branch
to that intermediate result in parallel, to produce two outputs.
The first value in the shared_input_branch must be the Keras 'Input' layer
for the whole model. Branch a and branch b should not contain any Input
layers.
example usage:
```
shared_input_branch = [Input(shape=(2,), name='in'), Dense(), Dense()]
output_branch_a = [Dense(), Dense()]
output_branch_b = [Dense(), Dense()]
model = get_multi_io_model(output_branch_a, output_branch_b,
shared_input_branch=shared_input_branch)
```
Args:
branch_a: A sequence of layers for branch a of the model.
branch_b: A sequence of layers for branch b of the model.
shared_input_branch: An optional sequence of layers to apply to a single
input, before applying both branches to that intermediate result. If set,
the model will take only one input instead of two. Defaults to None.
shared_output_branch: An optional sequence of layers to merge the
intermediate results produced by branch a and branch b. If set,
the model will produce only one output instead of two. Defaults to None.
Returns:
A multi-io model of the type specified by `get_model_type`, specified
by the different branches.
"""
# Extract the functional inputs from the layer lists
if shared_input_branch:
inputs = shared_input_branch[0]
shared_input_branch = shared_input_branch[1:]
else:
inputs = branch_a[0], branch_b[0]
branch_a = branch_a[1:]
branch_b = branch_b[1:]
model_type = get_model_type()
if model_type == 'subclass':
return _MultiIOSubclassModel(branch_a, branch_b, shared_input_branch,
shared_output_branch)
if model_type == 'subclass_custom_build':
return _MultiIOSubclassModelCustomBuild((lambda: branch_a),
(lambda: branch_b),
(lambda: shared_input_branch),
(lambda: shared_output_branch))
if model_type == 'sequential':
raise ValueError('Cannot use `get_multi_io_model` to construct '
'sequential models')
if model_type == 'functional':
if shared_input_branch:
a_and_b = inputs
for layer in shared_input_branch:
a_and_b = layer(a_and_b)
a = a_and_b
b = a_and_b
else:
a, b = inputs
for layer in branch_a:
a = layer(a)
for layer in branch_b:
b = layer(b)
outputs = a, b
if shared_output_branch:
for layer in shared_output_branch:
outputs = layer(outputs)
return keras.Model(inputs, outputs)
raise ValueError('Unknown model type {}'.format(model_type))
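# Hedged usage sketch (added for illustration; not part of the original module):
# a two-input, two-output model built from two branches, mirroring the first
# pattern described in the docstring above. Shapes are assumptions.
def _example_get_multi_io_model():
  branch_a = [keras.Input(shape=(2,), name='a'), keras.layers.Dense(3)]
  branch_b = [keras.Input(shape=(3,), name='b'), keras.layers.Dense(3)]
  with model_type_scope('functional'):
    return get_multi_io_model(branch_a, branch_b)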
_V2_OPTIMIZER_MAP = {
'adadelta': adadelta_v2.Adadelta,
'adagrad': adagrad_v2.Adagrad,
'adam': adam_v2.Adam,
'adamax': adamax_v2.Adamax,
'nadam': nadam_v2.Nadam,
'rmsprop': rmsprop_v2.RMSprop,
'sgd': gradient_descent_v2.SGD
}
def get_v2_optimizer(name, **kwargs):
"""Get the v2 optimizer requested.
This is only necessary until v2 optimizers are the default, as we are testing in Eager,
and Eager + v1 optimizers fail tests. When we are in v2, the strings alone
should be sufficient, and this mapping can theoretically be removed.
Args:
name: string name of Keras v2 optimizer.
**kwargs: any kwargs to pass to the optimizer constructor.
Returns:
Initialized Keras v2 optimizer.
Raises:
ValueError: if an unknown name was passed.
"""
try:
return _V2_OPTIMIZER_MAP[name](**kwargs)
except KeyError:
raise ValueError(
'Could not find requested v2 optimizer: {}\nValid choices: {}'.format(
name, list(_V2_OPTIMIZER_MAP.keys())))
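# Hedged usage sketch (added for illustration; not part of the original module):
# resolving a v2 optimizer by name and forwarding constructor kwargs.
def _example_get_v2_optimizer():
  return get_v2_optimizer('adam', learning_rate=1e-3)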
def get_expected_metric_variable_names(var_names, name_suffix=''):
"""Returns expected metric variable names given names and prefix/suffix."""
if tf2.enabled() or context.executing_eagerly():
# In V1 eager mode and in V2, variable names are not made unique.
return [n + ':0' for n in var_names]
# In V1 graph mode variable names are made unique using a suffix.
return [n + name_suffix + ':0' for n in var_names]
def enable_v2_dtype_behavior(fn):
"""Decorator for enabling the layer V2 dtype behavior on a test."""
return _set_v2_dtype_behavior(fn, True)
def disable_v2_dtype_behavior(fn):
"""Decorator for disabling the layer V2 dtype behavior on a test."""
return _set_v2_dtype_behavior(fn, False)
def _set_v2_dtype_behavior(fn, enabled):
"""Returns version of 'fn' that runs with v2 dtype behavior on or off."""
@functools.wraps(fn)
def wrapper(*args, **kwargs):
v2_dtype_behavior = base_layer_utils.V2_DTYPE_BEHAVIOR
base_layer_utils.V2_DTYPE_BEHAVIOR = enabled
try:
return fn(*args, **kwargs)
finally:
base_layer_utils.V2_DTYPE_BEHAVIOR = v2_dtype_behavior
return tf_decorator.make_decorator(fn, wrapper)
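# Hedged usage sketch (added for illustration; not part of the original module):
# decorating a callable so it runs with the layer V2 dtype behavior enabled.
def _example_enable_v2_dtype_behavior():
  @enable_v2_dtype_behavior
  def _check():
    return base_layer_utils.V2_DTYPE_BEHAVIOR
  return _check()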
| 35.273697
| 88
| 0.700548
|
6e32491e85557dfaca3d49711288198d50f511ab
| 312
|
py
|
Python
|
blog/urls.py
|
admtomas/cybersecurity_blog
|
bad19ad189b1fdb770a935ecaac85187e6f62271
|
[
"MIT"
] | null | null | null |
blog/urls.py
|
admtomas/cybersecurity_blog
|
bad19ad189b1fdb770a935ecaac85187e6f62271
|
[
"MIT"
] | null | null | null |
blog/urls.py
|
admtomas/cybersecurity_blog
|
bad19ad189b1fdb770a935ecaac85187e6f62271
|
[
"MIT"
] | null | null | null |
from django.urls import path
from . import views
app_name = 'blog'
urlpatterns = [
path('', views.post_list, name='post_list'),
path('<slug:post>/', views.post_detail, name='post_detail'),
path('comment/reply/', views.reply_page, name='reply'),
path('about', views.about_page, name='about'),
]
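# Hedged usage note (not part of the original file): with this urlconf included
# under the project root, the namespaced routes above would typically be
# resolved as, e.g.:
#   from django.urls import reverse
#   reverse('blog:post_detail', kwargs={'post': 'my-first-post'})  # -> '/my-first-post/'
#   reverse('blog:about')                                          # -> '/about'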
| 26
| 64
| 0.669872
|
45a430ee9e5e39a41348e7fbc6f0e24240168953
| 17,478
|
py
|
Python
|
uhd_restpy/testplatform/sessions/ixnetwork/topology/rsvpp2mpingresssublsps_c610bddfdb08c054e463708b863af4f0.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
uhd_restpy/testplatform/sessions/ixnetwork/topology/rsvpp2mpingresssublsps_c610bddfdb08c054e463708b863af4f0.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
uhd_restpy/testplatform/sessions/ixnetwork/topology/rsvpp2mpingresssublsps_c610bddfdb08c054e463708b863af4f0.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class RsvpP2mpIngressSubLsps(Base):
"""RSVP-TE P2MP Head (Ingress) Sub LSPs
The RsvpP2mpIngressSubLsps class encapsulates a required rsvpP2mpIngressSubLsps resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'rsvpP2mpIngressSubLsps'
_SDM_ATT_MAP = {
'Active': 'active',
'AppendLeaf': 'appendLeaf',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'EnableEro': 'enableEro',
'LeafIp': 'leafIp',
'LocalIp': 'localIp',
'Name': 'name',
'NumberOfEroSubObjects': 'numberOfEroSubObjects',
'P2mpIdAsIp': 'p2mpIdAsIp',
'P2mpIdAsNum': 'p2mpIdAsNum',
'PrefixLengthOfDut': 'prefixLengthOfDut',
'PrefixLengthOfLeaf': 'prefixLengthOfLeaf',
'PrependDut': 'prependDut',
'SendAsEro': 'sendAsEro',
'SendAsSero': 'sendAsSero',
'SessionInformation': 'sessionInformation',
'State': 'state',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(RsvpP2mpIngressSubLsps, self).__init__(parent, list_op)
@property
def RsvpEroSubObjectsList(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.rsvperosubobjectslist_c0ebecb067ebf96898ae4f90af81d688.RsvpEroSubObjectsList): An instance of the RsvpEroSubObjectsList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.rsvperosubobjectslist_c0ebecb067ebf96898ae4f90af81d688 import RsvpEroSubObjectsList
if self._properties.get('RsvpEroSubObjectsList', None) is not None:
return self._properties.get('RsvpEroSubObjectsList')
else:
return RsvpEroSubObjectsList(self)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Activate/Deactivate Configuration
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def AppendLeaf(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Append Leaf
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AppendLeaf']))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def EnableEro(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable ERO
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableEro']))
@property
def LeafIp(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Leaf IP
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LeafIp']))
@property
def LocalIp(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): Local IP
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalIp'])
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NumberOfEroSubObjects(self):
# type: () -> int
"""
Returns
-------
- number: Number Of ERO Sub-Objects
"""
return self._get_attribute(self._SDM_ATT_MAP['NumberOfEroSubObjects'])
@NumberOfEroSubObjects.setter
def NumberOfEroSubObjects(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['NumberOfEroSubObjects'], value)
@property
def P2mpIdAsIp(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): P2MP ID As IP
"""
return self._get_attribute(self._SDM_ATT_MAP['P2mpIdAsIp'])
@property
def P2mpIdAsNum(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): P2MP ID displayed in Integer format
"""
return self._get_attribute(self._SDM_ATT_MAP['P2mpIdAsNum'])
@property
def PrefixLengthOfDut(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Prefix Length of DUT
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PrefixLengthOfDut']))
@property
def PrefixLengthOfLeaf(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Prefix Length of Leaf
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PrefixLengthOfLeaf']))
@property
def PrependDut(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Prepend DUT
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PrependDut']))
@property
def SendAsEro(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Send As ERO
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SendAsEro']))
@property
def SendAsSero(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Send As SERO
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SendAsSero']))
@property
def SessionInformation(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[lastErrLSPAdmissionControlFailure | lastErrLSPBadAdSpecValue | lastErrLSPBadExplicitRoute | lastErrLSPBadFlowspecValue | lastErrLSPBadInitialSubobject | lastErrLSPBadLooseNode | lastErrLSPBadStrictNode | lastErrLSPBadTSpecValue | lastErrLSPDelayBoundNotMet | lastErrLSPMPLSAllocationFailure | lastErrLSPMTUTooBig | lastErrLSPNonRSVPRouter | lastErrLSPNoRouteAvailable | lastErrLSPPathErr | lastErrLSPPathTearSent | lastErrLSPRequestedBandwidthUnavailable | lastErrLSPReservationTearReceived | lastErrLSPReservationTearSent | lastErrLSPReservationTimeout | lastErrLSPRoutingLoops | lastErrLSPRoutingProblem | lastErrLSPRSVPSystemError | lastErrLSPServiceConflict | lastErrLSPServiceUnsupported | lastErrLSPTrafficControlError | lastErrLSPTrafficControlSystemError | lastErrLSPTrafficOrganizationError | lastErrLSPTrafficServiceError | lastErrLSPUnknownObjectClass | lastErrLSPUnknownObjectCType | lastErrLSPUnsupportedL3PID | lSPAdmissionControlFailure | lSPBadAdSpecValue | lSPBadExplicitRoute | lSPBadFlowspecValue | lSPBadInitialSubobject | lSPBadLooseNode | lSPBadStrictNode | lSPBadTSpecValue | lSPDelayBoundNotMet | lSPMPLSAllocationFailure | lSPMTUTooBig | lSPNonRSVPRouter | lSPNoRouteAvailable | lSPPathErr | lSPPathTearSent | lSPRequestedBandwidthUnavailable | lSPReservationNotReceived | lSPReservationTearReceived | lSPReservationTearSent | lSPReservationTimeout | lSPRoutingLoops | lSPRoutingProblem | lSPRSVPSystemError | lSPServiceConflict | lSPServiceUnsupported | lSPTrafficControlError | lSPTrafficControlSystemError | lSPTrafficOrganizationError | lSPTrafficServiceError | lSPUnknownObjectClass | lSPUnknownObjectCType | lSPUnsupportedL3PID | mbbCompleted | mbbTriggered | none]): Logs additional information about the RSVP session state
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionInformation'])
@property
def State(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[down | none | notStarted | up]): State
"""
return self._get_attribute(self._SDM_ATT_MAP['State'])
def update(self, Name=None, NumberOfEroSubObjects=None):
# type: (str, int) -> RsvpP2mpIngressSubLsps
"""Updates rsvpP2mpIngressSubLsps resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NumberOfEroSubObjects (number): Number Of ERO Sub-Objects
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def ExcludeEroOrSero(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the excludeEroOrSero operation on the server.
Prune Ingress P2MP SubLSP
excludeEroOrSero(Arg2=list, async_operation=bool)list
-----------------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('excludeEroOrSero', payload=payload, response_object=None)
def GraftSubLsp(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the graftSubLsp operation on the server.
Activate/Enable Tunnel selected SubLsp Ranges
graftSubLsp(Arg2=list, async_operation=bool)list
------------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('graftSubLsp', payload=payload, response_object=None)
def IncludeEroOrSero(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the includeEroOrSero operation on the server.
Graft Ingress P2MP SubLSP
includeEroOrSero(Arg2=list, async_operation=bool)list
-----------------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('includeEroOrSero', payload=payload, response_object=None)
def PruneSubLsp(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the pruneSubLsp operation on the server.
Deactivate/Disable selected Tunnel SubLsp Ranges
pruneSubLsp(Arg2=list, async_operation=bool)list
------------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('pruneSubLsp', payload=payload, response_object=None)
def get_device_ids(self, PortNames=None, Active=None, AppendLeaf=None, EnableEro=None, LeafIp=None, PrefixLengthOfDut=None, PrefixLengthOfLeaf=None, PrependDut=None, SendAsEro=None, SendAsSero=None):
"""Base class infrastructure that gets a list of rsvpP2mpIngressSubLsps device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- AppendLeaf (str): optional regex of appendLeaf
- EnableEro (str): optional regex of enableEro
- LeafIp (str): optional regex of leafIp
- PrefixLengthOfDut (str): optional regex of prefixLengthOfDut
- PrefixLengthOfLeaf (str): optional regex of prefixLengthOfLeaf
- PrependDut (str): optional regex of prependDut
- SendAsEro (str): optional regex of sendAsEro
- SendAsSero (str): optional regex of sendAsSero
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
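def _example_usage(sub_lsps):
    """Hedged usage sketch (added for illustration; not part of the generated file).

    `sub_lsps` is assumed to be an RsvpP2mpIngressSubLsps instance already
    retrieved through the ixnetwork_restpy topology hierarchy.
    """
    # Rename the range and size the ERO sub-object list, then cycle the sub-LSPs.
    sub_lsps.update(Name='p2mp-sub-lsps', NumberOfEroSubObjects=2)
    # Per the docstrings above, an empty Arg2 list targets all instances.
    sub_lsps.PruneSubLsp(Arg2=[], async_operation=False)
    sub_lsps.GraftSubLsp(Arg2=[], async_operation=False)
    return sub_lsps.State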
| 42.943489
| 1,774
| 0.653736
|
f23a91cc540fb271100666f8db10a5352608fcdb
| 13,547
|
py
|
Python
|
bindings/java/java_generator.py
|
nirbheek/openwebrtc
|
838d6eedf2b4e53224a60f3da8529e6cc621359f
|
[
"BSD-2-Clause"
] | null | null | null |
bindings/java/java_generator.py
|
nirbheek/openwebrtc
|
838d6eedf2b4e53224a60f3da8529e6cc621359f
|
[
"BSD-2-Clause"
] | null | null | null |
bindings/java/java_generator.py
|
nirbheek/openwebrtc
|
838d6eedf2b4e53224a60f3da8529e6cc621359f
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2014, Ericsson AB. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import collections
import config
from functools import partial
from base_generator import *
J = BaseGenerator(
default_line_prefix=config.JAVA_INDENTATION,
)
def java_param(param):
if hasattr(param, 'java_full_class'):
return param.java_full_class + ' ' + param.name
if param.java_type:
return param.java_type + ' ' + param.name
return ()
def java_arg(param):
if param.java_type:
return param.name
return ()
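# Hedged illustration (added for clarity; not part of the original file):
# `java_param` and `java_arg` expect objects exposing `name` and `java_type`
# attributes. A parameter with java_type='int' and name='count' renders as
# 'int count' in a method signature and as 'count' at a call site; parameters
# without a java_type yield an empty tuple and drop out of the generated text.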
@add_to(J)
class JavaDoc(J.Block):
_line_prefix = ' * '
def __init__(self, text, params, ret):
self.text = text
self.params = params
self.ret = ret
@property
def start(self):
return ('/**' if self.text or self.params or self.ret else [])
@property
def end(self):
return (' */' if self.text or self.params or self.ret else [])
@property
def body(self):
return [self.text if self.text else []] + [
'@param %s %s' % kv for kv in self.params.items()
] + ['@return ' + self.ret if self.ret else []]
@add_to(J)
class Class(J.Block):
def __init__(self,
name,
variation='class',
visibility='public',
static=False,
abstract=False,
extends=None,
implements=None,
imports=None,
package=None,
**kwargs):
super(Class, self).__init__(**kwargs)
self.name = name
self.variation = variation
self.visibility = visibility
self.static = static
self.abstract = abstract
self.extends = extends or []
self.implements = implements or []
self.imports = imports or []
self.package = package
@property
def start(self):
lst = []
if self.visibility != 'default':
lst.append(self.visibility)
if self.static:
lst.append('static')
if self.abstract:
lst.append('abstract')
lst.append(self.variation)
lst.append(self.name)
if self.extends:
lst.append('extends ' + flatjoin(self.extends, ', '))
if self.implements:
lst.append('implements ' + flatjoin(self.implements, ', '))
lst.append('{')
package_decl = 'package ' + self.package + ';' if self.package else None
imports = ['import ' + i + ';' for i in self.imports]
return intersperse(prune_empty([package_decl, imports, ' '.join(lst)]), '')
@staticmethod
def create_callback(callback, **kwargs):
args = {
'name': callback.value.java_type,
'static': True,
'body': [J.Method.default(callback, native=False)],
'variation': 'interface',
}
args.update(kwargs)
return Class(**args)
@add_to(J)
class Method(J.FunctionBlock):
def __init__(self,
visibility='public',
return_type='void',
name='',
params=None,
static=False,
abstract=False,
native=False,
synchronized=False,
doc=None,
**kwargs):
super(Method, self).__init__(**kwargs)
self.name = name
self.return_type = return_type
self.params = params or []
self.visibility = visibility
self.static = static
self.synchronized = synchronized
self.abstract = abstract
self.native = native
self.doc = doc
@property
def modifiers(self):
lst = []
if self.visibility != 'default':
lst.append(self.visibility)
if self.static:
lst.append('static')
if self.synchronized:
lst.append('synchronized')
if self.abstract:
lst.append('abstract')
if self.native:
lst.append('native')
return lst
@property
def start(self):
row = self.definition + (' {' if len(self.body) else ';')
if self.doc:
return [self.doc, row]
else:
return row
@property
def end(self):
return ('}' if len(self.body) else [])
@staticmethod
def default(method, **kwargs):
return_type = method.params.return_value.java_type
if hasattr(method.params.return_value, 'java_full_class'):
return_type = method.params.return_value.java_full_class
args = {
'visibility': 'public',
'return_type': return_type,
'name': method.name,
'params': map(java_param, method.params.java_params),
'native': True,
'doc': JavaDoc(method.doc,
{p.name: getattr(p, 'doc', None) for p in method.params.java_params if getattr(p, 'doc', None) is not None},
getattr(method.params.return_value, 'doc', None),
),
}
args.update(kwargs)
return Method(**args)
@add_to(J)
def gen_signal(signal):
mapName = 'handleMap' + signal.value.java_type
mapType = 'java.util.HashMap<{signal_type}, {handle_type}>'.format(
signal_type=signal.value.java_type,
handle_type=signal.add_listener.params.return_value.object_type,
)
ensure_map_and_remove = [
J.If(mapName + ' == null', mapName + ' = new ' + mapType + '();'),
'',
'Integer current = ' + mapName + '.remove(listener);',
J.If('current != null', J.Call(signal.remove_listener.name, 'current')),
]
callback = J.Class.create_callback(signal)
return [
'private %s %s;' % (mapType, mapName),
callback,
Method.default(signal.add_listener, visibility='private'),
Method.default(signal.remove_listener, visibility='private'),
Method.default(signal.public_add_listener,
native=False,
synchronized=True,
body=ensure_map_and_remove + [
'',
'int handle = ' + signal.add_listener.name + '(listener);',
mapName + '.put(listener, handle);',
],
),
Method.default(signal.public_remove_listener,
native=False,
synchronized=True,
body=ensure_map_and_remove,
),
]
@add_to(J)
def gen_class(clazz):
# public constructors
body = [(
Method(
visibility='public',
return_type=[],
name=clazz.name,
params=map(java_param, constructor.params),
body=[
J.Call('super', J.Call('_newNativePointer', '0')),
J.Assign('long pointer', J.Call(constructor.name, *map(java_arg, constructor.params))),
J.Call('_setInternalPointer', 'pointer'),
]
),
Method(
visibility='default',
return_type='long',
native=True,
name=constructor.name,
params=map(java_param, constructor.params)
),
) for constructor in clazz.constructors]
# private constructor
body += [Method(
visibility='default',
return_type=[],
name=clazz.name,
params=['NativePointer nativePointer'],
body=[J.Call('super', 'nativePointer')],
)]
# methods
body += map(Method.default, clazz.methods)
body += map(partial(Method.default, static=True), clazz.functions)
# properties
body += sum(sum([[
[Method.default(prop.setter)] if prop.writable else [],
[Method.default(prop.getter)] if prop.readable else [],
gen_signal(prop.signal) if prop.readable else [],
] for prop in clazz.properties], []), [])
#signals
body += sum(map(gen_signal, clazz.signals), [])
return J.Class(clazz.name,
extends=clazz.parent or 'NativeInstance',
imports=[
config.PACKAGE_ROOT + '.NativeInstance',
config.PACKAGE_ROOT + '.NativePointer',
],
body=intersperse(prune_empty(body), ''),
)
@add_to(J)
def gen_enum(enum):
format_func = ('{0.name}({0.value}, "{0.nick}")' if enum.has_nick else '{0.name}({0.value})').format
members = [format_func(member) for member in enum.members]
members = intersperse(members, ',') + [';']
members = [''.join(chunk) for chunk in chunks(members, 2)]
body = [members, [
'private final int mValue;',
'private final String mNick;' if enum.has_nick else ()
],
Method(
visibility='private',
name=enum.name,
return_type=[],
params=['int value', enum.has_nick and 'String nick'],
body=['mValue = value;', enum.has_nick and 'mNick = nick;'],
),
Method('public', 'int', 'getValue',
body=['return mValue;'],
),
enum.has_nick and Method('public', 'String', 'getNick',
body=['return mNick;'],
),
enum.has_nick and Method(
static=True,
name='valueOfNick',
params=['String nick'],
return_type=enum.name,
body=[J.IfElse(
ifs=['"%s".equals(nick)' % member.nick for member in enum.members],
bodies=['return %s;' % member.name for member in enum.members] +
['throw new IllegalArgumentException("Invalid enum nick: " + nick);'],
)]
)
]
return J.Class(enum.name, variation='enum',
imports=[config.PACKAGE_ROOT + '.ValueEnum'],
implements=['ValueEnum'],
body=intersperse(prune_empty(body), ''),
)
@add_to(J)
def gen_namespace(namespace):
classes = map(gen_class, namespace.classes)
enums = map(gen_enum, namespace.enums)
callbacks = map(partial(J.Class.create_callback, static=False), namespace.callbacks)
main_class = J.Class(
name=namespace.name,
body=[
J.Block(
_start='static {',
body=['System.loadLibrary("%s");' % namespace.shared_library[3:-3]],
),
'',
Method('private', [], namespace.name, body=['']),
'',
] + intersperse(map(partial(Method.default, static=True), namespace.functions), '')
)
all_classes = classes + enums + callbacks + [main_class]
for clazz in all_classes:
clazz.package = config.PACKAGE_ROOT + '.' + namespace.symbol_prefix
return {c.name: str(c) for c in all_classes}
standard_classes = {
'NativeInstance': str(J.Class(
name='NativeInstance',
visibility='public',
package=config.PACKAGE_ROOT,
abstract=True,
body=[
J.Decl('long', 'nativeInstance'),
'',
J.Method('protected', [], 'NativeInstance', params=['NativePointer nativePointer'],
body=[J.Assign('this.nativeInstance', 'nativePointer.pointer')],
),
'',
J.Method('protected', 'void', '_setInternalPointer', params=['long pointer'],
body=[J.Assign('nativeInstance', 'pointer')]
),
'',
J.Method('protected', 'NativePointer', '_newNativePointer', params=['long pointer'],
body=[J.Return(J.Call('new NativePointer', 'pointer'))],
static=True,
),
'',
'@Override',
J.Method('protected', 'void', 'finalize',
body=[J.Call('nativeDestructor', 'this.nativeInstance')],
),
'',
J.Method('private', 'void', 'nativeDestructor', params=['long instancePointer'], native=True),
],
)),
'NativePointer': str(J.Class(
name='NativePointer',
visibility='public',
package=config.PACKAGE_ROOT,
body=[
'final long pointer;',
'',
J.Method('default', [], 'NativePointer', params=['long pointer'],
body=[J.Assign('this.pointer', 'pointer')],
),
],
)),
'ValueEnum': str(J.Class(
name='ValueEnum',
visibility='public',
package=config.PACKAGE_ROOT,
variation='interface',
body=[J.Method('public', 'int', 'getValue')],
)),
}
| 32.486811
| 124
| 0.571639
|
839b8c118726c68da2e8a06e4975472c985234ad
| 142
|
py
|
Python
|
sandbox/lib/jumpscale/JumpscaleLibsExtra/clients/racktivity/energyswitch/modelfactory/models/RTF0032/Master_0_0_4_4.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | 2
|
2019-05-09T07:21:25.000Z
|
2019-08-05T06:37:53.000Z
|
sandbox/lib/jumpscale/JumpscaleLibsExtra/clients/racktivity/energyswitch/modelfactory/models/RTF0032/Master_0_0_4_4.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | 664
|
2018-12-19T12:43:44.000Z
|
2019-08-23T04:24:42.000Z
|
Jumpscale/clients/racktivity/energyswitch/modelfactory/models/RTF0032/Master_0_0_4_4.py
|
threefoldtech/jumpscale10
|
5fb073a82aeb0e66fc7d9660c45a1e31bc094bfa
|
[
"Apache-2.0"
] | 7
|
2019-05-03T07:14:37.000Z
|
2019-08-05T12:36:52.000Z
|
from clients.racktivity.energyswitch.modelfactory.models.common.Master_0_0_4_4 import Model as ModelClass
class Model(ModelClass):
pass
| 23.666667
| 105
| 0.830986
|
e9ced04bd0d3656821864a27ed83bbb6558dc5a8
| 1,072
|
py
|
Python
|
day20/day20.py
|
Strandtasche/go-experiments
|
650b3e49439792a3e4e491436676197b720726b4
|
[
"MIT"
] | null | null | null |
day20/day20.py
|
Strandtasche/go-experiments
|
650b3e49439792a3e4e491436676197b720726b4
|
[
"MIT"
] | null | null | null |
day20/day20.py
|
Strandtasche/go-experiments
|
650b3e49439792a3e4e491436676197b720726b4
|
[
"MIT"
] | null | null | null |
import numpy as np
from collections import Counter
with open('./data/input20.txt') as f:
tiles = [l.rstrip('\n') for l in f.read().split('\n\n')]
matched = {}
sides = {}
allsides = []
for tile in tiles:
tmp = tile.split('\n')
number = int(tmp[0].split()[1][:-1])
rep = np.array([list(line) for line in tmp[1:]])
side_r = ''.join(rep[:, 0])
side_t = ''.join(rep[0, :])
side_l = ''.join(rep[:, -1])
side_b = ''.join(rep[-1, :])
sides[number] = [side_r, side_t, side_l, side_b]
allsides.append(side_r)
allsides.append(side_t)
allsides.append(side_l)
allsides.append(side_b)
# print("test")
sides_count = Counter(allsides)
print(max(sides_count.values()))
corners = []
for k, v in sides.items():
free_sides = []
for side in v:
occurances = sides_count[side]
occurances_flipped = sides_count[side[::-1]]
if occurances + occurances_flipped == 1:
free_sides.append(side)
if len(free_sides) >= 2:
corners.append(k)
print(np.prod(corners))
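# Editor's note (hedged; not part of the original script): a border string that
# appears only once across all tiles, in either orientation, belongs to an edge
# of the final image. The check `occurances + occurances_flipped == 1` counts a
# tile's own side exactly once, so tiles with two or more such free sides are
# corners, and the answer is the product of the corner tile IDs.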
| 22.333333
| 60
| 0.602612
|
d8e07aaf4064f9ab9e182485ac68ce8df9d92cd3
| 3,061
|
py
|
Python
|
model.py
|
kylemin/A2CL-PT
|
53a56d1b11f800741a41e784e8bcb2114199a1c6
|
[
"MIT"
] | 48
|
2020-07-16T03:34:22.000Z
|
2022-03-24T07:23:43.000Z
|
model.py
|
kylemin/A2CL-PT
|
53a56d1b11f800741a41e784e8bcb2114199a1c6
|
[
"MIT"
] | 9
|
2020-08-17T03:05:10.000Z
|
2022-02-23T10:16:30.000Z
|
model.py
|
kylemin/A2CL-PT
|
53a56d1b11f800741a41e784e8bcb2114199a1c6
|
[
"MIT"
] | 9
|
2020-09-02T01:57:08.000Z
|
2022-02-27T14:06:46.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as torch_init
from math import ceil, floor
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1 or classname.find('Linear') != -1:
torch_init.xavier_uniform_(m.weight)
if m.bias is not None:
m.bias.data.zero_()
class Model(nn.Module):
def __init__(self, num_class, s, omega):
super(Model, self).__init__()
self.num_class = num_class
self.s = s
self.omega = omega
D = 1024
d = 0.7
self.fc_r = nn.Linear(D, D)
self.fc1_r = nn.Linear(D, D)
self.fc_f = nn.Linear(D, D)
self.fc1_f = nn.Linear(D, D)
self.classifier_r = nn.Conv1d(D, num_class, kernel_size=1)
self.classifier_f = nn.Conv1d(D, num_class, kernel_size=1)
self.classifier_ra = nn.ModuleList([nn.Conv1d(D, 1, kernel_size=1) for i in range(num_class)]) # it can be implemented by conv2d with groups=num_class
self.classifier_fa = nn.ModuleList([nn.Conv1d(D, 1, kernel_size=1) for i in range(num_class)])
self.dropout_r = nn.Dropout(d)
self.dropout_f = nn.Dropout(d)
self.apply(weights_init)
self.mul_r = nn.Parameter(data=torch.ones(num_class))
self.mul_f = nn.Parameter(data=torch.ones(num_class))
def forward(self, inputs):
N, T, D = inputs.shape
D //= 2
x_r = F.relu(self.fc_r(inputs[:,:,:D]))
x_f = F.relu(self.fc_f(inputs[:,:,D:]))
x_r = F.relu(self.fc1_r(x_r)).permute(0,2,1)
x_f = F.relu(self.fc1_f(x_f)).permute(0,2,1)
x_r = self.dropout_r(x_r)
x_f = self.dropout_f(x_f)
k = max(T-floor(T/self.s), 1)
cls_x_r = self.classifier_r(x_r).permute(0,2,1)
cls_x_f = self.classifier_f(x_f).permute(0,2,1)
cls_x_ra = cls_x_r.new_zeros(cls_x_r.shape)
cls_x_fa = cls_x_f.new_zeros(cls_x_f.shape)
cls_x_rat = cls_x_r.new_zeros(cls_x_r.shape)
cls_x_fat = cls_x_f.new_zeros(cls_x_f.shape)
mask_value = -100
for i in range(self.num_class):
mask_r = cls_x_r[:,:,i]>torch.kthvalue(cls_x_r[:,:,i], k, dim=1, keepdim=True)[0]
x_r_erased = torch.masked_fill(x_r, mask_r.unsqueeze(1), 0)
cls_x_ra[:,:,i] = torch.masked_fill(self.classifier_ra[i](x_r_erased).squeeze(1), mask_r, mask_value)
cls_x_rat[:,:,i] = self.classifier_ra[i](x_r).squeeze(1)
mask_f = cls_x_f[:,:,i]>torch.kthvalue(cls_x_f[:,:,i], k, dim=1, keepdim=True)[0]
x_f_erased = torch.masked_fill(x_f, mask_f.unsqueeze(1), 0)
cls_x_fa[:,:,i] = torch.masked_fill(self.classifier_fa[i](x_f_erased).squeeze(1), mask_f, mask_value)
cls_x_fat[:,:,i] = self.classifier_fa[i](x_f).squeeze(1)
tcam = (cls_x_r+cls_x_rat*self.omega) * self.mul_r + (cls_x_f+cls_x_fat*self.omega) * self.mul_f
return x_r.permute(0,2,1), [cls_x_r, cls_x_ra], x_f.permute(0,2,1), [cls_x_f, cls_x_fa], tcam
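def _example_forward():
    """Hedged usage sketch (added for illustration; not part of the original repo).

    The hyper-parameters and the 2*1024-dimensional two-stream feature size are
    assumptions chosen to match the shapes expected by `forward`.
    """
    model = Model(num_class=20, s=8, omega=0.6)
    feats = torch.randn(2, 750, 2048)  # (batch, temporal snippets, RGB+flow features)
    x_r, cls_r, x_f, cls_f, tcam = model(feats)
    return tcam.shape  # expected: torch.Size([2, 750, 20])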
| 39.753247
| 158
| 0.619079
|
7ddde92a995a9b74dc88eac38a7ef31ff2e15f55
| 4,072
|
py
|
Python
|
project/GUI/GUICore.py
|
RemuTeam/Remu
|
a7d100ff9002b1b1d27249f8adf510b5a89c09e3
|
[
"MIT"
] | 2
|
2017-09-18T11:04:38.000Z
|
2017-09-25T17:23:21.000Z
|
project/GUI/GUICore.py
|
RemuTeam/Remu
|
a7d100ff9002b1b1d27249f8adf510b5a89c09e3
|
[
"MIT"
] | 26
|
2017-09-20T09:11:10.000Z
|
2017-12-11T12:21:56.000Z
|
project/GUI/GUICore.py
|
RemuTeam/Remu
|
a7d100ff9002b1b1d27249f8adf510b5a89c09e3
|
[
"MIT"
] | null | null | null |
from kivy.app import App
from kivy.properties import StringProperty
from kivy.uix.screenmanager import ScreenManager, Screen
from GUI.MasterGUI.MasterGUILayout import MasterGUILayout
from GUI.MasterGUI.ProjectOverview import ProjectOverview # Do not remove, needed by RemuSM!
from GUI.SlaveGUI.PresentationLayout import PresentationLayout
from GUI.SlaveGUI.SlaveGUILayout import SlaveGUILayout
from GUI.PopUps.PopUps import ExceptionAlertPopUp
"""
CLASS LIBRARY TO HANDLE THE FUNCTIONALITY OF GUI LAYOUTS
The layouts' components, administrative information (such as
ids and names) and functions to perform on triggered events
are defined in the layout file:
project/GUI/remu.kv
"""
class SwitchLayout(Screen):
"""
Produces the GUI-layout that allows the user to choose
between Master- and Slave-mode.
Inherits kivy.uix.screenmanager.Screen
"""
text = StringProperty('')
def goto_master_mode(self):
"""
        Sets up the app to be used in the master mode.
        Opens an ExceptionAlertPopUp if adding the master layout is not possible.
"""
app = App.get_running_app()
try:
app.root.add_master_layout()
except Exception as ex:
app.reset_servicemode()
app.root.rm_master_layout()
ExceptionAlertPopUp("Error going to master mode:", ex).open()
def add_address(self, address):
self.text = address
class InfoLayout(Screen):
with open('infotext.txt') as f:
t = f.read()
text = t
class RemuSM(ScreenManager):
"""
Handles changing the GUI-layouts as different screens for the
application, and also acts as the root widget
Inherits kivy.uix.screenmanager.ScreenManager
"""
def __init__(self, **kwargs):
"""
        Initializes references to the different screens as 'None'
"""
super(RemuSM, self).__init__(**kwargs)
self.master_screen = None
self.slave_screen = None
self.presentation_screen = None
self.info_screen = None
def add_master_layout(self):
"""
Creates a new master layout, and sets it to be the current screen
"""
if self.master_screen is None:
self.master_screen = MasterGUILayout(name='master_gui_layout')
self.add_widget(self.master_screen)
self.current = 'master_gui_layout'
def add_slave_layout(self):
"""
Creates a new slave layout and a presentation layout, and sets the slave layout
to be the current screen
"""
if self.slave_screen is None:
self.slave_screen = SlaveGUILayout(name='slave_gui_layout')
self.presentation_screen = PresentationLayout(name='presentation_layout')
self.add_widget(self.slave_screen)
self.add_widget(self.presentation_screen)
self.current = 'slave_gui_layout'
def add_info_layout(self):
"""
Creates a new info_gui_layout if it doesn't exist, and then shows it.
"""
if self.info_screen is None:
self.info_screen = InfoLayout(name='info_gui_layout')
self.add_widget(self.info_screen)
self.current = 'info_gui_layout'
def change_screen_to(self, name):
"""
Changes the screen according to the screen name parameter
"""
self.current = name
def rm_master_layout(self):
"""
Removes the master layout from screenmanager's screens
"""
        self.master_screen.project_overview.remove_presentations()
        self.remove_widget(self.master_screen)
        self.master_screen = None
        self.change_screen_to("switch_layout")
def rm_slave_layout(self):
"""
Removes the slave layout and the presentation layout from screenmanager's screens
"""
self.remove_widget(self.slave_screen)
self.remove_widget(self.presentation_screen)
        self.slave_screen = None
        self.presentation_screen = None
self.change_screen_to("switch_layout")
def get_current_layout(self):
return self.current_screen
| 31.8125
| 93
| 0.670432
|
6d0110085c9b6c82a74d002debd653a2a9419c65
| 405
|
py
|
Python
|
astrazenecadev/wsgi.py
|
Nicolasvegam/astrazeneca
|
9f549c170553d6ad13bc2949e147f4a2a53cb67d
|
[
"MIT"
] | null | null | null |
astrazenecadev/wsgi.py
|
Nicolasvegam/astrazeneca
|
9f549c170553d6ad13bc2949e147f4a2a53cb67d
|
[
"MIT"
] | null | null | null |
astrazenecadev/wsgi.py
|
Nicolasvegam/astrazeneca
|
9f549c170553d6ad13bc2949e147f4a2a53cb67d
|
[
"MIT"
] | null | null | null |
"""
WSGI config for astrazenecadev project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'astrazenecadev.settings')
application = get_wsgi_application()
| 23.823529
| 78
| 0.792593
|
3702fa725c47e6f4f77283709392367e2864cd48
| 4,701
|
py
|
Python
|
homeassistant/components/konnected/switch.py
|
andersop91/core
|
0e0ef0aa17073609eae7c974cf4c73306b7c414b
|
[
"Apache-2.0"
] | 4
|
2021-07-11T09:11:00.000Z
|
2022-02-27T14:43:50.000Z
|
homeassistant/components/konnected/switch.py
|
andersop91/core
|
0e0ef0aa17073609eae7c974cf4c73306b7c414b
|
[
"Apache-2.0"
] | 277
|
2021-10-04T06:39:33.000Z
|
2021-12-28T22:04:17.000Z
|
homeassistant/components/konnected/switch.py
|
andersop91/core
|
0e0ef0aa17073609eae7c974cf4c73306b7c414b
|
[
"Apache-2.0"
] | 1
|
2022-02-09T00:30:51.000Z
|
2022-02-09T00:30:51.000Z
|
"""Support for wired switches attached to a Konnected device."""
import logging
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_STATE,
CONF_DEVICES,
CONF_NAME,
CONF_REPEAT,
CONF_SWITCHES,
CONF_ZONE,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo, ToggleEntity
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import (
CONF_ACTIVATION,
CONF_MOMENTARY,
CONF_PAUSE,
DOMAIN as KONNECTED_DOMAIN,
STATE_HIGH,
STATE_LOW,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up switches attached to a Konnected device from a config entry."""
data = hass.data[KONNECTED_DOMAIN]
device_id = config_entry.data["id"]
switches = [
KonnectedSwitch(device_id, zone_data.get(CONF_ZONE), zone_data)
for zone_data in data[CONF_DEVICES][device_id][CONF_SWITCHES]
]
async_add_entities(switches)
class KonnectedSwitch(ToggleEntity):
"""Representation of a Konnected switch."""
def __init__(self, device_id, zone_num, data):
"""Initialize the Konnected switch."""
self._data = data
self._device_id = device_id
self._zone_num = zone_num
self._activation = self._data.get(CONF_ACTIVATION, STATE_HIGH)
self._momentary = self._data.get(CONF_MOMENTARY)
self._pause = self._data.get(CONF_PAUSE)
self._repeat = self._data.get(CONF_REPEAT)
self._state = self._boolean_state(self._data.get(ATTR_STATE))
self._name = self._data.get(CONF_NAME)
self._unique_id = (
f"{device_id}-{self._zone_num}-{self._momentary}-"
f"{self._pause}-{self._repeat}"
)
@property
def unique_id(self) -> str:
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def is_on(self):
"""Return the status of the sensor."""
return self._state
@property
def panel(self):
"""Return the Konnected HTTP client."""
device_data = self.hass.data[KONNECTED_DOMAIN][CONF_DEVICES][self._device_id]
return device_data.get("panel")
@property
def device_info(self) -> DeviceInfo:
"""Return the device info."""
return DeviceInfo(identifiers={(KONNECTED_DOMAIN, self._device_id)})
@property
def available(self):
"""Return whether the panel is available."""
return self.panel.available
async def async_turn_on(self, **kwargs):
"""Send a command to turn on the switch."""
resp = await self.panel.update_switch(
self._zone_num,
int(self._activation == STATE_HIGH),
self._momentary,
self._repeat,
self._pause,
)
if resp.get(ATTR_STATE) is not None:
self._set_state(True)
if self._momentary and resp.get(ATTR_STATE) != -1:
# Immediately set the state back off for momentary switches
self._set_state(False)
async def async_turn_off(self, **kwargs):
"""Send a command to turn off the switch."""
resp = await self.panel.update_switch(
self._zone_num, int(self._activation == STATE_LOW)
)
if resp.get(ATTR_STATE) is not None:
self._set_state(self._boolean_state(resp.get(ATTR_STATE)))
def _boolean_state(self, int_state):
if int_state is None:
return False
if int_state == 0:
return self._activation == STATE_LOW
if int_state == 1:
return self._activation == STATE_HIGH
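        # e.g. with activation == STATE_HIGH a panel state of 1 maps to on (True) and
        # 0 to off (False); with activation == STATE_LOW the meaning of 0/1 is inverted.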
def _set_state(self, state):
self._state = state
self.async_write_ha_state()
_LOGGER.debug(
"Setting status of %s actuator zone %s to %s",
self._device_id,
self.name,
state,
)
@callback
def async_set_state(self, state):
"""Update the switch state."""
self._set_state(state)
async def async_added_to_hass(self):
"""Store entity_id and register state change callback."""
self._data["entity_id"] = self.entity_id
self.async_on_remove(
async_dispatcher_connect(
self.hass, f"konnected.{self.entity_id}.update", self.async_set_state
)
)
| 30.927632
| 85
| 0.640715
|
b147cfde7baafc7dc3e6b01fd4c9fb6d4de994cc
| 9,174
|
py
|
Python
|
tests/logic/meta_attribute_mappers_test.py
|
Yelp/schematizer
|
035845d27945a05db475f00eb76f59e8825dbaa4
|
[
"Apache-2.0"
] | 86
|
2016-11-17T17:39:13.000Z
|
2021-06-01T15:19:05.000Z
|
tests/logic/meta_attribute_mappers_test.py
|
tomzhang/schematizer
|
035845d27945a05db475f00eb76f59e8825dbaa4
|
[
"Apache-2.0"
] | 2
|
2016-12-01T20:57:43.000Z
|
2021-09-28T09:26:25.000Z
|
tests/logic/meta_attribute_mappers_test.py
|
tomzhang/schematizer
|
035845d27945a05db475f00eb76f59e8825dbaa4
|
[
"Apache-2.0"
] | 26
|
2016-11-29T22:38:11.000Z
|
2021-03-02T19:44:17.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from sqlalchemy.orm import exc as orm_exc
from schematizer.logic import meta_attribute_mappers as meta_attr_logic
from schematizer.models import Namespace
from schematizer.models import Source
from schematizer.models.database import session
from schematizer.models.exceptions import EntityNotFoundError
from schematizer.models.meta_attribute_mapping_store import (
MetaAttributeMappingStore as meta_attr_model)
from schematizer_testing import factories
from schematizer_testing.asserts import assert_equal_meta_attribute_mapping
from tests.models.testing_db import DBTestCase
class RegisterAndDeleteMetaAttributeBase(DBTestCase):
def assert_equal_meta_attr_partial(self, expected, actual):
assert expected.entity_type == actual.entity_type
assert expected.entity_id == actual.entity_id
assert expected.meta_attr_schema_id == actual.meta_attr_schema_id
def _setup_meta_attribute_mapping(self, meta_attr_schema, entity_id):
factories.create_meta_attribute_mapping(
meta_attr_schema.id,
self.entity_model.__name__,
entity_id
)
def test_invalid_entity_id_fails(self, meta_attr_schema):
fake_entity_id = 0
with pytest.raises(EntityNotFoundError):
meta_attr_logic.register_meta_attribute_for_entity(
self.entity_model,
fake_entity_id,
meta_attr_schema.id
)
with pytest.raises(EntityNotFoundError):
meta_attr_logic.delete_meta_attribute_mapping_for_entity(
self.entity_model,
fake_entity_id,
meta_attr_schema.id
)
def test_register_first_time(self, meta_attr_schema):
actual = meta_attr_logic.register_meta_attribute_for_entity(
self.entity_model,
self.entity.id,
meta_attr_schema.id
)
expected = meta_attr_model(
entity_type=self.entity_model.__name__,
entity_id=self.entity.id,
meta_attr_schema_id=meta_attr_schema.id
)
self.assert_equal_meta_attr_partial(expected, actual)
def test_idempotent_registration(self, meta_attr_schema):
self._setup_meta_attribute_mapping(meta_attr_schema, self.entity.id)
first_result = meta_attr_logic.register_meta_attribute_for_entity(
self.entity_model,
self.entity.id,
meta_attr_schema.id
)
second_result = meta_attr_logic.register_meta_attribute_for_entity(
self.entity_model,
self.entity.id,
meta_attr_schema.id
)
expected = meta_attr_model(
entity_type=self.entity_model.__name__,
entity_id=self.entity.id,
meta_attr_schema_id=meta_attr_schema.id
)
self.assert_equal_meta_attr_partial(expected, first_result)
assert_equal_meta_attribute_mapping(first_result, second_result)
def test_delete_mapping(self, meta_attr_schema):
self._setup_meta_attribute_mapping(meta_attr_schema, self.entity.id)
actual = meta_attr_logic.delete_meta_attribute_mapping_for_entity(
self.entity_model,
self.entity.id,
meta_attr_schema.id
)
expected = meta_attr_model(
entity_type=self.entity_model.__name__,
entity_id=self.entity.id,
meta_attr_schema_id=meta_attr_schema.id
)
self.assert_equal_meta_attr_partial(expected, actual)
with pytest.raises(orm_exc.NoResultFound):
session.query(
meta_attr_model
).filter(
meta_attr_model.entity_type == self.entity_model.__name__,
meta_attr_model.entity_id == self.entity.id,
meta_attr_model.meta_attr_schema_id == meta_attr_schema.id
).one()
def test_delete_non_existent_mapping(self, meta_attr_schema):
with pytest.raises(EntityNotFoundError):
meta_attr_logic.delete_meta_attribute_mapping_for_entity(
self.entity_model,
self.entity.id,
meta_attr_schema.id
)
@pytest.mark.usefixtures('setup_test')
class TestRegisterAndDeleteMetaAttributeForNamespace(
RegisterAndDeleteMetaAttributeBase
):
@pytest.fixture
def setup_test(self, yelp_namespace):
self.entity_model = Namespace
self.entity = yelp_namespace
@pytest.mark.usefixtures('setup_test')
class TestRegisterAndDeleteMetaAttributeForSource(
RegisterAndDeleteMetaAttributeBase
):
@pytest.fixture
def setup_test(self, biz_source):
self.entity_model = Source
self.entity = biz_source
class GetMetaAttributeBaseTest(DBTestCase):
"""MetaAttribute Mappings are supposed to be additive. In other words, a
Source should have all the meta attributes for itself and the namespace it
belongs to.
Below are the entity structures and the meta attribute mappings I will be
testing with:
NamespaceA:
- SourceA1
+----+-------------+-----------+--------------------------+
| id | entity_type | entity_id | meta_attr_schema |
+----+-------------+-----------+--------------------------+
| 1 | namespace | A | namespace_meta_attr |
| 2 | source | A1 | source_meta_attr |
+----+-------------+-----------+--------------------------+
"""
@pytest.fixture
def dummy_namespace(self):
return factories.create_namespace('yelp_meta_A')
@pytest.fixture
def dummy_src(self, dummy_namespace):
return factories.create_source(
namespace_name=dummy_namespace.name,
source_name='meta_source_A_1',
owner_email='test-meta-src@yelp.com'
)
def _create_meta_attribute_schema(
self,
source_name,
meta_attr_schema_json,
meta_attr_schema_elements
):
return factories.create_avro_schema(
meta_attr_schema_json,
meta_attr_schema_elements,
topic_name='.'.join(['yelp_meta', source_name, '1']),
namespace='yelp_meta',
source=source_name
)
@pytest.fixture
def namespace_meta_attr(
self,
meta_attr_schema_json,
meta_attr_schema_elements
):
return self._create_meta_attribute_schema(
'namespace_meta_attr',
meta_attr_schema_json,
meta_attr_schema_elements
)
@pytest.fixture
def source_meta_attr(
self,
meta_attr_schema_json,
meta_attr_schema_elements
):
return self._create_meta_attribute_schema(
'source_meta_attr',
meta_attr_schema_json,
meta_attr_schema_elements
)
@pytest.fixture
def namespace_meta_attr_mapping(
self,
namespace_meta_attr,
dummy_namespace
):
factories.create_meta_attribute_mapping(
namespace_meta_attr.id,
Namespace.__name__,
dummy_namespace.id
)
@pytest.fixture
def source_meta_attr_mapping(self, source_meta_attr, dummy_src):
factories.create_meta_attribute_mapping(
source_meta_attr.id,
Source.__name__,
dummy_src.id
)
@pytest.mark.usefixtures(
'namespace_meta_attr_mapping',
'source_meta_attr_mapping',
)
class TestGetMetaAttributeMappings(GetMetaAttributeBaseTest):
def test_get_mapping_by_namespace(
self,
dummy_namespace,
namespace_meta_attr
):
actual = meta_attr_logic.get_meta_attributes_by_namespace(
dummy_namespace.id
)
expected = [namespace_meta_attr.id]
assert actual == expected
def test_get_mapping_by_source(
self,
dummy_src,
namespace_meta_attr,
source_meta_attr
):
actual = meta_attr_logic.get_meta_attributes_by_source(dummy_src.id)
expected = [namespace_meta_attr.id, source_meta_attr.id]
assert actual == expected
@pytest.mark.parametrize('getter_method', [
meta_attr_logic.get_meta_attributes_by_namespace,
meta_attr_logic.get_meta_attributes_by_source,
])
def test_get_non_existing_mapping(self, getter_method):
fake_id = 0
with pytest.raises(EntityNotFoundError):
getter_method(fake_id)
| 33.604396
| 78
| 0.664923
|
8cdd49a00a0482d3b7552334f7886066ea555d9a
| 2,300
|
py
|
Python
|
.leetcode/784.letter-case-permutation.py
|
KuiyuanFu/PythonLeetCode
|
8962df2fa838eb7ae48fa59de272ba55a89756d8
|
[
"MIT"
] | null | null | null |
.leetcode/784.letter-case-permutation.py
|
KuiyuanFu/PythonLeetCode
|
8962df2fa838eb7ae48fa59de272ba55a89756d8
|
[
"MIT"
] | null | null | null |
.leetcode/784.letter-case-permutation.py
|
KuiyuanFu/PythonLeetCode
|
8962df2fa838eb7ae48fa59de272ba55a89756d8
|
[
"MIT"
] | null | null | null |
# @lc app=leetcode id=784 lang=python3
#
# [784] Letter Case Permutation
#
# https://leetcode.com/problems/letter-case-permutation/description/
#
# algorithms
# Medium (69.67%)
# Likes: 2697
# Dislikes: 130
# Total Accepted: 174K
# Total Submissions: 246K
# Testcase Example: '"a1b2"'
#
# Given a string s, we can transform every letter individually to be lowercase
# or uppercase to create another string.
#
# Return a list of all possible strings we could create. You can return the
# output in any order.
#
#
# Example 1:
#
#
# Input: s = "a1b2"
# Output: ["a1b2","a1B2","A1b2","A1B2"]
#
#
# Example 2:
#
#
# Input: s = "3z4"
# Output: ["3z4","3Z4"]
#
#
# Example 3:
#
#
# Input: s = "12345"
# Output: ["12345"]
#
#
# Example 4:
#
#
# Input: s = "0"
# Output: ["0"]
#
#
#
# Constraints:
#
#
# s will be a string with length between 1 and 12.
# s will consist only of letters or digits.
#
#
#
# @lc tags=tree
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# Generate every upper/lower-case variant of the string.
#
# @lc idea=end
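# For example (assuming s = "a1b2"): the per-character option lists are
# [['A', 'a'], ['1'], ['B', 'b'], ['2']], and itertools.product joins them into
# "A1B2", "A1b2", "a1B2", "a1b2" (the problem accepts any order).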
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
def letterCasePermutation(self, s: str) -> List[str]:
return [
''.join(l) for l in \
product(\
*[[c.upper(), c.lower()] if c.isalpha() else [c] for c in s]\
)
]
pass
# @lc code=end
# @lc main=start
if __name__ == '__main__':
print('Example 1:')
print('Input : ')
print('s = "a1b2"')
    print('Expected :')
print('["a1b2","a1B2","A1b2","A1B2"]')
print('Output :')
print(str(Solution().letterCasePermutation("a1b2")))
print()
print('Example 2:')
print('Input : ')
print('s = "3z4"')
    print('Expected :')
print('["3z4","3Z4"]')
print('Output :')
print(str(Solution().letterCasePermutation("3z4")))
print()
print('Example 3:')
print('Input : ')
print('s = "12345"')
    print('Expected :')
print('["12345"]')
print('Output :')
print(str(Solution().letterCasePermutation("12345")))
print()
print('Example 4:')
print('Input : ')
print('s = "0"')
    print('Expected :')
print('["0"]')
print('Output :')
print(str(Solution().letterCasePermutation("0")))
print()
pass
# @lc main=end
| 17.424242
| 81
| 0.568261
|
82e180da8ee6788c1c1757c5f16616889e681dd6
| 2,714
|
py
|
Python
|
research/object_detection/utils/dataset_util.py
|
leejang/tensorflow_models
|
20ed9860902c59cc81f161e6027daafc9a936bed
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/utils/dataset_util.py
|
leejang/tensorflow_models
|
20ed9860902c59cc81f161e6027daafc9a936bed
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/utils/dataset_util.py
|
leejang/tensorflow_models
|
20ed9860902c59cc81f161e6027daafc9a936bed
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for creating TFRecord data sets."""
import tensorflow as tf
def int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def int64_list_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def float_feature(value):
  return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def bytes_list_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def float_list_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
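# Usage sketch (encoded_jpeg and writer are hypothetical names): the helpers above
# combine into a tf.train.Example for a TFRecord file.
#   example = tf.train.Example(features=tf.train.Features(feature={
#       'image/height': int64_feature(480),
#       'image/encoded': bytes_feature(encoded_jpeg),
#       'image/object/bbox/xmin': float_list_feature([0.1, 0.4]),
#   }))
#   writer.write(example.SerializeToString())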
def read_examples_list(path):
"""Read list of training or validation examples.
The file is assumed to contain a single example per line where the first
token in the line is an identifier that allows us to find the image and
annotation xml for that example.
For example, the line:
xyz 3
would allow us to find files xyz.jpg and xyz.xml (the 3 would be ignored).
Args:
path: absolute path to examples list file.
Returns:
list of example identifiers (strings).
"""
with tf.gfile.GFile(path) as fid:
lines = fid.readlines()
return [line.strip().split(' ')[0] for line in lines]
def recursive_parse_xml_to_dict(xml):
"""Recursively parses XML contents to python dict.
We assume that `object` tags are the only ones that can appear
multiple times at the same level of a tree.
Args:
xml: xml tree obtained by parsing XML file contents using lxml.etree
Returns:
Python dictionary holding XML contents.
"""
if not xml:
return {xml.tag: xml.text}
result = {}
for child in xml:
child_result = recursive_parse_xml_to_dict(child)
if child.tag != 'object':
result[child.tag] = child_result[child.tag]
else:
if child.tag not in result:
result[child.tag] = []
result[child.tag].append(child_result[child.tag])
return {xml.tag: result}
| 30.840909
| 80
| 0.713338
|
827a372ecd72f3ec58eecd63e1480ad5ed3be47c
| 3,200
|
py
|
Python
|
app/utils/notification_utils.py
|
ORANZINO/bouquet_server
|
2ce1bb59df15297878c555dd97e0f27b5202ed02
|
[
"MIT"
] | 7
|
2022-01-20T11:50:39.000Z
|
2022-01-27T09:39:27.000Z
|
app/utils/notification_utils.py
|
ORANZINO/bouquet_server
|
2ce1bb59df15297878c555dd97e0f27b5202ed02
|
[
"MIT"
] | null | null | null |
app/utils/notification_utils.py
|
ORANZINO/bouquet_server
|
2ce1bb59df15297878c555dd97e0f27b5202ed02
|
[
"MIT"
] | 1
|
2022-01-20T11:51:50.000Z
|
2022-01-20T11:51:50.000Z
|
from exponent_server_sdk import (
DeviceNotRegisteredError,
PushClient,
PushMessage,
PushServerError,
PushTicketError,
)
from requests.exceptions import ConnectionError, HTTPError
from fastapi import APIRouter, Body, Depends
from sqlalchemy.orm import Session
from typing import Optional
from app.database.conn import db
from app.database.schema import Notifications, Characters, PushTokens
from datetime import timedelta
def generate_message(token, sender, receiver, category, created_at, post_id=None):
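    # The notification bodies below are Korean user-facing strings; roughly:
    #   LikePost    -> "{sender} likes {receiver}'s post."
    #   LikeComment -> "{sender} likes {receiver}'s comment."
    #   Comment     -> "{sender} commented on {receiver}'s post."
    #   Follow      -> "{sender} is now following {receiver}."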
result = {
'to': token,
'sound': 'default',
'category': category[0].lower() + category[1:]
}
if category == "LikePost":
result['body'] = f'{sender.name}님이 {receiver.name}님의 게시글을 좋아해요.'
result['data'] = {'screen': 'NotiTabPostStack',
'params': sender.name,
'created_at': created_at,
'from': {'name': sender.name, 'profile_img': sender.profile_img}}
elif category == "LikeComment":
result['body'] = f'{sender.name}님이 {receiver.name}님의 댓글을 좋아해요.'
result['data'] = {'screen': 'NotiTabPostStack',
'params': post_id,
'created_at': created_at,
'from': {'name': sender.name, 'profile_img': sender.profile_img}}
elif category == "Comment":
result['body'] = f'{sender.name}님이 {receiver.name}님의 게시글에 댓글을 달았어요.'
result['data'] = {'screen': 'NotiTabPostStack',
'params': post_id,
'created_at': created_at,
'from': {'name': sender.name, 'profile_img': sender.profile_img}}
elif category == "Follow":
result['body'] = f'{sender.name}님이 {receiver.name}님을 팔로우해요.'
result['data'] = {'screen': 'NotiTabProfileDetailStack',
'params': post_id,
'created_at': created_at,
'from': {'name': sender.name, 'profile_img': sender.profile_img}}
return result
def send_notification(sender_id: int, receiver_id: int, category: str, post_id: Optional[int] = None,
session: Session = Depends(db.session)):
if sender_id != receiver_id:
sender, receiver = Characters.get(session, id=sender_id), Characters.get(session, id=receiver_id)
token = PushTokens.get(session, user_id=receiver.user_id)
new_notification = Notifications.create(session, True, sender_id=sender_id, receiver_id=receiver_id, category=category, post_id=post_id)
if token:
token = token.token
try:
response = PushClient().publish(
PushMessage(**generate_message(
token, sender, receiver, category, (new_notification.created_at + timedelta(hours=9)).isoformat(), post_id)))
response.validate_response()
except DeviceNotRegisteredError:
print("DeviceNotRegisteredError")
except PushServerError:
print("PushServerError")
except PushTicketError:
print("PushTicketError")
| 44.444444
| 144
| 0.59625
|
d4243183a351a28d0a110a618ac6a5ae7fcf9b08
| 2,341
|
py
|
Python
|
drafts/twitterology/examples/examine_tweets.py
|
tekhnus/misc
|
cf4c6e29434c546e3c29f24f7bb16a0ac65005f5
|
[
"Unlicense"
] | null | null | null |
drafts/twitterology/examples/examine_tweets.py
|
tekhnus/misc
|
cf4c6e29434c546e3c29f24f7bb16a0ac65005f5
|
[
"Unlicense"
] | null | null | null |
drafts/twitterology/examples/examine_tweets.py
|
tekhnus/misc
|
cf4c6e29434c546e3c29f24f7bb16a0ac65005f5
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# Usage: examples/logistic_regression examples.db:track_hello
from sys import argv
from itertools import groupby, islice
from operator import itemgetter
from random import Random
import matplotlib
matplotlib.use("pdf")
import matplotlib.pyplot as plt
import tabulate
import twitterology as tw
import twitterology.features as tf
from model import MODEL
import numpy as np
np.set_printoptions(precision=2, suppress=True)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import cross_val_score, StratifiedKFold
from tqdm import tqdm
def link_to(user_id):
return "https://twitter.com/intent/user?user_id=" + user_id
def format_sample(sample):
return " ".join(
"{:.2f}".format(x) if isinstance(x, float)
else repr(x).decode("unicode-escape").encode("utf-8")
for x in sample
)
def main():
database, table = argv[1].split(":")
samples_a = dict(np.load("db/{}/samples_a.npy".format(table)))
samples_b = dict(np.load("db/{}/samples_b.npy".format(table)))
estimates = np.load("db/{}/estimates.npy".format(table))
coef = np.load("db/{}/coef.npy".format(table))
seen = set()
total = 0
count = 0
for user_a, user_b, proba in estimates:
proba = float(proba)
total += 1
if proba > 0.95 and user_a != user_b and (user_a not in seen or user_b not in seen):
count += 1
print("\n===", count, proba, "===\n")
print(link_to(user_a))
"""
print format_sample(samples_a[user_a])
print
"""
print(link_to(user_b))
"""
print format_sample(samples_b[user_b])
print
print MODEL.difference(samples_a[user_a], samples_b[user_b]) * coef
"""
seen.add(user_a)
seen.add(user_b)
    print("flagged:", count, "/", total)
    print("seen:", len(seen))
tab = tabulate.tabulate([(f.decode("utf-8"), "{:.2f}".format(c)) for f, c in zip(MODEL.features.labels, coef)],
tablefmt="latex")
with open("plots/{}/tab.tex".format(table), "w") as tabfile:
tabfile.write(tab.encode("utf-8"))
if __name__ == "__main__":
main()
| 27.541176
| 115
| 0.620248
|
0515849adb8dc07aea1994b95a672d50b9b55285
| 8,926
|
py
|
Python
|
src/config/device-manager/test/test_dm_bgp.py
|
jnpr-pranav/contrail-controller
|
428eee37c28c31830fd764315794e1a6e52720c1
|
[
"Apache-2.0"
] | 37
|
2020-09-21T10:42:26.000Z
|
2022-01-09T10:16:40.000Z
|
src/config/device-manager/test/test_dm_bgp.py
|
jnpr-pranav/contrail-controller
|
428eee37c28c31830fd764315794e1a6e52720c1
|
[
"Apache-2.0"
] | null | null | null |
src/config/device-manager/test/test_dm_bgp.py
|
jnpr-pranav/contrail-controller
|
428eee37c28c31830fd764315794e1a6e52720c1
|
[
"Apache-2.0"
] | 21
|
2020-08-25T12:48:42.000Z
|
2022-03-22T04:32:18.000Z
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
from __future__ import absolute_import
import sys
import gevent
import itertools
from cfgm_common.tests.test_common import retries
from cfgm_common.tests.test_common import retry_exc_handler
from vnc_api.vnc_api import *
from device_api.juniper_common_xsd import *
from device_manager.dm_utils import DMUtils
from .test_dm_common import *
from .test_dm_utils import FakeDeviceConnect
#
# All BGP related DM test cases should go here
#
class TestBgpDM(TestCommonDM):
def __init__(self, *args, **kwargs):
super(TestBgpDM, self).__init__(*args, **kwargs)
@retries(5, hook=retry_exc_handler)
def check_dm_bgp_hold_time_config(self, bgp_type, hold_time):
config = FakeDeviceConnect.get_xml_config()
bgp_groups = self.get_bgp_groups(config, bgp_type)
self.assertIn(hold_time, [gp.get_hold_time() for gp in bgp_groups or []])
return
# test hold time configuration
def verify_dm_bgp_hold_time_config(self):
bgp_router, pr = self.create_router('router' + self.id() , '1.1.1.1',
product=self.product)
self.set_hold_time(bgp_router, 100)
self._vnc_lib.bgp_router_update(bgp_router)
self.check_dm_bgp_hold_time_config('internal', 100)
bgp_router_fq = bgp_router.get_fq_name()
pr_fq = pr.get_fq_name()
self.delete_routers(bgp_router, pr)
self.wait_for_routers_delete(bgp_router_fq, pr_fq)
@retries(5, hook=retry_exc_handler)
def check_dm_bgp_export_policy(self, product):
config = FakeDeviceConnect.get_xml_config()
bgp_groups = self.get_bgp_groups(config)
for gp in bgp_groups or []:
if gp.get_type() == 'internal':
if 'qfx5' not in product:
self.assertEqual(gp.get_export(), DMUtils.make_ibgp_export_policy_name())
else:
self.assertIsNone(gp.get_export())
return
if gp.get_type() == 'external':
                self.assertNotEqual(gp.get_export(), DMUtils.make_ibgp_export_policy_name())
return
self.assertTrue(False)
return
# test iBgp export policy configuration
def verify_dm_bgp_export_policy(self):
bgp_router, pr = self.create_router('router' + self.id() , '1.1.1.1',
product=self.product)
self.check_dm_bgp_export_policy(self.product)
bgp_router_fq = bgp_router.get_fq_name()
pr_fq = pr.get_fq_name()
self.delete_routers(bgp_router, pr)
self.wait_for_routers_delete(bgp_router_fq, pr_fq)
    # Test Auth Configuration
@retries(5, hook=retry_exc_handler)
def check_bgp_auth_config(self, bgp_type, key):
config = FakeDeviceConnect.get_xml_config()
bgp_groups = self.get_bgp_groups(config, bgp_type)
self.assertIn(key, [gp.get_authentication_key() for gp in bgp_groups or []])
return
@retries(5, hook=retry_exc_handler)
def check_bgp_auth_neighbour_config(self, bgp_type, key):
config = FakeDeviceConnect.get_xml_config()
bgp_groups = self.get_bgp_groups(config, bgp_type)
self.assertIn(key, [neigh.get_authentication_key() for neigh in
itertools.chain.from_iterable([gp.get_neighbor() for gp in bgp_groups or []])])
return
# test bgp auth configuration
def verify_dm_md5_auth_config(self):
bgp_router, pr = self.create_router('router1' + self.id(), '1.1.1.1',
product=self.product)
self.set_auth_data(bgp_router, 0, 'bgppswd', 'md5')
self._vnc_lib.bgp_router_update(bgp_router)
gevent.sleep(1)
self.check_bgp_auth_config('internal', 'bgppswd')
        # bgp peering: validate auth configuration
bgp_router_peer, _ = self.create_router('router2' + self.id() , '20.2.2.2', product=self.product, ignore_pr=True)
families = AddressFamilies(['route-target', 'inet-vpn', 'e-vpn'])
auth = AuthenticationData('md5', [AuthenticationKeyItem(0, 'bgppswd-neigh')])
bgp_sess_attrs = [BgpSessionAttributes(address_families=families, auth_data=auth)]
bgp_sessions = [BgpSession(attributes=bgp_sess_attrs)]
bgp_router.add_bgp_router(bgp_router_peer, BgpPeeringAttributes(session=bgp_sessions))
self._vnc_lib.bgp_router_update(bgp_router)
self.check_bgp_auth_config('internal', 'bgppswd')
self.check_bgp_auth_config('external', 'bgppswd')
self.check_bgp_auth_neighbour_config('external', 'bgppswd-neigh')
bgp_peer_fq = bgp_router_peer.get_fq_name()
self.delete_routers(bgp_router_peer)
self.wait_for_routers_delete(bgp_peer_fq)
bgp_fq = bgp_router.get_fq_name()
pr_fq = pr.get_fq_name()
self.delete_routers(bgp_router, pr)
self.wait_for_routers_delete(bgp_fq, pr_fq)
#end test_dm_md5_auth_config
@retries(5, hook=retry_exc_handler)
def check_lo0_ip_config(self, ip_check=''):
config = FakeDeviceConnect.get_xml_config()
intfs = self.get_interfaces(config, "lo0")
if ip_check:
ips = self.get_ip_list(intfs[0], "v4", "0")
self.assertEqual(ip_check, ips[0])
else:
if not intfs or not self.get_ip_list(intfs[0], "v4", "0"):
return
self.assertTrue(False)
return
# end check_lo0_ip_config
@retries(5, hook=retry_exc_handler)
def check_tunnel_source_ip(self, ip_check='', look_for=True):
config = FakeDeviceConnect.get_xml_config()
tunnels = self.get_dynamic_tunnels(config) or DynamicTunnels()
if look_for:
self.assertIn(ip_check, [tunnel.source_address
for tunnel in tunnels.get_dynamic_tunnel()])
else:
self.assertNotIn(ip_check, [tunnel.source_address
for tunnel in tunnels.get_dynamic_tunnel()])
return
# end check_tunnel_source_ip
# test loopback ip configuration
def verify_dm_lo0_ip_config(self):
bgp_router, pr = self.create_router('router1' + self.id(), '1.1.1.1',
product=self.product)
self.check_lo0_ip_config()
tunnels_needed = True
if 'qfx5' in self.product:
tunnels_needed = False
pr.set_physical_router_loopback_ip("10.10.0.1")
self._vnc_lib.physical_router_update(pr)
self.check_lo0_ip_config("10.10.0.1/32")
self.check_tunnel_source_ip("10.10.0.1", tunnels_needed)
pr.set_physical_router_dataplane_ip("20.20.0.1")
self._vnc_lib.physical_router_update(pr)
self.check_tunnel_source_ip("20.20.0.1", tunnels_needed)
self.check_lo0_ip_config("10.10.0.1/32")
pr.set_physical_router_loopback_ip('')
self._vnc_lib.physical_router_update(pr)
self.check_lo0_ip_config()
self.check_tunnel_source_ip("20.20.0.1", tunnels_needed)
pr.set_physical_router_dataplane_ip('')
self._vnc_lib.physical_router_update(pr)
self.check_tunnel_source_ip("10.10.0.1", False)
self.check_tunnel_source_ip("20.20.0.1", False)
bgp_router_fq = bgp_router.get_fq_name()
pr_fq = pr.get_fq_name()
self.delete_routers(bgp_router, pr)
self.wait_for_routers_delete(bgp_router_fq, pr_fq)
@retries(5, hook=retry_exc_handler)
def check_router_id_config(self, ip_check=''):
config = FakeDeviceConnect.get_xml_config()
ri_opts = config.get_routing_options()
self.assertIsNotNone(ri_opts)
self.assertEqual(ip_check, ri_opts.get_router_id())
# end check_router_id_config
# test router id configuration
def verify_dm_router_id_config(self):
bgp_router, pr = self.create_router('router1' + self.id(), '1.1.1.1',
product=self.product)
# defaults to bgp address
self.check_router_id_config('1.1.1.1')
params = self.get_obj_param(bgp_router, 'bgp_router_parameters') or BgpRouterParams()
self.set_obj_param(params, 'identifier', '5.5.5.5')
self.set_obj_param(bgp_router, 'bgp_router_parameters', params)
self._vnc_lib.bgp_router_update(bgp_router)
        # if identifier is set, use it to configure router-id
self.check_router_id_config('5.5.5.5')
# cleanup
bgp_router_fq = bgp_router.get_fq_name()
pr_fq = pr.get_fq_name()
self.delete_routers(bgp_router, pr)
self.wait_for_routers_delete(bgp_router_fq, pr_fq)
# end test_dm_router_id_config
# end TestBgpDM
| 42.504762
| 121
| 0.660542
|
91a3c9af34722ba8fcf54b20735260f296b0d2aa
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/requests_toolbelt/sessions.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/requests_toolbelt/sessions.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/requests_toolbelt/sessions.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/75/18/ba/b72d03a68b06bd91c53e9e13a1d9e1813afdf08053e51f62e701317533
| 96
| 96
| 0.895833
|